| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
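The schema above describes one record per source file: the `content` column holds the file text, and the repository metadata (path, repo name, head commit, licenses, star/issue/fork counts and event dates) is flattened into three parallel column groups. As a minimal sketch of consuming such records, assuming the rows are already available as Python dicts keyed by these column names (the hypothetical `rows` iterable stands in for whatever loading step applies, e.g. reading parquet or JSON shards):

```python
from typing import Any, Dict, Iterable, Iterator


def filter_rows(rows: Iterable[Dict[str, Any]],
                min_alphanum: float = 0.25,
                max_line_len: int = 1000) -> Iterator[Dict[str, Any]]:
    """Yield rows whose content looks like ordinary hand-written source code.

    `rows` is assumed to be an iterable of dicts keyed by the columns listed
    in the schema table above; the thresholds are illustrative defaults.
    """
    for row in rows:
        # Nullable columns such as max_stars_count may hold None.
        stars = row.get("max_stars_count") or 0
        if row["alphanum_fraction"] < min_alphanum:
            continue  # mostly punctuation or binary-like content
        if row["max_line_length"] > max_line_len:
            continue  # likely minified or generated code
        yield {
            "path": row["max_stars_repo_path"],
            "repo": row["max_stars_repo_name"],
            "stars": stars,
            "content": row["content"],
        }
```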
Row 1: uasyncio/uasyncio/__init__.py

- hexsha: e162d03b2d444fb9c15d3bb29545b543108922e2
- size: 7,999 | ext: py | lang: Python
- repo (stars/issues/forks entries): ccooper21/micropython-lib @ d19253a222fd6ec789ce04949ab36abb6b9d30e7, licenses ["PSF-2.0"]
- max_stars_count: 1 (2019-07-31T12:39:53.000Z to 2019-07-31T12:39:53.000Z)
- max_issues_count: null
- max_forks_count: 2 (2017-11-21T16:53:03.000Z to 2021-07-29T08:47:14.000Z)
- content:
import uerrno
import uselect as select
import usocket as _socket
from uasyncio.core import *


DEBUG = 0
log = None


def set_debug(val):
    global DEBUG, log
    DEBUG = val
    if val:
        import logging
        log = logging.getLogger("uasyncio")


class PollEventLoop(EventLoop):

    def __init__(self, len=42):
        EventLoop.__init__(self, len)
        self.poller = select.poll()
        self.objmap = {}

    def add_reader(self, sock, cb, *args):
        if DEBUG and __debug__:
            log.debug("add_reader%s", (sock, cb, args))
        if args:
            self.poller.register(sock, select.POLLIN)
            self.objmap[id(sock)] = (cb, args)
        else:
            self.poller.register(sock, select.POLLIN)
            self.objmap[id(sock)] = cb

    def remove_reader(self, sock):
        if DEBUG and __debug__:
            log.debug("remove_reader(%s)", sock)
        self.poller.unregister(sock)
        del self.objmap[id(sock)]

    def add_writer(self, sock, cb, *args):
        if DEBUG and __debug__:
            log.debug("add_writer%s", (sock, cb, args))
        if args:
            self.poller.register(sock, select.POLLOUT)
            self.objmap[id(sock)] = (cb, args)
        else:
            self.poller.register(sock, select.POLLOUT)
            self.objmap[id(sock)] = cb

    def remove_writer(self, sock):
        if DEBUG and __debug__:
            log.debug("remove_writer(%s)", sock)
        try:
            self.poller.unregister(sock)
            self.objmap.pop(id(sock), None)
        except OSError as e:
            # StreamWriter.awrite() first tries to write to a socket,
            # and if that succeeds, yield IOWrite may never be called
            # for that socket, and it will never be added to poller. So,
            # ignore such error.
            if e.args[0] != uerrno.ENOENT:
                raise

    def wait(self, delay):
        if DEBUG and __debug__:
            log.debug("poll.wait(%d)", delay)
        # We need one-shot behavior (second arg of 1 to .poll())
        res = self.poller.ipoll(delay, 1)
        #log.debug("poll result: %s", res)
        # Remove "if res" workaround after
        # https://github.com/micropython/micropython/issues/2716 fixed.
        if res:
            for sock, ev in res:
                cb = self.objmap[id(sock)]
                if ev & (select.POLLHUP | select.POLLERR):
                    # These events are returned even if not requested, and
                    # are sticky, i.e. will be returned again and again.
                    # If the caller doesn't do proper error handling and
                    # unregister this sock, we'll busy-loop on it, so we
                    # as well can unregister it now "just in case".
                    self.remove_reader(sock)
                if DEBUG and __debug__:
                    log.debug("Calling IO callback: %r", cb)
                if isinstance(cb, tuple):
                    cb[0](*cb[1])
                else:
                    self.call_soon(cb)


class StreamReader:

    def __init__(self, polls, ios=None):
        if ios is None:
            ios = polls
        self.polls = polls
        self.ios = ios

    def read(self, n=-1):
        while True:
            yield IORead(self.polls)
            res = self.ios.read(n)
            if res is not None:
                break
            # This should not happen for real sockets, but can easily
            # happen for stream wrappers (ssl, websockets, etc.)
            #log.warn("Empty read")
        if not res:
            yield IOReadDone(self.polls)
        return res

    def readexactly(self, n):
        buf = b""
        while n:
            yield IORead(self.polls)
            res = self.ios.read(n)
            assert res is not None
            if not res:
                yield IOReadDone(self.polls)
                break
            buf += res
            n -= len(res)
        return buf

    def readline(self):
        if DEBUG and __debug__:
            log.debug("StreamReader.readline()")
        buf = b""
        while True:
            yield IORead(self.polls)
            res = self.ios.readline()
            assert res is not None
            if not res:
                yield IOReadDone(self.polls)
                break
            buf += res
            if buf[-1] == 0x0a:
                break
        if DEBUG and __debug__:
            log.debug("StreamReader.readline(): %s", buf)
        return buf

    def aclose(self):
        yield IOReadDone(self.polls)
        self.ios.close()

    def __repr__(self):
        return "<StreamReader %r %r>" % (self.polls, self.ios)


class StreamWriter:

    def __init__(self, s, extra):
        self.s = s
        self.extra = extra

    def awrite(self, buf, off=0, sz=-1):
        # This method is called awrite (async write) to not proliferate
        # incompatibility with original asyncio. Unlike original asyncio
        # whose .write() method is both not a coroutine and guaranteed
        # to return immediately (which means it has to buffer all the
        # data), this method is a coroutine.
        if sz == -1:
            sz = len(buf) - off
        if DEBUG and __debug__:
            log.debug("StreamWriter.awrite(): spooling %d bytes", sz)
        while True:
            res = self.s.write(buf, off, sz)
            # If we spooled everything, return immediately
            if res == sz:
                if DEBUG and __debug__:
                    log.debug("StreamWriter.awrite(): completed spooling %d bytes", res)
                return
            if res is None:
                res = 0
            if DEBUG and __debug__:
                log.debug("StreamWriter.awrite(): spooled partial %d bytes", res)
            assert res < sz
            off += res
            sz -= res
            yield IOWrite(self.s)
            #assert s2.fileno() == self.s.fileno()
            if DEBUG and __debug__:
                log.debug("StreamWriter.awrite(): can write more")

    # Write piecewise content from iterable (usually, a generator)
    def awriteiter(self, iterable):
        for buf in iterable:
            yield from self.awrite(buf)

    def aclose(self):
        yield IOWriteDone(self.s)
        self.s.close()

    def get_extra_info(self, name, default=None):
        return self.extra.get(name, default)

    def __repr__(self):
        return "<StreamWriter %r>" % self.s


def open_connection(host, port):
    if DEBUG and __debug__:
        log.debug("open_connection(%s, %s)", host, port)
    s = _socket.socket()
    s.setblocking(False)
    ai = _socket.getaddrinfo(host, port)
    addr = ai[0][4]
    try:
        s.connect(addr)
    except OSError as e:
        if e.args[0] != uerrno.EINPROGRESS:
            raise
    if DEBUG and __debug__:
        log.debug("open_connection: After connect")
    yield IOWrite(s)
    # if __debug__:
    #     assert s2.fileno() == s.fileno()
    if DEBUG and __debug__:
        log.debug("open_connection: After iowait: %s", s)
    return StreamReader(s), StreamWriter(s, {})


def start_server(client_coro, host, port, backlog=10):
    if DEBUG and __debug__:
        log.debug("start_server(%s, %s)", host, port)
    s = _socket.socket()
    s.setblocking(False)
    ai = _socket.getaddrinfo(host, port)
    addr = ai[0][4]
    s.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
    s.bind(addr)
    s.listen(backlog)
    while True:
        if DEBUG and __debug__:
            log.debug("start_server: Before accept")
        yield IORead(s)
        if DEBUG and __debug__:
            log.debug("start_server: After iowait")
        s2, client_addr = s.accept()
        s2.setblocking(False)
        if DEBUG and __debug__:
            log.debug("start_server: After accept: %s", s2)
        extra = {"peername": client_addr}
        yield client_coro(StreamReader(s2), StreamWriter(s2, extra))


import uasyncio.core
uasyncio.core._event_loop_class = PollEventLoop
- avg_line_length: 31.868526 | max_line_length: 88 | alphanum_fraction: 0.554694
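This record is the poll-based I/O layer of the old generator-style uasyncio package. Below is a minimal usage sketch, not taken from the record: it assumes a MicroPython environment where this module is importable as `uasyncio` and where `uasyncio.core` supplies the scheduling interface referenced here (`get_event_loop`, `call_soon`, `run_forever`); the `echo` handler and port number are illustrative only.

```python
# A minimal sketch, assuming this file is installed as the "uasyncio"
# package together with uasyncio.core on a MicroPython board.
import uasyncio as asyncio


def echo(reader, writer):
    # Old-style uasyncio coroutines are plain generators driven with
    # "yield from" rather than async/await.
    line = yield from reader.readline()
    yield from writer.awrite(line)   # echo the line back
    yield from writer.aclose()


loop = asyncio.get_event_loop()
# start_server() is itself a coroutine; schedule it and run the loop.
loop.call_soon(asyncio.start_server(echo, "0.0.0.0", 8266))
loop.run_forever()
```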
Row 2: meiduo_mall/meiduo_mall/apps/users/urls.py

- hexsha: 49c540ff6369f48774894a4bccac4120314f0ac6
- size: 895 | ext: py | lang: Python
- repo (stars/issues/forks entries): infotit/mdmall @ c1d215ca6314a4ebe4aa1ffdeb6ec6b3486ec751, licenses ["MIT"]
- max_stars_count: null | max_issues_count: null | max_forks_count: null
- content:
from django.conf.urls import url
from . import views
from rest_framework_jwt.views import obtain_jwt_token

urlpatterns = [
    url(r'^users/$', views.UserView.as_view()),
    url(r'usernames/(?P<username>\w{5,20})/count/', views.UsernameCountView.as_view()),
    url(r'mobiles/(?P<mobile>1[345789]\d{9})/count/', views.MobileCountView.as_view()),
    url(r'^authorizations/$', obtain_jwt_token),
    # Obtain the token used to send an SMS verification code
    url(r'^accounts/(?P<account>\w{4,20})/sms/token/$', views.SMSCodeTokenView.as_view()),
    # Obtain the token used to change the password
    url(r'^accounts/(?P<account>\w{4,20})/password/token/$', views.PasswordTokenView.as_view()),
    # Reset the password
    url(r'^users/(?P<pk>\d+)/password/$', views.PasswordView.as_view()),
    url(r'^user/$', views.UserDetailView.as_view()),
    # Email
    url(r'^emails/$', views.EmailView.as_view()),
    url(r'^emails/verification/$', views.EmailVerifyView.as_view()),
]
- avg_line_length: 38.913043 | max_line_length: 96 | alphanum_fraction: 0.672626
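This record is a Django REST framework URLconf: each route maps a regex (with named groups such as `username` and `mobile`) to a class-based view, and the `count` endpoints are existence checks used during registration. The corresponding `views.py` is not part of this record, so the following is only a hypothetical sketch of what the view behind `usernames/<username>/count/` could look like, assuming DRF and the project's user model:

```python
# Hypothetical sketch -- the real views.py is not included in this record.
from django.contrib.auth import get_user_model
from rest_framework.response import Response
from rest_framework.views import APIView


class UsernameCountView(APIView):
    """Return how many users already have the given username (0 or 1)."""

    def get(self, request, username):
        count = get_user_model().objects.filter(username=username).count()
        return Response({"username": username, "count": count})
```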
Row 3: manage.py

- hexsha: 45d71276637a8c0bd5e55e2999fb5a8f5d572a76
- size: 625 | ext: py | lang: Python
- max_stars repo: Guo-xuejian/- @ 100114eaba3fe4cc4f6ac7c67e2266b44d1a245b, licenses ["MulanPSL-1.0"], count 1 (2022-01-26T12:58:30.000Z to 2022-01-26T12:58:30.000Z)
- max_issues / max_forks repo: Guo-xuejian/bugua @ 100114eaba3fe4cc4f6ac7c67e2266b44d1a245b, licenses ["MulanPSL-1.0"], counts null
- content:
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bugua.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
- avg_line_length: 28.409091 | max_line_length: 73 | alphanum_fraction: 0.6816
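This record is the stock Django entry point: it points `DJANGO_SETTINGS_MODULE` at `bugua.settings` and hands `sys.argv` to Django's command dispatcher. A minimal sketch of driving that same dispatcher programmatically, where the `migrate` and `check` arguments are illustrative only:

```python
# Sketch: invoking Django management commands without the manage.py wrapper.
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bugua.settings")

from django.core.management import execute_from_command_line

# Equivalent to "python manage.py migrate" followed by "python manage.py check".
execute_from_command_line(["manage.py", "migrate"])
execute_from_command_line(["manage.py", "check"])
```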
Row 4: smlnjkernel.py

- hexsha: f94d7f702529a4b6955a17716f87ec4c8ed25ad4
- size: 98,270 | ext: py | lang: Python
- repo (stars/issues/forks entries): twyair/simple-ismlnj @ f189108fac073b80a97597f587bfa769b20ee2a2, licenses ["MIT"]
- max_stars_count: null | max_issues_count: null
- max_forks_count: 1 (2022-03-14T18:29:32.000Z to 2022-03-14T18:29:32.000Z)
- content:
import html
import json
import re
from typing import Dict, List, Optional, Any
import re
import signal

from ipykernel.kernelbase import Kernel
from ipykernel.kernelapp import IPKernelApp
import pexpect
from subprocess import check_output

crlf_pat = re.compile(r"[\r\n]+")
REGEX_WORD = re.compile(r"(\w+)$")

SML_KEYWORDS = sorted(
    [
        "fun",
        "true",
        "false",
        "orelse",
        "andalso",
        "if",
        "then",
        "else",
        "val",
        "let",
        "in",
        "end",
        "fn",
        "type",
        "datatype",
        "of",
        "case",
        "raise",
        "exception",
        "handle",
        "use",
        "real",
        "int",
    ]
)


class REPLWrapper:
    def __init__(self, cmd, orig_prompt: str, continuation_prompt: str):
        self.child = pexpect.spawn(cmd, echo=False, encoding="utf-8")
        self.prompt = re.compile(orig_prompt)
        self.continuation_prompt = re.compile(continuation_prompt)
        self._expect_prompt(timeout=1)

    def _expect_prompt(self, timeout=-1):
        return self.child.expect_list(
            [self.prompt, self.continuation_prompt], timeout=timeout
        )

    def run_command(self, command: str, timeout: Optional[int] = -1) -> Optional[str]:
        if not command:
            raise ValueError("No command was given")
        self.child.sendline(command)
        # Command was fully submitted, now wait for the next prompt
        if self._expect_prompt(timeout=timeout) == 1:
            # We got the continuation prompt - command was incomplete
            self.child.kill(signal.SIGINT)
            self._expect_prompt(timeout=1)
            raise ValueError(
                "Continuation prompt found - input was incomplete:\n" + command
            )
        return self.child.before

    def get_output(self) -> Optional[str]:
        return self.child.before
PRIMARY_PROMPT = ">>>"
SECONDARY_PROMPT = "="
ERRORS_INFO: List[Dict[str, Any]] = [
{
"code": 1,
"msg": "argument of raise is not an exception",
"verbose": "The expression following the <code><b>raise</b></code> keyword should evaluate to an exception value, i.e. a value of type <code>exn</code>. In this case, the value has some other, inappropriate type. E.g.:",
"info": "<dd>\n The expression following the <code><b>raise</b></code> keyword should\n evaluate to an exception value, i.e. a value of type <code>exn</code>.\n In this case, the value has some other, inappropriate type. E.g.:\n <pre> raise 3;\n <i>stdIn:16.7 Error: argument of raise is not an exception [literal]\n raised: int\n in expression:\n raise 3</i>\n </pre>\n \n "
},
{
"code": 2,
"msg": "argument type variables in datatype replication",
"verbose": "In a datatype replication declaration, neither the type name on the left hand side nor the type path (longid) on the right hand side should be preceeded by formal type variable arguments, even if the right hand side datatype is of arity n>0.",
"info": "<dd>\n In a datatype replication declaration, neither the type name on the\n left hand side nor the type path (longid) on the right hand side\n should be preceeded by formal type variable arguments, even if the\n right hand side datatype is of arity n>0.\n <pre> datatype 'a T = A of 'a;\n <i>datatype 'a T = A of 'a</i>\n \n datatype 'a T1 = datatype T;\n <i>stdIn:18.1-18.28 Error: argument type variables in datatype replication</i>\n \n datatype T1 = datatype T;\n <i>datatype 'a T = A of 'a</i>\n </pre>\n \n "
},
{
"code": 3,
"msg": "can't find function arguments in clause",
"verbose": "This occurs when an formal parameter pattern is not supplied on the left hand side in a <code>fun</code> declaration, or one of the formal parameters of an infix function symbol is missing.",
"info": "<dd>\n This occurs when an formal parameter pattern is not supplied on the\n left hand side in a <code>fun</code> declaration, or one of the\n formal parameters of an infix function symbol is missing.\n <pre> fun f = 3;\n <i>stdIn:1.5 Error: can't find function arguments in clause</i>\n \n infix 3 ++;\n <i>infix 3 ++</i>\n \n fun (x xx) = 3;\n <i>stdIn:1.5-2.6 Error: can't find function arguments in clause\n stdIn:1.5-2.6 Error: illegal function symbol in clause</i>\n </pre>\n \n "
},
{
"code": 4,
"msg": "case object and rules don't agree",
"verbose": "The <em> case object </em> is the expression following the <code><b>case</b></code> keyword. It's type must agree with the type of the lhs patterns in the <em>rules</em> (<code><i>pat</i> => <i>exp</i></code>) following the <code><b>of</b></code> keyword. All the patterns of the rules also have to agree in type, but that is another error.",
"info": "<dd>\n The <em> case object </em> is the expression following the <code><b>case</b></code>\n keyword. It's type must agree with the type of the lhs patterns in\n the <em>rules</em> (<code><i>pat</i> => <i>exp</i></code>) following the\n <code><b>of</b></code> keyword. All the patterns of the rules also have\n to agree in type, but that is another error.\n <pre> case 3\n of true => 1\n | false => 2;\n <i>stdIn:1.1-25.16 Error: case object and rules don't agree [literal]\n rule domain: bool\n object: int\n in expression:\n (case 3\n of true => 1\n | false => 2)</i>\n </pre>\n \n "
},
{
"code": 5,
"msg": "clauses don't all have function name",
"verbose": "In a <code>fun</code> definition, the function name must appear in each clause. If it is omitted from one or more clauses, this error results.",
"info": "<dd>\n In a <code>fun</code> definition, the function name must appear in\n each clause. If it is omitted from one or more clauses, this error results.\n <pre> fun f nil = 1\n | (x::y) = x;\n <i>stdIn:1.5-17.15 Error: clauses don't all have function name</i>\n </pre>\n This error is also reported when the function name in two\n clauses of the function definition differ, for instance because\n of a misspelling.\n <pre> fun test (SOME s) = true\n | teat (NONE) = false;\n stdIn:120.5-121.24 Error: clauses don't all have function name\n </pre>\n \n "
},
{
"code": 6,
"msg": "clauses don't all have same number of patterns",
"verbose": "In a <code>fun</code> declaration, each clause, or rule, separated by <code>|</code> (vertical bar symbol), has to have the same number of curried arguments.",
"info": "<dd>\n In a <code>fun</code> declaration, each clause, or rule, separated by\n <code>|</code> (vertical bar symbol), has to have the same number of\n curried arguments.\n <pre> fun f x y = 3\n | f a b c = 4;\n <i>stdIn:1.5-26.16 Error: clauses don't all have same number of patterns\n stdIn:24.6-26.16 Error: types of rules don't agree [tycon mismatch]\n earlier rule(s): 'Z * 'Y -> int\n this rule: 'X * 'W * 'V -> int\n in rule:\n (a,b,c) => 4</i>\n </pre>\n \n "
},
{
"code": 7,
"msg": "constant constructor applied to argument in pattern: %",
"verbose": "A constant constructor like <code>nil</code> can't be applied to an argument in a pattern.",
"info": "<dd>\n A constant constructor like <code>nil</code> can't be applied to an\n argument in a pattern.\n <pre> val nil x = [];\n <i>stdIn:1.5-24.8 Error: constant constructor applied to argument in pattern:nil</i>\n </pre>\n \n "
},
{
"code": 8,
"msg": "constructor and argument don't agree in pattern",
"verbose": "A nonconstant constructor in a pattern must be applied to an argument pattern of the appropriate type (i.e. the domain type of the constructor).",
"info": "<dd>\n A nonconstant constructor in a pattern must be applied to an argument\n pattern of the appropriate type (i.e. the domain type of the constructor).\n <pre> datatype t = A of int;\n val A true = A 3;\n <i>stdIn:1.1-26.3 Error: constructor and argument don't agree in pattern [tycon mismatch]\n constructor: int -> t\n argument: bool\n in pattern:\n A true</i>\n </pre>\n \n "
},
{
"code": 9,
"msg": "data constructor % used without argument in pattern",
"verbose": "A nonconstant constructor must be applied to an argument when it is used in a pattern (though not necessarily when it is used in an expression).",
"info": "<dd>\n A nonconstant constructor must be applied to an argument when it is\n used in a pattern (though not necessarily when it is used in an expression).\n <pre> datatype t = A of int\n val A = A 3;\n <i>stdIn:17.5-17.12 Error: data constructor A used without argument in pattern</i>\n </pre>\n \n "
},
{
"code": 10,
"msg": "datatype % does not match specification",
"verbose": "Usually occurs because the constructors for a datatype declared in a structure don't agree with the constructors (in names or number) of a signature that the structure must match.",
"info": "<dd>\n Usually occurs because the constructors for a datatype declared in a\n structure don't agree with the constructors (in names or number) of\n a signature that the structure must match.\n <pre> signature S =\n sig\n datatype t = A of int\n end;\n <i>signature S = sig datatype t = A of int end</i>\n \n structure A : S =\n struct\n datatype t = A of int | B\n end;\n <i>stdIn:1.1-27.4 Error: datatype t does not match specification\n constructors in actual only: B</i>\n </pre>\n \n "
},
{
"code": 11,
"msg": "datatype % has duplicate constructor name(s): %, %",
"verbose": "The names of the constructors of a given datatype must all be distinct.",
"info": "<dd>\n The names of the constructors of a given datatype must all be distinct.\n <pre> datatype t = A | B | A of int;\n <i>stdIn:1.1-26.5 Error: datatype t has duplicate constructor name(s): A</i>\n </pre>\n \n "
},
{
"code": 12,
"msg": "dependency cycle in instantiate",
"verbose": "The <em>instantiate</em> process takes a signature and creates a dummy structure matching that signature with no extraneous sharing (i.e. no types are identified that don't need to be). This process can fail because of various kinds of circularities. An example of one of the simpler forms of circularity would be:",
"info": "<dd>\n The <em>instantiate</em> process takes a signature and creates a dummy\n structure matching that signature with no extraneous sharing (i.e.\n no types are identified that don't need to be). This process can\n fail because of various kinds of circularities. An example of one\n of the simpler forms of circularity would be:\n <pre> signature S =\n sig\n type u\n datatype s = A of u\n sharing type u = s\n end;\n <i>stdIn:16.1-21.4 Error: dependency cycle in instantiate</i>\n </pre>\n By default, every signature is instantiated when it is declared, to\n detect errors as early as possible. However, signature instantiation\n is strictly only necessary when a signature is used as a functor\n parameter signature or in an opaque (<code>:></code>) signature constraint.\n <p>\n \n "
},
{
"code": 13,
"msg": "duplicate constructor specifications for % caused by include",
"verbose": "A signature should have only one specification of a given value or constructor name. A common way that multiple constructor specifications for a name can occur is if a constructor is specified explicitly, and also implicitly through an included signature.",
"info": "<dd>\n A signature should have only one specification of a given value or\n constructor name. A common way that multiple constructor\n specifications for a name can occur is if a constructor is\n specified explicitly, and also implicitly through an included\n signature.\n <pre> signature S =\n sig\n datatype t = A of int\n end;\n <i>signature S = sig datatype t = A of int end</i>\n \n signature T =\n sig\n datatype u = A\n include S\n end;\n <i>stdIn:27.3-28.13 Error: duplicate constructor specifications for A caused by include</i>\n </pre>\n \n "
},
{
"code": 14,
"msg": "duplicate exception declaration",
"verbose": "An exception name is declared multiple times in a single exception declaration.",
"info": "<dd>\n An exception name is declared multiple times in a single exception\n declaration.\n <pre> exception E of int\n and E of bool;\n <i>stdIn:17.1-18.14 Error: duplicate exception declaration: E</i>\n </pre>\n Note that it is ok if the same exception name is declared in different\n exception declarations, as in the following.\n <pre> exception E of int;\n <i>exception E of int</i>\n \n exception E of bool;\n <i>exception E of bool</i>\n </pre>\n \n "
},
{
"code": 15,
"msg": "duplicate function name in val rec dec",
"verbose": "When declaring several functions in a single <code>val rec</code> declaration, the names of the functions must be distinct.",
"info": "<dd>\n When declaring several functions in a single <code>val rec</code>\n declaration, the names of the functions must be distinct.\n <pre> val rec f = (fn x => x)\n and f = (fn y => y + 3);\n <i>stdIn:21.1-22.24 Error: duplicate function name in val rec dec: f</i>\n </pre>\n \n "
},
{
"code": 16,
"msg": "duplicate function names in fun dec",
"verbose": "When declaring several functions in a single <code>fun</code> declaration, the names of the functions must be distinct.",
"info": "<dd>\n When declaring several functions in a single <code>fun</code>\n declaration, the names of the functions must be distinct.\n <pre> fun f x = x\n and f y = y + 3;\n <i>stdIn:1.1-23.16 Error: duplicate function names in fun dec: f</i>\n </pre>\n \n "
},
{
"code": 17,
"msg": "duplicate label in record",
"verbose": "The label names in a record expression or pattern must be distinct.",
"info": "<dd>\n The label names in a record expression or pattern must be distinct.\n <pre> {a=3,b=true,a=\"abc\"};\n <i>stdIn:1.1-1.21 Error: duplicate label in record: a</i>\n fun f {a=x,a=y} = 3;\n <i>stdIn:2.2-2.11 Error: duplicate label in record: a</i>\n </pre>\n \n "
},
{
"code": 18,
"msg": "duplicate specifications for % % in signature",
"verbose": "Only one specification for a given name in a given name space is allowed in signatures. Values and constructors (including exception constructors) are in one name space; types, structures, and functors are disjoint name spaces. So <code>x</code> cannot be specified twice as a value or constructor, but it can be specified as a value, as a type, as a structure, and as a functor in the same signature.",
"info": "<dd>\n Only one specification for a given name in a given name space is\n allowed in signatures. Values and constructors (including exception\n constructors) are in one name space; types, structures, and functors\n are disjoint name spaces. So <code>x</code> cannot be specified\n twice as a value or constructor, but it can be specified as a\n value, as a type, as a structure, and as a functor in the same\n signature.\n <pre> signature S =\n sig\n val x : int\n val x : bool\n end;\n <i>stdIn:20.3-21.16 Error: duplicate specifications for variable or constructor x in signature</i>\n \n signature S =\n sig\n type t\n type t\n end;\n <i>stdIn:24.3-25.10 Error: duplicate specifications for type constructor t in signature</i>\n signature S =\n sig\n exception Foo\n exception Foo of int\n end;\n <i>stdIn:28.3-29.24 Error: duplicate specifications for variable or constructor Foo in signature</i>\n \n signature S =\n sig\n structure A : sig end\n structure A : sig end\n end;\n <i>stdIn:32.3-33.25 Error: duplicate specifications for structure A in signature</i>\n \n signature S =\n sig\n val x : int\n datatype t = x\n end;\n <i>stdIn:36.3-37.18 Error: duplicate specifications for variable or constructor x in signature</i>\n \n signature S =\n sig\n val x : int\n type x\n structure x : sig end\n end;\n <i>signature S =\n sig\n val x : int\n type x\n structure x : sig end\n end</i>\n </pre>\n \n "
},
{
"code": 19,
"msg": "duplicate specifications for functor % caused by include",
"verbose": "Multiple specifications for a functor name occur in a signature, with one of the later ones introduced via an <code>include</code> spec. If the included functor spec comes first, you get error [19] instead.",
"info": "<dd>\n Multiple specifications for a functor name occur in a signature, with one of\n the later ones introduced via an <code>include</code> spec. If the\n included functor spec comes first, you get error [19] instead.\n <pre> signature S1 =\n sig\n functor F () : sig end\n end;\n <i>signature S1 = sig functor F : (<param>: <sig>) : <sig> end</sig></sig></i>\n \n signature S2 =\n sig\n include S1\n functor F(X: sig val x : int end): sig end\n end;\n <i>stdIn:55.3-56.46 Error: duplicate specifications for functor F in signature</i>\n \n signature S2 =\n sig\n functor F(X: sig val x : int end): sig end\n include S1\n end;\n <i>stdIn:59.3-60.14 Error: duplicate specifications for functor F caused by include</i>\n </pre>\n \n "
},
{
"code": 20,
"msg": "duplicate specifications for structure % caused by include",
"verbose": "Multiple specifications for a structure name occur in a signature, with one of the later ones introduced via an <code>include</code> spec. If the included structure spec comes first, you get error [19] instead.",
"info": "<dd>\n Multiple specifications for a structure name occur in a signature, with one of\n the later ones introduced via an <code>include</code> spec. If the\n included structure spec comes first, you get error [19] instead.\n <pre> signature S1 =\n sig\n structure A : sig end\n end;\n <i>signature S1 = sig structure A : sig end end</i>\n \n signature S2 =\n sig\n structure A : sig val x : int end\n include S1\n end;\n <i>stdIn:67.3-68.14 Error: duplicate specifications for structure A caused by include</i>\n \n signature S3 =\n sig\n include S1\n structure A : sig val x : int end\n end;\n <i>stdIn:71.3-72.37 Error: duplicate specifications for structure A in signature</i>\n </pre>\n \n "
},
{
"code": 21,
"msg": "duplicate specifications for type % caused by include",
"verbose": "Multiple specifications for a type name occur in a signature, with one of the later ones introduced via an <code>include</code> spec. If the included structure spec comes first, you get error [19] instead.",
"info": "<dd>\n Multiple specifications for a type name occur in a signature, with one of\n the later ones introduced via an <code>include</code> spec. If the\n included structure spec comes first, you get error [19] instead.\n <pre> signature S1 =\n sig\n type t\n end;\n <i>signature S1 = sig type t end</i>\n \n signature S2 =\n sig\n type 'a t\n include S1\n end;\n <i>stdIn:79.3-80.14 Error: duplicate specifications for type t caused by include</i>\n \n signature S3 =\n sig\n include S1\n type 'a t\n end;\n <i>stdIn:83.3-84.13 Error: duplicate specifications for type constructor t in signature</i>\n </pre>\n \n "
},
{
"code": 22,
"msg": "duplicate type definition",
"verbose": "A type name is defined twice in a single simultaneous type declaration (i.e. type declarations separated by <code><strong>and</strong></code>. If the simultaneous declaration is split into separate declarations, there is no error.",
"info": "<dd>\n A type name is defined twice in a single simultaneous type\n declaration (i.e. type declarations separated by\n <code><strong>and</strong></code>.\n If the simultaneous declaration is split into\n separate declarations, there is no error.\n <pre> type t = int\n and t = bool;\n <i>stdIn:17.1-18.13 Error: duplicate type definition: t</i>\n \n type t = int;\n <i>type t = int</i>\n type t = bool;\n <i>type t = bool</i>\n </pre>\n \n "
},
{
"code": 23,
"msg": "duplicate type names in type declaration",
"verbose": "A type name is defined multiple times in a datatype declaration (including possibly in the <code><strong>withtype</strong></code> part.",
"info": "<dd>\n A type name is defined multiple times in a datatype declaration\n (including possibly in the <code><strong>withtype</strong></code> part.\n <pre> datatype t = A\n and t = B;\n <i>stdIn:1.1-19.10 Error: duplicate type names in type declaration: t</i>\n \n datatype t = A\n withtype t = int;\n <i>stdIn:1.1-20.17 Error: duplicate type names in type declaration: t</i>\n </pre>\n \n "
},
{
"code": 24,
"msg": "duplicate type variable name",
"verbose": "A type variable name is repeated in a type parameter list, when defining an n-ary type or datatype constructor, or explicitly binding types in a value declaration.",
"info": "<dd>\n A type variable name is repeated in a type parameter list, when\n defining an n-ary type or datatype constructor, or explicitly binding\n types in a value declaration.\n <pre> type ('a,'a) t = 'a * 'a;\n <i>stdIn:21.4-21.11 Error: duplicate type variable name: a</i>\n \n datatype ('a,'a) t = A of 'a;\n <i>stdIn:1.1-21.15 Error: duplicate type variable name: a</i>\n \n fun ('a,'a) f(x:'a) = x;\n <i>stdIn:1.1-21.10 Error: duplicate type variable name: a</i>\n </pre>\n \n "
},
{
"code": 25,
"msg": "duplicate value specifications for % caused by include",
"verbose": "Multiple specifications for a value name occur in a signature, with one of the later ones introduced via an <code>include</code> spec. If the included structure spec comes first, you get error [19] instead. It does not matter whether the multiple value specifications give the same type or not.",
"info": "<dd>\n Multiple specifications for a value name occur in a signature, with one of\n the later ones introduced via an <code>include</code> spec. If the\n included structure spec comes first, you get error [19] instead. It\n does not matter whether the multiple value specifications give the\n same type or not.\n <pre> signature S1 =\n sig\n val x : int\n end;\n <i>signature S1 = sig val x : int end</i>\n \n signature S2 =\n sig\n val x : bool\n include S1\n end;\n <i>stdIn:29.3-30.14 Error: duplicate value specifications for x caused by include</i>\n \n signature S3 =\n sig\n val x : int\n include S1\n end;\n <i>stdIn:33.3-34.14 Error: duplicate value specifications for x caused by include</i>\n \n signature S4 =\n sig\n include S1\n val x : int\n end;\n <i>stdIn:37.3-38.15 Error: duplicate specifications for variable or constructor x in signature</i>\n </pre>\n \n "
},
{
"code": 26,
"msg": "duplicate variable in pattern(s)",
"verbose": "A variable may only occur once in a pattern (or in the sequence of argument patterns of a curried function declaration.",
"info": "<dd>\n A variable may only occur once in a pattern (or in the sequence of argument\n patterns of a curried function declaration.\n <pre> fun f(x,x) = x;\n <i>stdIn:1.5-2.10 Error: duplicate variable in pattern(s): x</i>\n \n fun f x x = x;\n <i>stdIn:1.5-2.9 Error: duplicate variable in pattern(s): x</i>\n \n val (x,x) = (3,3);\n <i>stdIn:1.1-36.3 Error: duplicate variable in pattern(s): x</i>\n </pre>\n \n "
},
{
"code": 27,
"msg": "explicit type variable cannot be generalized at its binding declaration: %",
"verbose": "A type variable used in a type constraint within a value expression or declaration must be generalized at the appropriate point (determined either explicitly or implicitly). If the type variable cannot be generalized at that point because of the value restriction, this error message results.",
"info": "<dd>\n A type variable used in a type constraint within a value expression or\n declaration must be generalized at the appropriate point (determined\n either explicitly or implicitly). If the type variable cannot be generalized\n at that point because of the value restriction, this error message results.\n <pre> val x : 'a list = (fn x => x) nil;\n <i>stdIn:1.1-37.14 Error: explicit type variable cannot be generalized at its binding declaration: 'a</i>\n \n val 'a (x: 'a list) = (fn x => x) nil;\n <i>stdIn:1.1-38.5 Error: explicit type variable cannot be generalized at its binding declaration: 'a</i>\n </pre>\n \n "
},
{
"code": 28,
"msg": "expression and handler don't agree",
"verbose": "The type of the right hand side of the each rule in an exception handler must agree with the type of the base expression that the handler is attached to, because the value returned by the entire handle expression is either that of the base expression or the value returned by one of the handler rules.",
"info": "<dd>\n The type of the right hand side of the each rule in an exception\n handler must agree with the type of the base expression that the\n handler is attached to, because the value returned by the entire\n handle expression is either that of the base expression or the\n value returned by one of the handler rules.\n <pre> fun f x = (hd x)+1 handle Empty => true;\n <i>stdIn:2.6-38.7 Error: expression and handler don't agree [literal]\n body: int\n handler range: bool\n in expression:\n hd x + 1\n handle\n Empty => true\n | exn => raise exn</i>\n </pre>\n \n "
},
{
"code": 29,
"msg": "expression or pattern begins with infix identifier \"%\"",
"verbose": "An infix identifier cannot be the first identifier in an expression, unless it is preceded by the <code><strong>op</strong></code> keyword.",
"info": "<dd>\n An infix identifier cannot be the first identifier in an expression,\n unless it is preceded by the <code><strong>op</strong></code> keyword.\n <pre> +(2,3);\n <i>stdIn:1.1 Error: expression or pattern begins with infix identifier \"+\"</i>\n \n op +(2,3);\n <i>val it = 5 : int</i>\n </pre>\n \n "
},
{
"code": 30,
"msg": "expression or pattern ends with infix identifier \"%\"",
"verbose": "An expression cannot end with an infix identifier. Perhaps there is a missing <code><strong>op</strong></code> keyword.",
"info": "<dd>\n An expression cannot end with an infix identifier. Perhaps there is\n a missing <code><strong>op</strong></code> keyword.\n <pre> 2 +;\n <i>stdIn:40.4 Error: expression or pattern ends with infix identifier \"+\"\n stdIn:40.1-40.4 Error: operator is not a function [literal]\n operator: int\n in expression:\n 2 +</i>\n \n (fn x => x) +;\n <i>stdIn:40.3 Error: expression or pattern ends with infix identifier \"+\"</i>\n \n (fn x => x) op +;\n <i>val it = fn : int * int -> int</i>\n </pre>\n \n "
},
{
"code": 31,
"msg": "fixity precedence must be between 0 and 9",
"verbose": "This one is obvious. When defining new infix operators, you have to fit them into the existing precedence ranking, which is limited to ten levels, from 0 to 9, with higher numbers giving stronger precedence. See the <a href=\"basis/pages/top-level-chapter.html\">Top Level Environment</a> chapter of the Basis documentation for the precedences of the predefined infix operators.",
"info": "<dd>\n This one is obvious. When defining new infix operators, you have\n to fit them into the existing precedence ranking, which is limited to\n ten levels, from 0 to 9, with higher numbers giving stronger precedence.\n See the <a href=\"basis/pages/top-level-chapter.html\">Top Level Environment</a>\n chapter of the Basis documentation for the precedences of the predefined\n infix operators.\n <pre> infix 10 ++;\n <i>stdIn:43.7-43.9 Error: fixity precedence must be between 0 and 9</i>\n \n infix ~2 ++;\n <i>stdIn:2.2-2.4 Error: fixity precedence must be between 0 and 9</i>\n </pre>\n \n "
},
{
"code": 32,
"msg": "found data constructor instead of exception",
"verbose": "In a context where an exception constructor identifier was expected, a dataconstructor identifier was found instead.",
"info": "<dd>\n In a context where an exception constructor identifier was expected,\n a dataconstructor identifier was found instead.\n <pre> exception Foo = nil;\n <i>stdIn:17.1-17.20 Error: found data constructor instead of exception</i>\n </pre>\n \n "
},
{
"code": 33,
"msg": "found variable instead of exception",
"verbose": "In a context where an exception constructor identifier was expected, a value variable was found instead.",
"info": "<dd>\n In a context where an exception constructor identifier was expected,\n a value variable was found instead.\n <pre> val x = 3;\n <i>val x = 3 : int</i>\n exception Foo = x;\n <i>stdIn:18.1-18.18 Error: found variable instead of exception</i>\n </pre>\n \n "
},
{
"code": 34,
"msg": "handler domain is not exn",
"verbose": "In the rules following the <code><strong>handler</strong></code> keyword, the type of the patterns on the left hand side the rule must be <code>exn</code>. In the example below, the first error message is caused by a mismatch with an implicit default rule that handles exceptions not handled by the explicit rules of the handler.",
"info": "<dd>\n In the rules following the <code><strong>handler</strong></code>\n keyword, the type of the patterns on the left hand side the rule\n must be <code>exn</code>. In the example below, the first error\n message is caused by a mismatch with an implicit default rule that\n handles exceptions not handled by the explicit rules of the handler.\n <pre> 3 handle nil => 4;\n <i>stdIn:1.1-18.7 Error: types of rules don't agree [tycon mismatch]\n earlier rule(s): 'Z list -> int\n this rule: exn -> 'Y\n in rule:\n exn => raise exn\n stdIn:1.1-18.7 Error: handler domain is not exn [tycon mismatch]\n handler domain: 'Z list\n in expression:\n 3\n handle\n nil => 4\n | exn => raise exn</i>\n </pre>\n \n "
},
{
"code": 35,
"msg": "ill-formed datatype spec",
"verbose": "In a datatype replication specification in a signature, type parameters were found on the left hand side of the specification.",
"info": "<dd>\n In a datatype replication specification in a signature, type\n parameters were found on the left hand side of the specification.\n <pre> signature S =\n sig\n datatype 'a t = datatype bool\n end;\n <i>stdIn:26.3-26.33 Error: ill-formed datatype spec</i>\n </pre>\n \n "
},
{
"code": 36,
"msg": "illegal (multiple?) type constraints in AS pattern",
"verbose": "The value variable in front of the <code><strong>as</strong></code> keyword can have a type constraint, but only one. This error also occurs in other circumstances, as illustrated by the second example.",
"info": "<dd>\n The value variable in front of the <code><strong>as</strong></code>\n keyword can have a type constraint, but only one. This error also\n occurs in other circumstances, as illustrated by the second example.\n <pre> val x : int list : int list as y::z = [1,2];\n <i>stdIn:29.5-29.36 Error: illegal (multiple?) type constraints in AS pattern</i>\n \n val (x: int list) as (y::z : int list) = [1];\n <i>stdIn:1.5-24.10 Error: illegal (multiple?) type constraints in AS pattern\n stdIn:1.5-24.10 Error: parentheses illegal around variable in AS pattern</i>\n \n val x : int list as (y::z) = [1,2];\n <i>stdIn:1.1-24.6 Warning: binding not exhaustive\n x as y :: z = ...\n val x = [1,2] : int list\n val y = 1 : int\n val z = [2] : int list</i>\n </pre>\n \n "
},
{
"code": 37,
"msg": "illegal function symbol in clause",
"verbose": "In a function declaration, the left hand side between the keyword <code><strong>fun</strong></code> and the equal sign must be a well-formed applicative term, and the operator (i.e. the function part of the top-level application) of this term must be a simple identifier. If the application has an infix operator, it must be parenthesized (unless followed immediately by a type constraint or the equal sign); otherwise it may not be parenthesized.",
"info": "<dd>\n In a function declaration, the left hand side between\n the keyword <code><strong>fun</strong></code> and the equal sign\n must be a well-formed applicative term, and the operator (i.e. the function\n part of the top-level application) of this term must be a simple\n identifier. If the application has an infix operator, it must\n be parenthesized (unless followed immediately by a type constraint\n or the equal sign); otherwise it may not be parenthesized.\n <pre> fun (f x) = 3; (* bad parentheses *)\n <i>stdIn:1.5-2.5 Error: can't find function arguments in clause\n stdIn:1.5-2.5 Error: illegal function symbol in clause</i>\n \n fun (x+y) = 3; (* ok; redefines infix + operator *)\n <i>val + = fn : 'a * 'b -> int</i>\n </pre>\n \n "
},
{
"code": 38,
"msg": "inconsistent arities in type sharing % = %: % has arity % and % has arity %",
"verbose": "Two types involved in a type sharing constraint have different arities.",
"info": "<dd>\n Two types involved in a type sharing constraint have different\n arities.\n <pre> signature XSIG = sig\n datatype ('a, 'b) t = A of 'a | B of 'b\n end\n \n functor F\n (type u\n structure X: XSIG\n sharing type X.t = u) =\n struct\n end\n \n <i>stdIn:49.11-54.6 Error: inconsistent arities in type sharing t = u : t\n has arity 2 and u has arity 0.</i>\n </pre>\n \n "
},
{
"code": 39,
"msg": "inconsistent equality properties in type sharing",
"verbose": "This error occurs when type constructors with incompatible equality properties are equated by sharing constraints. When this happens, the signature is not consistent, and could not be successfully matched.",
"info": "<dd>\n This error occurs when type constructors with incompatible equality\n properties are equated by sharing constraints. When this happens, the\n signature is not consistent, and could not be successfully matched.\n <pre> signature S =\n sig\n eqtype t\n datatype u = U of int -> int (* not an equality type *)\n sharing type t = u\n end;\n <i>stdIn:17.1-22.4 Error: inconsistent equality properties in type sharing</i>\n </pre>\n \n "
},
{
"code": 40,
"msg": "infix operator \"%\" used without \"op\" in fun dec",
"verbose": "A function symbol declared to be an infix symbol is used in a function declaration used to declare nonfix functions.",
"info": "<dd>\n A function symbol declared to be an infix symbol is used in a\n function declaration used to declare nonfix functions.\n <pre> infix foo;\n <i>infix foo</i>\n fun foo (x,y) = x + y;\n <i>stdIn:34.5-34.8 Error: infix operator \"foo\" used without \"op\" in fun dec</i>\n </pre>\n The correct definition is:\n <pre> fun op foo(x,y) = x +y;\n <i>val foo = fn : int * int -> int</i>\n </pre>\n \n "
},
{
"code": 41,
"msg": "infix operator required, or delete parentheses",
"verbose": "The first term following keyword <code><strong>fun</strong></code> in a function declaration is a parenthesized application, implying an infix application, but the middle subterm is not an infix symbol.",
"info": "<dd>\n The first term following keyword <code><strong>fun</strong></code> in\n a function declaration is a parenthesized application, implying an\n infix application, but the middle subterm is not an infix symbol.\n <pre> fun (x f y) = (); (* would work if \"f\" were infix *)\n <i>stdIn:18.8 Error: infix operator required, or delete parentheses</i>\n \n fun x f y = (); (* ok, but maybe not what was intended *)\n <i>val x = fn : 'a -> 'b -> unit</i>\n </pre>\n \n "
},
{
"code": 42,
"msg": "infix symbol \"%\" used where a nonfix identifier was expected",
"verbose": "In a <code><strong>val rec</strong></code> declaration, the if the identifier being declared (on the left hand side of the declaration) is an infix symbol, it must be preceeded by the <code><strong>op</strong></code> keyword.",
"info": "<dd>\n In a <code><strong>val rec</strong></code> declaration, the if the\n identifier being declared (on the left hand side of the declaration) is\n an infix symbol, it must be preceeded by the <code><strong>op</strong></code>\n keyword.\n <pre> infix ++;\n <i>infix ++</i>\n val rec ++ = (fn x => x);\n <i>stdIn:17.9-17.11 Error: infix symbol \"++\" used where a nonfix identifier was expected</i>\n \n val rec op ++ = (fn x => x);\n <i>val ++ = fn : 'a -> 'a</i>\n </pre>\n \n "
},
{
"code": 43,
"msg": "install_pp: empty path",
"verbose": "The function <code>Compiler.PPTable.install_pp</code> installs a user-defined pretty printer function (the second argument) for a generative (i.e. datatype or abstype) designated by the first argument, which must be a nonempty list of strings that can be interpreted as a symbolic path (longTyCon) naming a datatype or abstract type in the current environment. This function should only be called at top level.",
"info": "<dd>\n The function <code>Compiler.PPTable.install_pp</code> installs a\n user-defined pretty printer function (the second argument) for a\n generative (i.e. datatype or abstype) designated by the first\n argument, which must be a nonempty list of strings that can be\n interpreted as a symbolic path (longTyCon) naming a\n datatype or abstract type in the current environment.\n This function should only be called at top level.\n <pre> Compiler.PPTable.install_pp [] (fn x => fn y => ());\n <i>Error: install_pp: empty path</i>\n </pre>\n \n "
},
{
"code": 44,
"msg": "install_pp: nongenerative type constructor",
"verbose": "The function <code>Compiler.PPTable.install_pp</code> installs a user-defined pretty printer function (the second argument) for a generative (i.e. datatype or abstype) designated by the first argument, which must be a nonempty list of strings that can be interpreted as a symbolic path (longTyCon) naming a datatype or abstype in the current environment. This function should only be called at top level.",
"info": "<dd>\n The function <code>Compiler.PPTable.install_pp</code> installs a\n user-defined pretty printer function (the second argument) for a\n generative (i.e. datatype or abstype) designated by the first\n argument, which must be a nonempty list of strings that can be\n interpreted as a symbolic path (longTyCon) naming a datatype\n or abstype in the current environment. This function should only\n be called at top level.\n <pre> Compiler.PPTable.install_pp [\"t\"] (fn x => fn y => ());\n <i>Error: install_pp: nongenerative type constructor</i>\n </pre>\n \n "
},
{
"code": 45,
"msg": "int constant too large",
"verbose": "Integer literal in program is too large. Default integers are represented using 31 bits, and range from ~1073741824 to 1073741823, or from:",
"info": "<dd>\n Integer literal in program is too large. Default integers are\n represented using 31 bits, and range from ~1073741824 to 1073741823,\n or from:\n <pre>\t Option.valOf(Int.minInt) to Option.valOf(Int.maxInt)\n </pre>\n \n <pre> val x = 1073741823;\n <i>val x = 1073741823 : int</i>\n \n val x = 1073741824;\n <i>stdIn:2.4-22.7 Error: int constant too large</i>\n \n val x = ~1073741824;\n <i>val x = ~1073741824 : int</i>\n \n val x = ~1073741825;\n <i>stdIn:30.10-30.21 Error: int constant too large</i>\n </pre>\n \n "
},
{
"code": 46,
"msg": "match nonexhaustive",
"verbose": "Insufficient patterns in clause to match against all the possible inputs. This is an error if the flag <code>Compiler.Control.MC.matchNonExhaustiveError</code> is set to true (the default is false), otherwise it is a warning if <code>Compiler.Control.MC.matchNonExhaustiveWarn</code> is set to true. If neither of these flags is true, then the compiler does not complain about nonexhaustive matches.",
"info": "<dd>\n Insufficient patterns in clause to match against all the\n possible inputs. This is an error if the flag\n <code>Compiler.Control.MC.matchNonExhaustiveError</code>\n is set to true (the default is false), otherwise it is a warning if\n <code>Compiler.Control.MC.matchNonExhaustiveWarn</code>\n is set to true. If neither of these flags is true, then\n the compiler does not complain about nonexhaustive matches.\n <pre> fun f 0 = 1\n | f 1 = 1;\n <i>stdIn:1.1-22.12 Error: match nonexhaustive\n 0 => ...\n 1 => ...\n \n val f = fn : int -> int</i>\n </pre>\n \n "
},
{
"code": 47,
"msg": "match redundant",
"verbose": "A pattern is provided that is covered by some earlier pattern. If the compiler flag <code>Compiler.Control.MC.matchRedundantError</code> is set to false (default is true), only a warning message is given. If <code>Compiler.Control.MC.matchRedundantWarn</code> is also false (default is true), no message is generated.",
"info": "<dd>\n A pattern is provided that is covered by some earlier pattern.\n If the compiler flag\n <code>Compiler.Control.MC.matchRedundantError</code>\n is set to false (default is true), only a warning message\n is given. If\n <code>Compiler.Control.MC.matchRedundantWarn</code>\n is also false (default is true), no message is generated.\n <pre> fun f (0, true) = 1\n | f (0, false) = 2\n | f (0, _) = 3\n | f _ = 4;\n <i>stdIn:24.1-27.14 Error: match redundant\n (0,true) => ...\n (0,false) => ...\n --> (0,_) => ...\n _ => ...</i>\n </pre>\n \n "
},
{
"code": 48,
"msg": "match redundant and nonexhaustive",
"verbose": "A pattern is provided that is covered by some earlier pattern, and the set of patterns do not cover all the possible inputs. Whether this message is generated, and its severity (Error or Warning), are controlled by the compiler flags",
"info": "<dd>\n A pattern is provided that is covered by some earlier pattern,\n and the set of patterns do not cover all the possible inputs.\n Whether this message is generated, and its severity (Error or\n Warning), are controlled by the compiler flags\n <pre> <code>Compiler.Control.MC.matchNonExhaustiveError</code>\n <code>Compiler.Control.MC.matchNonExhaustiveWarn</code>\n <code>Compiler.Control.MC.matchRedundantError</code>\n <code>Compiler.Control.MC.matchRedundantWarn</code>\n </pre>\n Example:\n <pre> fun f 1 = 1\n | f 2 = 3\n | f 1 = 4 ;\n <i>stdIn:1.1-24.12 Error: match redundant and nonexhaustive\n 1 => ...\n 2 => ...\n --> 1 => ...</i>\n </pre>\n \n "
},
{
"code": 49,
"msg": "multiple where defs for %",
"verbose": "The <tt>where</tt> clauses of a signature expression must not bind the same type-specification multiple times.",
"info": "<dd>\n The <tt>where</tt> clauses of a signature expression must not\n bind the same type-specification multiple times.\n <pre> signature S = sig\n type t\n end\n where type t = int\n and type t = bool;\n <i>stdIn:1.1-72.20 Error: multiple where defs for t</i>\n </pre>\n or even:\n <pre> signature S = sig\n type t\n end\n where type t = int\n and type t = int;\n <i>stdIn:1.1-76.19 Error: multiple where defs for t</i>\n </pre>\n \n "
},
{
"code": 50,
"msg": "non-constructor applied to argument in pattern",
"verbose": "The value applied to an argument in a pattern is not a constructor.",
"info": "<dd>\n The value applied to an argument in a pattern is not a constructor.\n <pre> fun f (0 0) = true;\n <i>stdIn:17.5-17.19 Error: non-constructor applied to argument in pattern</i>\n </pre>\n \n "
},
{
"code": 51,
"msg": "non-constructor applied to argument in pattern: %",
"verbose": "Same error as [58]. This particular error occurs when the applied value has a name that can be reported.",
"info": "<dd>\n Same error as [58]. This particular error occurs when the applied value has a\n name that can be reported.\n <pre> val a = 0;\n <i>val a = 0 : int</i>\n \n fun f (a 0) = true;\n <i>stdIn:18.5-18.19 Error: non-constructor applied to argument in pattern: a</i>\n </pre>\n \n "
},
{
"code": 52,
"msg": "nonlocal path in structure sharing: %",
"verbose": "A structure participating in a structure <strong>sharing</strong> specification was not declared in the current signature.",
"info": "<dd>\n A structure participating in a structure <strong>sharing</strong>\n specification was not declared in the current signature.\n <pre> signature S = sig\n structure A : sig end\n sharing A = B.C\n end;\n <i>stdIn:41.11-41.18 Error: nonlocal path in structure sharing: B.C</i>\n </pre>\n \n "
},
{
"code": 53,
"msg": "nonlocal path in type sharing: %",
"verbose": "A type participating in a type <strong>sharing</strong> specification was not declared in the current signature.",
"info": "<dd>\n A type participating in a type <strong>sharing</strong> specification\n was not declared in the current signature.\n <pre> signature S = sig\n type t\n sharing type t = B.t\n end;\n <i>stdIn:44.16-44.23 Error: nonlocal path in type sharing: B.t</i>\n </pre>\n \n "
},
{
"code": 54,
"msg": "operator and operand don't agree",
"verbose": "A function (operator) is applied to a value (operand) with a type different than the type expected by the function.",
"info": "<dd>\n A function (operator) is applied to a value (operand) with a type different than\n the type expected by the function.\n <pre> fun f true = 0\n | f false = 1;\n <i>val f = fn : bool -> int</i>\n \n f 3;\n <i>stdIn:25.1-25.4 Error: operator and operand don't agree [literal]\n operator domain: bool\n operand: int\n in expression:\n f 3</i>\n </pre>\n \n "
},
{
"code": 55,
"msg": "operator is not a function",
"verbose": "The value used in operator position is not a function.",
"info": "<dd>\n The value used in operator position is not a function.\n <pre> 3 true;\n <i>stdIn:1.1-19.6 Error: operator is not a function [literal]\n operator: int\n in expression:\n 3 true</i>\n </pre>\n \n "
},
{
"code": 56,
"msg": "or-patterns don't agree",
"verbose": "In a pattern that uses <it>or-ed subpatterns (via <code>|</code>), the type of all the subpatterns must agree.",
"info": "<dd>\n In a pattern that uses <it>or-ed subpatterns (via <code>|</code>), the type\n of all the subpatterns must agree.\n <pre> fun f (0 | 1 | true) = 0;\n <i>stdIn:1.1-21.4 Error: or-patterns don't agree [literal]\n expected: int\n found: bool\n in pattern:\n (1 | true)</i>\n </pre>\n \n </it>"
},
{
"code": 57,
"msg": "out-of-range word literal in pattern: 0w%",
"verbose": "A word literal used in a pattern is larger than the largest representable word.",
"info": "<dd>\n A word literal used in a pattern is larger than the largest representable word.\n <pre> fun f 0w100000000000 = 0\n | f _ = 1;\n <i>stdIn:1.1-27.12 Error: out-of-range word literal in pattern: 0w100000000000</i>\n </pre>\n \n "
},
{
"code": 58,
"msg": "overloaded variable not defined at type",
"verbose": "An overloaded variable is being instantiated at a type for which it has no definition. Typical overloaded variables include numerical operations, overloaded over the numerical types (int, word, etc.)",
"info": "<dd>\n An overloaded variable is being instantiated at a type for which it has no\n definition. Typical overloaded variables include numerical operations,\n overloaded over the numerical types (int, word, etc.)\n <pre> true + true;\n <i>stdIn:19.5 Error: overloaded variable not defined at type\n symbol: +\n type: bool</i>\n </pre>\n \n "
},
{
"code": 59,
"msg": "parameter or result constraints of clauses don't agree",
"verbose": "In a <code><strong>fun</strong></code> declaration, each clause, or rule, separated by <code><strong>|</strong></code> (vertical bar symbol), has to have the same type (both in the type accepted by the clauses, and the type returned by the clauses).",
"info": "<dd>\n In a <code><strong>fun</strong></code> declaration, each clause, or rule, separated by\n <code><strong>|</strong></code> (vertical bar symbol), has to have the same type (both\n in the type accepted by the clauses, and the type returned by the clauses).\n <pre> datatype typeA = A;\n <i>datatype typeA = A</i>\n datatype typeB = B;\n <i>datatype typeB = B</i>\n fun f A = 0\n | f B = 0;\n <i>stdIn:36.1-37.12 Error: parameter or result constraints of clauses don't agree [tycon mismatch]\n this clause: typeB -> 'Z\n previous clauses: typeA -> 'Z\n in declaration:\n f =\n (fn A => 0\n | B => 0)</i>\n </pre>\n \n "
},
{
"code": 60,
"msg": "parentheses illegal around variable in AS pattern",
"verbose": "In an \"as\"-pattern <code><i>pat</i> as <i>pat</i></code>, where the pattern to the left of the \"as\" is a simple variable, the variable must not be wrapped in parentheses.",
"info": "<dd>\n In an \"as\"-pattern <code><i>pat</i> as <i>pat</i></code>, where the pattern to the left\n of the \"as\" is a simple variable, the variable must not be wrapped in parentheses.\n <pre> val ((a) as (b,c)) = (4,5);\n <i>stdIn:19.5-31.2 Error: parentheses illegal around variable in AS pattern</i>\n </pre>\n \n "
},
{
"code": 61,
"msg": "pattern and constraint don't agree",
"verbose": "In a pattern, the type of the pattern and the constaint type of the pattern must agree.",
"info": "<dd>\n In a pattern, the type of the pattern and the constaint type of the pattern must agree.\n <pre> fun f (0:bool)=0;\n <i>stdIn:38.1-38.17 Error: pattern and constraint don't agree [literal]\n pattern: int\n constraint: bool\n in pattern:\n 0 : bool</i>\n </pre>\n \n "
},
{
"code": 62,
"msg": "pattern and expression in val dec don't agree",
"verbose": "In a declaration <code>val <i>pat</i> = <i>exp</i></code>, the type of <i>pat</i> must match the type of <i>exp</i>.",
"info": "<dd>\n In a declaration <code>val <i>pat</i> = <i>exp</i></code>, the type of\n <i>pat</i> must match the type of <i>exp</i>.\n <pre> val s:string = 6;\n <i>stdIn:1.1-18.6 Error: pattern and expression in val dec don't agree [literal]\n pattern: string\n expression: int\n in declaration:\n s : string = 6</i>\n </pre>\n \n "
},
{
"code": 63,
"msg": "pattern and expression in val dec don't agree",
"verbose": "In a declaration <code>val <i>pat</i> = <i>exp</i></code>, the type of <code><i>pat</i></code> must match the type of <code><i>exp</i></code>.",
"info": "<dd>\n In a declaration <code>val <i>pat</i> = <i>exp</i></code>, the type of\n <code><i>pat</i></code> must match the type of <code><i>exp</i></code>.\n <pre> val s:string = 6;\n <i>stdIn:1.1-18.6 Error: pattern and expression in val dec don't agree [literal]\n pattern: string\n expression: int\n in declaration:\n s : string = 6</i>\n </pre>\n \n "
},
{
"code": 64,
"msg": "pattern to left of \"as\" must be variable",
"verbose": "In an \"as\"-pattern <code><i>pat</i> as <i>pat</i></code>, the first pattern must be a simple variable, not a more complex pattern using tuples or data constructors.",
"info": "<dd>\n In an \"as\"-pattern <code><i>pat</i> as <i>pat</i></code>, the first pattern\n must be a simple variable, not a more complex pattern using tuples\n or data constructors.\n <pre> val (a,_) as (_,b) = (7,5);\n <i>stdIn:1.5-18.8 Error: pattern to left of AS must be variable</i>\n </pre>\n \n "
},
{
"code": 65,
"msg": "pattern to left of AS must be variable",
"verbose": "In an \"as\"-pattern <i>pat</i> <tt>as</tt> <i>pat</i>, the first pattern must be a simple variable, not a more complex pattern using tuples or data constructors.",
"info": "<dd>\n In an \"as\"-pattern <i>pat</i> <tt>as</tt> <i>pat</i>, the first pattern\n must be a simple variable, not a more complex pattern using tuples\n or data constructors.\n <pre> val (a,_) as (_,b) = (7,5);\n <i>stdIn:1.5-18.8 Error: pattern to left of AS must be variable</i>\n </pre>\n \n "
},
{
"code": 66,
"msg": "possibly inconsistent structure definitions at: %",
"verbose": "When a signature contains a sharing constraint between two structure-specifications, each of which is specified using a <tt>where</tt> clause, the compiler is unable to calculate whether the structures are compatible. This is a bug in the compiler and will be fixed in a future version.",
"info": "<dd>\n When a signature contains a sharing constraint between\n two structure-specifications, each of which is specified using a <tt>where</tt>\n clause, the compiler is unable to calculate whether the structures\n are compatible. This is a bug in the compiler and will be fixed\n in a future version.\n <pre> signature SIG =\n sig\n structure A : sig end\n structure B : sig structure Z : sig end\n end where Z = A\n structure C : sig structure Z : sig end\n end where Z = A\n sharing B = C\n end;\n \n <i>stdIn:1.1-38.4 Error: possibly inconsistent structure definitions at: B.Z</i>\n </pre>\n \n "
},
{
"code": 67,
"msg": "real constant out of range: %",
"verbose": "A real literal must have an exponent in the proper range for the floating-point representation of the target machine. At present all SML/NJ target machines use IEEE double-precision floating point, so real literals must be in the range ~1.79769313486e308 to 1.79769313486e308.",
"info": "<dd>\n A real literal must have an exponent in the proper range for the\n floating-point representation of the target machine. At present\n all SML/NJ target machines use IEEE double-precision floating point,\n so real literals must be in the range\n ~1.79769313486e308 to 1.79769313486e308.\n <pre> 2e309;\n \n <i>uncaught exception BadReal\n raised at: bignums/realconst.sml:228.54-228.63</i>\n </pre>\n At present, a bug in the compiler raises an exception instead of\n printing the appropriate error message.\n <p>\n \n "
},
{
"code": 68,
"msg": "rebinding data constructor \"%\" as variable",
"verbose": "An identifier bound as a data constructor cannot be rebound as a variable in a pattern.",
"info": "<dd>\n An identifier bound as a data constructor cannot be rebound as a variable\n in a pattern.\n <pre> fun nil x = x;\n <i>stdIn:1.5-2.9 Error: rebinding data constructor \"nil\" as variable</i>\n </pre>\n \n "
},
{
"code": 69,
"msg": "redundant patterns in match",
"verbose": "In a multi-clause pattern match, if one of the later patterns can only match cases that are covered by earlier patterns, then the later pattern is redundant and can never be matched. In SML '97 it is an error to have useless (redundant) patterns.",
"info": "<dd>\n In a multi-clause pattern match, if one of the later patterns can only\n match cases that are covered by earlier patterns, then the\n later pattern is redundant and can never be matched. In SML '97 it\n is an error to have useless (redundant) patterns.\n <pre> 4 handle Match => 5 | e => 6 | Bind => 7;\n <i>stdIn:1.1-20.15 Error: redundant patterns in match\n Match => ...\n e => ...\n --> Bind => ...</i>\n </pre>\n \n "
},
{
"code": 70,
"msg": "redundant where definitions",
"verbose": "The <tt>where</tt> clauses of a signature expression must not bind the same structure-specification to different structures.",
"info": "<dd>\n The <tt>where</tt> clauses of a signature expression must not\n bind the same structure-specification to different structures.\n <pre> signature S1 =\n sig\n structure A : sig type t end\n end\n where A=Int and A=Real;\n <i>stdIn:32.1-36.23 Error: redundant where definitions</i>\n </pre>\n \n "
},
{
"code": 71,
"msg": "rhs of datatype replication not a datatype",
"verbose": "The declaration",
"info": "<dd>\n The declaration\n <pre> datatype <i>id1</i> = datatype <i>id2</i>\n </pre>\n that binds the name <i>id1</i> to the existing datatype <i>id2</i>,\n requires that <i>id2</i> must be a datatype, and not an ordinary type.\n <pre> datatype myint = datatype int;\n <i>stdIn:38.1-38.30 Error: rhs of datatype replication not a datatype</i>\n </pre>\n \n "
},
{
"code": 72,
"msg": "rhs of datatype replication spec not a datatype",
"verbose": "The specification",
"info": "<dd>\n The specification\n <pre> datatype <i>id1</i> = datatype <i>id2</i>\n </pre>\n that binds the name <code><i>id1</i></code> to the existing datatype\n <code><i>id2</i></code>, requires that <code><i>id2</i></code> must be\n a datatype, and not an ordinary type.\n <pre> signature S = sig type t\n datatype d = datatype t\n end;\n <i>stdIn:37.18-40.17 Error: rhs of datatype replication spec not a datatype</i>\n </pre>\n \n "
},
{
"code": 73,
"msg": "right-hand-side of clause doesn't agree with function result type",
"verbose": "The body of (each clause of) a function must have the type specified in the function-result type constraint (if it is present).",
"info": "<dd>\n The body of (each clause of) a function must have the type specified\n in the function-result type constraint (if it is present).\n <pre> fun f(x) : int = \"hello\";\n <i>stdIn:1.1-37.24 Error: right-hand-side of clause doesn't agree with function result type [tycon mismatch]\n expression: string\n result type: int\n in declaration:\n f = (fn x => \"hello\": int)</i>\n </pre>\n \n "
},
{
"code": 74,
"msg": "sharing structure with a descendent substructure",
"verbose": "A structure cannot share with one of its components.",
"info": "<dd>\n A structure cannot share with one of its components.\n <pre> signature S = sig structure A : sig structure B : sig end end\n sharing A = A.B\n end;\n <i>stdIn:1.1-44.20 Error: Sharing structure with a descendent substructure</i>\n </pre>\n \n "
},
{
"code": 75,
"msg": "structure % defined by partially applied functor",
"verbose": "Functors in SML/NJ may be higher-order, so that the functor F in the example below returns (as its result) another functor, which in turn returns a structure. The result of applying F to an argument cannot, therefore, be bound to a structure name.",
"info": "<dd>\n Functors in SML/NJ may be higher-order, so that the functor F in the\n example below returns (as its result) another functor, which in turn\n returns a structure. The result of applying F to an argument cannot,\n therefore, be bound to a structure name.\n <pre> functor F()() = struct end;\n <i>functor F : <sig></sig></i>\n \n structure S = F();\n <i>stdIn:45.15-45.18 Error: structure S defined by partially applied functor</i>\n </pre>\n \n "
},
{
"code": 76,
"msg": "syntax error found at %",
"verbose": "This message is produced if the parser finds a syntax error and is unable to correct the problem using its built-in heuristics (<a href=#syndel>deletion</a>, <a href=#synins>insertion</a>, or <a href=#synrepl>replacement</a> of tokens). Example:",
"info": "<dd>\n This message is produced if the parser finds a syntax error and is\n unable to correct the problem using its built-in heuristics (<a href=#syndel>deletion</a>, <a href=#synins>insertion</a>, or <a href=#synrepl>replacement</a> of tokens). Example:\n <pre> x andalso val y orelse z;\n <i>stdIn:1.6 Error: syntax error found at VAL</i>\n </pre>\n <i>Note:</i> Error correction in the parser relies on lookahead.\n Different amounts of lookahead are used depending on whether input is\n taken from the interactive toplevel or from a source file. Therefore,\n error messages for the same program can vary depending on circumstances.\n (See also the <a href=#noteins>note on error [78]</a>.)\n <p>\n \n "
},
{
"code": 77,
"msg": "syntax error: deleting %",
"verbose": "This message indicates that the error-correcting parser attempted to rectify a syntax error by deleting (ignoring) some input token(s). <p> For example, let's assume that file <i>delete.sml</i> contains the following code:",
"info": "<dd>\n This message indicates that the error-correcting parser\n attempted to rectify a syntax error by deleting (ignoring) some input\n token(s).\n <p>\n For example, let's assume that file <i>delete.sml</i> contains the\n following code:\n <pre> structure 99 X =\n struct\n val x = 1\n end\n </pre>\n Compiling this file produces:\n <pre> - use \"delete.sml\";\n <i>[opening delete.sml]</i>\n <i>delete.sml:1.11-1.13 Error: syntax error: deleting INT</i>\n </pre>\n <i>Note:</i> Error correction in the parser relies on lookahead.\n Different amounts of lookahead are used depending on whether input is\n taken from the interactive toplevel or from a source file. Therefore,\n error messages for the same program can vary depending on circumstances.\n (See also the <a href=#noteins>note on error [78]</a>.)\n <p>\n \n "
},
{
"code": 78,
"msg": "syntax error: inserting %",
"verbose": "This error message, like the previous one, is generated by SML/NJ's error-correcting parser. It indicates that the parser was able to correct a syntactic error by inserting an additional token. <p> For example, let's assume that file <i>insert.sml</i> contains the following code:",
"info": "<dd>\n This error message, like the previous one, is generated by SML/NJ's\n error-correcting parser. It indicates that the parser was able to\n correct a syntactic error by inserting an additional token.\n <p>\n For example, let's assume that file <i>insert.sml</i> contains the\n following code:\n <pre> let\n val x = 1; y = x + x\n in\n x * y\n end\n </pre>\n Compiling this file produces:\n <pre> - use \"insert.sml\";\n <i>[opening insert.sml]\n insert.sml:2.16 Error: syntax error: inserting VAL</i>\n </pre>\n <a name=noteins><i>Note:</i></a> Error correction in the parser relies\n on lookahead.\n Since the interactive parser cannot use lookahead, it is likely that\n its syntax error messages differ from those that are generated when\n compiling files. For example, typing the contents of\n <i>insert.sml</i> directly into the interactive toplevel produces:\n <pre> let\n val x = 1; y = x + x\n in\n x * y\n end;\n <i>stdIn:2.14-2.19 Error: syntax error: deleting ID EQUALOP ID\n stdIn:2.20-3.3 Error: syntax error: deleting ID ID IN\n stdIn:4.3-4.8 Error: syntax error: deleting ID ASTERISK ID</i>\n </pre>\n \n "
},
{
"code": 79,
"msg": "syntax error: replacing % with %",
"verbose": "The parser found a syntax error and has attempted to fix the problem by replacing some token(s) by some other token(s). <p> For example, let's assume that file <i>replace.sml</i> contains the following code:",
"info": "<dd>\n The parser found a syntax error and has attempted to fix the problem\n by replacing some token(s) by some other token(s).\n <p>\n For example, let's assume that file <i>replace.sml</i> contains the\n following code:\n <pre> fn x = x\n </pre>\n Compiling this file produces:\n <pre> - use \"replace.sml\";\n <i>[opening replace.sml]\n replace.sml:1.6 Error: syntax error: replacing EQUALOP with DARROW</i>\n </pre>\n <i>Note:</i> Error correction in the parser relies on lookahead.\n Different amounts of lookahead are used depending on whether input is\n taken from the interactive toplevel or from a source file. Therefore,\n error messages for the same program can vary depending on circumstances.\n (See also the <a href=#noteins>note on error [78]</a>.)\n <p>\n \n "
},
{
"code": 80,
"msg": "tycon arity for % does not match specified arity",
"verbose": "The arity of a type constructor differs between the definition inside a structure and its declaration in the corresponding signature constraint. <p> Example:",
"info": "<dd>\n The arity of a type constructor differs between the definition inside\n a structure and its declaration in the corresponding signature\n constraint.\n <p>\n Example:\n <pre> signature S = sig type ('a, 'b) t end;\n <i>signature S = sig type ('a,'b) t end</i>\n \n structure S : S = struct\n type 'a t = 'a list\n end;\n <i>stdIn:75.1-77.4 Error: tycon arity for t does not match specified arity</i>\n </pre>\n \n "
},
{
"code": 81,
"msg": "type % must be a datatype",
"verbose": "This message indicates that the signature constraint for a given structure requires some type to be a <strong>datatype</strong> but the structure defines it as different type (i.e., not a datatype). <p> Example:",
"info": "<dd>\n This message indicates that the signature constraint for a given\n structure requires some type to be a <strong>datatype</strong> but the\n structure defines it as different type (i.e., not a datatype).\n <p>\n Example:\n <pre>signature S = sig datatype t = A | B end;\n <i>signature S = sig datatype t = A | B end</i>\n \n structure S : S = struct\n type t = int\n end;\n <i>stdIn:80.1-82.4 Error: type t must be a datatype\n stdIn:80.1-82.4 Error: unmatched constructor specification: A\n stdIn:80.1-82.4 Error: unmatched constructor specification: B</i>\n </pre>\n \n "
},
{
"code": 82,
"msg": "type % must be an equality type",
"verbose": "This error message is issued when the definition for some type inside a structure does not permit equality while the corresponding signature constraint for the structure specifies that type as an <strong>eqtype</strong>. <p> Example:",
"info": "<dd>\n This error message is issued when the definition for some type inside\n a structure does not permit equality while the corresponding signature\n constraint for the structure specifies that type as an\n <strong>eqtype</strong>.\n <p>\n Example:\n <pre> signature S = sig eqtype t end;\n <i>signature S = sig eqtype t end</i>\n \n structure S : S = struct\n type t = int -> int\n end;\n <i>stdIn:86.1-88.4 Error: type t must be an equality type</i>\n </pre>\n \n "
},
{
"code": 83,
"msg": "type constraint of val rec dec is not a function type",
"verbose": "Names that are defined using <strong>val rec</strong> must refer to function values. Therefore, their types must be function types. <p> Example:",
"info": "<dd>\n Names that are defined using <strong>val rec</strong> must refer to\n function values. Therefore, their types must be function types.\n <p>\n Example:\n <pre> val rec f : int = fn x => x;\n <i>stdIn:1.1-79.26 Error: type constraint of val rec dec is not a function type [tycon mismatch]\n constraint: int\n in declaration:\n f = (fn x => x)</i>\n </pre>\n \n "
},
{
"code": 84,
"msg": "type constraints on val rec declaraction [sic] disagree",
"verbose": "This error message occurs when a declaration has the form",
"info": "<dd>\n This error message occurs when a declaration has the form\n <pre> val rec <i>id</i> : <i>ty1</i> = <i>exp : ty2</i>\n </pre>\n and the types <code><i>ty1</i></code> and <code><i>ty2</i></code> disagree.\n <pre> val rec f : int -> int = (fn x => x) : bool -> bool;\n <i>stdIn:1.1-29.30 Error: type constraints on val rec declaraction disagree [tycon mismatch]\n this constraint: bool -> bool\n outer constraints: int -> int\n in declaration:\n f = (fn x => x): bool -> bool</i>\n </pre>\n \n <p>\n \n "
},
{
"code": 85,
"msg": "type constructor % given % arguments, wants %",
"verbose": "A type constructor was used with the wrong number of type arguments. <p> Example:",
"info": "<dd>\n A type constructor was used with the wrong number of type arguments.\n <p>\n Example:\n <pre> type ('a, 'b) t = 'a * 'b;\n <i>type ('a,'b) t = 'a * 'b</i>\n \n type u = (int, bool, real) t;\n <i>stdIn:103.28 Error: type constructor t given 3 arguments, wants 2</i>\n </pre>\n \n "
},
{
"code": 86,
"msg": "type variable % occurs with different equality\n properties in the same scope",
"verbose": "This message indicates that different occurences of the same type variable have inconsistent equality properties. In practice this means that the same name of a type variable was used both with one apostrophe and with two apostrophes. (Note that this would have been ok if the two occurences are clearly separated by scope.) <p> Example:",
"info": "<dd>\n This message indicates that different occurences of the same type\n variable have inconsistent equality properties. In practice this\n means that the same name of a type variable was used both with one\n apostrophe and with two apostrophes. (Note that this would have been\n ok if the two occurences are clearly separated by scope.)\n <p>\n Example:\n <pre> fun f (x: 'a, y: ''a) = (x, y);\n <i>stdIn:118.2-119.12 Error: type variable a occurs with different equality properties in the same scope</i>\n </pre>\n But:\n <pre> fun 'a f (x: 'a) = let\n fun ''a g (y: ''a) = y = y\n in x end;\n <i>val f = fn : 'a -> 'a</i>\n </pre>\n \n "
},
{
"code": 87,
"msg": "type variable in exception spec: %",
"verbose": "Exception declarations in signatures cannot contain type variables. <p> Example:",
"info": "<dd>\n Exception declarations in signatures cannot contain type variables.\n <p>\n Example:\n <pre> signature S = sig\n exception E of 'a list\n end;\n <i>stdIn:135.3-135.26 Error: type variable in exception spec: E</i>\n </pre>\n \n "
},
{
"code": 88,
"msg": "type variable in top level exception type",
"verbose": "Exception definitions at top level cannot contain type variables. <p> Example:",
"info": "<dd>\n Exception definitions at top level cannot contain type variables.\n <p>\n Example:\n <pre> exception E of 'a list;\n <i>stdIn:1.1-135.4 Error: type variable in top level exception type</i>\n </pre>\n \n "
},
{
"code": 89,
"msg": "types of rules don't agree",
"verbose": "The right-hand sides of the rules in a match must agree in type. Matches occur both in <code><strong>case</strong></code>- and in <code><strong>fn</strong></code>-expressions. <p> Examples:",
"info": "<dd>\n The right-hand sides of the rules in a match must agree in type.\n Matches occur both in <code><strong>case</strong></code>- and in\n <code><strong>fn</strong></code>-expressions.\n <p>\n Examples:\n <pre> fn true => false\n | false => 1;\n <i>stdIn:144.1-144.30 Error: types of rules don't agree [literal]\n earlier rule(s): bool -> bool\n this rule: bool -> int\n in rule:\n false => 1</i>\n \n fn x =>\n case x\n of true => false\n | false => 1;\n <i>stdIn:144.6-144.42 Error: types of rules don't agree [literal]\n earlier rule(s): bool -> bool\n this rule: bool -> int\n in rule:\n false => 1</i>\n </pre>\n \n "
},
{
"code": 90,
"msg": "unbound functor signature: %",
"verbose": "This error message is related to SML/NJ's higher-order module extension to Standard ML. The constraint on a functor declaration in some signature uses an undefined functor signature name. <p> Example:",
"info": "<dd>\n This error message is related to SML/NJ's higher-order module\n extension to Standard ML. The constraint on a functor declaration in\n some signature uses an undefined functor signature name.\n <p>\n Example:\n <pre> signature S = sig\n functor F: FS\n end;\n <i>stdIn:145.3-145.17 Error: unbound functor signature: FS</i>\n </pre>\n \n "
},
{
"code": 91,
"msg": "unbound functor: %",
"verbose": "The name of the functor being used is not defined. <p> Example:",
"info": "<dd>\n The name of the functor being used is not defined.\n <p>\n Example:\n <pre> structure S = F ();\n <i>stdIn:147.15-147.19 Error: unbound functor: F</i>\n </pre>\n \n "
},
{
"code": 92,
"msg": "unbound left hand side in where (structure): %",
"verbose": "A <strong>where</strong> specification refers to a structure inside a signature that was not declared there. <p> Example:",
"info": "<dd>\n A <strong>where</strong> specification refers to a structure inside a\n signature that was not declared there.\n <p>\n Example:\n <pre> structure A = struct end;\n <i>structure A : sig end</i>\n \n signature S = sig end;\n <i>signature S = sig end</i>\n \n signature S' = S where B = A;\n <i>stdIn:158.1-158.29 Error: unbound left hand side in where (structure): B</i>\n </pre>\n But:\n <pre> signature S = sig structure B : sig end end;\n <i>signature S = sig structure B : sig end end</i>\n \n signature S' = S where B = A;\n <i>signature S' = sig structure B : sig end end</i>\n </pre>\n \n "
},
{
"code": 93,
"msg": "unbound left hand side in where type: %",
"verbose": "A <code><strong>where type</strong></code> specification refers to a type inside a signature that was not declared there. <p> Example:",
"info": "<dd>\n A <code><strong>where type</strong></code> specification refers to a type inside a\n signature that was not declared there.\n <p>\n Example:\n <pre> type t = int;\n <i>type t = int</i>\n \n signature S = sig end;\n <i>signature S = sig end</i>\n \n signature S' = S where type u = t;\n <i>stdIn:169.1-169.34 Error: unbound left hand side in where type: u</i>\n </pre>\n But:\n <pre> signature S = sig type u end;\n <i>signature S = sig type u end</i>\n \n signature S' = S where type u = t;\n <i>signature S' = sig type u = t end</i>\n </pre>\n \n "
},
{
"code": 94,
"msg": "unbound signature: %",
"verbose": "A signature name is used but it has not been defined; for instance <code>S</code> in the following example:",
"info": "<dd>\n A signature name is used but it has not been defined; for instance\n <code>S</code> in the following example:\n <pre> structure A : S = struct end;\n <i>stdIn:16.15 Error: unbound signature: S</i>\n </pre>\n \n "
},
{
"code": 95,
"msg": "unbound structure: %",
"verbose": "A structure name is used but it has not been defined; for instance <code>B</code> in the following example:",
"info": "<dd>\n A structure name is used but it has not been defined; for instance\n <code>B</code> in the following example:\n <pre> - structure A = B;\n stdIn:2.10 Error: unbound structure: B\n </pre>\n \n "
},
{
"code": 96,
"msg": "unbound type constructor: %",
"verbose": "A type constructor name is used but it has not been defined, for instance <code>t</code> in the following example:",
"info": "<dd>\n A type constructor name is used but it has not been defined, for instance\n <code>t</code> in the following example:\n <pre> val x : t = ();\n <i>stdIn:2.4 Error: unbound type constructor: t</i>\n </pre>\n \n "
},
{
"code": 97,
"msg": "unbound type variable in type declaration: %",
"verbose": "A type variable occurs on the right hand side of a type or datatype declaration, without having been bound as a formal parameter on the left hand side.",
"info": "<dd>\n A type variable occurs on the right hand side of a type or datatype\n declaration, without having been bound as a formal parameter on the\n left hand side.\n <pre> type t = 'a list;\n <i>stdIn:2.5-2.12 Error: unbound type variable in type declaration: 'a</i>\n \n datatype 'a t = A of 'b;\n <i>stdIn:1.1-18.2 Error: unbound type variable in type declaration: 'b</i>\n </pre>\n \n "
},
{
"code": 98,
"msg": "unbound variable or constructor: %",
"verbose": "A value variable or constructor is used without having been defined or bound as a formal parameter.",
"info": "<dd>\n A value variable or constructor is used without having been defined\n or bound as a formal parameter.\n <pre> x;\n <i>stdIn:1.1 Error: unbound variable or constructor: x</i>\n \n fun f x = x+y;\n <i>stdIn:2.8 Error: unbound variable or constructor: y</i>\n </pre>\n \n "
},
{
"code": 99,
"msg": "unresolved flex record (can't tell what fields there are besides %)",
"verbose": "When a flexible record pattern (one containing <code><strong>...</strong></code>) is used, the context must provide enough type information to determine what all the fields are (though not necessarily their types).",
"info": "<dd>\n When a flexible record pattern (one containing <code><strong>...</strong></code>)\n is used, the context must provide enough type information to determine\n what all the fields are (though not necessarily their types).\n <pre> fun f {x,...} = x;\n <i>stdIn:37.1-37.18 Error: unresolved flex record\n (can't tell what fields there are besides #x)</i>\n \n fun f ({x,...} : {x: int, y:bool}) = x;\n <i>val f = fn : {x:int, y:bool} -> int</i>\n </pre>\n If more than one field occurs in the flexible record pattern, then\n a different variant of this error message is generated. See error [100].\n <p>\n \n "
},
{
"code": 100,
"msg": "unresolved flex record (need to know the names of ALL\n the fields in this context)",
"verbose": "The pattern in a pattern match was a <i>flexible record</i>. The pattern omitted some of the record's members and summarized their existence using ellipses (\"<strong>...</strong>\"). But in the given context there was not enough information for the type checker to be able to infer the missing field names.",
"info": "<dd>\n The pattern in a pattern match was a <i>flexible record</i>. The\n pattern omitted some of the record's members and summarized their\n existence using ellipses (\"<strong>...</strong>\"). But in the given\n context there was not enough information for the type checker to be\n able to infer the missing field names.\n <pre> fun f {x,y,...} = (x,y);\n <i>stdIn:118.1-118.24 Error: unresolved flex record (need to know the names of ALL the fields\n in this context)\n type: {x:'Y, y:'X; 'Z}</i>\n </pre>\n \n "
},
{
"code": 101,
"msg": "value type in structure doesn't match signature spec",
"verbose": "A value component of a structure has a different type than that specified in a signature that the structure is matched against.",
"info": "<dd>\n A value component of a structure has a different type than that\n specified in a signature that the structure is matched against.\n <pre> signature S =\n sig\n val x : int\n end;\n <i>signature S = sig val x : int end</i>\n \n structure A : S =\n struct\n val x = true\n end;\n <i>stdIn:21.1-24.4 Error: value type in structure doesn't match signature spec\n name: x\n spec: int\n actual: bool</i>\n </pre>\n \n "
},
{
"code": 102,
"msg": "variable % does not occur in all branches of or-pattern",
"verbose": "SML/NJ supports or-patterns, where a single rule can have several patterns separated with the <code><strong>|</strong></code> symbol. The component patterns of an or-pattern are required to have exactly the same variables with the same types.",
"info": "<dd>\n SML/NJ supports or-patterns, where a single rule can have several patterns\n separated with the <code><strong>|</strong></code> symbol. The\n component patterns of an or-pattern are required to have exactly the\n same variables with the same types.\n <pre> fun f(nil | x::_) = 1;\n <i>stdIn:1.5-2.18 Error: variable x does not occur in all branches of or-pattern</i>\n </pre>\n Here the component patterns are <code>nil</code> and\n <code>x::_</code>, and the variable <code>x</code> doesn't\n occur in the first pattern.\n <p>\n \n "
},
{
"code": 103,
"msg": "variable found where constructor is required: %",
"verbose": "A symbolic path (longid) of length greater than 1 occurring in a pattern must designate a data constructor.",
"info": "<dd>\n A symbolic path (longid) of length greater than 1 occurring in a\n pattern must designate a data constructor.\n <pre> fun f(Int.+) = 3;\n <i>stdIn:1.5-2.12 Error: variable found where constructor is required: Int.+</i>\n </pre>\n \n "
},
{
"code": 104,
"msg": "vector expression type failure",
"verbose": "In a vector expression of the form <code>#[<i>exp<sub>1</sub></i>,exp<sub>2</sub>,...]</code>, all the vector element expressions must be of the same type.",
"info": "<dd>\n In a vector expression of the form\n <code>#[<i>exp<sub>1</sub></i>,exp<sub>2</sub>,...]</code>,\n all the vector element expressions must be of the same type.\n <pre> #[1,true];\n <i>stdIn:1.1-2.5 Error: vector expression type failure [literal]</i>\n \n fun f(x:int) = #[x,true];\n <i>stdIn:2.11-2.20 Error: vector expression type failure [tycon mismatch]</i>\n </pre>\n \n "
},
{
"code": 105,
"msg": "vector pattern type failure",
"verbose": "In a vector pattern of the form <code>#[<i>pat<sub>1</sub></i>,pat<sub>2</sub>,...]</code>, all the vector element patterns must be of the same type.",
"info": "<dd>\n In a vector pattern of the form\n <code>#[<i>pat<sub>1</sub></i>,pat<sub>2</sub>,...]</code>,\n all the vector element patterns must be of the same type.\n <pre> fun f(#[x:int,y:bool]) = (x + 1; not y);\n <i>stdIn:1.1-2.35 Error: vector pattern type failure [tycon mismatch]</i>\n </pre>\n \n "
},
{
"code": 106,
"msg": "where defn applied to definitional spec",
"verbose": "SML/NJ does not allow multiple definitions of a structure in a signature (one through a definitional spec, another through a <code><strong>where</strong></code> clause).",
"info": "<dd>\n SML/NJ does not allow multiple definitions of a structure in a signature\n (one through a definitional spec, another through a\n <code><strong>where</strong></code> clause).\n <pre> structure A = struct end;\n <i>structure A : sig end</i>\n \n signature S =\n sig\n structure X : sig end = A\n end\n where X = A;\n <i>stdIn:27.1-31.12 Error: where defn applied to definitional spec</i>\n </pre>\n \n "
},
{
"code": 107,
"msg": "where type definition has wrong arity: %",
"verbose": "The arity implied by a <code><strong>where type</strong></code> definition must agree with the arity in type specification that it applies to.",
"info": "<dd>\n The arity implied by a <code><strong>where type</strong></code>\n definition must agree with the arity in type specification that\n it applies to.\n <pre> signature S =\n sig\n type 'a t\n end\n where type t = int;\n <i>stdIn:1.1-26.19 Error: where type definition has wrong arity: t</i>\n </pre>\n \n "
},
{
"code": 108,
"msg": "where type defn applied to definitional spec: %",
"verbose": "SML/NJ does not allow multiple definitions of a type in a signature (one through a definitional spec, another through a <code><strong>where type</strong></code> clause).",
"info": "<dd>\n SML/NJ does not allow multiple definitions of a type in a signature\n (one through a definitional spec, another through a\n <code><strong>where type</strong></code> clause).\n <pre> signature S =\n sig\n type t = int\n end\n where type t = int;\n <i>stdIn:1.1-22.19 Error: where type defn applied to definitional spec: t</i>\n </pre>\n \n "
},
{
"code": 109,
"msg": "withtype not allowed in datatype replication",
"verbose": "One can't attach a <code><strong>withtype</strong></code> clause to a datatype replication declaration or specification.",
"info": "<dd>\n One can't attach a <code><strong>withtype</strong></code> clause to a\n datatype replication declaration or specification.\n <pre> datatype t = A;\n <i>datatype t = A</i>\n \n datatype s = datatype t\n withtype u = s list;\n <i>stdIn:37.1-38.20 Error: withtype not allowed in datatype replication</i>\n </pre>\n \n "
},
{
"code": 110,
"msg": "word constant too large",
"verbose": "Word constants (by default Word31.word) are limited to values less than <code>0w2147483648</code> (<code>0wx80000000</code>). Similarly for word literals of type Word32.word (bound <code>0w4294967296</code>) and Word8.word (bound 0w256).",
"info": "<dd>\n Word constants (by default Word31.word) are limited to values less\n than <code>0w2147483648</code> (<code>0wx80000000</code>). Similarly\n for word literals of type Word32.word (bound <code>0w4294967296</code>)\n and Word8.word (bound 0w256).\n <pre> 0w2147483648;\n <i>stdIn:1.1-18.3 Error: word constant too large</i>\n 0wx80000000;\n <i>stdIn:1.1-18.2 Error: word constant too large</i>\n \n 0w4294967296 : Word32.word;\n <i>stdIn:25.1-25.13 Error: word constant too large</i>\n 0wx100000000 : Word32.word;\n <i>stdIn:23.1-23.13 Error: word constant too large</i>\n \n 0w256: Word8.word;\n <i>stdIn:1.1-1.6 Error: word constant too large</i>\n 0wx100 : Word8.word;\n <i>stdIn:1.1-24.2 Error: word constant too large</i>\n </pre>\n "
}
]
ERROR_MSGS = [re.compile(re.escape(m["msg"]).replace("%", ".+?") + r"( \[.+\])?") for m in ERRORS_INFO]
def find_error(msg: str) -> Optional[int]:
# FIXME: linear scan over ERROR_MSGS; returns the index of the first pattern that matches msg, or None.
for i, r in enumerate(ERROR_MSGS):
if r.match(msg):
return i
return None
def prettify_error_message(msg: str) -> str:
msg += "\n"
msg = re.sub(r"^stdIn:.*?\s", "", msg)
lines = msg.splitlines()
first_line = lines[0]
if m := re.match(r"^Error: (.*?)(\s+\[.*\])?$", first_line):
index = find_error(m.group(1))
if index is not None:
msg += ERRORS_INFO[index]["verbose"] + "\n"
return msg
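# Illustrative sketch (added for exposition, not part of the original kernel):
# how the two helpers above compose. The sample message is hypothetical; the
# index that find_error() returns depends on the full ERRORS_INFO table.
def _example_prettify() -> str:
    raw = "stdIn:1.1 Error: unbound variable or constructor: x"
    # prettify_error_message() strips the leading "stdIn:..." location, looks the
    # message up via find_error(), and appends the matching "verbose" explanation.
    return prettify_error_message(raw)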
class SMLNJKernel(Kernel):
implementation = "SML/NJ"
implementation_version = "0.0.1"
language_info = {
"name": "SML/NJ",
"codemirror_mode": "fsharp",
"mimetype": "text/plain",
"file_extension": ".sml",
}
@property
def language_version(self) -> str:
if self._language_version is None:
self._language_version = check_output(["sml", ""]).decode("utf-8")
return self._language_version
@property
def banner(self) -> str:
return f"Simple SML/NJ Kernel {self.language_version}"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self._language_version = None
self._start_smlnj()
def _start_smlnj(self):
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
self.smlnjwrapper = REPLWrapper(
f"sml -Cparser.primary-prompt={PRIMARY_PROMPT}",
# f"sml -Cparser.primary-prompt={PRIMARY_PROMPT} -Cparser.seconday-prompt={SECONDARY_PROMPT}",
"(\n|^)" + PRIMARY_PROMPT,
"(\n|^)" + SECONDARY_PROMPT,
)
finally:
signal.signal(signal.SIGINT, sig)
def do_complete(self, code: str, cursor_pos: int) -> Dict[str, Any]:
m = REGEX_WORD.search(code[:cursor_pos])
if m is not None:
keyword = m.group(1)
matches = [s for s in SML_KEYWORDS if s.startswith(keyword)]
if matches:
return {
"status": "ok",
"matches": matches,
"cursor_start": cursor_pos - len(keyword),
"cursor_end": cursor_pos,
"metadata": {},
}
return {
"status": "ok",
"matches": [],
"cursor_start": cursor_pos,
"cursor_end": cursor_pos,
"metadata": {},
}
def do_is_complete(self, code: str) -> Dict[str, Any]:
stripped = code.rstrip()
if not stripped:
return {
"status": "complete",
}
elif stripped.endswith("*)") or stripped.endswith(";"):
return {
"status": "unknown",
}
else:
return {
"status": "incomplete",
"indent": "",
}
def stdout_print(self, text: str) -> None:
stream_content = {"name": "stdout", "text": text}
self.send_msg("stream", stream_content)
def stderr_print(self, text: str) -> None:
self.send_msg("stream", {"name": "stderr", "text": text})
def send_msg(self, message_type: str, content: dict) -> None:
self.send_response(self.iopub_socket, message_type, content)
def do_execute(
self,
code: str,
silent,
store_history: bool = True,
user_expressions=None,
allow_stdin: bool = False,
) -> Dict[str, Any]:
code = crlf_pat.sub(" ", code.strip())
if not code:
return {
"status": "ok",
"execution_count": self.execution_count,
"payload": [],
"user_expressions": {},
}
interrupted = False
try:
output = self.smlnjwrapper.run_command(code)
except KeyboardInterrupt:
self.smlnjwrapper.child.sendintr()
interrupted = True
self.smlnjwrapper._expect_prompt()
output = self.smlnjwrapper.get_output()
except pexpect.EOF:
output = self.smlnjwrapper.get_output() + "Restarting SML/NJ"
self._start_smlnj()
except ValueError as e:
# Continuation prompt found - input was incomplete
self.stderr_print(e.args[0])
return {"status": "error", "execution_count": self.execution_count}
if not silent and output is not None:
if output.startswith("stdIn"):
self.stderr_print(prettify_error_message(output))
# self.send_msg("display_data", {"data": {"text/html": f"<p style='color:red;'>{html.escape(output)}</p>"}, "metadata": {}})
return {"status": "error", "execution_count": self.execution_count}
self.stdout_print(output)
if interrupted:
return {"status": "abort", "execution_count": self.execution_count}
return {
"status": "ok",
"execution_count": self.execution_count,
"payload": [],
"user_expressions": {},
}
if __name__ == "__main__":
IPKernelApp.launch_instance(kernel_class=SMLNJKernel)
| 108.585635
| 1,720
| 0.612293
|
4a782c64ecb206670ce7ca4a27db98416c29344a
| 2,096
|
py
|
Python
|
conanfile.py
|
mjvk/conan-coverxygen_installer
|
132cdc3695255e264509a114899c9ee496ba0e75
|
[
"MIT"
] | null | null | null |
conanfile.py
|
mjvk/conan-coverxygen_installer
|
132cdc3695255e264509a114899c9ee496ba0e75
|
[
"MIT"
] | null | null | null |
conanfile.py
|
mjvk/conan-coverxygen_installer
|
132cdc3695255e264509a114899c9ee496ba0e75
|
[
"MIT"
] | null | null | null |
from conans import ConanFile
from conans import tools
import subprocess
import os
import codecs
class CoverxygenConan(ConanFile):
name = "coverxygen_installer"
version = "1.5.0"
url = "https://github.com/mjvk/conan-covergygen_installer"
homepage = "https://github.com/psycofdj/coverxygen"
topics = ("coverage", "documentation", "doxygen")
author = "mjvk <>"
description = ("Covergygen can generate documentation coverage statistics")
license = "MIT"
settings = "os_build", "arch_build"
_source_subfolder = "sourcefolder"
def _makeAbsoluteImport(self,input_name):
tmp_name = input_name + ".bak"
with codecs.open(input_name, 'r', encoding='utf8') as fi, \
codecs.open(tmp_name, 'w', encoding='utf8') as fo:
for line in fi:
fo.write(line.replace("from .", "from coverxygen."))
os.remove(input_name) # remove original
os.rename(tmp_name, input_name) # rename temp to original name
def source(self):
tools.get("{0}/archive/{1}.tar.gz".format(self.homepage, self.version))
os.rename("coverxygen-%s" % self.version, self._source_subfolder)
def build(self):
subprocess.call("pip install pyinstaller", shell=True)
mainfilename = os.path.join(self._source_subfolder,"coverxygen","__main__.py")
self._makeAbsoluteImport(mainfilename)
subprocess.call('pyinstaller %s --name coverxygen --onefile --workpath %s --distpath %s --specpath %s' % (mainfilename, os.path.join(self.build_folder,"build"), os.path.join(self.build_folder,"bin"), self.build_folder), shell=True)
def package(self):
self.copy("*coverxygen", dst="bin", src="bin", keep_path=False)
self.copy("*coverxygen.exe", dst="bin", src="bin", keep_path=False)
def deploy(self):
self.copy("*", src="bin", dst="bin")
def package_id(self):
self.info.include_build_settings()
def package_info(self):
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
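# Illustrative usage note (assumption, not part of the original recipe): with
# Conan 1.x this installer package would typically be built locally with
#   conan create . <user>/<channel>
# after which package_info() above exposes the PyInstaller-built `coverxygen`
# binary on PATH for consuming recipes.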
| 40.307692
| 239
| 0.649809
|
dcc2bb0a96f51c1c26238e9505d601f65fd21ec1
| 477
|
py
|
Python
|
example_apps/shopping_list.py
|
jock-dalby/pythonTreehouse
|
ba24d36ba9f4013c99a0e081c06bc4891fe78212
|
[
"MIT"
] | null | null | null |
example_apps/shopping_list.py
|
jock-dalby/pythonTreehouse
|
ba24d36ba9f4013c99a0e081c06bc4891fe78212
|
[
"MIT"
] | null | null | null |
example_apps/shopping_list.py
|
jock-dalby/pythonTreehouse
|
ba24d36ba9f4013c99a0e081c06bc4891fe78212
|
[
"MIT"
] | null | null | null |
# make a list to hold onto items
shopping_list = []
# print out instructions on how to use the app
print("What should we pick up from the store?")
print("Enter 'DONE' to stop adding items")
while True:
# ask for new items
new_item = input("> ")
#be able to quit the app
if new_item.lower() == "done":
break
# add new items to our list
shopping_list.append(new_item)
# print out the list
print("Here's your list:")
for item in shopping_list:
print(item)
| 19.875
| 47
| 0.687631
|
05a3a44d4488f753f52923ed8867aeab2cb12938
| 576
|
py
|
Python
|
0 - CURSOS/11_Introducao a programacao com Python/aula8_lambda.py
|
saldanhayg/Bootcamp_Banco_Carrefour_Data_Engineer
|
00c3b070e1c9f6e31293399521d7c5d1e97dc309
|
[
"MIT"
] | null | null | null |
0 - CURSOS/11_Introducao a programacao com Python/aula8_lambda.py
|
saldanhayg/Bootcamp_Banco_Carrefour_Data_Engineer
|
00c3b070e1c9f6e31293399521d7c5d1e97dc309
|
[
"MIT"
] | null | null | null |
0 - CURSOS/11_Introducao a programacao com Python/aula8_lambda.py
|
saldanhayg/Bootcamp_Banco_Carrefour_Data_Engineer
|
00c3b070e1c9f6e31293399521d7c5d1e97dc309
|
[
"MIT"
] | null | null | null |
contador_letras = lambda lista: [len(x) for x in lista]
lista_animais = ['cachorro', 'gato', 'elefante']
print(contador_letras(lista_animais))
soma = lambda a,b: a + b
subtracao = lambda a,b: a - b
print(soma(10,5))
print(subtracao(10,5))
calculadora = {
'soma': lambda a, b: a + b,
'subtracao': lambda a, b: a - b,
'multiplicacao': lambda a, b: a * b,
'divisao': lambda a, b: a / b
}
soma = calculadora['soma']
multiplicacao = calculadora['multiplicacao']
print('A soma é: {}'.format(soma(10,6)))
print('A multiplicacao é: {}'.format(multiplicacao(15,2)))
| 27.428571
| 58
| 0.651042
|
163587f86f3862f0a9d77fb1b5e79f89ede505c4
| 355
|
py
|
Python
|
fabric_cf/orchestrator/swagger_server/__init__.py
|
fabric-testbed/ActorBase
|
3c7dd040ee79fef0759e66996c93eeec57c790b2
|
[
"MIT"
] | null | null | null |
fabric_cf/orchestrator/swagger_server/__init__.py
|
fabric-testbed/ActorBase
|
3c7dd040ee79fef0759e66996c93eeec57c790b2
|
[
"MIT"
] | 67
|
2020-12-21T15:39:49.000Z
|
2022-02-27T17:55:00.000Z
|
fabric_cf/orchestrator/swagger_server/__init__.py
|
fabric-testbed/ControlFramework
|
95ab745e32f15c993bc7a017aa97a5a0f67f210f
|
[
"MIT"
] | null | null | null |
import logging
import prometheus_client
received_counter = prometheus_client.Counter('Requests_Received', 'HTTP Requests', ['method', 'endpoint'])
success_counter = prometheus_client.Counter('Requests_Success', 'HTTP Success', ['method', 'endpoint'])
failure_counter = prometheus_client.Counter('Requests_Failed', 'HTTP Failures', ['method', 'endpoint'])
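# Illustrative sketch (not part of the original module): the counters above are
# labelled, so callers bind `method` and `endpoint` labels before incrementing.
def _example_record_request(method: str, endpoint: str, ok: bool) -> None:
    received_counter.labels(method=method, endpoint=endpoint).inc()
    if ok:
        success_counter.labels(method=method, endpoint=endpoint).inc()
    else:
        failure_counter.labels(method=method, endpoint=endpoint).inc()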
| 59.166667
| 106
| 0.785915
|
b61df28e0aee6c0b12a0579b77f3a2614044bb73
| 2,010
|
py
|
Python
|
src/logger.py
|
jorgediazjr/fast_dp
|
972fe7f09fb28b07053de595faa6857692320cbe
|
[
"Apache-2.0"
] | null | null | null |
src/logger.py
|
jorgediazjr/fast_dp
|
972fe7f09fb28b07053de595faa6857692320cbe
|
[
"Apache-2.0"
] | null | null | null |
src/logger.py
|
jorgediazjr/fast_dp
|
972fe7f09fb28b07053de595faa6857692320cbe
|
[
"Apache-2.0"
] | null | null | null |
import os
class _writer:
'''A specialist class to write to the screen and fast_dp.log.'''
def __init__(self):
self._fout = None
self._afout = None
self._filename = 'fast_dp.log'
self._afilename = None
self._afilepath = None
self._afileprefix = None
return
def set_filename(self, filename):
self._filename = filename
def set_afilename(self, afilename):
self._afilename = afilename
def set_afilepath(self, afilepath):
self._afilepath = afilepath
def get_afilepath(self):
return self._afilepath
def set_afileprefix(self, afileprefix):
self._afileprefix = afileprefix
def get_afileprefix(self):
return self._afileprefix
def __del__(self):
if self._fout:
self._fout.close()
self._fout = None
if self._afout:
self._afout.close()
self._afout = None
self._afilename = None
self._afileprefix = None
return
def __call__(self, record):
self.write(record)
def write(self, record):
if not self._fout:
self._fout = open(self._filename, 'w')
self._fout.write('{}\n'.format(record))
print(record)
if self._afilename:
try:
if not self._afout:
self._afout = open(self._afilename, 'w')
self._afout.write('{}\n'.format(record))
except:
print(self._afilename + ' not available for writing')
self._afilename = None
return
write = _writer()
def set_filename(filename):
write.set_filename(filename)
def set_afilename(afilename):
write.set_afilename(afilename)
def set_afilepath(afilepath):
write.set_afilepath(afilepath)
def get_afilepath():
return write.get_afilepath()
def set_afileprefix(afileprefix):
write.set_afileprefix(afileprefix)
def get_afileprefix():
return write.get_afileprefix()
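# Illustrative usage sketch (assumption, not part of the original module): the
# module-level helpers above wrap a single _writer instance, so a caller
# configures the log file once and then calls write() for each record.
def _example_usage() -> None:
    set_filename('fast_dp.log')   # where records are mirrored on disk
    write('integration started')  # printed to screen and appended to the log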
| 24.216867
| 69
| 0.61194
|
c721f0bda8241811538b87c1314d2cacb2fb82f5
| 568
|
py
|
Python
|
setup.py
|
willu47/lcoe
|
9aa2d04ec6134ee21686bfad87fad58451b5af62
|
[
"MIT"
] | 3
|
2020-09-09T15:06:21.000Z
|
2021-09-16T12:14:17.000Z
|
setup.py
|
willu47/lcoe
|
9aa2d04ec6134ee21686bfad87fad58451b5af62
|
[
"MIT"
] | null | null | null |
setup.py
|
willu47/lcoe
|
9aa2d04ec6134ee21686bfad87fad58451b5af62
|
[
"MIT"
] | 1
|
2021-10-03T14:00:14.000Z
|
2021-10-03T14:00:14.000Z
|
# -*- coding: utf-8 -*-
"""
Setup file for lcoe.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| 23.666667
| 75
| 0.700704
|
e9f4ea40d9d99069542095bffc1b9039cbf36e9a
| 12,127
|
py
|
Python
|
SocialRobotCustom/python/social_bot/teacher.py
|
jesbu1/h-baselines
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
[
"MIT"
] | null | null | null |
SocialRobotCustom/python/social_bot/teacher.py
|
jesbu1/h-baselines
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
[
"MIT"
] | null | null | null |
SocialRobotCustom/python/social_bot/teacher.py
|
jesbu1/h-baselines
|
f6f775bb18de22527f2d01d73bd733ed2e435ba3
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Teacher framework."""
import numpy as np
import random
import gym
from absl import logging
class DiscreteSequence(gym.Space):
"""
gym.Space object for language sequence
"""
def __init__(self, vocab_size, max_length):
"""
Args:
vocab_size (int): number of different tokens
max_length (int): maximal length of the sequence
"""
super().__init__(shape=(max_length, ), dtype=np.int32)
self._vocab_size = vocab_size
self._max_length = max_length
class TeacherAction(object):
def __init__(self, reward=0.0, sentence="", done=False, is_idle=False,
success=False):
"""
Args:
done: end of an episode if true
success: if the episode is successful or not
"""
self.reward = reward
self.sentence = sentence
self.done = done
self.is_idle = is_idle
self.success = success
class TaskGroup(object):
"""A group of tasks.
Each task group consists of one or more tasks. Within one task group, one
task can run at one time. A random task is chosen after the current task is
finished.
"""
def __init__(self):
self._tasks = []
self._current_tid = None
self._current_task = None
self._current_reward_weight = 1.0
self._agent = None
self._world = None
self._is_idle = True
def add_task(self, task):
"""Add a task to the group.
Args:
task (Task): an instance of Task
Returns:
None
"""
self._tasks.append(task)
def teach(self, agent_sentence):
"""Generate TeacherAction.
Args:
agent_sentence (str): sentence from the agent
Returns:
TeacherAction
"""
task = self._get_current_task()
try:
# teacher_action is the value yielded in task
teacher_action = task.send(agent_sentence)
self._is_idle = teacher_action.is_idle
if teacher_action.done:
task.close()
self._current_task = None
self._is_idle = True
except StopIteration:
task.close()
self._current_task = None
self._is_idle = True
teacher_action = TeacherAction()
return teacher_action
def is_idle(self):
return self._is_idle
def reset(self, agent, world):
"""Reset the task group.
Current task will be closed and a random new one will be chosen.
Args:
agent (GazeboAgent): the learning agent in the world
world (pygazebo.World): the world containing the agent
Returns:
None
"""
self._agent = agent
self._world = world
if self._current_task is not None:
self._current_task.close()
self._current_task = None
# This function only returns the running task's generator.
# To get the task object use self._tasks[self._current_tid]
def _get_current_task(self):
if self._current_task is None:
tid = random.randint(0, len(self._tasks) - 1)
self._current_tid = tid
self._current_task = self._tasks[tid].run()
self._current_reward_weight = self._tasks[tid].reward_weight
# This send will cause self._current_task to execute until the first
# yield. We ignore the first yielded value.
self._current_task.send(None)
return self._current_task
def get_current_reward_weight(self):
"""Get reward weight for current task of the group
Args:
None
Returns:
float, the reward weight of current task
"""
return self._current_reward_weight
def get_tasks(self):
"""Get current tasks in the group.
Args:
None
Returns:
list, a list of current tasks in the group
"""
return self._tasks
class Teacher(object):
"""Teacher is for teaching the agent.
It is responsible for:
1. Giving reward
2. Arranging the environment
3. Generating sentences
4. Interpreting sentences from the agent
A teacher has several task groups. At each step
* If task_groups_exclusive is True
Only one task group will run at the same time. After the active one becomes
idle, another one will be chosen randomly.
* If task_groups_exclusive is False
All the task groups run concurrently. The rewards are summed together. The
first nonempty sentence will be used. If any of the actions has done=True,
the resulting done will be True.
Each task group consists of one or more tasks. Within one task group, one
task can run at one time. A random task is chosen after the current task is
finished.
"""
def __init__(self, task_groups_exclusive=True):
"""Create a Teacher instance.
Args:
task_groups_exclusive (bool): If True, only one task group is active
at one time. Otherwise, multiple task groups run concurrently.
"""
self._task_groups_exclusive = task_groups_exclusive
self._vocab_list = None
self._task_groups = []
self._weights = []
self.vocab_size = 0
def add_task_group(self, task_group, weight=1):
"""Add a task group to teacher.
Args:
task_group (TaskGroup): TaskGroup to be added
weight (float): In task_groups_exclusive=True mode, the probability
of a TaskGroup being chosen is proportional to this value.
Returns:
None
"""
self._task_groups.append(task_group)
self._weights.append(weight)
def get_task_groups(self):
"""Get current task groups of teacher.
Args:
None
Returns:
list, a list of current task group
"""
return self._task_groups
def get_task_specific_observation(self, agent):
"""Get the task specific observation of all the tasks added to the teacher
Args:
agent (GazeboAgent): the agent
Returns:
numpy.array, the specific observation for all the tasks added
"""
task_specific_ob = np.array([])
for task_group in self.get_task_groups():
for task in task_group.get_tasks():
task_specific_ob = np.append(
task_specific_ob, task.task_specific_observation(agent))
return task_specific_ob
def _build_vocab_from_tasks(self):
"""Build vocabulary table."""
# Initialize vocab with '0' by index 0, which is used for padding
vocab_list = [
0,
]
for g in self._task_groups:
for t in g._tasks:
vocab_list = vocab_list + t.task_vocab
# Remove repeated words and convert to dict
self._vocab_list = sorted(set(vocab_list), key=vocab_list.index)
self.vocab_size = len(self._vocab_list)
self._vocab_dict = dict(
zip(self._vocab_list, list(range(0, self.vocab_size))))
def sentence_to_sequence(self, sentence, max_sequence_length):
"""Convert sentence string to numpy integer sequence.
Args:
sentence (str): string for the sentence. Note that currently the
tokenization is case-sensitive. For example, "This" and "this"
are treated as different words.
max_sequence_length (int): The length of the generated numpy array.
If number of words in sentence is smaller than this value, 0 is
padded at the end.
Returns:
numpy.array
"""
if self._vocab_list is None:
self._build_vocab_from_tasks()
word_list = sentence.split()
for word in word_list:
assert word in self._vocab_dict.keys(), \
"Word is out of vocab: " + word + \
", during encoding sentence to sequence"
sequence = list(map(lambda x: self._vocab_dict[x], word_list))
padding_num = max_sequence_length - len(sequence)
assert padding_num >= 0, "Sequence " + str(sequence) + \
" exceed max_sequence_length: " + str(max_sequence_length) + \
", consider to increase the max_sequence_length"
return np.pad(sequence, (0, padding_num), 'constant')
def sequence_to_sentence(self, sequence):
"""Convert integer sequence to str based on vocabulary table.
Values after the first 0 in the sequence are ignored. In the generated
string, words are separated by space ' '.
Args:
sequence (int[]): integer sequence
Returns:
str
"""
if self._vocab_list is None:
self._build_vocab_from_tasks()
for seq_index in range(len(sequence)):
assert sequence[seq_index] < self.vocab_size, \
"Unknown word id: " + str(sequence[seq_index]) + \
", during decoding sequence to sentence"
if sequence[seq_index] == 0:
break
word_list = list(
map(lambda x: self._vocab_list[x], sequence[:seq_index]))
return " ".join(word_list)
def reset(self, agent, world):
"""Reset teacher.
All the task group will be reset, that is, current task in each task
group is closed and a random new one will be chosen.
Args:
agent (GazeboAgent): the learning agent in the world
world (pygazebo.World): the world containing the agent
Returns:
None
"""
for g in self._task_groups:
g.reset(agent, world)
self._switch_task_group()
def _switch_task_group(self):
self._current_task_group = np.random.choice(
self._task_groups, p=np.array(self._weights) / sum(self._weights))
def teach(self, agent_sentence):
"""Generate TeacherAction.
Args:
agent_sentence (str): sentence from the agent
Returns:
TeacherAction
"""
return_action = None
if self._task_groups_exclusive:
if self._current_task_group.is_idle():
self._switch_task_group()
return_action = self._current_task_group.teach(agent_sentence)
else:
final_sentence = ''
final_reward = 0.
done = False
active_group_id = -1
success = False
# run all groups in parallel
for i, g in enumerate(self._task_groups):
teacher_action = g.teach(agent_sentence)
if teacher_action.done:
done = True
if teacher_action.success:
success = True
weight = g.get_current_reward_weight()
final_reward += weight * teacher_action.reward
if not final_sentence:
final_sentence = teacher_action.sentence
active_group_id = i
if active_group_id != -1:
g = self._task_groups.pop(active_group_id)
self._task_groups.insert(0, g)
return_action = TeacherAction(final_reward, final_sentence, done,
success=success)
return return_action
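# --- Illustrative sketch (not part of the original file) ---
# A minimal, standalone demonstration of the encode/pad/decode round trip that
# sentence_to_sequence() and sequence_to_sentence() above implement, using a
# toy vocabulary instead of one built from the task groups.
def _vocab_round_trip_example():
    vocab_list = [0, "hello", "world"]  # index 0 is reserved for padding
    vocab_dict = {w: i for i, w in enumerate(vocab_list)}
    sequence = np.pad([vocab_dict[w] for w in "hello world".split()],
                      (0, 3), 'constant')  # -> array([1, 2, 0, 0, 0])
    words = []
    for idx in sequence:
        if idx == 0:  # values after the first 0 are ignored
            break
        words.append(vocab_list[idx])
    return " ".join(words)  # -> "hello world"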
| 34.160563
| 82
| 0.601385
|
38e6944eea9df33e992e819607051c52a913265c
| 5,101
|
py
|
Python
|
dags/mwaalib/workflow_lib.py
|
aws-samples/amazon-mwaa-complex-workflow-using-step-functions
|
73835d3398ced067b40129c8cc9d9197f38e99a8
|
[
"MIT-0"
] | 21
|
2021-01-12T14:43:06.000Z
|
2021-10-01T16:47:19.000Z
|
dags/mwaalib/workflow_lib.py
|
aws-samples/amazon-mwaa-complex-workflow-using-step-functions
|
73835d3398ced067b40129c8cc9d9197f38e99a8
|
[
"MIT-0"
] | null | null | null |
dags/mwaalib/workflow_lib.py
|
aws-samples/amazon-mwaa-complex-workflow-using-step-functions
|
73835d3398ced067b40129c8cc9d9197f38e99a8
|
[
"MIT-0"
] | 11
|
2021-02-10T16:37:18.000Z
|
2022-01-18T21:35:12.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3, json, pprint, requests, textwrap, time, logging
import os
from datetime import datetime
from typing import Optional, Union
from airflow.exceptions import AirflowException
from botocore.exceptions import ClientError
sfn_non_terminal_states = {"RUNNING"}
sfn_failed_states = {"FAILED", "TIMED_OUT", "ABORTED"}
def detect_running_region():
"""Dynamically determine the region from a running MWAA Setup ."""
easy_checks = [
# check if set through ENV vars
os.environ.get('AWS_REGION'),
os.environ.get('AWS_DEFAULT_REGION'),
# else check if set in config or in boto already
boto3.DEFAULT_SESSION.region_name if boto3.DEFAULT_SESSION else None,
boto3.Session().region_name,
]
for region in easy_checks:
if region:
return region
# else Assuming Airflow is running in an EC2 environment
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
r = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")
response_json = r.json()
return response_json.get('region')
def get_region():
return 'us-west-2'
def client(region_name):
global emr
emr = boto3.client('emr', region_name=region_name)
global sfn
sfn = boto3.client('stepfunctions', region_name=region_name)
global ssm
ssm = boto3.client('ssm', region_name=region_name)
def create_cluster(region_name, cluster_name=None, release_label='emr-5.31.0', master_instance_type='m5.xlarge', num_core_nodes=2, core_node_instance_type='m5.xlarge'):
    # Compute the default cluster name at call time; a default argument
    # expression would be evaluated only once, when the module is imported.
    if cluster_name is None:
        cluster_name = 'mwaa-emr-' + str(datetime.now())
    cluster_response = emr.run_job_flow(
Name=cluster_name,
ReleaseLabel=release_label,
Instances={
'InstanceGroups': [
{
'Name': "Master nodes",
'Market': 'ON_DEMAND',
'InstanceRole': 'MASTER',
'InstanceType': master_instance_type,
'InstanceCount': 1
},
{
'Name': "Slave nodes",
'Market': 'ON_DEMAND',
'InstanceRole': 'CORE',
'InstanceType': core_node_instance_type,
'InstanceCount': num_core_nodes
}
],
'KeepJobFlowAliveWhenNoSteps': True
},
VisibleToAllUsers=True,
JobFlowRole='EMR_EC2_DefaultRole',
ServiceRole='EMR_DefaultRole',
Applications=[
{ 'Name': 'hadoop' },
{ 'Name': 'spark' }
]
)
return cluster_response['JobFlowId']
def wait_for_cluster_creation(cluster_id):
emr.get_waiter('cluster_running').wait(ClusterId=cluster_id)
def terminate_cluster(cluster_id):
emr.terminate_job_flows(JobFlowIds=[cluster_id])
def get_demo_bucket_name():
demo_bucket=ssm.get_parameter(Name="/mwaa/S3/DemoBucket")['Parameter']['Value']
return demo_bucket
def get_stepfunction_arn():
sfn_arn=ssm.get_parameter(Name="/mwaa/sfn/movielens")['Parameter']['Value']
return sfn_arn
def start_execution(
state_machine_arn: str,
name: Optional[str] = None,
state_machine_input: Union[dict, str, None] = None,
) -> str:
execution_args = {'stateMachineArn': state_machine_arn}
if name is not None:
execution_args['name'] = name
if state_machine_input is not None:
if isinstance(state_machine_input, str):
execution_args['input'] = state_machine_input
elif isinstance(state_machine_input, dict):
execution_args['input'] = json.dumps(state_machine_input)
logging.info('Executing Step Function State Machine: %s', state_machine_arn)
response = sfn.start_execution(**execution_args)
logging.info('Execution arn: %s', response)
return response.get('executionArn')
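# --- Illustrative sketch (not part of the original file) ---
# Example of how start_execution() above might be driven end to end; the
# execution name and the input payload here are hypothetical placeholders.
def _start_execution_example():
    client(get_region())  # initialise the module-level boto3 clients
    return start_execution(
        state_machine_arn=get_stepfunction_arn(),  # resolved from SSM above
        name="example-run",  # hypothetical execution name
        state_machine_input={"trigger": "manual"},  # hypothetical payload
    )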
def monitor_stepfunction_run(execution_arn):
sfn.describe_execution(executionArn=execution_arn)
sec = 0
running = True
check_interval = 30
while running:
        time.sleep(check_interval)
sec += check_interval
try:
response = sfn.describe_execution(executionArn=execution_arn)
status = response["status"]
logging.info(
"Step function still running for %s seconds... " "current status is %s",
sec,
status,
)
except KeyError:
raise AirflowException("Could not get status of the Step function workflow")
except ClientError:
raise AirflowException("AWS request failed, check logs for more info")
if status in sfn_non_terminal_states:
running = True
elif status in sfn_failed_states:
raise AirflowException(
"Step function failed.Check Step functions log for details"
)
else:
running = False
logging.info("Step function workflow completed")
response = sfn.describe_execution(executionArn=execution_arn)
return response.get('executionArn')
| 35.17931
| 196
| 0.639678
|
c5f35e8acfd6fb4b84766cbd81753d9236985d08
| 5,226
|
py
|
Python
|
SEIR_model.py
|
MarcusRainbow/Covid19
|
ef6c7ba130493c889ca187214e3b5b123b2de928
|
[
"MIT"
] | null | null | null |
SEIR_model.py
|
MarcusRainbow/Covid19
|
ef6c7ba130493c889ca187214e3b5b123b2de928
|
[
"MIT"
] | null | null | null |
SEIR_model.py
|
MarcusRainbow/Covid19
|
ef6c7ba130493c889ca187214e3b5b123b2de928
|
[
"MIT"
] | null | null | null |
import numpy as np
from topography import nearest_neighbour_topography, stratified_topography
class SEIR_Model:
"""
Model represents the topographical SEIR model as vectors of
floating point numbers. The vectors are Susceptible, Exposed,
Infected and Resistant (recovered, quarantined or dead).
The process is defined by the differential equations, where
S, E, I, R are as defined above and N = S + E + I + R (constant):
dS/dt = -beta * S * I / N
dE/dt = beta * S * I / N - sigma * E
dI/dt = sigma * E - gamma * I
dR/dt = gamma * I
The model is made topographical by making each of S, E, I, R
vectors and the topography matrix T is used to multiply the
beta term thus:
dS/dt = -beta * (S / N) . T * I
dE/dt = beta * (S / N) . T * I - sigma * E
Where . T represents matrix multiplication
"""
def __init__(
self,
populations: np.ndarray,
beta: float,
sigma: float,
gamma: float):
"""
Initial state is with all cells susceptible.
"""
self.beta = beta
self.sigma = sigma
self.gamma = gamma
shape = populations.shape
self.s = populations.copy()
self.e = np.zeros(shape)
self.i = np.zeros(shape)
self.r = np.zeros(shape)
self.n = populations
self.scale = 1.0 / populations
def infected(self):
return self.i
def __str__(self) -> str:
NL = "\n"
return f"s{self.s}{NL}e{self.e}{NL}i{self.i}{NL}r{self.r}"
def infect(self, cell: (int, int), infection: float = 1.0):
"""
Infect just one cell by converting a susceptible individual to infected
"""
self.s[cell] -= infection
self.i[cell] += infection
def timestep(
self,
dt: float,
topography: np.ndarray):
"""
        Time-evolve each cell of the model by one timestep of size dt. The rate
        of exposure (beta), the rate of infection (sigma) and the rate of
        recovery/quarantine/death (gamma) are taken from the constructor.
"""
size = self.n.size
assert topography.shape == (size, size)
# This is the standard SEIR model, except that we also allow neighbouring cells
# to expose susceptible individuals according to the given topology matrix.
infectious = self.beta * dt * self.scale * self.i
infectious.shape = (1, size)
exposure = infectious.dot(topography)
exposure.shape = self.s.shape
newly_exposed = exposure * self.s
newly_infected = self.sigma * dt * self.e
newly_resistant = self.gamma * dt * self.i
self.s -= newly_exposed
self.e += newly_exposed - newly_infected
self.i += newly_infected - newly_resistant
self.r += newly_resistant
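# --- Illustrative sketch (not part of the original file) ---
# The plain (non-topographical) SEIR equations from the class docstring,
# advanced by one explicit Euler step; for a single cell with an identity
# topography this matches what SEIR_Model.timestep() computes.
def _scalar_seir_step(s, e, i, r, beta, sigma, gamma, dt):
    n = s + e + i + r
    ds = -beta * s * i / n
    de = beta * s * i / n - sigma * e
    di = sigma * e - gamma * i
    dr = gamma * i
    return s + ds * dt, e + de * dt, i + di * dt, r + dr * dt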
def test_one_step_identity_topography():
populations = np.full((3, 4), 100.0)
model = SEIR_Model(populations, 1.0, 1.0, 1.0)
model.infect((0, 0))
print("model before:")
print(f"{model}")
topology = np.identity(populations.size)
model.timestep(1.0 / 365.0, topology)
print("model after:")
print(f"{model}")
def test_365_steps_identity_topography():
beta = 3.0 * 26.0 # infect three people in the space of two weeks
sigma = 52.0 # about one week to change from exposed to infected
gamma = 26.0 # about two weeks infected
populations = np.full((3, 4), 100.0)
model = SEIR_Model(populations, beta, sigma, gamma)
model.infect((0, 0))
print("model before:")
print(f"{model}")
topology = np.identity(populations.size)
for _ in range(365):
model.timestep(1.0 / 365.0, topology)
print("model after:")
print(f"{model}")
def test_365_steps_nearest_neighbour_topography():
beta = 3.0 * 26.0 # infect three people in the space of two weeks
sigma = 52.0 # about one week to change from exposed to infected
gamma = 26.0 # about two weeks infected
populations = np.full((3, 4), 100.0)
model = SEIR_Model(populations, beta, sigma, gamma)
model.infect((0, 0))
print("model before:")
print(f"{model}")
topology = nearest_neighbour_topography(populations.shape, 1.0, 0.1)
for _ in range(365):
model.timestep(1.0 / 365.0, topology)
print("model after:")
print(f"{model}")
def test_365_steps_stratified_topography():
beta = 3.0 * 26.0 # infect three people in the space of two weeks
sigma = 52.0 # about one week to change from exposed to infected
gamma = 26.0 # about two weeks infected
populations = np.full((3, 4), 100.0)
model = SEIR_Model(populations, beta, sigma, gamma)
model.infect((0, 0))
print("model before:")
print(f"{model}")
topology = stratified_topography(populations.shape, 1.0, 0.1)
for _ in range(365):
model.timestep(1.0 / 365.0, topology)
print("model after:")
print(f"{model}")
if __name__ == "__main__":
test_one_step_identity_topography()
test_365_steps_identity_topography()
test_365_steps_nearest_neighbour_topography()
test_365_steps_stratified_topography()
| 29.525424
| 87
| 0.618829
|
a417b5a6cb69730c1d64389c46c267cc19cf7b8b
| 68
|
py
|
Python
|
src/wann_genetic/postopt/vis/__init__.py
|
plonerma/wann-genetic
|
c4a8a1db81665b2549994d615e1d347dbe00226a
|
[
"MIT"
] | null | null | null |
src/wann_genetic/postopt/vis/__init__.py
|
plonerma/wann-genetic
|
c4a8a1db81665b2549994d615e1d347dbe00226a
|
[
"MIT"
] | null | null | null |
src/wann_genetic/postopt/vis/__init__.py
|
plonerma/wann-genetic
|
c4a8a1db81665b2549994d615e1d347dbe00226a
|
[
"MIT"
] | null | null | null |
from .vis_network import draw_graph, draw_weight_matrix, node_names
| 34
| 67
| 0.867647
|
d7109572a9a49fba3d81c6bb0fe11f18372736ef
| 8,161
|
py
|
Python
|
mmdetection/mmdet/core/evaluation/kitti_utils.py
|
lidongyv/Reppoint-Tracking
|
81b81e921f6b905e68aba117ffc4fca8ffcfcfd6
|
[
"MIT"
] | null | null | null |
mmdetection/mmdet/core/evaluation/kitti_utils.py
|
lidongyv/Reppoint-Tracking
|
81b81e921f6b905e68aba117ffc4fca8ffcfcfd6
|
[
"MIT"
] | null | null | null |
mmdetection/mmdet/core/evaluation/kitti_utils.py
|
lidongyv/Reppoint-Tracking
|
81b81e921f6b905e68aba117ffc4fca8ffcfcfd6
|
[
"MIT"
] | null | null | null |
import itertools
import mmcv
import numpy as np
from pycocotools.kitti import COCO
from pycocotools.kittieval import COCOeval
from terminaltables import AsciiTable
from .recall import eval_recalls
def coco_eval(result_files,
result_types,
coco,
max_dets=(100, 300, 1000),
classwise=False):
for res_type in result_types:
assert res_type in [
'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
]
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
if result_types == ['proposal_fast']:
ar = fast_eval_recall(result_files, coco, np.array(max_dets))
for i, num in enumerate(max_dets):
print('AR@{}\t= {:.4f}'.format(num, ar[i]))
return
for res_type in result_types:
if isinstance(result_files, str):
result_file = result_files
elif isinstance(result_files, dict):
result_file = result_files[res_type]
else:
            raise TypeError('result_files must be a str or dict')
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise:
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/blob/03064eb5bafe4a3e5750cc7a16672daf5afe8435/detectron2/evaluation/coco_evaluation.py#L259-L283 # noqa
precisions = cocoEval.eval['precision']
catIds = coco.getCatIds()
# precision has dims (iou, recall, cls, area range, max dets)
assert len(catIds) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(catIds):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float('nan')
results_per_category.append(
('{}'.format(nm['name']),
'{:0.3f}'.format(float(ap * 100))))
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (N_COLS // 2)
results_2d = itertools.zip_longest(
*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print(table.table)
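# --- Illustrative sketch (not part of the original file) ---
# Example of how coco_eval() above might be invoked; the result and annotation
# file paths are hypothetical placeholders (the result file would normally be
# produced by results2json() defined below).
def _coco_eval_example():
    coco_eval(
        result_files={'bbox': 'out.bbox.json'},  # hypothetical path
        result_types=['bbox'],
        coco='annotations/kitti_val.json',  # hypothetical annotation file
        classwise=True)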
def fast_eval_recall(results,
coco,
max_dets,
iou_thrs=np.arange(0.5, 0.96, 0.05)):
if mmcv.is_str(results):
assert results.endswith('.pkl')
results = mmcv.load(results)
elif not isinstance(results, list):
raise TypeError(
'results must be a list of numpy arrays or a filename, not {}'.
format(type(results)))
gt_bboxes = []
img_ids = coco.getImgIds()
for i in range(len(img_ids)):
ann_ids = coco.getAnnIds(imgIds=img_ids[i])
ann_info = coco.loadAnns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(bbox):
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0] + 1,
_bbox[3] - _bbox[1] + 1,
]
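# --- Illustrative sketch (not part of the original file) ---
# xyxy2xywh() converts corner-format boxes to [x, y, w, h] using the inclusive
# "+ 1" pixel convention, e.g. [10, 20, 30, 40] -> [10, 20, 21, 21].
def _xyxy2xywh_example():
    return xyxy2xywh(np.array([10., 20., 30., 40.]))  # -> [10.0, 20.0, 21.0, 21.0]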
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def det2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
json_results.append(data)
return json_results
def segm2json(dataset, results):
bbox_json_results = []
segm_json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = dataset.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different score for det and segm
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = dataset.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(dataset, results, out_file):
result_files = dict()
if isinstance(results[0], list):
json_results = det2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = segm2json(dataset, results)
result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = proposal2json(dataset, results)
result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
| 36.761261
| 169
| 0.563656
|
a2e3c4b22592073f2901c0ef775b43267f20caae
| 90
|
py
|
Python
|
lead_bot/apps.py
|
LEXResearch/lex
|
346cfafa5f313ca0228cab47e8dfe61ecc0ed995
|
[
"MIT"
] | null | null | null |
lead_bot/apps.py
|
LEXResearch/lex
|
346cfafa5f313ca0228cab47e8dfe61ecc0ed995
|
[
"MIT"
] | null | null | null |
lead_bot/apps.py
|
LEXResearch/lex
|
346cfafa5f313ca0228cab47e8dfe61ecc0ed995
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class LeadBotConfig(AppConfig):
name = 'lead_bot'
| 15
| 33
| 0.755556
|
33de4fc0b5ec45f8da646da5403509bac35def49
| 807
|
py
|
Python
|
demo_app/flippy_demo_app/views.py
|
Kos/flippy
|
2d77dc1a981ec25025e7e7067a71c142df194d2a
|
[
"0BSD"
] | 6
|
2019-04-18T08:04:04.000Z
|
2021-06-23T09:57:18.000Z
|
demo_app/flippy_demo_app/views.py
|
Kos/flippy
|
2d77dc1a981ec25025e7e7067a71c142df194d2a
|
[
"0BSD"
] | 7
|
2019-04-24T10:35:27.000Z
|
2019-10-03T09:13:53.000Z
|
demo_app/flippy_demo_app/views.py
|
Kos/flippy
|
2d77dc1a981ec25025e7e7067a71c142df194d2a
|
[
"0BSD"
] | 1
|
2021-04-08T07:24:36.000Z
|
2021-04-08T07:24:36.000Z
|
from django.contrib.auth.models import User
from django.template.response import TemplateResponse
from .flags import enable_weather, enable_cats, enable_sudoku
def index(request):
if request.user.is_authenticated:
show_sudoku = should_show_sudoku(request.user)
else:
show_sudoku = False
# Any flag can be queried with a request.
return TemplateResponse(
request,
"index.html",
{
"show_weather": enable_weather.get_state_for_request(request),
"show_cat": enable_cats.get_state_for_request(request),
"show_sudoku": show_sudoku,
},
)
def should_show_sudoku(user: User):
# TypedFlags can be queried anywhere in the code, even without a request:
return enable_sudoku.get_state_for_object(user)
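# --- Illustrative sketch (not part of the original file) ---
# The two query styles used above, side by side: request-scoped flags and
# flags evaluated against an arbitrary object (here a User) without a request.
def _flag_query_examples(request, user: User):
    by_request = enable_weather.get_state_for_request(request)
    by_object = enable_sudoku.get_state_for_object(user)
    return by_request, by_object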
| 28.821429
| 77
| 0.698885
|
dfee77ee5531d84afa5f17aa69d9b48584d5c94d
| 2,993
|
py
|
Python
|
audio/record.py
|
bbcdli/xuexi
|
f791d6bdc2fccc1bab322b474c9cfc7572690f44
|
[
"Apache-2.0"
] | 1
|
2019-01-16T05:55:23.000Z
|
2019-01-16T05:55:23.000Z
|
audio/record.py
|
bbcdli/xuexi
|
f791d6bdc2fccc1bab322b474c9cfc7572690f44
|
[
"Apache-2.0"
] | null | null | null |
audio/record.py
|
bbcdli/xuexi
|
f791d6bdc2fccc1bab322b474c9cfc7572690f44
|
[
"Apache-2.0"
] | null | null | null |
from sys import byteorder
from array import array
from struct import pack
import pyaudio
import wave
THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 44100
def is_silent(snd_data):
"Returns 'True' if below the 'silent' threshold"
return max(snd_data) < THRESHOLD
def normalize(snd_data):
"Average the volume out"
MAXIMUM = 16384
times = float(MAXIMUM)/max(abs(i) for i in snd_data)
r = array('h')
for i in snd_data:
r.append(int(i*times))
return r
def trim(snd_data):
"Trim the blank spots at the start and end"
def _trim(snd_data):
snd_started = False
r = array('h')
for i in snd_data:
if not snd_started and abs(i)>THRESHOLD:
snd_started = True
r.append(i)
elif snd_started:
r.append(i)
return r
# Trim to the left
snd_data = _trim(snd_data)
# Trim to the right
snd_data.reverse()
snd_data = _trim(snd_data)
snd_data.reverse()
return snd_data
def add_silence(snd_data, seconds):
"Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
r = array('h', [0 for i in range(int(seconds*RATE))])
r.extend(snd_data)
r.extend([0 for i in range(int(seconds*RATE))])
return r
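# --- Illustrative sketch (not part of the original file) ---
# The post-processing pipeline used by record() below, applied to synthetic
# samples: normalize the volume, trim leading/trailing silence, then pad
# half a second of silence at each end.
def _processing_pipeline_example():
    samples = array('h', [0] * 1000 + [4000, -4000] * 200 + [0] * 1000)
    samples = normalize(samples)
    samples = trim(samples)              # drops the silent head and tail
    samples = add_silence(samples, 0.5)  # 0.5 s of zeros on each side
    return len(samples)                  # 400 + 2 * int(0.5 * RATE)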
def record():
"""
Record a word or words from the microphone and
return the data as an array of signed shorts.
Normalizes the audio, trims silence from the
start and end, and pads with 0.5 seconds of
blank sound to make sure VLC et al can play
it without getting chopped off.
"""
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
input=True, output=True,
frames_per_buffer=CHUNK_SIZE)
num_silent = 0
snd_started = False
r = array('h')
while 1:
# little endian, signed short
snd_data = array('h', stream.read(CHUNK_SIZE))
if byteorder == 'big':
snd_data.byteswap()
r.extend(snd_data)
silent = is_silent(snd_data)
if silent and snd_started:
num_silent += 1
elif not silent and not snd_started:
snd_started = True
if snd_started and num_silent > 30:
break
sample_width = p.get_sample_size(FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
r = normalize(r)
r = trim(r)
r = add_silence(r, 0.5)
return sample_width, r
def record_to_file(path):
"Records from the microphone and outputs the resulting data to 'path'"
sample_width, data = record()
data = pack('<' + ('h'*len(data)), *data)
wf = wave.open(path, 'wb')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
if __name__ == '__main__':
print("please speak a word into the microphone")
record_to_file('demo_j_qin.wav')
print("done - result written to demo.wav")
| 24.941667
| 80
| 0.625459
|
0e472c7420a6718ce02f46ab9e5c34a4034a65ef
| 2,155
|
py
|
Python
|
setup.py
|
lisy14liz/importance-sampling
|
15040a3c4435735e9b0155d3d0228909bd4e47b7
|
[
"MIT"
] | 289
|
2017-08-03T17:30:12.000Z
|
2022-03-30T12:04:21.000Z
|
setup.py
|
lisy14liz/importance-sampling
|
15040a3c4435735e9b0155d3d0228909bd4e47b7
|
[
"MIT"
] | 34
|
2017-08-03T21:47:49.000Z
|
2021-06-16T17:59:45.000Z
|
setup.py
|
lisy14liz/importance-sampling
|
15040a3c4435735e9b0155d3d0228909bd4e47b7
|
[
"MIT"
] | 58
|
2017-08-06T01:10:24.000Z
|
2022-03-07T00:30:24.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>
#
"""Setup importance-sampling"""
from itertools import dropwhile
from os import path
from setuptools import find_packages, setup
def collect_docstring(lines):
"""Return document docstring if it exists"""
lines = dropwhile(lambda x: not x.startswith('"""'), lines)
doc = ""
for line in lines:
doc += line
if doc.endswith('"""\n'):
break
return doc[3:-4].replace("\r", "").replace("\n", " ")
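# --- Illustrative sketch (not part of the original file) ---
# collect_docstring() skips lines until the opening triple quote, accumulates
# until the closing one, and flattens the result onto a single line:
def _collect_docstring_example():
    lines = ['"""An example package docstring.\n', '"""\n']
    return collect_docstring(lines)  # -> 'An example package docstring. '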
def collect_metadata():
meta = {}
with open(path.join("importance_sampling","__init__.py")) as f:
lines = iter(f)
meta["description"] = collect_docstring(lines)
for line in lines:
if line.startswith("__"):
key, value = map(lambda x: x.strip(), line.split("="))
meta[key[2:-2]] = value[1:-1]
return meta
def setup_package():
with open("README.rst") as f:
long_description = f.read()
meta = collect_metadata()
setup(
name="keras-importance-sampling",
version=meta["version"],
description=meta["description"],
long_description=long_description,
maintainer=meta["maintainer"],
maintainer_email=meta["email"],
url=meta["url"],
license=meta["license"],
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
packages=find_packages(exclude=["docs", "tests", "scripts", "examples"]),
install_requires=["keras>=2", "blinker", "numpy"]
)
if __name__ == "__main__":
setup_package()
| 30.785714
| 81
| 0.585615
|
666b76b53b77c6b36505a574547cb5c195b761ba
| 48,522
|
py
|
Python
|
chives/wallet/wallet_state_manager.py
|
ChivesWorld/chives-blockchain
|
56734ef0719f7bf844213823bb95b0fcc642d222
|
[
"Apache-2.0"
] | 1
|
2021-08-01T17:14:54.000Z
|
2021-08-01T17:14:54.000Z
|
chives/wallet/wallet_state_manager.py
|
ChivesWorld/chives-blockchain
|
56734ef0719f7bf844213823bb95b0fcc642d222
|
[
"Apache-2.0"
] | null | null | null |
chives/wallet/wallet_state_manager.py
|
ChivesWorld/chives-blockchain
|
56734ef0719f7bf844213823bb95b0fcc642d222
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import base64
import json
import logging
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import aiosqlite
from blspy import AugSchemeMPL, G1Element, PrivateKey
from chiabip158 import PyBIP158
from cryptography.fernet import Fernet
from chives import __version__
from chives.consensus.block_record import BlockRecord
from chives.consensus.coinbase import pool_parent_id, farmer_parent_id
from chives.consensus.constants import ConsensusConstants
from chives.consensus.find_fork_point import find_fork_point_in_chain
from chives.full_node.weight_proof import WeightProofHandler
from chives.protocols.wallet_protocol import PuzzleSolutionResponse, RespondPuzzleSolution
from chives.types.blockchain_format.coin import Coin
from chives.types.blockchain_format.program import Program
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.full_block import FullBlock
from chives.types.header_block import HeaderBlock
from chives.types.mempool_inclusion_status import MempoolInclusionStatus
from chives.util.byte_types import hexstr_to_bytes
from chives.util.db_wrapper import DBWrapper
from chives.util.errors import Err
from chives.util.hash import std_hash
from chives.util.ints import uint32, uint64, uint128
from chives.wallet.block_record import HeaderBlockRecord
from chives.wallet.cc_wallet.cc_wallet import CCWallet
from chives.wallet.derivation_record import DerivationRecord
from chives.wallet.derive_keys import master_sk_to_backup_sk, master_sk_to_wallet_sk
from chives.wallet.key_val_store import KeyValStore
from chives.wallet.rl_wallet.rl_wallet import RLWallet
from chives.wallet.settings.user_settings import UserSettings
from chives.wallet.trade_manager import TradeManager
from chives.wallet.transaction_record import TransactionRecord
from chives.wallet.util.backup_utils import open_backup_file
from chives.wallet.util.transaction_type import TransactionType
from chives.wallet.util.wallet_types import WalletType
from chives.wallet.wallet import Wallet
from chives.wallet.wallet_action import WalletAction
from chives.wallet.wallet_action_store import WalletActionStore
from chives.wallet.wallet_block_store import WalletBlockStore
from chives.wallet.wallet_blockchain import WalletBlockchain
from chives.wallet.wallet_coin_record import WalletCoinRecord
from chives.wallet.wallet_coin_store import WalletCoinStore
from chives.wallet.wallet_info import WalletInfo, WalletInfoBackup
from chives.wallet.wallet_puzzle_store import WalletPuzzleStore
from chives.wallet.wallet_sync_store import WalletSyncStore
from chives.wallet.wallet_transaction_store import WalletTransactionStore
from chives.wallet.wallet_user_store import WalletUserStore
from chives.server.server import ChivesServer
from chives.wallet.did_wallet.did_wallet import DIDWallet
class WalletStateManager:
constants: ConsensusConstants
config: Dict
tx_store: WalletTransactionStore
puzzle_store: WalletPuzzleStore
user_store: WalletUserStore
action_store: WalletActionStore
basic_store: KeyValStore
start_index: int
# Makes sure only one asyncio thread is changing the blockchain state at one time
lock: asyncio.Lock
tx_lock: asyncio.Lock
log: logging.Logger
# TODO Don't allow user to send tx until wallet is synced
sync_mode: bool
genesis: FullBlock
state_changed_callback: Optional[Callable]
pending_tx_callback: Optional[Callable]
puzzle_hash_created_callbacks: Dict = defaultdict(lambda *x: None)
db_path: Path
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
main_wallet: Wallet
wallets: Dict[uint32, Any]
private_key: PrivateKey
trade_manager: TradeManager
new_wallet: bool
user_settings: UserSettings
blockchain: Any
block_store: WalletBlockStore
coin_store: WalletCoinStore
sync_store: WalletSyncStore
weight_proof_handler: Any
server: ChivesServer
@staticmethod
async def create(
private_key: PrivateKey,
config: Dict,
db_path: Path,
constants: ConsensusConstants,
server: ChivesServer,
name: str = None,
):
self = WalletStateManager()
self.new_wallet = False
self.config = config
self.constants = constants
self.server = server
self.log = logging.getLogger(name if name else __name__)
self.lock = asyncio.Lock()
self.log.debug(f"Starting in db path: {db_path}")
self.db_connection = await aiosqlite.connect(db_path)
self.db_wrapper = DBWrapper(self.db_connection)
self.coin_store = await WalletCoinStore.create(self.db_wrapper)
self.tx_store = await WalletTransactionStore.create(self.db_wrapper)
self.puzzle_store = await WalletPuzzleStore.create(self.db_wrapper)
self.user_store = await WalletUserStore.create(self.db_wrapper)
self.action_store = await WalletActionStore.create(self.db_wrapper)
self.basic_store = await KeyValStore.create(self.db_wrapper)
self.trade_manager = await TradeManager.create(self, self.db_wrapper)
self.user_settings = await UserSettings.create(self.basic_store)
self.block_store = await WalletBlockStore.create(self.db_wrapper)
self.blockchain = await WalletBlockchain.create(
self.block_store,
self.coin_store,
self.tx_store,
self.constants,
self.coins_of_interest_received,
self.reorg_rollback,
self.lock,
)
self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
self.sync_mode = False
self.sync_store = await WalletSyncStore.create()
self.state_changed_callback = None
self.pending_tx_callback = None
self.db_path = db_path
main_wallet_info = await self.user_store.get_wallet_by_id(1)
assert main_wallet_info is not None
self.private_key = private_key
self.main_wallet = await Wallet.create(self, main_wallet_info)
self.wallets = {main_wallet_info.id: self.main_wallet}
wallet = None
for wallet_info in await self.get_all_wallet_info_entries():
# self.log.info(f"wallet_info {wallet_info}")
if wallet_info.type == WalletType.STANDARD_WALLET:
if wallet_info.id == 1:
continue
wallet = await Wallet.create(config, wallet_info)
elif wallet_info.type == WalletType.COLOURED_COIN:
wallet = await CCWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.RATE_LIMITED:
wallet = await RLWallet.create(self, wallet_info)
elif wallet_info.type == WalletType.DISTRIBUTED_ID:
wallet = await DIDWallet.create(
self,
self.main_wallet,
wallet_info,
)
if wallet is not None:
self.wallets[wallet_info.id] = wallet
async with self.puzzle_store.lock:
index = await self.puzzle_store.get_last_derivation_path()
if index is None or index < self.config["initial_num_public_keys"] - 1:
await self.create_more_puzzle_hashes(from_zero=True)
return self
@property
def peak(self) -> Optional[BlockRecord]:
peak = self.blockchain.get_peak()
return peak
def get_derivation_index(self, pubkey: G1Element, max_depth: int = 1000) -> int:
for i in range(0, max_depth):
derived = self.get_public_key(uint32(i))
if derived == pubkey:
return i
return -1
def get_public_key(self, index: uint32) -> G1Element:
return master_sk_to_wallet_sk(self.private_key, index).get_g1()
async def load_wallets(self):
for wallet_info in await self.get_all_wallet_info_entries():
if wallet_info.id in self.wallets:
continue
if wallet_info.type == WalletType.STANDARD_WALLET:
if wallet_info.id == 1:
continue
wallet = await Wallet.create(self.config, wallet_info)
self.wallets[wallet_info.id] = wallet
# TODO add RL AND DiD WALLETS HERE
elif wallet_info.type == WalletType.COLOURED_COIN:
wallet = await CCWallet.create(
self,
self.main_wallet,
wallet_info,
)
self.wallets[wallet_info.id] = wallet
elif wallet_info.type == WalletType.DISTRIBUTED_ID:
wallet = await DIDWallet.create(
self,
self.main_wallet,
wallet_info,
)
self.wallets[wallet_info.id] = wallet
async def get_keys(self, puzzle_hash: bytes32) -> Optional[Tuple[G1Element, PrivateKey]]:
index_for_puzzlehash = await self.puzzle_store.index_for_puzzle_hash(puzzle_hash)
if index_for_puzzlehash is None:
raise ValueError(f"No key for this puzzlehash {puzzle_hash})")
private = master_sk_to_wallet_sk(self.private_key, index_for_puzzlehash)
pubkey = private.get_g1()
return pubkey, private
async def create_more_puzzle_hashes(self, from_zero: bool = False):
"""
For all wallets in the user store, generates the first few puzzle hashes so
that we can restore the wallet from only the private keys.
"""
targets = list(self.wallets.keys())
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
if self.new_wallet:
to_generate = self.config["initial_num_public_keys_new_wallet"]
else:
to_generate = self.config["initial_num_public_keys"]
for wallet_id in targets:
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
start_index = 0
derivation_paths: List[DerivationRecord] = []
if last is not None:
start_index = last + 1
# If the key was replaced (from_zero=True), we should generate the puzzle hashes for the new key
if from_zero:
start_index = 0
for index in range(start_index, unused + to_generate):
if WalletType(target_wallet.type()) == WalletType.RATE_LIMITED:
if target_wallet.rl_info.initialized is False:
break
wallet_type = target_wallet.rl_info.type
if wallet_type == "user":
rl_pubkey = G1Element.from_bytes(target_wallet.rl_info.user_pubkey)
else:
rl_pubkey = G1Element.from_bytes(target_wallet.rl_info.admin_pubkey)
rl_puzzle: Program = target_wallet.puzzle_for_pk(rl_pubkey)
puzzle_hash: bytes32 = rl_puzzle.get_tree_hash()
rl_index = self.get_derivation_index(rl_pubkey)
if rl_index == -1:
break
derivation_paths.append(
DerivationRecord(
uint32(rl_index),
puzzle_hash,
rl_pubkey,
target_wallet.type(),
uint32(target_wallet.id()),
)
)
break
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
if puzzle is None:
self.log.warning(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash,
pubkey,
target_wallet.type(),
uint32(target_wallet.id()),
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths)
if unused > 0:
await self.puzzle_store.set_used_up_to(uint32(unused - 1))
async def update_wallet_puzzle_hashes(self, wallet_id):
derivation_paths: List[DerivationRecord] = []
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
for index in range(unused, last):
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Generating public key at index {index} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash,
pubkey,
target_wallet.wallet_info.type,
uint32(target_wallet.wallet_info.id),
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths)
async def get_unused_derivation_record(self, wallet_id: uint32) -> DerivationRecord:
"""
Creates a puzzle hash for the given wallet, and then makes more puzzle hashes
        for every wallet to ensure we always have more in the database. Never reuse the
same public key more than once (for privacy).
"""
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
await self.create_more_puzzle_hashes()
# Now we must have unused public keys
unused = await self.puzzle_store.get_unused_derivation_path()
assert unused is not None
record: Optional[DerivationRecord] = await self.puzzle_store.get_derivation_record(unused, wallet_id)
assert record is not None
# Set this key to used so we never use it again
await self.puzzle_store.set_used_up_to(record.index)
# Create more puzzle hashes / keys
await self.create_more_puzzle_hashes()
return record
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
current: Optional[DerivationRecord] = await self.puzzle_store.get_current_derivation_record_for_wallet(
wallet_id
)
return current
def set_callback(self, callback: Callable):
"""
Callback to be called when the state of the wallet changes.
"""
self.state_changed_callback = callback
def set_pending_callback(self, callback: Callable):
"""
Callback to be called when new pending transaction enters the store
"""
self.pending_tx_callback = callback
def set_coin_with_puzzlehash_created_callback(self, puzzlehash, callback: Callable):
"""
Callback to be called when new coin is seen with specified puzzlehash
"""
self.puzzle_hash_created_callbacks[puzzlehash] = callback
async def puzzle_hash_created(self, coin: Coin):
callback = self.puzzle_hash_created_callbacks[coin.puzzle_hash]
if callback is None:
return None
await callback(coin)
def state_changed(self, state: str, wallet_id: int = None, data_object=None):
"""
Calls the callback if it's present.
"""
if data_object is None:
data_object = {}
if self.state_changed_callback is None:
return None
self.state_changed_callback(state, wallet_id, data_object)
def tx_pending_changed(self) -> None:
"""
Notifies the wallet node that there's new tx pending
"""
if self.pending_tx_callback is None:
return None
self.pending_tx_callback()
async def synced(self):
if self.sync_mode is True:
return False
peak: Optional[BlockRecord] = self.blockchain.get_peak()
if peak is None:
return False
curr = peak
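        # Walk back from the peak to the most recent transaction block; the
        # wallet is only considered synced if that block is less than 7
        # minutes old.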
while not curr.is_transaction_block and not curr.height == 0:
curr = self.blockchain.try_block_record(curr.prev_hash)
if curr is None:
return False
if curr.is_transaction_block and curr.timestamp > int(time.time()) - 7 * 60:
return True
return False
def set_sync_mode(self, mode: bool):
"""
Sets the sync mode. This changes the behavior of the wallet node.
"""
self.sync_mode = mode
self.state_changed("sync_changed")
async def get_confirmed_spendable_balance_for_wallet(self, wallet_id: int, unspent_records=None) -> uint128:
"""
Returns the balance amount of all coins that are spendable.
"""
spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(wallet_id, unspent_records)
spendable_amount: uint128 = uint128(0)
for record in spendable:
spendable_amount = uint128(spendable_amount + record.coin.amount)
return spendable_amount
async def does_coin_belong_to_wallet(self, coin: Coin, wallet_id: int) -> bool:
"""
Returns true if we have the key for this coin.
"""
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is None:
return False
coin_wallet_id, wallet_type = info
if wallet_id == coin_wallet_id:
return True
return False
async def get_confirmed_balance_for_wallet(
self, wallet_id: int, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
"""
Returns the confirmed balance, including coinbase rewards that are not spendable.
"""
# lock only if unspent_coin_records is None
if unspent_coin_records is None:
async with self.lock:
if unspent_coin_records is None:
unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
amount: uint128 = uint128(0)
for record in unspent_coin_records:
amount = uint128(amount + record.coin.amount)
self.log.info(f"Confirmed balance amount is {amount}")
return uint128(amount)
async def get_unconfirmed_balance(
self, wallet_id, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
"""
Returns the balance, including coinbase rewards that are not spendable, and unconfirmed
transactions.
"""
confirmed = await self.get_confirmed_balance_for_wallet(wallet_id, unspent_coin_records)
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_amount: int = 0
addition_amount: int = 0
for record in unconfirmed_tx:
for removal in record.removals:
removal_amount += removal.amount
for addition in record.additions:
# This change or a self transaction
if await self.does_coin_belong_to_wallet(addition, wallet_id):
addition_amount += addition.amount
result = confirmed - removal_amount + addition_amount
return uint128(result)
async def unconfirmed_additions_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns change coins for the wallet_id.
(Unconfirmed addition transactions that have not been confirmed yet.)
"""
additions: Dict[bytes32, Coin] = {}
unconfirmed_tx = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
for record in unconfirmed_tx:
for coin in record.additions:
if await self.is_addition_relevant(coin):
additions[coin.name()] = coin
return additions
async def unconfirmed_removals_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns new removals transactions that have not been confirmed yet.
"""
removals: Dict[bytes32, Coin] = {}
unconfirmed_tx = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
for record in unconfirmed_tx:
for coin in record.removals:
removals[coin.name()] = coin
return removals
async def coins_of_interest_received(self, removals: List[Coin], additions: List[Coin], height: uint32):
for coin in additions:
await self.puzzle_hash_created(coin)
trade_additions, added = await self.coins_of_interest_added(additions, height)
trade_removals, removed = await self.coins_of_interest_removed(removals, height)
if len(trade_additions) > 0 or len(trade_removals) > 0:
await self.trade_manager.coins_of_interest_farmed(trade_removals, trade_additions, height)
added_notified = set()
removed_notified = set()
for coin_record in added:
if coin_record.wallet_id in added_notified:
continue
added_notified.add(coin_record.wallet_id)
self.state_changed("coin_added", coin_record.wallet_id)
for coin_record in removed:
if coin_record.wallet_id in removed_notified:
continue
removed_notified.add(coin_record.wallet_id)
self.state_changed("coin_removed", coin_record.wallet_id)
async def coins_of_interest_added(
self, coins: List[Coin], height: uint32
) -> Tuple[List[Coin], List[WalletCoinRecord]]:
(
trade_removals,
trade_additions,
) = await self.trade_manager.get_coins_of_interest()
trade_adds: List[Coin] = []
block: Optional[BlockRecord] = await self.blockchain.get_block_record_from_db(
self.blockchain.height_to_hash(height)
)
assert block is not None
pool_rewards = set()
farmer_rewards = set()
added = []
prev = await self.blockchain.get_block_record_from_db(block.prev_hash)
# [block 1] [block 2] [tx block 3] [block 4] [block 5] [tx block 6]
# [tx block 6] will contain rewards for [block 1] [block 2] [tx block 3]
while prev is not None:
# step 1 find previous block
if prev.is_transaction_block:
break
prev = await self.blockchain.get_block_record_from_db(prev.prev_hash)
if prev is not None:
# include last block
pool_parent = pool_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
farmer_parent = farmer_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
pool_rewards.add(pool_parent)
farmer_rewards.add(farmer_parent)
prev = await self.blockchain.get_block_record_from_db(prev.prev_hash)
while prev is not None:
# step 2 traverse from previous block to the block before it
pool_parent = pool_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
farmer_parent = farmer_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
pool_rewards.add(pool_parent)
farmer_rewards.add(farmer_parent)
if prev.is_transaction_block:
break
prev = await self.blockchain.get_block_record_from_db(prev.prev_hash)
wallet_ids: Set[int] = set()
for coin in coins:
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is not None:
wallet_ids.add(info[0])
all_outgoing_tx: Dict[int, List[TransactionRecord]] = {}
for wallet_id in wallet_ids:
all_outgoing_tx[wallet_id] = await self.tx_store.get_all_transactions_for_wallet(
wallet_id, TransactionType.OUTGOING_TX
)
for coin in coins:
if coin.name() in trade_additions:
trade_adds.append(coin)
is_coinbase = False
is_fee_reward = False
if coin.parent_coin_info in pool_rewards:
is_coinbase = True
if coin.parent_coin_info in farmer_rewards:
is_fee_reward = True
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is not None:
wallet_id, wallet_type = info
added_coin_record = await self.coin_added(
coin, is_coinbase, is_fee_reward, uint32(wallet_id), wallet_type, height, all_outgoing_tx[wallet_id]
)
added.append(added_coin_record)
derivation_index = await self.puzzle_store.index_for_puzzle_hash(coin.puzzle_hash)
if derivation_index is not None:
await self.puzzle_store.set_used_up_to(derivation_index, True)
return trade_adds, added
async def coins_of_interest_removed(
self, coins: List[Coin], height: uint32
) -> Tuple[List[Coin], List[WalletCoinRecord]]:
# This gets called when coins of our interest are spent on chain
self.log.info(f"Coins removed {coins} at height: {height}")
(
trade_removals,
trade_additions,
) = await self.trade_manager.get_coins_of_interest()
# Keep track of trade coins that are removed
trade_coin_removed: List[Coin] = []
removed = []
all_unconfirmed: List[TransactionRecord] = await self.tx_store.get_all_unconfirmed()
for coin in coins:
record = await self.coin_store.get_coin_record(coin.name())
if coin.name() in trade_removals:
trade_coin_removed.append(coin)
if record is None:
self.log.info(f"Record for removed coin {coin.name()} is None. (ephemeral)")
continue
await self.coin_store.set_spent(coin.name(), height)
for unconfirmed_record in all_unconfirmed:
for rem_coin in unconfirmed_record.removals:
if rem_coin.name() == coin.name():
self.log.info(f"Setting tx_id: {unconfirmed_record.name} to confirmed")
await self.tx_store.set_confirmed(unconfirmed_record.name, height)
removed.append(record)
return trade_coin_removed, removed
async def coin_added(
self,
coin: Coin,
coinbase: bool,
fee_reward: bool,
wallet_id: uint32,
wallet_type: WalletType,
height: uint32,
all_outgoing_transaction_records: List[TransactionRecord],
) -> WalletCoinRecord:
"""
Adding coin to DB
"""
self.log.info(f"Adding coin: {coin} at {height}")
farm_reward = False
if coinbase or fee_reward:
farm_reward = True
now = uint64(int(time.time()))
if coinbase:
tx_type: int = TransactionType.COINBASE_REWARD.value
else:
tx_type = TransactionType.FEE_REWARD.value
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=now,
to_puzzle_hash=coin.puzzle_hash,
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(tx_type),
name=coin.name(),
)
await self.tx_store.add_transaction_record(tx_record, True)
else:
records: List[TransactionRecord] = []
for record in all_outgoing_transaction_records:
for add_coin in record.additions:
if add_coin.name() == coin.name():
records.append(record)
if len(records) > 0:
# This is the change from this transaction
for record in records:
if record.confirmed is False:
await self.tx_store.set_confirmed(record.name, height)
else:
now = uint64(int(time.time()))
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=now,
to_puzzle_hash=coin.puzzle_hash,
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=coin.name(),
)
if coin.amount > 0:
await self.tx_store.add_transaction_record(tx_record, True)
coin_record: WalletCoinRecord = WalletCoinRecord(
coin, height, uint32(0), False, farm_reward, wallet_type, wallet_id
)
await self.coin_store.add_coin_record(coin_record)
if wallet_type == WalletType.COLOURED_COIN or wallet_type == WalletType.DISTRIBUTED_ID:
wallet = self.wallets[wallet_id]
header_hash: bytes32 = self.blockchain.height_to_hash(height)
block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record(header_hash)
assert block is not None
assert block.removals is not None
await wallet.coin_added(coin, header_hash, block.removals, height)
return coin_record
async def add_pending_transaction(self, tx_record: TransactionRecord):
"""
        Called from the wallet before a new transaction is sent to the full_node
"""
if self.peak is None or int(time.time()) <= self.constants.INITIAL_FREEZE_END_TIMESTAMP:
raise ValueError("Initial Freeze Period")
# Wallet node will use this queue to retry sending this transaction until full nodes receives it
await self.tx_store.add_transaction_record(tx_record, False)
self.tx_pending_changed()
self.state_changed("pending_transaction", tx_record.wallet_id)
async def add_transaction(self, tx_record: TransactionRecord):
"""
        Called from the wallet to add a transaction that is not being sent to the full_node
"""
await self.tx_store.add_transaction_record(tx_record, False)
self.state_changed("pending_transaction", tx_record.wallet_id)
async def remove_from_queue(
self,
spendbundle_id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
error: Optional[Err],
):
"""
Full node received our transaction, no need to keep it in queue anymore
"""
updated = await self.tx_store.increment_sent(spendbundle_id, name, send_status, error)
if updated:
tx: Optional[TransactionRecord] = await self.get_transaction(spendbundle_id)
if tx is not None:
self.state_changed("tx_update", tx.wallet_id, {"transaction": tx})
async def get_send_queue(self) -> List[TransactionRecord]:
"""
Wallet Node uses this to retry sending transactions
"""
records = await self.tx_store.get_not_sent()
return records
async def get_all_transactions(self, wallet_id: int) -> List[TransactionRecord]:
"""
Retrieves all confirmed and pending transactions
"""
records = await self.tx_store.get_all_transactions_for_wallet(wallet_id)
return records
async def get_transaction(self, tx_id: bytes32) -> Optional[TransactionRecord]:
return await self.tx_store.get_transaction_record(tx_id)
async def get_filter_additions_removals(
self, new_block: HeaderBlock, transactions_filter: bytes, fork_point_with_peak: Optional[uint32]
) -> Tuple[List[bytes32], List[bytes32]]:
"""Returns a list of our coin ids, and a list of puzzle_hashes that positively match with provided filter."""
# assert new_block.prev_header_hash in self.blockchain.blocks
tx_filter = PyBIP158([b for b in transactions_filter])
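        # The compact (BIP158-style) block filter lets the wallet check, without
        # downloading the full block, whether any of its coin ids or puzzle
        # hashes may appear in this block; matches can include false positives.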
# Find fork point
if fork_point_with_peak is not None:
fork_h: int = fork_point_with_peak
elif new_block.prev_header_hash != self.constants.GENESIS_CHALLENGE and self.peak is not None:
block_record = await self.blockchain.get_block_record_from_db(self.peak.header_hash)
# this may return -1, in case there is no shared ancestor block
fork_h = find_fork_point_in_chain(
self.blockchain,
block_record,
new_block,
)
else:
fork_h = 0
# Get all unspent coins
my_coin_records: Set[WalletCoinRecord] = await self.coin_store.get_unspent_coins_at_height(
uint32(fork_h) if fork_h >= 0 else None
)
# Filter coins up to and including fork point
unspent_coin_names: Set[bytes32] = set()
for coin in my_coin_records:
if coin.confirmed_block_height <= fork_h:
unspent_coin_names.add(coin.name())
# Get all blocks after fork point up to but not including this block
if new_block.height > 0:
curr: BlockRecord = self.blockchain.block_record(new_block.prev_hash)
reorg_blocks: List[HeaderBlockRecord] = []
while curr.height > fork_h:
header_block_record = await self.block_store.get_header_block_record(curr.header_hash)
assert header_block_record is not None
reorg_blocks.append(header_block_record)
if curr.height == 0:
break
curr = await self.blockchain.get_block_record_from_db(curr.prev_hash)
reorg_blocks.reverse()
# For each block, process additions to get all Coins, then process removals to get unspent coins
for reorg_block in reorg_blocks:
for addition in reorg_block.additions:
unspent_coin_names.add(addition.name())
for removal in reorg_block.removals:
record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(removal.puzzle_hash)
if record is None:
continue
                    unspent_coin_names.remove(removal.name())
my_puzzle_hashes = self.puzzle_store.all_puzzle_hashes
        removals_of_interest: List[bytes32] = []
        additions_of_interest: List[bytes32] = []
(
trade_removals,
trade_additions,
) = await self.trade_manager.get_coins_of_interest()
for name, trade_coin in trade_removals.items():
if tx_filter.Match(bytearray(trade_coin.name())):
removals_of_interest.append(trade_coin.name())
for name, trade_coin in trade_additions.items():
if tx_filter.Match(bytearray(trade_coin.puzzle_hash)):
additions_of_interest.append(trade_coin.puzzle_hash)
for coin_name in unspent_coin_names:
if tx_filter.Match(bytearray(coin_name)):
removals_of_interest.append(coin_name)
for puzzle_hash in my_puzzle_hashes:
if tx_filter.Match(bytearray(puzzle_hash)):
additions_of_interest.append(puzzle_hash)
return additions_of_interest, removals_of_interest
async def get_relevant_additions(self, additions: List[Coin]) -> List[Coin]:
"""Returns the list of coins that are relevant to us.(We can spend them)"""
result: List[Coin] = []
my_puzzle_hashes: Set[bytes32] = self.puzzle_store.all_puzzle_hashes
for coin in additions:
if coin.puzzle_hash in my_puzzle_hashes:
result.append(coin)
return result
async def is_addition_relevant(self, addition: Coin):
"""
Check whether we care about a new addition (puzzle_hash). Returns true if we
control this puzzle hash.
"""
result = await self.puzzle_store.puzzle_hash_exists(addition.puzzle_hash)
return result
async def get_wallet_for_coin(self, coin_id: bytes32) -> Any:
coin_record = await self.coin_store.get_coin_record(coin_id)
if coin_record is None:
return None
wallet_id = uint32(coin_record.wallet_id)
wallet = self.wallets[wallet_id]
return wallet
async def get_relevant_removals(self, removals: List[Coin]) -> List[Coin]:
"""Returns a list of our unspent coins that are in the passed list."""
result: List[Coin] = []
wallet_coin_records = await self.coin_store.get_unspent_coins_at_height()
my_coins: Dict[bytes32, Coin] = {r.coin.name(): r.coin for r in list(wallet_coin_records)}
for coin in removals:
if coin.name() in my_coins:
result.append(coin)
return result
async def reorg_rollback(self, height: int):
"""
Rolls back and updates the coin_store and transaction store. It's possible this height
is the tip, or even beyond the tip.
"""
await self.coin_store.rollback_to_block(height)
reorged: List[TransactionRecord] = await self.tx_store.get_transaction_above(height)
await self.tx_store.rollback_to_block(height)
await self.retry_sending_after_reorg(reorged)
async def retry_sending_after_reorg(self, records: List[TransactionRecord]):
"""
        Retries sending the spend_bundle to the full node after a confirmed tx
        gets excluded from the chain because of a reorg.
"""
if len(records) == 0:
return None
for record in records:
if record.type in [
TransactionType.OUTGOING_TX,
TransactionType.OUTGOING_TRADE,
TransactionType.INCOMING_TRADE,
]:
await self.tx_store.tx_reorged(record)
self.tx_pending_changed()
async def close_all_stores(self) -> None:
if self.blockchain is not None:
self.blockchain.shut_down()
await self.db_connection.close()
async def clear_all_stores(self):
await self.coin_store._clear_database()
await self.tx_store._clear_database()
await self.puzzle_store._clear_database()
await self.user_store._clear_database()
await self.basic_store._clear_database()
def unlink_db(self):
Path(self.db_path).unlink()
async def get_all_wallet_info_entries(self) -> List[WalletInfo]:
return await self.user_store.get_all_wallet_info_entries()
async def get_start_height(self):
"""
        If we have a coin, use that as the starting height next time;
        otherwise use the peak.
"""
first_coin_height = await self.coin_store.get_first_coin_height()
if first_coin_height is None:
start_height = self.blockchain.get_peak()
else:
start_height = first_coin_height
return start_height
async def create_wallet_backup(self, file_path: Path):
all_wallets = await self.get_all_wallet_info_entries()
for wallet in all_wallets:
if wallet.id == 1:
all_wallets.remove(wallet)
break
backup_pk = master_sk_to_backup_sk(self.private_key)
now = uint64(int(time.time()))
wallet_backup = WalletInfoBackup(all_wallets)
backup: Dict[str, Any] = {}
data = wallet_backup.to_json_dict()
data["version"] = __version__
data["fingerprint"] = self.private_key.get_g1().get_fingerprint()
data["timestamp"] = now
data["start_height"] = await self.get_start_height()
key_base_64 = base64.b64encode(bytes(backup_pk))
f = Fernet(key_base_64)
data_bytes = json.dumps(data).encode()
encrypted = f.encrypt(data_bytes)
meta_data: Dict[str, Any] = {"timestamp": now, "pubkey": bytes(backup_pk.get_g1()).hex()}
meta_data_bytes = json.dumps(meta_data).encode()
signature = bytes(AugSchemeMPL.sign(backup_pk, std_hash(encrypted) + std_hash(meta_data_bytes))).hex()
backup["data"] = encrypted.decode()
backup["meta_data"] = meta_data
backup["signature"] = signature
backup_file_text = json.dumps(backup)
file_path.write_text(backup_file_text)
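    # Layout of the backup file written above (reconstructed from this method):
    #   "data":      Fernet-encrypted JSON holding the wallet list plus version,
    #                fingerprint, timestamp and start_height
    #   "meta_data": {"timestamp": ..., "pubkey": ...}
    #   "signature": hex of an AugSchemeMPL signature over
    #                std_hash(encrypted data) + std_hash(meta_data bytes)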
async def import_backup_info(self, file_path) -> None:
json_dict = open_backup_file(file_path, self.private_key)
wallet_list_json = json_dict["data"]["wallet_list"]
for wallet_info in wallet_list_json:
await self.user_store.create_wallet(
wallet_info["name"],
wallet_info["type"],
wallet_info["data"],
wallet_info["id"],
)
await self.load_wallets()
await self.user_settings.user_imported_backup()
await self.create_more_puzzle_hashes(from_zero=True)
async def get_wallet_for_colour(self, colour):
for wallet_id in self.wallets:
wallet = self.wallets[wallet_id]
if wallet.type() == WalletType.COLOURED_COIN:
if bytes(wallet.cc_info.my_genesis_checker).hex() == colour:
return wallet
return None
async def add_new_wallet(self, wallet: Any, wallet_id: int):
self.wallets[uint32(wallet_id)] = wallet
await self.create_more_puzzle_hashes()
    # Search through the block records and return the most recent block whose
    # transactions filter matches a given puzzle hash.
async def search_blockrecords_for_puzzlehash(self, puzzlehash: bytes32):
header_hash_of_interest = None
heighest_block_height = 0
peak: Optional[BlockRecord] = self.blockchain.get_peak()
if peak is None:
return None, None
peak_block: Optional[HeaderBlockRecord] = await self.blockchain.block_store.get_header_block_record(
peak.header_hash
)
while peak_block is not None:
tx_filter = PyBIP158([b for b in peak_block.header.transactions_filter])
if tx_filter.Match(bytearray(puzzlehash)) and peak_block.height > heighest_block_height:
header_hash_of_interest = peak_block.header_hash
heighest_block_height = peak_block.height
break
else:
peak_block = await self.blockchain.block_store.get_header_block_record(
peak_block.header.prev_header_hash
)
return heighest_block_height, header_hash_of_interest
async def get_spendable_coins_for_wallet(self, wallet_id: int, records=None) -> Set[WalletCoinRecord]:
if self.peak is None:
return set()
if records is None:
records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
# Coins that are currently part of a transaction
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_dict: Dict[bytes32, Coin] = {}
for tx in unconfirmed_tx:
for coin in tx.removals:
# TODO, "if" might not be necessary once unconfirmed tx doesn't contain coins for other wallets
if await self.does_coin_belong_to_wallet(coin, wallet_id):
removal_dict[coin.name()] = coin
# Coins that are part of the trade
offer_locked_coins: Dict[bytes32, WalletCoinRecord] = await self.trade_manager.get_locked_coins()
filtered = set()
for record in records:
if record.coin.name() in offer_locked_coins:
continue
if record.coin.name() in removal_dict:
continue
filtered.add(record)
return filtered
async def create_action(
self, name: str, wallet_id: int, wallet_type: int, callback: str, done: bool, data: str, in_transaction: bool
):
await self.action_store.create_action(name, wallet_id, wallet_type, callback, done, data, in_transaction)
self.tx_pending_changed()
async def set_action_done(self, action_id: int):
await self.action_store.action_done(action_id)
    async def generator_received(self, height: uint32, header_hash: bytes32, program: Program):
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_generator":
stored_header_hash = bytes32(hexstr_to_bytes(action_data["header_hash"]))
stored_height = uint32(action_data["height"])
if stored_header_hash == header_hash and stored_height == height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(height, header_hash, program, action.id)
async def puzzle_solution_received(self, response: RespondPuzzleSolution):
unwrapped: PuzzleSolutionResponse = response.response
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
stored_coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
if stored_coin_name == unwrapped.coin_name and height == unwrapped.height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(unwrapped, action.id)
avg_line_length: 42.046794 | max_line_length: 120 | alphanum_fraction: 0.638762
hexsha: d0ab2fa07ec216f8af2558d0b182fc6b664345b5 | size: 61,891 | ext: py | lang: Python
max_stars_repo_path: metalpipe/node.py | max_stars_repo_name: zacernst/nanostream | max_stars_repo_head_hexsha: 382389b09c42b55c6bdb64c7b0017d4810c7165f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2019-04-12T19:32:55.000Z | max_stars_repo_stars_event_max_datetime: 2019-12-24T16:50:09.000Z
max_issues_repo_path: metalpipe/node.py | max_issues_repo_name: zacernst/metalpipe | max_issues_repo_head_hexsha: 382389b09c42b55c6bdb64c7b0017d4810c7165f | max_issues_repo_licenses: ["MIT"] | max_issues_count: 10 | max_issues_repo_issues_event_min_datetime: 2019-04-03T01:25:52.000Z | max_issues_repo_issues_event_max_datetime: 2019-12-16T05:09:35.000Z
max_forks_repo_path: metalpipe/node.py | max_forks_repo_name: zacernst/nanostream | max_forks_repo_head_hexsha: 382389b09c42b55c6bdb64c7b0017d4810c7165f | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-04-17T12:55:19.000Z | max_forks_repo_forks_event_max_datetime: 2019-04-17T12:55:19.000Z
content:
"""
Node module
===========
The ``node`` module contains the ``MetalNode`` class, which is the foundation
for MetalPipe.
"""
import time
import datetime
import uuid
import importlib
import logging
import os
import threading
import pprint
import sys
import copy
import random
import functools
import csv
import MySQLdb
import re
import io
import yaml
import types
import inspect
import prettytable
import requests
import graphviz
from timed_dict.timed_dict import TimedDict
from metalpipe.message.batch import BatchStart, BatchEnd
from metalpipe.message.message import MetalPipeMessage
from metalpipe.node_queue.queue import MetalPipeQueue
from metalpipe.message.canary import Canary
from metalpipe.utils.set_attributes import set_kwarg_attributes
from metalpipe.utils.data_structures import Row, MySQLTypeSystem
from metalpipe.utils import data_structures as ds
# from metalpipe.metalpipe_recorder import RedisFixturizer
from metalpipe.utils.helpers import (
load_function,
replace_by_path,
remap_dictionary,
set_value,
get_value,
to_bool,
aggregate_values,
)
DEFAULT_MAX_QUEUE_SIZE = int(os.environ.get("DEFAULT_MAX_QUEUE_SIZE", 128))
MONITOR_INTERVAL = 1
STATS_COUNTER_MODULO = 4
LOGJAM_THRESHOLD = 0.25
SHORT_DELAY = 0.1
PROMETHEUS = False
def no_op(*args, **kwargs):
"""
No-op function to serve as default ``get_runtime_attrs``.
"""
return None
class bcolors:
"""
This class holds the values for the various colors that are used in the
tables that monitor the status of the nodes.
"""
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
class NothingToSeeHere:
"""
Vacuous class used as a no-op message type.
"""
pass
class Terminated:
"""
    Class sent optionally when a node is done processing messages (i.e. when its
    upstream nodes have finished).
"""
def __init__(self, node):
self.node = node
class MetalNode:
"""
The foundational class of `MetalPipe`. This class is inherited by all
nodes in a computation graph.
Order of operations:
1. Child class ``__init__`` function
2. ``MetalNode`` ``__init__`` function
3. ``preflight_function`` (Specified in initialization params)
4. ``setup``
    5. ``start``
These methods have the following intended uses:
1. ``__init__`` Sets attribute values and calls the ``MetalNode`` ``__init__``
method.
2. ``get_runtime_attrs`` Sets any attribute values that are to be determined
at runtime, e.g. by checking environment variables or reading values
from a database. The ``get_runtime_attrs`` should return a dictionary
of attributes -> values, or else ``None``.
3. ``setup`` Sets the state of the ``MetalNode`` and/or creates any attributes
that require information available only at runtime.
Args:
send_batch_markers: If ``True``, then a ``BatchStart`` marker will
be sent when a new input is received, and a ``BatchEnd`` will be sent
after the input has been processed. The intention is that a number of
items will be emitted for each input received. For example, we might
emit a table row-by-row for each input.
get_runtime_attrs: A function that returns a dictionary-like object.
The keys and values will be saved to this ``MetalNode`` object's
attributes. The function is executed one time, upon starting the node.
get_runtime_attrs_args: A tuple of arguments to be passed to the
``get_runtime_attrs`` function upon starting the node.
get_runtime_attrs_kwargs: A dictionary of kwargs passed to the
``get_runtime_attrs`` function.
runtime_attrs_destinations: If set, this is a dictionary mapping
the keys returned from the ``get_runtime_attrs`` function to the
names of the attributes to which the values will be saved.
throttle: For each input received, a delay of ``throttle`` seconds
will be added.
keep_alive: If ``True``, keep the node's thread alive after
everything has been processed.
name: The name of the node. Defaults to a randomly generated hash.
Note that this hash is not consistent from one run to the next.
input_mapping: When the node receives a dictionary-like object,
this dictionary will cause the keys of the dictionary to be remapped
to new keys.
retain_input: If ``True``, then combine the dictionary-like input
with the output. If keys clash, the output value will be kept.
input_message_keypath: Read the value in this keypath as the content
of the incoming message.
"""
def __init__(
self,
*args,
batch=False,
get_runtime_attrs=no_op,
get_runtime_attrs_args=None,
get_runtime_attrs_kwargs=None,
runtime_attrs_destinations=None,
input_mapping=None,
retain_input=True,
throttle=0,
keep_alive=True,
max_errors=0,
max_messages_received=None,
name=None,
input_message_keypath=None,
key=None,
messages_received_counter=0,
prefer_existing_value=False,
messages_sent_counter=0,
post_process_function=None,
post_process_keypath=None,
summary="",
fixturize=False,
post_process_function_kwargs=None,
output_key=None,
break_test=None,
send_termination_message=False,
**kwargs
):
self.name = name or uuid.uuid4().hex
self.input_mapping = input_mapping or {}
self.input_queue_list = []
self.output_queue_list = []
self.input_node_list = []
self.queue_event = threading.Event()
self.input_message_keypath = input_message_keypath or []
self.output_node_list = []
self.max_messages_received = max_messages_received
self.global_dict = None # We'll add a dictionary upon startup
self.terminate = False
self.thread_dict = {}
self.kill_thread = False
self.prefer_existing_value = prefer_existing_value
self.accumulator = {}
self.output_key = output_key
self.fixturize = fixturize
self.keep_alive = keep_alive
self.retain_input = (
retain_input # Keep the input dictionary and send it downstream
)
if break_test is not None:
self.break_test = load_function(break_test)
else:
self.break_test = None
self.throttle = throttle
self.get_runtime_attrs = get_runtime_attrs
self.get_runtime_attrs_args = get_runtime_attrs_args or tuple()
self.cleanup_called = False
self.get_runtime_attrs_kwargs = get_runtime_attrs_kwargs or {}
self.runtime_attrs_destinations = runtime_attrs_destinations or {}
self.key = key
self.messages_received_counter = messages_received_counter
self.messages_sent_counter = messages_sent_counter
self.instantiated_at = datetime.datetime.now()
self.started_at = None
self.stopped_at = None
self.error_counter = 0
self.status = "stopped" # running, error, success
self.max_errors = max_errors
self.post_process_function_name = (
post_process_function # Function to be run on result
)
self.post_process_function_kwargs = post_process_function_kwargs or {}
self.summary = summary
self.prometheus_objects = None
self.logjam_score = {"polled": 0.0, "logjam": 0.0}
self.send_termination_message = send_termination_message
# Get post process function if one is named
if self.post_process_function_name is not None:
components = self.post_process_function_name.split("__")
if len(components) == 1:
module = None
function_name = components[0]
self.post_process_function = globals()[function_name]
else:
module = ".".join(components[:-1])
function_name = components[-1]
module = importlib.import_module(module)
self.post_process_function = getattr(module, function_name)
else:
self.post_process_function = None
self.post_process_keypath = (
post_process_keypath.split(".")
if post_process_keypath is not None
else None
)
if self.fixturize:
self.fixturizer = RedisFixturizer()
else:
self.fixturizer = None
def setup(self):
"""
For classes that require initialization at runtime, which can't be done
when the class's ``__init__`` function is called. The ``MetalNode`` base
class's setup function is just a logging call.
It should be unusual to have to make use of ``setup`` because in practice,
initialization can be done in the ``__init__`` function.
"""
logging.debug(
"No ``setup`` method for {class_name}.".format(
class_name=self.__class__.__name__
)
)
pass
def __gt__(self, other):
"""
Convenience method so that we can link two nodes by ``node1 > node2``.
This just calls ``add_edge``.
"""
self.add_edge(other)
return other
@property
def is_source(self):
"""
Tests whether the node is a source or not, i.e. whether there are no
inputs to the node.
Returns:
(bool): ``True`` if the node has no inputs, ``False`` otherwise.
"""
return len(self.input_queue_list) == 0
@property
def is_sink(self):
"""
Tests whether the node is a sink or not, i.e. whether there are no
outputs from the node.
Returns:
(bool): ``True`` if the node has no output nodes, ``False`` otherwise.
"""
return len(self.output_queue_list) == 0
def add_edge(self, target, **kwargs):
"""
Create an edge connecting `self` to `target`.
This method instantiates the ``MetalPipeQueue`` object that connects the
        nodes. Connecting the nodes together consists of (1) adding the queue to
        the target's ``input_queue_list`` and the source's ``output_queue_list``, and (2) setting
the queue's ``source_node`` and ``target_node`` attributes.
Args:
target (``MetalNode``): The node to which ``self`` will be connected.
"""
max_queue_size = kwargs.get("max_queue_size", DEFAULT_MAX_QUEUE_SIZE)
edge_queue = MetalPipeQueue(max_queue_size)
self.output_node_list.append(target)
target.input_node_list.append(self)
edge_queue.source_node = self
edge_queue.target_node = target
target.input_queue_list.append(edge_queue)
self.output_queue_list.append(edge_queue)
def _get_message_content(self, one_item):
# Get the content of a specific keypath, if one has
# been defined in the ``MetalNode`` initialization.
message_content = (
get_value(one_item.message_content, self.input_message_keypath)
if len(self.input_message_keypath) > 0
else one_item.message_content
)
if (
isinstance(message_content, (dict,))
and len(message_content) == 1
and "__value__" in message_content
):
message_content = message_content["__value__"]
return message_content
def wait_for_pipeline_finish(self):
while not self.pipeline_finished:
time.sleep(SHORT_DELAY)
def start(self):
"""
Starts the node. This is called by ``MetalNode.global_start()``.
The node's main loop is contained in this method. The main loop does
the following:
1. records the timestamp to the node's ``started_at`` attribute.
#. calls ``get_runtime_attrs`` (TODO: check if we can deprecate this)
#. calls the ``setup`` method for the class (which is a no-op by default)
#. if the node is a source, then successively yield all the results of
the node's ``generator`` method, then exit.
#. if the node is not a source, then loop over the input queues, getting
the next message. Note that when the message is pulled from the queue,
the ``MetalPipeQueue`` yields it as a dictionary.
#. gets either the content of the entire message if the node has no ``key``
attribute, or the value of ``message[self.key]``.
#. remaps the message content if a ``remapping`` dictionary has been
given in the node's configuration
#. calls the node's ``process_item`` method, yielding back the results.
(Note that a single input message may cause the node to yield zero,
one, or more than one output message.)
#. places the results into each of the node's output queues.
"""
self.started_at = datetime.datetime.now()
logging.debug("Starting node: {node}".format(node=self.__class__.__name__))
# ``get_runtime_attrs`` returns a dict-like object whose keys and
# values are stored as attributes of the ``MetalNode`` object.
if self.get_runtime_attrs is not None:
pre_flight_results = (
self.get_runtime_attrs(
*self.get_runtime_attrs_args, **self.get_runtime_attrs_kwargs
)
or {}
)
if self.runtime_attrs_destinations is not None:
for key, value in pre_flight_results.items():
setattr(self, self.runtime_attrs_destinations[key], value)
elif self.runtime_attrs_destinations is None:
for key, value in pre_flight_results.items():
setattr(self, key, value)
else:
raise Exception(
"There is a ``get_runtime_attrs``, but the "
"``runtime_attrs_destinations`` is neither None nor a "
"dict-like object."
)
# We have to separate the pre-flight function, the setup of the
# class, and any necessary startup functions (such as connecting
# to a database).
self.setup() # Setup function?
if self.is_source and not isinstance(self, (DynamicClassMediator,)):
for output in self.generator():
if self.fixturizer:
self.fixturizer.record_source_node(self, output)
yield output, None
for output in self._cleanup():
yield output, None
else:
logging.debug(
"About to enter loop for reading input queue in {node}.".format(
node=str(self)
)
)
# insert conditions for having no more messages to read...
upstream_nodes_finished = all(
input_node.cleanup_called for input_node in self.input_node_list
)
input_queues_empty = self.is_source or self.input_queues_empty()
while not (upstream_nodes_finished and input_queues_empty):
for input_queue in self.input_queue_list:
one_item = input_queue.get()
####
if self.terminate:
# self.finished = True
break
if one_item is None:
continue
# Keep track of where the message came from, useful for
# managing streaming joins, e.g.
message_source = input_queue.source_node
self.messages_received_counter += 1
if (
self.max_messages_received is not None
and self.messages_received_counter > self.max_messages_received
):
self.finished = True
break
# The ``throttle`` keyword introduces a delay in seconds
time.sleep(self.throttle)
# Retrieve the ``message_content``
message_content = self._get_message_content(one_item)
# If we receive ``None`` or a ``NothingToSeeHere``, continue.
if message_content is None or isinstance(
message_content, (NothingToSeeHere,)
):
continue
# Record the message and its source in the node's attributes
self.message = message_content
self.message_source = message_source
# Otherwise, process the message as usual, by calling
# the ``MetalNode`` object's ``process_item`` method.
for output in self._process_item():
# Put redis recording here
if self.fixturizer:
self.fixturizer.record_worker_node(self, one_item, output)
yield output, one_item # yield previous message
### Do the self.break_test() if it's been defined
### Execute the function and break
### if it returns True
if self.break_test is not None and not self.finished:
self.log_info("running break_test.")
break_test_result = self.break_test(
output_message=output, input_message=self.__message__,
)
self.log_info("NODE BREAK TEST: " + str(break_test_result))
# self.finished = break_test_result
# Check input node(s) here to see if they're all ``.finished``
upstream_nodes_finished = all(
input_node.cleanup_called for input_node in self.input_node_list
)
input_queues_empty = self.is_source or self.input_queues_empty()
self.log_info("checking whether cleanup is a generator. " + str(self.name))
for i in self._cleanup():
yield i, None
@property
def upstream_nodes_finished(self):
return all(input_node.cleanup_called for input_node in self.input_node_list)
@property
def finished(self):
"""
A node is considered "finished" if:
1. All of its immediate parents are "finished" (including if the node
is a generator and has no parents);
2. All of its input queues are empty;
3. It is not processing any messages;
4. Its ``cleanup`` method (if any) has been called.
Alternatively, a node is forced to be in a "finished" state if the
pipeline is being terminated. This causes each node's ``terminate``
attribute to be set to ``True``.
"""
input_queues_empty = self.is_source or self.input_queues_empty()
return (
self.upstream_nodes_finished and input_queues_empty and self.cleanup_called
) or self.terminate
def input_queues_empty(self):
"""
Tests whether there are any messages on any of the node's input
queues.
Returns:
bool: ``True`` if input queues are all empty.
"""
return all(queue.empty for queue in self.input_queue_list)
def cleanup(self):
"""
        If there is any cleanup (closing files, shutting down database connections)
        necessary when the node is stopped, then the node's class should provide
a ``cleanup`` method. By default, the method is just a logging statement.
"""
self.log_info("in null cleanup")
yield NothingToSeeHere()
def _cleanup(self):
self.log_info("Cleanup called after shutdown.")
for i in self.cleanup():
yield i
# Send termination message here
if self.send_termination_message:
yield Terminated(self)
for q in self.output_queue_list:
while not q.empty:
pass
self.log_info("setting cleanup_called to True")
self.cleanup_called = True
def log_info(self, message=""):
logging.info(
"{node_name}: {message}".format(node_name=self.name, message=message)
)
def terminate_pipeline(self, error=False):
"""
This method can be called on any node in a pipeline, and it will cause
all of the nodes to terminate if they haven't stopped already.
Args:
error (bool): Not yet implemented.
"""
self.log_info("terminate_pipeline called..." + str(self.name))
for node in self.all_connected():
node.terminate = True
for q in node.output_queue_list:
q.drain()
# if not node.finished:
# node.stopped_at = datetime.datetime.now()
# print('setting node.terminate')
# node.terminate = True
def process_item(self, *args, **kwargs):
"""
Default no-op for nodes.
"""
pass
def generator(self):
"""
If there is no ``generator`` method, then call the node's ``process_item``
method instead, assuming that there is code to accommodate this case.
"""
for i in self.process_item():
yield i
@property
def __message__(self):
"""
        If the node has a ``key`` defined, return the corresponding value (or
        keypath value) from the message dictionary. If it does not, return the
        entire message dictionary.
Nodes should access the content of their incoming message via this
property.
"""
if self.key is None:
out = self.message
elif isinstance(self.key, (str,)):
out = self.message[self.key]
elif isinstance(self.key, (list,)):
out = get_value(self.message, self.key)
else:
raise Exception("Bad type for input key.")
return out
def _process_item(self, *args, **kwargs):
"""
This method wraps the node's ``process_item`` method. It provides a place
to insert code for logging, error handling, etc.
There's lots of experimental code here, particularly the code for
Prometheus monitoring.
"""
# Swap out the message if ``key`` is specified
# If we're using prometheus, then increment a counter
if self.prometheus_objects is not None:
self.prometheus_objects["incoming_message_summary"].observe(random.random())
message_arrival_time = time.time()
try:
for out in self.process_item(*args, **kwargs):
if (
not isinstance(out, (dict, NothingToSeeHere))
and self.output_key is None
):
logging.debug("Exception raised due to no key" + str(self.name))
raise Exception(
"Either message must be a dictionary or `output_key` "
"must be specified. {name}".format(self.name)
)
# Apply post_process_function if it's defined
if self.post_process_function is not None:
set_value(
out,
self.post_process_keypath,
self.post_process_function(
get_value(out, self.post_process_keypath),
**self.post_process_function_kwargs
),
)
if self.prometheus_objects is not None:
self.prometheus_objects["outgoing_message_summary"].set(
time.time() - message_arrival_time
)
yield out
except Exception as err:
self.error_counter += 1
logging.error(
"message: "
+ str(err.args)
+ str(self.__class__.__name__)
+ str(self.name)
)
if self.error_counter > self.max_errors:
self.terminate_pipeline(error=True)
self.status = "error" #
else:
logging.warning("oops")
def stream(self):
"""
Called in each ``MetalNode`` thread.
"""
self.status = "running"
if getattr(self, "_import_pydatalog", False):
from pyDatalog import pyDatalog, Logic
Logic(self.logic_engine)
try:
for output, previous_message in self.start():
logging.debug("In MetalNode.stream.stream() --> " + str(output))
for output_queue in self.output_queue_list:
self.messages_sent_counter += 1
output_queue.put(
output,
block=True,
timeout=None,
queue_event=self.queue_event,
previous_message=previous_message,
)
# if 1 or not isinstance(output, (NothingToSeeHere,)) and output is not None:
except Exception as error:
self.status = "error"
self.stopped_at = datetime.datetime.now()
raise error
self.status = "success"
self.stopped_at = datetime.datetime.now()
@property
def time_running(self):
"""
Return the number of wall-clock seconds elapsed since the node was
started.
"""
if self.status == "stopped":
return None
elif self.status == "running":
return datetime.datetime.now() - self.started_at
elif self.stopped_at is None:
return datetime.datetime.now() - self.started_at
else:
return self.stopped_at - self.started_at
def all_connected(self, seen=None):
"""
Returns all the nodes connected (directly or indirectly) to ``self``.
This allows us to loop over all the nodes in a pipeline even if we
have a handle on only one. This is used by ``global_start``, for
example.
Args:
seen (set): A set of all the nodes that have been identified as
connected to ``self``.
Returns:
(set of ``MetalNode``): All the nodes connected to ``self``. This
includes ``self``.
"""
seen = seen or set()
if isinstance(self, (DynamicClassMediator,)):
for node_name, node_dict in self.node_dict.items():
node_obj = node_dict["obj"]
seen = seen | node_obj.all_connected(seen=seen)
else:
if self not in seen:
seen.add(self)
for node in self.input_node_list + self.output_node_list:
if node in seen:
continue
seen.add(node)
seen = seen | node.all_connected(seen=seen)
return seen
def broadcast(self, broadcast_message):
"""
Puts the message into all the input queues for all connected nodes.
"""
for node in self.all_connected():
for input_queue in node.input_queue_list:
input_queue.put(broadcast_message)
@property
def logjam(self):
"""
Returns the logjam score, which measures the degree to which the
node is holding up progress in downstream nodes.
We're defining a logjam as a node whose input queue is full, but
whose output queue(s) is not. More specifically, we poll each node
in the ``monitor_thread``, and increment a counter if the node is
a logjam at that time. This property returns the percentage of
samples in which the node is a logjam. Our intention is that if
this score exceeds a threshold, the user is alerted, or the load
is rebalanced somehow (not yet implemented).
Returns:
(float): Logjam score
"""
if self.logjam_score["polled"] == 0:
return 0.0
else:
return self.logjam_score["logjam"] / self.logjam_score["polled"]
def global_start(
self, prometheus=False, pipeline_name=None, max_time=None, fixturize=False,
):
"""
Starts every node connected to ``self``. Mainly, it:
1. calls ``start()`` on each node
#. sets some global variables
#. optionally starts some experimental code for monitoring
"""
def prometheus_init():
"""
Experimental code for enabling Prometheus monitoring.
"""
from prometheus_client import (
start_http_server,
Summary,
Gauge,
Histogram,
Counter,
)
for node in self.all_connected():
node.prometheus_objects = {}
summary = Summary(
node.name + "_incoming", "Summary of incoming messages"
)
node.prometheus_objects["incoming_message_summary"] = summary
node.prometheus_objects["outgoing_message_summary"] = Gauge(
node.name + "_outgoing", "Summary of outgoing messages"
)
start_http_server(8000)
if PROMETHEUS:
prometheus_init()
# thread_dict = self.thread_dict
global_dict = {}
run_id = uuid.uuid4().hex
for node in self.all_connected():
# Set the pipeline name on the attribute of each node
node.pipeline_name = pipeline_name or uuid.uuid4().hex
# Set a unique run_id
node.run_id = run_id
node.fixturize = fixturize
node.global_dict = global_dict # Establishing shared globals
logging.debug("global_start:" + str(self))
# Create thread event here?
thread = threading.Thread(
target=MetalNode.stream, args=(node,), daemon=False
)
thread.start()
node.thread_dict = self.thread_dict
self.thread_dict[node.name] = thread
node.status = "running"
monitor_thread = threading.Thread(
target=MetalNode.thread_monitor,
args=(self,),
kwargs={"max_time": max_time},
daemon=True,
)
monitor_thread.start()
@property
def input_queue_size(self):
"""
Return the total number of items in all of the queues that are inputs
to this node.
"""
return sum([input_queue.queue.qsize() for input_queue in self.input_queue_list])
def kill_pipeline(self):
for node in self.all_connected():
node.finished = True
def draw_pipeline(self):
"""
Draw the pipeline structure using graphviz.
"""
dot = graphviz.Digraph()
for node in self.all_connected():
dot.node(node.name, node.name, shape="box")
for node in self.all_connected():
for target_node in node.output_node_list:
dot.edge(node.name, target_node.name)
dot.render("pipeline_drawing.gv", view=True)
@property
def pipeline_finished(self):
finished = all(node.cleanup_called for node in self.all_connected())
self.log_info("finished. " + str(self.name))
return finished
def thread_monitor(self, max_time=None):
"""
This function loops over all of the threads in the pipeline, checking
that they are either ``finished`` or ``running``. If any have had an
abnormal exit, terminate the entire pipeline.
"""
counter = 0
error = False
time_started = time.time()
while not self.pipeline_finished:
logging.debug("MONITOR THREAD")
time.sleep(MONITOR_INTERVAL)
counter += 1
if max_time is not None:
print("checking max_time...")
if time.time() - time_started >= max_time:
self.pipeline_finished = True
print("finished because of max_time")
for node in self.all_connected():
node.finished = True
continue
# Check whether all the workers have ``.finished``
# self.pipeline_finished = all(
# node.finished for node in self.all_connected())
if counter % STATS_COUNTER_MODULO == 0:
table = prettytable.PrettyTable(
["Node", "Class", "Received", "Sent", "Queued", "Status", "Time",]
)
for node in sorted(list(self.all_connected()), key=lambda x: x.name):
if node.status == "running":
status_color = bcolors.WARNING
elif node.status == "stopped":
status_color = ""
elif node.status == "error":
status_color = bcolors.FAIL
error = True
elif node.status == "success":
status_color = bcolors.OKGREEN
else:
assert False
if node.logjam >= LOGJAM_THRESHOLD:
logjam_color = bcolors.FAIL
else:
logjam_color = ""
table.add_row(
[
logjam_color + node.name + bcolors.ENDC,
node.__class__.__name__,
node.messages_received_counter,
node.messages_sent_counter,
node.input_queue_size,
status_color + node.status + bcolors.ENDC,
node.time_running,
]
)
self.log_info("\n" + str(table))
if error:
logging.error("Terminating due to error.")
self.terminate_pipeline(error=True)
# self.pipeline_finished = True
break
# Check for blocked nodes
for node in self.all_connected():
input_queue_full = [
input_queue.approximately_full()
for input_queue in node.input_queue_list
]
output_queue_full = [
output_queue.approximately_full()
for output_queue in node.output_queue_list
]
logjam = (
not node.is_source
and all(input_queue_full)
and not any(output_queue_full)
)
node.logjam_score["polled"] += 1
logging.debug("LOGJAM SCORE: {logjam}".format(logjam=str(node.logjam)))
if logjam:
node.logjam_score["logjam"] += 1
logging.debug(
"LOGJAM {logjam} {name}".format(logjam=logjam, name=node.name)
)
self.log_info("Pipeline finished.")
self.log_info("Sending terminate signal to nodes.")
self.log_info("Messages that are being processed will complete.")
# HERE
if error:
self.log_info("Abnormal exit")
sys.exit(1)
else:
self.log_info("Normal exit.")
sys.exit(0)
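# --- Hedged usage sketch (added for illustration; not part of the original API docs) ---
# The ``MetalNode`` docstring above describes the node lifecycle and the
# ``node1 > node2`` linking syntax. The helper below shows how a tiny pipeline
# might be wired from classes defined later in this module (``ConstantEmitter``
# and ``PrinterOfThings``); the node names and payload are made up.
def _example_pipeline():  # pragma: no cover
    emitter = ConstantEmitter(thing={"greeting": "hi"}, max_loops=3, name="emitter")
    printer = PrinterOfThings(name="printer")
    emitter > printer  # equivalent to emitter.add_edge(printer)
    emitter.global_start()  # one thread per node, plus a monitor thread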
class CounterOfThings(MetalNode):
def bar__init__(self, *args, start=0, end=None, **kwargs):
self.start = start
self.end = end
super(CounterOfThings, self).__init__(*args, **kwargs)
def generator(self):
"""
Just start counting integers
"""
counter = 1
while 1:
yield counter
counter += 1
if counter > 10:
assert False
class FunctionOfMessage(MetalNode):
def __init__(self, function_name, *args, **kwargs):
self.function_name = function_name
components = self.function_name.split("__")
if len(components) == 1:
module = None
function_name = components[0]
function_obj = globals()[function_name]
else:
module = ".".join(components[:-1])
function_name = components[-1]
module = importlib.import_module(module)
function = getattr(module, function_name)
self.function = function
super(FunctionOfMessage, self).__init__(*args, **kwargs)
def process_item(self):
yield self.function(self.__message__)
class MockNode(MetalNode):
"""
This is only intended for doing unit tests, etc.
"""
def __init__(self, **kwargs):
self.message_holder = None
self.message_counter = 0
self.message_list = []
super(MockNode, self).__init__(**kwargs)
def process_item(self):
self.message_holder = self.__message__
self.message_list.append(self.__message__)
self.message_counter += 1
yield NothingToSeeHere()
class InsertData(MetalNode):
def __init__(
self, overwrite=True, overwrite_if_null=True, value_dict=None, **kwargs
):
self.overwrite = overwrite
self.overwrite_if_null = overwrite_if_null
self.value_dict = value_dict or {}
super(InsertData, self).__init__(**kwargs)
def process_item(self):
logging.debug("INSERT DATA: " + str(self.__message__))
for key, value in self.value_dict.items():
if (
(key not in self.__message__)
or self.overwrite
or (self.__message__.get(key) == None and self.overwrite_if_null)
):
self.__message__[key] = value
yield self.__message__
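# Hedged sketch for ``InsertData`` (illustrative values only): with ``overwrite=False``
# existing, non-null keys in the message are preserved; with the default
# ``overwrite=True`` every key in ``value_dict`` is written into the outgoing message.
def _example_insert_data():  # pragma: no cover
    return InsertData(value_dict={"source": "example"}, overwrite=False, name="tag_source")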
class RandomSample(MetalNode):
"""
Lets through only a random sample of incoming messages. Might be useful
for testing, or when only approximate results are necessary.
"""
    def __init__(self, sample=0.1, **kwargs):
        self.sample = sample
        super(RandomSample, self).__init__(**kwargs)
def process_item(self):
yield self.message if random.random() <= self.sample else None
class SubstituteRegex(MetalNode):
def __init__(self, match_regex=None, substitute_string=None, *args, **kwargs):
self.match_regex = match_regex
self.substitute_string = substitute_string
self.regex_obj = re.compile(self.match_regex)
super(SubstituteRegex, self).__init__(*args, **kwargs)
def process_item(self):
out = self.regex_obj.sub(self.substitute_string, self.message[self.key])
yield out
class CSVToDictionaryList(MetalNode):
def __init__(self, **kwargs):
super(CSVToDictionaryList, self).__init__(**kwargs)
def process_item(self):
csv_file_obj = io.StringIO(self.__message__)
csv_reader = csv.DictReader(csv_file_obj)
output = [row for row in csv_reader]
yield output
class SequenceEmitter(MetalNode):
"""
Emits ``sequence`` ``max_sequences`` times, or forever if
``max_sequences`` is ``None``.
"""
def __init__(self, sequence, *args, max_sequences=1, **kwargs):
self.sequence = sequence
self.max_sequences = max_sequences
super(SequenceEmitter, self).__init__(*args, **kwargs)
def generator(self):
"""
Emit the sequence ``max_sequences`` times.
"""
type_dict = {
"int": int,
"integer": int,
"str": str,
"string": str,
"float": float,
"bool": to_bool,
}
counter = 0
while counter < self.max_sequences:
for item in self.sequence:
if isinstance(item, (dict,)) and "value" in item and "type" in item:
item = type_dict[item["type"].lower()](item["value"])
item = {self.output_key: item}
yield item
counter += 1
def process_item(self):
"""
Emit the sequence ``max_sequences`` times.
"""
type_dict = {
"int": int,
"integer": int,
"str": str,
"string": str,
"float": float,
"bool": to_bool,
}
counter = 0
while counter < self.max_sequences:
for item in self.sequence:
if isinstance(item, (dict,)) and "value" in item and "type" in item:
item = type_dict[item["type"].lower()](item["value"])
item = {self.output_key: item}
yield item
counter += 1
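# Hedged sketch for ``SequenceEmitter`` (values are illustrative): items may be
# plain values or ``{"value": ..., "type": ...}`` dicts, which are cast via the
# ``type_dict`` above before being emitted under ``output_key``.
def _example_sequence_emitter():  # pragma: no cover
    return SequenceEmitter(
        [{"value": "42", "type": "int"}, {"value": "yes", "type": "bool"}],
        max_sequences=2,
        output_key="item",
        name="sequence_emitter",
    )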
class GetEnvironmentVariables(MetalNode):
"""
This node reads environment variables and stores them in the message.
The required keyword argument for this node is ``environment_variables``,
which is a list of -- you guessed it! -- environment variables. By
default, they will be read and stored in the outgoing message under
keys with the same names as the environment variables. E.g. ``FOO_VAR``
    will be stored in the message ``{"FOO_VAR": whatever}``.
Optionally, you can provide a dictionary to the ``mappings`` keyword
argument, which maps environment variable names to new names. E.g.
if ``mappings = {"FOO_VAR": "bar_var"}``, then the value of ``FOO_VAR``
will be stored in the message ``{"bar_var": whatever}``.
If the environment variable is not defined, then its value will be
set to ``None``.
Args:
mappings (dict): An optional dictionary mapping environment variable
names to new names.
environment_variables (list): A list of environment variable names.
"""
def __init__(self, mappings=None, environment_variables=None, **kwargs):
self.environment_mappings = mappings or {}
self.environment_variables = environment_variables or []
super(GetEnvironmentVariables, self).__init__(**kwargs)
def generator(self):
environment = {
self.environment_mappings.get(
environment_variable, environment_variable
): os.environ.get(environment_variable, None)
for environment_variable in self.environment_variables
}
yield environment
def process_item(self):
environment = {
self.environment_mappings.get(
environment_variable, environment_variable
): os.environ.get(environment_variable, None)
for environment_variable in self.environment_variables
}
yield environment
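# Hedged sketch of the ``mappings`` behaviour described in the docstring above
# (the variable names are hypothetical): ``API_TOKEN`` is emitted under the key
# ``token``; ``API_URL`` keeps its own name; unset variables come through as None.
def _example_get_environment_variables():  # pragma: no cover
    return GetEnvironmentVariables(
        environment_variables=["API_TOKEN", "API_URL"],
        mappings={"API_TOKEN": "token"},
        name="read_env",
    )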
class SimpleTransforms(MetalNode):
def __init__(
self,
missing_keypath_action="ignore",
starting_path=None,
transform_mapping=None,
target_value=None,
keypath=None,
**kwargs
):
self.missing_keypath_action = missing_keypath_action
self.transform_mapping = transform_mapping or []
self.functions_dict = {}
self.starting_path = starting_path
for transform in self.transform_mapping:
# Not doing the transforms; only loading the right functions here
function_name = transform.get("target_function", None)
full_function_name = function_name
if function_name is not None:
components = function_name.split("__")
if len(components) == 1:
module = None
function_name = components[0]
function_obj = globals()[function_name]
else:
module = ".".join(components[:-1])
function_name = components[-1]
module = importlib.import_module(module)
function = getattr(module, function_name)
self.functions_dict[full_function_name] = function
super(SimpleTransforms, self).__init__(**kwargs)
def process_item(self):
logging.debug("TRANSFORM " + str(self.name))
logging.debug(self.name + " " + str(self.message))
for transform in self.transform_mapping:
path = transform["path"]
target_value = transform.get("target_value", None)
function_name = transform.get("target_function", None)
starting_path = transform.get("starting_path", None)
if function_name is not None:
function = self.functions_dict[function_name]
else:
function = None
function_kwargs = transform.get("function_kwargs", None)
function_args = transform.get("function_args", None)
logging.debug(self.name + " calling replace_by_path:")
replace_by_path(
self.message,
tuple(path),
target_value=target_value,
function=function,
function_args=function_args,
starting_path=starting_path,
function_kwargs=function_kwargs,
)
logging.debug("after SimpleTransform: " + self.name + str(self.message))
yield self.message
class Serializer(MetalNode):
"""
Takes an iterable thing as input, and successively yields its items.
"""
def __init__(self, values=False, *args, **kwargs):
self.values = values
super(Serializer, self).__init__(**kwargs)
def process_item(self):
if self.__message__ is None:
yield None
elif self.values:
for item in self.__message__.values():
yield item
else:
for item in self.__message__:
logging.debug(self.name + " " + str(item))
yield item
class AggregateValues(MetalNode):
"""
    Aggregates the values found under ``tail_path`` in the incoming message.
"""
def __init__(self, values=False, tail_path=None, **kwargs):
self.tail_path = tail_path
self.values = values
super(AggregateValues, self).__init__(**kwargs)
def process_item(self):
values = aggregate_values(self.__message__, self.tail_path, values=self.values)
logging.debug("aggregate_values " + self.name + " " + str(values))
yield values
class Filter(MetalNode):
"""
    Applies tests to each message and filters out messages that don't pass.
    Built-in tests:
        key_exists
        value_is_true
        value_is_not_none
    Example:
        {'test': 'key_exists',
         'test_keypath': 'mykey'}
"""
def __init__(self, test=None, test_keypath=None, value=True, *args, **kwargs):
self.test = test
self.value = value
self.test_keypath = test_keypath or []
super(Filter, self).__init__(*args, **kwargs)
@staticmethod
def _key_exists(message, key):
return key in message
@staticmethod
def _value_is_not_none(message, key):
logging.debug(
"value_is_not_none: {message} {key}".format(message=str(message), key=key)
)
return get_value(message, key) is not None
@staticmethod
def _value_is_true(message, key):
return to_bool(message.get(key, False))
def process_item(self):
if self.test in ["key_exists", "value_is_not_none", "value_is_true"]:
result = (
getattr(self, "_" + self.test)(self.__message__, self.test_keypath)
== self.value
)
else:
raise Exception("Unknown test: {test_name}".format(test_name=test))
if result:
logging.debug("Sending message through")
yield self.message
else:
logging.debug("Blocking message: " + str(self.__message__))
yield NothingToSeeHere()
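# Hedged sketch of a ``Filter`` using one of the built-in tests listed in the
# class docstring; the keypath "user_id" is a made-up example.
def _example_filter():  # pragma: no cover
    # Only messages containing the key "user_id" are passed downstream.
    return Filter(test="key_exists", test_keypath="user_id", name="require_user_id")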
class StreamMySQLTable(MetalNode):
def __init__(
self,
*args,
host="localhost",
user=None,
table=None,
password=None,
database=None,
port=3306,
to_row_obj=False,
send_batch_markers=False,
**kwargs
):
self.host = host
self.user = user
self.to_row_obj = to_row_obj
self.password = password
self.database = database
self.port = port
self.table = table
self.send_batch_markers = send_batch_markers
super(StreamMySQLTable, self).__init__(**kwargs)
def setup(self):
self.db = MySQLdb.connect(
            host=self.host, passwd=self.password, db=self.database, user=self.user, port=self.port,
)
self.cursor = MySQLdb.cursors.DictCursor(self.db)
self.table_schema_query = (
"""SELECT column_name, column_type """
"""FROM information_schema.columns """
"""WHERE table_name='{table}';""".format(table=self.table)
)
        logging.debug(self.table_schema_query)
# self.table_schema = self.get_schema()
# Need a mapping from header to MYSQL TYPE
# for mapping in self.table_schema:
# column = mapping["column_name"]
# type_string = mapping["column_type"]
# this_type = ds.MySQLTypeSystem.type_mapping(type_string)
# Unfinished experimental code
# Start here:
# store the type_mapping
# use it to cast the data into the MySQLTypeSchema
# ensure that the generator is emitting MySQLTypeSchema objects
# def get_schema(self):
# self.cursor.execute(self.table_schema_query)
# table_schema = self.cursor.fetchall()
# return table_schema
def generator(self):
if self.send_batch_markers:
yield BatchStart(schema=self.table_schema)
self.cursor.execute("""SELECT * FROM {table};""".format(table=self.table))
result = self.cursor.fetchone()
while result is not None:
yield result
result = self.cursor.fetchone()
if self.send_batch_markers:
yield BatchEnd()
class PrinterOfThings(MetalNode):
@set_kwarg_attributes()
def __init__(self, disable=False, pretty=False, prepend="printer: ", **kwargs):
self.disable = disable
self.pretty = pretty
super(PrinterOfThings, self).__init__(**kwargs)
logging.debug("Initialized printer...")
def process_item(self):
if not self.disable:
print(self.prepend)
if self.pretty:
pprint.pprint(self.__message__, indent=2)
else:
print(str(self.__message__))
print("\n")
print("------------")
yield self.message
class ConstantEmitter(MetalNode):
"""
Send a thing every n seconds
"""
def __init__(self, thing=None, max_loops=5, delay=0.5, **kwargs):
self.thing = thing
self.delay = delay
self.max_loops = max_loops
super(ConstantEmitter, self).__init__(**kwargs)
def generator(self):
counter = 0
while counter < self.max_loops:
if random.random() < -0.1:
assert False
time.sleep(self.delay)
yield self.thing
counter += 1
class TimeWindowAccumulator(MetalNode):
"""
    Every N seconds, put the latest M seconds' worth of data on the queue.
"""
@set_kwarg_attributes()
def __init__(self, time_window=None, send_interval=None, **kwargs):
pass
class LocalFileReader(MetalNode):
@set_kwarg_attributes()
def __init__(
self,
directory=".",
send_batch_markers=True,
serialize=False,
read_mode="r",
filename=None,
**kwargs
):
super(LocalFileReader, self).__init__(**kwargs)
def process_item(self):
filename = "/".join([self.directory, self.filename or self.__message__])
with open(filename, self.read_mode) as file_obj:
if self.serialize:
for line in file_obj:
output = line
yield output
else:
output = file_obj.read()
yield output
class CSVReader(MetalNode):
@set_kwarg_attributes()
def __init__(self, **kwargs):
super(CSVReader, self).__init__(**kwargs)
def process_item(self):
file_obj = io.StringIO(self.__message__)
reader = csv.DictReader(file_obj)
for row in reader:
yield row
class LocalDirectoryWatchdog(MetalNode):
def __init__(self, directory=".", check_interval=3, **kwargs):
self.directory = directory
self.latest_arrival = time.time()
self.check_interval = check_interval
super(LocalDirectoryWatchdog, self).__init__(**kwargs)
def generator(self):
while self.keep_alive:
logging.debug("sleeping...")
time.sleep(self.check_interval)
time_in_interval = None
for filename in os.listdir(self.directory):
last_modified_time = os.path.getmtime(
"/".join([self.directory, filename])
)
if last_modified_time > self.latest_arrival:
yield "/".join([self.directory, filename])
if (
time_in_interval is None
or last_modified_time > time_in_interval
):
time_in_interval = last_modified_time
logging.debug("time_in_interval: " + str(time_in_interval))
if time_in_interval is not None:
self.latest_arrival = time_in_interval
class StreamingJoin(MetalNode):
"""
Joins two streams on a key, using exact match only. MVP.
"""
def __init__(self, window=30, streams=None, *args, **kwargs):
self.window = window
self.streams = streams
self.stream_paths = streams
self.buffers = {
stream_name: TimedDict(timeout=self.window)
for stream_name in self.stream_paths.keys()
}
super(StreamingJoin, self).__init__(*args, **kwargs)
def process_item(self):
"""
"""
value_to_match = get_value(
self.message, self.stream_paths[self.message_source.name]
)
# Check for matches in all other streams.
# If complete set of matches, yield the merged result
# If not, add it to the `TimedDict`.
yield ("hi")
class DynamicClassMediator(MetalNode):
def __init__(self, *args, **kwargs):
super(DynamicClassMediator, self).__init__(**kwargs)
for node_name, node_dict in self.node_dict.items():
cls_obj = node_dict["cls_obj"]
node_obj = cls_obj(**kwargs)
node_dict["obj"] = node_obj
for edge in self.raw_config["edges"]:
source_node_obj = self.node_dict[edge["from"]]["obj"]
target_node_obj = self.node_dict[edge["to"]]["obj"]
source_node_obj > target_node_obj
def bind_methods():
for attr_name in dir(DynamicClassMediator):
if attr_name.startswith("_"):
continue
attr_obj = getattr(DynamicClassMediator, attr_name)
if not isinstance(attr_obj, types.FunctionType):
continue
setattr(self, attr_name, types.MethodType(attr_obj, self))
bind_methods()
source = self.get_source()
self.input_queue_list = source.input_queue_list
sink = self.get_sink()
self.output_queue_list = sink.output_queue_list
self.output_node_list = sink.output_node_list
self.input_node_list = source.input_node_list
def get_sink(self):
sinks = self.sink_list()
if len(sinks) > 1:
raise Exception("`DynamicClassMediator` may have no more than one sink.")
elif len(sinks) == 0:
return None
return sinks[0]
def get_source(self):
sources = self.source_list()
if len(sources) > 1:
raise Exception("`DynamicClassMediator` may have no more than one source.")
elif len(sources) == 0:
return None
return sources[0]
def sink_list(self):
sink_nodes = []
for node_name, node_dict in self.node_dict.items():
node_obj = node_dict["obj"]
if len(node_obj.output_queue_list) == 0:
sink_nodes.append(node_obj)
return sink_nodes
def source_list(self):
source_nodes = [
node_dict["obj"]
for node_dict in self.node_dict.values()
if node_dict["obj"].is_source
]
return source_nodes
def hi(self):
return "hi"
def get_node_dict(node_config):
node_dict = {}
for node_config in node_config["nodes"]:
node_class = globals()[node_config["class"]]
node_name = node_config["name"]
node_dict[node_name] = {}
node_dict[node_name]["class"] = node_class
frozen_arguments = node_config.get("frozen_arguments", {})
node_dict[node_name]["frozen_arguments"] = frozen_arguments
node_obj = node_class(**frozen_arguments)
node_dict[node_name]["remapping"] = node_config.get("arg_mapping", {})
return node_dict
def kwarg_remapper(f, **kwarg_mapping):
reverse_mapping = {value: key for key, value in kwarg_mapping.items()}
logging.debug("kwarg_mapping:" + str(kwarg_mapping))
parameters = [i for i, _ in list(inspect.signature(f).parameters.items())]
for kwarg in parameters:
if kwarg not in kwarg_mapping:
reverse_mapping[kwarg] = kwarg
def remapped_function(*args, **kwargs):
remapped_kwargs = {}
for key, value in kwargs.items():
if key in reverse_mapping:
remapped_kwargs[reverse_mapping[key]] = value
logging.debug("renamed function with kwargs: " + str(remapped_kwargs))
return f(*args, **remapped_kwargs)
return remapped_function
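# Hedged usage sketch for ``kwarg_remapper`` (names are illustrative): the wrapper
# renames incoming keyword arguments before delegating to the original function.
def _example_kwarg_remapper():  # pragma: no cover
    def greet(greeting="hi"):
        return greeting
    # Callers may now pass ``salutation=...``; it is forwarded as ``greeting=...``.
    remapped_greet = kwarg_remapper(greet, greeting="salutation")
    return remapped_greet(salutation="hello")  # -> "hello"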
def template_class(
class_name, parent_class, kwargs_remapping, frozen_arguments_mapping
):
kwargs_remapping = kwargs_remapping or {}
frozen_init = functools.partial(parent_class.__init__, **frozen_arguments_mapping)
if isinstance(parent_class, (str,)):
parent_class = globals()[parent_class]
cls = type(class_name, (parent_class,), {})
setattr(cls, "__init__", kwarg_remapper(frozen_init, **kwargs_remapping))
return cls
def class_factory(raw_config):
new_class = type(raw_config["name"], (DynamicClassMediator,), {})
new_class.node_dict = get_node_dict(raw_config)
new_class.class_name = raw_config["name"]
new_class.edge_list_dict = raw_config.get("edges", [])
new_class.raw_config = raw_config
for node_name, node_config in new_class.node_dict.items():
_class = node_config["class"]
cls = template_class(
node_name,
_class,
node_config["remapping"],
node_config["frozen_arguments"],
)
setattr(cls, "raw_config", raw_config)
node_config["cls_obj"] = cls
# Inject?
globals()[new_class.__name__] = new_class
return new_class
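# Hedged sketch of the ``raw_config`` structure consumed by ``class_factory`` and
# ``get_node_dict`` above; every name and value here is hypothetical.
EXAMPLE_RAW_CONFIG = {
    "name": "ExamplePipelineClass",
    "nodes": [
        {
            "name": "emit",
            "class": "ConstantEmitter",
            "frozen_arguments": {"thing": {"value": 1}, "max_loops": 3},
            "arg_mapping": {},
        },
        {
            "name": "show",
            "class": "PrinterOfThings",
            "frozen_arguments": {"prepend": "out: "},
            "arg_mapping": {},
        },
    ],
    "edges": [{"from": "emit", "to": "show"}],
}
# ExamplePipelineClass = class_factory(EXAMPLE_RAW_CONFIG)  # would also inject it into globals()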
class Remapper(MetalNode):
def __init__(self, mapping=None, **kwargs):
self.remapping_dict = mapping or {}
super(Remapper, self).__init__(**kwargs)
def process_item(self):
logging.debug("Remapper {node}:".format(node=self.name) + str(self.__message__))
out = remap_dictionary(self.__message__, self.remapping_dict)
yield out
class BlackHole(MetalNode):
"""
Accepts any incoming message and promptly ignores it. Returns ``NothingToSeeHere``.
"""
def __init__(self, **kwargs):
super(BlackHole, self).__init__(**kwargs)
def process_item(self):
logging.debug(
"BlackHole {node}:".format(node=self.name) + str(self.__message__)
)
yield NothingToSeeHere()
class Blocker(BlackHole):
"""
Class that ignores all messages, but sends a message when all of its upstream
nodes have finished.
"""
def __init__(self, **kwargs):
kwargs.update({"send_termination_message": True})
super(Blocker, self).__init__(**kwargs)
class BatchMessages(MetalNode):
def __init__(
self, batch_size=None, batch_list=None, counter=0, timeout=5, **kwargs
):
self.batch_size = batch_size
self.timeout = timeout
self.counter = 0
self.batch_list = batch_list or []
super(BatchMessages, self).__init__(**kwargs)
def process_item(self):
self.counter += 1
self.batch_list.append(self.__message__)
logging.debug(self.name + " " + str(self.__message__))
out = NothingToSeeHere()
if self.counter % self.batch_size == 0:
out = self.batch_list
logging.debug("BatchMessages: " + str(out))
self.batch_list = []
yield out
def cleanup(self):
self.log_info(self.name + " in cleanup, sending remainder of batch...")
yield self.batch_list
if __name__ == "__main__":
pass
avg_line_length: 35.185333 | max_line_length: 97 | alphanum_fraction: 0.587194
hexsha: a398e86c39e2b6840a4241a3fb3fdf25bae17d1f | size: 2,164 | ext: py | lang: Python
max_stars_repo_path: examples/simple_pruning.py | max_stars_repo_name: ammaddd/optuna | max_stars_repo_head_hexsha: a24b3aa6e1ae2e4daf19ed3a79ac52f8b4265206 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: examples/simple_pruning.py | max_issues_repo_name: ammaddd/optuna | max_issues_repo_head_hexsha: a24b3aa6e1ae2e4daf19ed3a79ac52f8b4265206 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: examples/simple_pruning.py | max_forks_repo_name: ammaddd/optuna | max_forks_repo_head_hexsha: a24b3aa6e1ae2e4daf19ed3a79ac52f8b4265206 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
Optuna example that demonstrates a pruner.
In this example, we optimize a classifier configuration using scikit-learn. Note that, to enable
the pruning feature, the following 2 methods are invoked after each step of the iterative training.
(1) :func:`optuna.trial.Trial.report`
(2) :func:`optuna.trial.Trial.should_prune`
You can run this example as follows:
    $ python simple_pruning.py
"""
import sklearn.datasets
import sklearn.linear_model
import sklearn.model_selection
import optuna
from optuna.trial import TrialState
# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
def objective(trial):
iris = sklearn.datasets.load_iris()
classes = list(set(iris.target))
train_x, valid_x, train_y, valid_y = sklearn.model_selection.train_test_split(
iris.data, iris.target, test_size=0.25
)
alpha = trial.suggest_float("alpha", 1e-5, 1e-1, log=True)
clf = sklearn.linear_model.SGDClassifier(alpha=alpha)
for step in range(100):
clf.partial_fit(train_x, train_y, classes=classes)
# Report intermediate objective value.
intermediate_value = clf.score(valid_x, valid_y)
trial.report(intermediate_value, step)
# Handle pruning based on the intermediate value.
if trial.should_prune():
raise optuna.TrialPruned()
return clf.score(valid_x, valid_y)
if __name__ == "__main__":
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=100)
pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
print("Study statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
avg_line_length: 31.362319 | max_line_length: 99 | alphanum_fraction: 0.709335
hexsha: 8149db7c0a6f0e15714071474c4fe0e53fd9e682 | size: 1,959 | ext: py | lang: Python
max_stars_repo_path: test/vanilla/Expected/AcceptanceTests/ParameterFlattening/parameterflattening/aio/_configuration_async.py | max_stars_repo_name: Azure/autorest.azure-functions-python | max_stars_repo_head_hexsha: b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2020-10-22T20:35:38.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-21T07:29:01.000Z
max_issues_repo_path: test/vanilla/Expected/AcceptanceTests/ParameterFlattening/parameterflattening/aio/_configuration_async.py | max_issues_repo_name: Azure/autorest.azure-functions-python | max_issues_repo_head_hexsha: b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2020-09-09T15:16:15.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-20T15:25:18.000Z
max_forks_repo_path: test/vanilla/Expected/AcceptanceTests/ParameterFlattening/parameterflattening/aio/_configuration_async.py | max_forks_repo_name: Azure/autorest.azure-functions-python | max_forks_repo_head_hexsha: b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-11-10T07:16:23.000Z | max_forks_repo_forks_event_max_datetime: 2020-12-30T11:03:14.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
class AutoRestParameterFlatteningConfiguration(Configuration):
"""Configuration for AutoRestParameterFlattening.
Note that all parameters used to create this instance are saved as instance
attributes.
"""
def __init__(
self,
**kwargs: Any
) -> None:
super(AutoRestParameterFlatteningConfiguration, self).__init__(**kwargs)
kwargs.setdefault('sdk_moniker', 'autorestparameterflattening/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
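# --- Added illustration (not part of the generated code) ---
# A minimal usage sketch: any of the pipeline policies configured above can be
# overridden by passing it as a keyword argument; the concrete policy settings
# below (retry_total, logging_enable) are illustrative assumptions.
def _example_configuration() -> AutoRestParameterFlatteningConfiguration:
    return AutoRestParameterFlatteningConfiguration(
        retry_policy=policies.AsyncRetryPolicy(retry_total=3),
        logging_policy=policies.NetworkTraceLoggingPolicy(logging_enable=True),
    )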
| 43.533333
| 106
| 0.678407
|
22232d59e31a3a66fe0c7449ed51a0a3a1aedade
| 926
|
py
|
Python
|
bluebird/api/resources/seed.py
|
rkm/bluebird
|
2325ebb151724d4444c092c095a040d7365dda79
|
[
"MIT"
] | 8
|
2019-01-29T15:19:39.000Z
|
2020-07-16T03:55:36.000Z
|
bluebird/api/resources/seed.py
|
rkm/bluebird
|
2325ebb151724d4444c092c095a040d7365dda79
|
[
"MIT"
] | 46
|
2019-02-08T14:23:11.000Z
|
2021-04-06T13:45:10.000Z
|
bluebird/api/resources/seed.py
|
rkm/bluebird
|
2325ebb151724d4444c092c095a040d7365dda79
|
[
"MIT"
] | 3
|
2019-05-06T14:18:07.000Z
|
2021-06-17T10:39:59.000Z
|
"""
Provides logic for the SEED (set seed) API endpoint
"""
from flask_restful import reqparse
from flask_restful import Resource
import bluebird.api.resources.utils.responses as responses
import bluebird.api.resources.utils.utils as utils
from bluebird.utils.types import is_valid_seed
_PARSER = reqparse.RequestParser()
_PARSER.add_argument("value", type=int, location="json", required=True)
class Seed(Resource):
"""SEED (set seed) command"""
@staticmethod
def post():
"""Logic for POST events. Sets the seed of the simulator"""
req_args = utils.parse_args(_PARSER)
seed: int = req_args["value"]
if not is_valid_seed(seed):
return responses.bad_request_resp(
"Invalid seed specified. Must be a positive integer less than 2^32"
)
err = utils.sim_proxy().simulation.set_seed(seed)
return responses.checked_resp(err)
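# --- Added illustration (not part of the original module) ---
# A minimal client-side sketch of exercising this endpoint; the base URL, port and
# route ("/seed") are illustrative assumptions about how the resource is mounted.
def _example_set_seed_request():
    import requests  # assumed to be available on the client side
    resp = requests.post("http://localhost:5001/seed", json={"value": 42})
    return resp.status_code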
| 28.060606
| 83
| 0.694384
|
00e801779df86aebadd5e0e5ec153c7c6b93f6a1
| 4,969
|
py
|
Python
|
vnpy/trader/app/ctaStrategy/ctaPosition.py
|
frikyalong/vnpy
|
d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3
|
[
"MIT"
] | 1
|
2018-11-05T07:34:36.000Z
|
2018-11-05T07:34:36.000Z
|
vnpy/trader/app/ctaStrategy/ctaPosition.py
|
currently1/vnpy
|
674c9f04fe7d8e0784e5d98e96cd9f797742d22a
|
[
"MIT"
] | null | null | null |
vnpy/trader/app/ctaStrategy/ctaPosition.py
|
currently1/vnpy
|
674c9f04fe7d8e0784e5d98e96cd9f797742d22a
|
[
"MIT"
] | null | null | null |
# encoding: UTF-8
from datetime import datetime
from vnpy.trader.app.ctaStrategy.ctaBase import *
from vnpy.trader.vtConstant import *
DEBUGCTALOG = True
class CtaPosition:
    """Position management class for a strategy
    v 0.1 simple numeric values representing the long and short position sizes
    v 0.2 added separate long and short holdings; removed the average holding price
    v 0.3 officially stopped updating the parent strategy's pos
    """
    def __init__(self, strategy):
        self.strategy = strategy
        self.longPos = 0        # long position holding
        self.shortPos = 0       # short position holding
        self.pos = 0            # position state: 0 = flat / long and short balanced; >=1 net long; <=-1 net short
        self.maxPos = 1         # maximum position size (long + short total)
        self.step = 1           # increment used when adding to the position
# disabled in v0.2
#self.posList = []
#self.avgPrice = EMPTY_FLOAT
    def avaliablePos2Add(self, direction=EMPTY_STRING):
        """Remaining position size that can still be added"""
if direction == DIRECTION_LONG:
return self.maxPos - abs(self.longPos)
elif direction == DIRECTION_SHORT:
return self.maxPos - abs(self.shortPos)
return self.maxPos - abs(self.longPos) - abs(self.shortPos)
    def openPos(self, direction, vol, price=EMPTY_FLOAT):
        """Open or add to a position"""
        # vol: positive integer
# disabled in v0.2
#if self.pos == 0:
# self.posList = []
        if direction == DIRECTION_LONG:  # add to the long position
            if (max(self.pos, self.longPos) + vol) > self.maxPos:
                self.writeCtaError(u'Error: position limit exceeded. Net: {}, long: {}, adding long: {}, max: {}'
                                   .format(self.pos, self.longPos, vol, self.maxPos))
                # warn only
                #return False
            # update
            self.writeCtaLog(u'Long position: {0}->{1}'.format(self.longPos, self.longPos + vol))
            self.writeCtaLog(u'Net position: {0}->{1}'.format(self.pos, self.pos + vol))
            self.longPos += vol
            self.pos += vol
            # update the parent strategy's pos; this approach is not recommended
            self.strategy.pos = self.pos
        if direction == DIRECTION_SHORT:  # add to the short position
            if (min(self.pos, self.shortPos) - vol) < (0 - self.maxPos):
                self.writeCtaError(u'Error: position limit exceeded. Net: {}, short: {}, adding short: {}, max: {}'
                                   .format(self.pos, self.shortPos, vol, self.maxPos))
                #return False
            self.writeCtaLog(u'Short position: {0}->{1}'.format(self.shortPos, self.shortPos - vol))
            self.writeCtaLog(u'Net position: {0}->{1}'.format(self.pos, self.pos - vol))
            self.shortPos -= vol
            self.pos -= vol
            # update the parent strategy's pos; this approach is not recommended
            #self.strategy.pos = self.pos
# v0.2 disabled
#if price > EMPTY_FLOAT:
# self.posList.append(price)
        # calculate the average holding price
#if len(self.posList) > 0:
# self.avgPrice = sum(self.posList)/len(self.posList)
# self.avgPrice = round(self.avgPrice, 3)
return True
    def closePos(self, direction, vol):
        """Close or reduce a position"""
        # vol: positive integer
        if direction == DIRECTION_LONG:  # close the short position (cover)
            if self.shortPos + vol > 0:
                self.writeCtaError(u'Error: position limit exceeded. Net: {}, short: {}, closing: {}'.format(self.pos, self.shortPos, vol))
                #self.strategy.pos = self.pos
                #return False
            self.writeCtaLog(u'Short position: {0}->{1}'.format(self.shortPos, self.shortPos + vol))
            self.writeCtaLog(u'Net position: {0}->{1}'.format(self.pos, self.pos + vol))
            self.shortPos += vol
            self.pos += vol
            # update the parent strategy's pos; this approach is not recommended
self.strategy.pos = self.pos
        if direction == DIRECTION_SHORT:  # close the long position
            if self.longPos - vol < 0:
                self.writeCtaError(u'Error: position limit exceeded. Net: {}, long: {}, closing: {}'.format(self.pos, self.longPos, vol))
                #self.strategy.pos = self.pos
                #return False
            self.writeCtaLog(u'Long position: {0}->{1}'.format(self.longPos, self.longPos - vol))
            self.writeCtaLog(u'Net position: {0}->{1}'.format(self.pos, self.pos - vol))
self.longPos -= vol
self.pos -= vol
# self.strategy.pos = self.pos
# disabled in v0.2
#if abs(self.pos) > 0:
# self.posList = self.posList[:-vol]
#else:
# self.posList = []
        # calculate the average holding price
#if len(self.posList) > 0:
# self.avgPrice = sum(self.posList)/len(self.posList)
# self.avgPrice = round(self.avgPrice, 3)
return True
    def clear(self):
        """Clear the position state"""
        self.writeCtaLog(u'Clearing all position state')
self.pos = 0
self.longPos = 0
self.shortPos = 0
        # update the parent strategy's pos
#self.strategy.pos = 0
# ----------------------------------------------------------------------
    def writeCtaError(self, content):
        """Log a CTA error message"""
        self.strategy.writeCtaLog(content)
# ----------------------------------------------------------------------
    def writeCtaLog(self, content):
        """Write a CTA log entry"""
        self.strategy.writeCtaLog(content)
    def debugCtaLog(self, content):
        """Write a CTA debug log entry"""
if DEBUGCTALOG:
self.strategy.writeCtaLog('[DEBUG]'+content)
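# --- Added illustration (not part of the original module) ---
# A minimal usage sketch with a stand-in strategy object. The real class expects a
# vnpy CTA strategy instance; the stub below is an illustrative assumption that only
# provides the writeCtaLog() hook and the pos attribute that CtaPosition touches.
class _StubStrategy(object):
    pos = 0
    def writeCtaLog(self, content):
        print(content)

def _example_position_flow():
    position = CtaPosition(strategy=_StubStrategy())
    position.openPos(DIRECTION_LONG, vol=1)    # open one unit long
    position.closePos(DIRECTION_SHORT, vol=1)  # sell to close the long unit
    position.clear()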
| 32.058065
| 99
| 0.511169
|
7791537b77bf42c98dd57fdf86704371e0ee9ddd
| 3,732
|
py
|
Python
|
untitled1.py
|
MohitKumar21/JAIPUR-RENTAL-PRICE-PREDICTION
|
03cc8558a9775117f841f1f181c96e20edf8fe7d
|
[
"Apache-2.0"
] | 1
|
2020-04-29T08:43:24.000Z
|
2020-04-29T08:43:24.000Z
|
untitled1.py
|
MohitKumar21/JAIPUR-RENTAL-PRICE-PREDICTION
|
03cc8558a9775117f841f1f181c96e20edf8fe7d
|
[
"Apache-2.0"
] | null | null | null |
untitled1.py
|
MohitKumar21/JAIPUR-RENTAL-PRICE-PREDICTION
|
03cc8558a9775117f841f1f181c96e20edf8fe7d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 22:56:12 2019
@author: Suman
JaipurRentals
Jaipur’s real estate market is experiencing an incredible resurgence, with property prices soaring by double digits
every year since 2013. While homeowners have plenty to celebrate, the same cannot be said of people
looking for a home to buy or rent.
In Jaipur, the rental market is said to be as crazy as the purchasing market. You are new to Jaipur and
want to rent a decent apartment. Since you know Machine Learning, you decided to build a model
that could help you find a nice apartment at the best price.
Get your data from various apartment rental sites and work through the following observation points:
· How does the general rental prices distribution looks like? (Graphical representation is appreciated)
· Which are the hottest areas?
· Which area would be more interesting to start hunting?
· Are you able to predict rental price of an apartment?
"""
import pandas as pd
data = pd.read_csv('processed_data.csv')
from collections import Counter
top = Counter(data.location)
data.index = range(data.shape[0])
property_type = data.PropertyType.unique()
loc_price = {}
for i in range(len(data)):
if loc_price.get(data.iloc[i].location):
loc_price[ data.iloc[i].location] += data.iloc[i].price
else:
loc_price[data.iloc[i].location] = data.iloc[i].price
avg_price = {}
for items in loc_price.keys():
avg_price[items] = loc_price.get(items)/top[items]
location = loc_price.keys()
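# --- Added illustration (not part of the original script) ---
# A minimal sketch: the average-price-per-location loop above can be expressed as a
# pandas groupby; stored in a separate variable so the original dicts stay untouched.
avg_price_groupby = data.groupby("location")["price"].mean().to_dict()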
#import matplotlib.pyplot as plt
#
#plt.figure(figsize=(30,10))
#plt.bar(height = avg_price.values(), x=avg_price.keys())
#plt.margins(x=0)
#plt.xticks(fontsize = 10,fontname = "Comic Sans MS", rotation = 90)
#plt.xlabel('Locations')
#plt.ylabel('Average Price')
#plt.savefig('chart.svg',format='svg',dpi=1500,bbox_inches = 'tight')
#plt.show()
#· Which are the hottest areas?
import operator
a = dict(sorted(avg_price.items(), key=operator.itemgetter(1), reverse=True)[:10])
#print('Top 10 Locations\n')
#for item in a.keys():
# print(item.title())
# Which area would be more interesting to start hunting?
hunt = pd.DataFrame()
for loc,num in top.most_common(10):
temp = []
for i in range(1,11):
try:
temp.append(str(str(i)+' BHK Average rate: '+str(int(data['price'][(data.location==loc) & (data.BHK==i)].mean()))))
except:
temp.append(str(str(i)+' BHK Not Available'))
hunt[loc] = temp
#
#hunt3 = pd.DataFrame()
#labels = []
#for loc,num in top.most_common(10):
# top3price = []
# for i in range(1,4):
# top3price.append(int(data['price'][(data.location==loc) & (data.BHK==i)].mean()))
# hunt3[loc] = top3price
# labels.append(loc)
#
#
#newhunt3 = pd.DataFrame({'one':hunt3.iloc[0:1].values[0],'two':hunt3.iloc[1:2].values[0],'three':hunt3.iloc[2:3].values[0]})
#
#import matplotlib.pyplot as plt
#
#x = [1,2,3,4,5,6,7,8,9,10]
#y = newhunt3.one.values
#plt.plot(x, y, label='1 BHK',marker='o')
#y = newhunt3.two.values
#plt.plot(x, y, label='2 BHK',marker='o')
#y = newhunt3.three.values
#plt.plot(x, y, label='3 BHK',marker='o')
#
#plt.xticks(x, labels, rotation='vertical')
#plt.xlabel('Locations')
#plt.ylabel('Price')
#plt.margins(0.1)
#plt.subplots_adjust(bottom=0.15)
#plt.legend()
#plt.savefig('top10loc1.svg',dpi=1500,bbox_inches = 'tight')
#plt.show()
import pickle
with open('model.pkl','rb') as f1:
model = pickle.load(f1)
| 31.361345
| 128
| 0.651393
|
f3e014367fe20b3c3e537bc49d451f56312fa336
| 23,957
|
py
|
Python
|
tests/view/runtests/test_views.py
|
yifanjiang/moztrap
|
2130c7101b7596b19a2697ab5f1c745e93e7c95b
|
[
"BSD-2-Clause"
] | 1
|
2015-02-10T15:09:42.000Z
|
2015-02-10T15:09:42.000Z
|
tests/view/runtests/test_views.py
|
yifanjiang/moztrap
|
2130c7101b7596b19a2697ab5f1c745e93e7c95b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/view/runtests/test_views.py
|
yifanjiang/moztrap
|
2130c7101b7596b19a2697ab5f1c745e93e7c95b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Tests for runtests views.
"""
from datetime import datetime
from django.core.urlresolvers import reverse
from BeautifulSoup import BeautifulSoup
from mock import patch
from tests import case
class SelectTest(case.view.AuthenticatedViewTestCase,
case.view.NoCacheTest,
):
"""Tests for select-run view."""
@property
def url(self):
"""Shortcut for runtests url."""
return reverse("runtests")
def test_requires_execute_permission(self):
"""Requires execute permission."""
res = self.app.get(
self.url, user=self.F.UserFactory.create(), status=302)
self.assertRedirects(res, "/")
def test_finder(self):
"""Finder is present in context with list of products."""
self.add_perm("execute")
p = self.F.ProductFactory.create(name="Foo Product")
res = self.get()
res.mustcontain("Foo Product")
res.mustcontain(
"data-sub-url="
'"?finder=1&col=productversions&id={0}"'.format(p.id))
def test_finder_ajax(self):
"""Finder intercepts its ajax requests to return child obj lists."""
self.add_perm("execute")
pv = self.F.ProductVersionFactory.create(version="1.0.1")
res = self.get(
params={
"finder": "1",
"col": "productversions",
"id": str(pv.product.id)
},
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertIn("1.0.1", res.json["html"])
self.assertIn(
'data-sub-url="?finder=1&col=runs&id={0}"'.format(pv.id),
res.json["html"]
)
class SetEnvironmentTest(case.view.AuthenticatedViewTestCase,
case.view.NoCacheTest,
):
"""Tests for set_environment view."""
def setUp(self):
"""These tests all require a test run."""
super(SetEnvironmentTest, self).setUp()
self.testrun = self.F.RunFactory.create(name="Foo Run")
@property
def url(self):
"""Shortcut for set_environment url."""
return reverse(
"runtests_environment", kwargs={"run_id": self.testrun.id})
@property
def envs(self):
"""A lazily-created sample set of environments."""
if getattr(self, "_cached_envs", None) is None:
self._cached_envs = self.F.EnvironmentFactory.create_full_set(
{"OS": ["Windows 7", "Ubuntu Linux"]})
return self._cached_envs
def test_requires_execute_permission(self):
"""Requires execute permission."""
res = self.app.get(
self.url, user=self.F.UserFactory.create(), status=302)
self.assertRedirects(res, "/")
def test_form_choices(self):
"""Form has available categories and elements for run as choices."""
self.add_perm("execute")
self.testrun.environments.add(*self.envs)
res = self.get()
res.mustcontain("OS")
res.mustcontain("Ubuntu Linux")
res.mustcontain("Windows 7")
def test_valid_environments(self):
"""JSON list of valid envs (as ordered element list) is in template."""
self.add_perm("execute")
envs = self.F.EnvironmentFactory.create_set(
["OS", "Browser"], ["OS X", "Safari"], ["Windows", "IE"])
self.testrun.environments.add(*envs)
osx = self.model.Element.objects.get(name="OS X")
safari = self.model.Element.objects.get(name="Safari")
windows = self.model.Element.objects.get(name="Windows")
ie = self.model.Element.objects.get(name="IE")
res = self.get()
res.mustcontain("VALID_ENVIRONMENTS = [")
res.mustcontain("[{0}, {1}]".format(safari.id, osx.id))
res.mustcontain("[{0}, {1}]".format(ie.id, windows.id))
def test_form_initial(self):
"""Form initial choices determined by "environment" querystring key."""
self.add_perm("execute")
self.testrun.environments.add(*self.envs)
res = self.get(params=dict(environment=self.envs[0].id))
res.mustcontain(
'<option value="{0}" selected="selected">'.format(
self.envs[0].elements.get().id)
)
def test_run(self):
"""Form has test run name in label."""
self.add_perm("execute")
res = self.get()
res.mustcontain("run tests in Foo Run!")
def test_bad_run_id_404(self):
"""Bad run id returns 404."""
self.add_perm("execute")
url = reverse("runtests_environment", kwargs={"run_id": 9999})
self.app.get(url, user=self.user, status=404)
def test_ajax(self):
"""Ajax request uses partial template."""
self.add_perm("execute")
res = self.get(headers={"X-Requested-With": "XMLHttpRequest"})
self.assertNotIn("<body", res.body)
def test_env_required(self):
"""Invalid combination results in error."""
self.add_perm("execute")
res = self.get().forms["runtests-environment-form"].submit()
res.mustcontain("selected environment is not valid")
def test_set_environment(self):
"""Selecting an environment redirects to run view for that run/env."""
self.add_perm("execute")
self.testrun.environments.add(*self.envs)
cat = self.model.Category.objects.get()
form = self.get().forms["runtests-environment-form"]
form["category_{0}".format(cat.id)] = self.envs[0].elements.get().id
res = form.submit(status=302)
self.assertRedirects(
res,
reverse(
"runtests_run",
kwargs={"run_id": self.testrun.id, "env_id": self.envs[0].id})
)
def test_set_environment_and_build(self):
"""Selecting an environment and build redirects to run view for that run/env."""
self.add_perm("execute")
self.testrun.environments.add(*self.envs)
self.testrun.is_series = True
self.testrun.save()
cat = self.model.Category.objects.get()
form = self.get().forms["runtests-environment-form"]
form["category_{0}".format(cat.id)] = self.envs[0].elements.get().id
form["build"] = "rahbuild"
res = form.submit(status=302)
# we now need to find the run that was created for the series
# by having the old run as its series value.
newrun = self.F.model.Run.objects.get(series=self.testrun)
self.assertRedirects(
res,
reverse(
"runtests_run",
kwargs={"run_id": newrun.id, "env_id": self.envs[0].id})
)
class RunTestsTest(case.view.AuthenticatedViewTestCase,
case.view.NoCacheTest,
):
"""Tests for runtests view."""
def setUp(self):
"""These tests all require a test run and envs, and execute perm."""
super(RunTestsTest, self).setUp()
self.testrun = self.F.RunFactory.create(status="active")
self.envs = self.F.EnvironmentFactory.create_full_set(
{"OS": ["Windows 7", "Ubuntu Linux"]})
self.testrun.environments.add(*self.envs)
self.add_perm("execute")
@property
def url(self):
"""Shortcut for runtests_run url."""
return reverse(
"runtests_run",
kwargs={"run_id": self.testrun.id, "env_id": self.envs[0].id})
def create_rcv(self, **kwargs):
"""Create a runcaseversion for this run with given kwargs."""
defaults = {
"run": self.testrun,
"caseversion__productversion": self.testrun.productversion,
"caseversion__case__product": self.testrun.productversion.product,
"environments": self.envs,
}
defaults.update(kwargs)
return self.F.RunCaseVersionFactory.create(**defaults)
def create_result(self, **kwargs):
"""Create a result for this run/env/user with given kwargs."""
defaults = {
"tester": self.user,
"environment": self.envs[0]
}
defaults.update(kwargs)
if "runcaseversion" not in defaults:
defaults["runcaseversion"] = self.create_rcv()
return self.F.ResultFactory.create(**defaults)
def test_ajax_get(self):
"""Getting page via ajax returns just itemlist."""
res = self.get(ajax=True, status=200)
soup = BeautifulSoup(res.json["html"])
# outermost element is class "itemlist"
self.assertIn("itemlist", soup.findChild()["class"])
def test_requires_execute_permission(self):
"""Requires execute permission."""
res = self.app.get(
self.url, user=self.F.UserFactory.create(), status=302)
self.assertRedirects(res, "/")
def test_markdown_safe(self):
"""Raw HTML and markdown attributes are escaped."""
rcv = self.create_rcv(caseversion__description="<script>")
self.F.CaseStepFactory.create(
caseversion=rcv.caseversion,
instruction="<script>alert(foo);</script>",
expected="{@onclick=alert(1)}paragraph",
)
res = self.get()
self.assertEqual(
unicode(res.html.find("div", "description").find("p")),
"<p><script></p>"
)
step = res.html.find("li", {"data-step-number": "1"})
self.assertEqual(
unicode(step.find("div", "instruction").find("p")),
"<p><script>alert(foo);</script></p>"
)
self.assertEqual(
unicode(step.find("div", "outcome").find("p")),
"<p>{@onclick=alert(1)}paragraph</p>",
)
def test_bad_run_id_404(self):
"""Bad run id returns 404."""
url = reverse("runtests_environment", kwargs={"run_id": 9999})
self.app.get(url, user=self.user, status=404)
def test_inactive_run_redirects_to_selector(self):
"""An inactive run redirects to run selector with message."""
self.testrun.status = "draft"
self.testrun.save()
res = self.get(status=302)
self.assertRedirects(res, reverse("runtests"))
res.follow().mustcontain("not open for testing")
def test_invalid_environment_set(self):
"""If env is not valid for run, redirects to set-environment."""
self.testrun.environments.remove(self.envs[0])
res = self.get(status=302)
self.assertRedirects(
res,
reverse("runtests_environment", kwargs={"run_id": self.testrun.id})
)
def test_environment(self):
"""Environment is shown in template."""
res = self.get(status=200)
self.assertEqual(
res.html.findAll("ul", "envsettings")[0].find("li").text,
self.envs[0].elements.get().name)
def test_finder_productversions_prepopulated(self):
"""Finder is prepopulated with product versions."""
res = self.get(status=200)
finder_productversions = res.html.findAll(
"input",
id="finder-productversions-{0}".format(
self.testrun.productversion.id)
)
self.assertEqual(len(finder_productversions), 1)
self.assertIn("checked", unicode(finder_productversions[0]))
def test_finder_runs_prepopulated(self):
"""Finder is prepopulated with runs."""
res = self.get(status=200)
finder_runs = res.html.findAll(
"input", id="finder-runs-{0}".format(self.testrun.id))
self.assertEqual(len(finder_runs), 1)
self.assertIn("checked", unicode(finder_runs[0]))
def test_finder_env_form_prepopulated(self):
"""Finder env form is prepopulated."""
el = self.envs[0].elements.get()
res = self.get(status=200)
form = res.html.find("form", id="runtests-environment-form")
self.assertEqual(
form.find("option", value=str(el.id))["selected"], "selected")
def test_runcaseversions(self):
"""Lists runcaseversions."""
self.create_rcv(caseversion__name="Foo Case")
res = self.get(status=200)
res.mustcontain("Foo Case")
def test_runcaseversions_env_narrowed(self):
"""Lists only correct env runcaseversions."""
self.create_rcv(
caseversion__name="Env0 Case", environments=self.envs[:1])
self.create_rcv(
caseversion__name="Env1 Case", environments=self.envs[1:])
self.create_rcv(caseversion__name="EnvAll Case")
res = self.get(status=200)
res.mustcontain("Env0 Case")
res.mustcontain("EnvAll Case")
self.assertNotIn("Env1 Case", res)
def test_redirect_preserves_sort(self):
"""Redirect after non-Ajax post preserves sort params."""
rcv = self.create_rcv()
form = self.get(
params={"sortfield": "name"}, status=200).forms[
"test-status-form-{0}".format(rcv.id)]
res = form.submit(name="action-result_pass", index=0, status=302)
self.assertRedirects(res, self.url + "?sortfield=name")
def test_description(self):
"""Returns details HTML snippet for given caseversion"""
rcv = self.create_rcv(
caseversion__name="Foo Case",
caseversion__description="_Valmorphanize_",
)
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
res = form.submit(
name="action-result_pass",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
status=200
)
res.mustcontain("<em>Valmorphanize</em>")
def test_post_no_action_redirect(self):
"""POST with no action does nothing and redirects."""
rcv = self.create_rcv()
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
res = form.submit(status=302)
self.assertRedirects(res, self.url)
def test_post_no_action_ajax(self):
"""Ajax POST with no action does nothing and returns no HTML."""
rcv = self.create_rcv()
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
res = form.submit(
headers={"X-Requested-With": "XMLHttpRequest"}, status=200)
self.assertEqual(res.json["html"], "")
self.assertEqual(res.json["no_replace"], True)
@patch("moztrap.view.runtests.views.ACTIONS", {})
def test_post_bad_action_redirect(self):
"""POST with bad action does nothing but message and redirects."""
rcv = self.create_rcv()
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
# we patched the actions dictionary so "result_pass" will not be valid
res = form.submit(name="action-result_pass", index=0, status=302)
self.assertRedirects(res, self.url)
res.follow().mustcontain("result_pass is not a valid action")
@patch("moztrap.view.runtests.views.ACTIONS", {})
def test_post_bad_action_ajax(self):
"""Ajax POST with bad action sets message and returns no HTML."""
rcv = self.create_rcv()
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
# we patched the actions dictionary so "result_pass" will not be valid
res = form.submit(
name="action-result_pass", index=0,
headers={"X-Requested-With": "XMLHttpRequest"}, status=200)
self.assertEqual(res.json["html"], "")
self.assertEqual(res.json["no_replace"], True)
self.assertEqual(
res.json["messages"][0]["message"], "result_pass is not a valid action.")
def test_post_bad_rcv_id_redirect(self):
"""POST with bad rcv id does nothing but message and redirects."""
rcv = self.create_rcv()
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
rcv.delete()
res = form.submit(name="action-result_pass", index=0, status=302)
self.assertRedirects(res, self.url)
res.follow().mustcontain("is not a valid run/caseversion ID")
def test_post_bad_rcv_id_ajax(self):
"""Ajax POST with bad rcv id sets message and returns no HTML."""
rcv = self.create_rcv()
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
rcv.delete()
res = form.submit(
name="action-result_pass", index=0,
headers={"X-Requested-With": "XMLHttpRequest"}, status=200)
self.assertEqual(res.json["html"], "")
self.assertEqual(res.json["no_replace"], True)
self.assertIn(
"is not a valid run/caseversion ID",
res.json["messages"][0]["message"]
)
def test_post_missing_result(self):
"""Can pass/fail/invalid a not-yet-existing result."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
result.delete()
res = form.submit(name="action-result_pass", index=0, status=302)
self.assertRedirects(res, self.url)
result = rcv.results.get(tester=self.user, environment=self.envs[0])
self.assertEqual(result.status, result.STATUS.passed)
def test_post_missing_result_ajax(self):
"""Can pass/fail/invalid a not-yet-existing result via ajax."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
result.delete()
res = form.submit(
name="action-result_pass", index=0,
headers={"X-Requested-With": "XMLHttpRequest"}, status=200)
self.assertElement(
res.json["html"], "button", attrs={"name": "action-start"})
def test_pass_case(self):
"""Submit a "result_pass" action for a case; redirects."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
res = form.submit(name="action-result_pass", index=0, status=302)
self.assertRedirects(res, self.url)
result = rcv.results.get(
tester=self.user,
environment=self.envs[0],
is_latest=True)
self.assertEqual(result.status, result.STATUS.passed)
def test_pass_case_ajax(self):
"""Ajax post a "result_pass" action; returns HTML snippet."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms["test-status-form-{0}".format(rcv.id)]
res = form.submit(
name="action-result_pass",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
status=200
)
self.assertElement(
res.json["html"], "button", attrs={"name": "action-start"})
def test_invalidate_case(self):
"""Submit a "result_invalid" action for a case; redirects."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms[
"test-invalid-form-{0}".format(rcv.id)]
form["comment"] = "it ain't valid"
res = form.submit(
name="action-result_invalid", index=0, status=302)
self.assertRedirects(res, self.url)
result = rcv.results.get(
tester=self.user,
environment=self.envs[0],
is_latest=True)
self.assertEqual(result.status, result.STATUS.invalidated)
self.assertEqual(result.comment, "it ain't valid")
def test_invalidate_case_ajax(self):
"""Ajax post a "result_invalid" action; returns HTML snippet."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms[
"test-invalid-form-{0}".format(rcv.id)]
form["comment"] = "it ain't valid"
res = form.submit(
name="action-result_invalid",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
status=200
)
self.assertElement(
res.json["html"], "button", attrs={"name": "action-start"})
    def test_fail_case(self):
        """Submit a "result_fail" action for a case; redirects."""
step = self.F.CaseStepFactory.create(number=1)
rcv = self.create_rcv(caseversion=step.caseversion)
self.create_result(status="started", runcaseversion=rcv)
form = self.get(status=200).forms[
"test-fail-form-{0}-1".format(rcv.id)]
form["comment"] = "it didn't pass"
res = form.submit(
name="action-result_fail", index=0, status=302)
self.assertRedirects(res, self.url)
result = rcv.results.get(
tester=self.user,
environment=self.envs[0],
is_latest=True)
self.assertEqual(result.status, result.STATUS.failed)
self.assertEqual(result.comment, "it didn't pass")
    def test_fail_case_ajax(self):
        """Ajax post a "result_fail" action; returns HTML snippet."""
step = self.F.CaseStepFactory.create(number=1)
rcv = self.create_rcv(caseversion=step.caseversion)
self.create_result(status="started", runcaseversion=rcv)
form = self.get(status=200).forms[
"test-fail-form-{0}-1".format(rcv.id)]
form["comment"] = "it didn't pass"
res = form.submit(
name="action-result_fail",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
status=200
)
self.assertElement(
res.json["html"], "button", attrs={"name": "action-start"})
def test_restart_case(self):
"""Submit a "restart" action for a case; redirects."""
result = self.create_result(status="passed")
rcv = result.runcaseversion
form = self.get(status=200).forms["restart-form-{0}".format(rcv.id)]
res = form.submit(name="action-start", index=0, status=302)
self.assertRedirects(res, self.url)
result = rcv.results.get(
tester=self.user,
environment=self.envs[0],
is_latest=True,
)
self.assertEqual(result.status, result.STATUS.started)
def test_restart_case_ajax(self):
"""Ajax post a "restart" action; returns HTML snippet."""
result = self.create_result(status="passed")
rcv = result.runcaseversion
form = self.get(status=200).forms["restart-form-{0}".format(rcv.id)]
res = form.submit(
name="action-start",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
status=200
)
self.assertElement(
res.json["html"], "button", attrs={"name": "action-result_pass"})
def test_parameter_defaults(self):
"""Action parameters have defaults and are not required."""
result = self.create_result(status="started")
rcv = result.runcaseversion
form = self.get(status=200).forms[
"test-invalid-form-{0}".format(rcv.id)]
# prevents any comment parameter from being submitted
del form.fields["comment"]
res = form.submit(name="action-result_invalid", index=0, status=302)
self.assertRedirects(res, self.url)
result = rcv.results.get(is_latest=True)
self.assertEqual(result.status, result.STATUS.invalidated)
self.assertEqual(result.comment, "")
| 31.234681
| 88
| 0.598072
|
75f7acd92c656d4c518b85b32a58b8441315759f
| 117
|
py
|
Python
|
ALS/__init__.py
|
np179/ALSCPD
|
86fe4743585bb80867bfb7ff61c060c22037de74
|
[
"MIT"
] | null | null | null |
ALS/__init__.py
|
np179/ALSCPD
|
86fe4743585bb80867bfb7ff61c060c22037de74
|
[
"MIT"
] | null | null | null |
ALS/__init__.py
|
np179/ALSCPD
|
86fe4743585bb80867bfb7ff61c060c22037de74
|
[
"MIT"
] | null | null | null |
__all__ = ['dvr', 'h2o', 'ALS1D', 'ALS2D', 'twoDsub', 'tracker', 'MonteC', 'ALSclass', 'potentials']
from . import *
| 39
| 100
| 0.606838
|
455233aade18dc0076ddca73c44fda9743df9038
| 4,577
|
py
|
Python
|
setup.py
|
makkes/dcos
|
a6df70f3f58ead134c8c49af8fa1387b4f81c19c
|
[
"Apache-2.0"
] | 1
|
2019-10-01T14:53:09.000Z
|
2019-10-01T14:53:09.000Z
|
setup.py
|
makkes/dcos
|
a6df70f3f58ead134c8c49af8fa1387b4f81c19c
|
[
"Apache-2.0"
] | 1
|
2020-03-19T18:01:32.000Z
|
2020-03-19T18:01:32.000Z
|
setup.py
|
makkes/dcos
|
a6df70f3f58ead134c8c49af8fa1387b4f81c19c
|
[
"Apache-2.0"
] | 1
|
2019-09-02T11:45:32.000Z
|
2019-09-02T11:45:32.000Z
|
from pathlib import Path
from setuptools import setup
def get_advanced_templates():
template_base = 'aws/templates/advanced/'
template_names = ['advanced-master', 'advanced-priv-agent', 'advanced-pub-agent', 'infra', 'zen']
return [template_base + name + '.json' for name in template_names]
# These files are the expected source files for the dcos-builder docker image.
# They need to match the contents of ./pkgpanda/docker/dcos-builder/* exactly,
# otherwise the dcos-builder docker image will have a different sha1 checksum
# calculated when the ./release script is run.
# That leads to cached package hashes differing from what is cached in S3 and
# prevents us from building DC/OS locally.
expected_dcos_builder_files = [
Path('docker/dcos-builder/Dockerfile'),
Path('docker/dcos-builder/README.md'),
]
dcos_builder_files = [f.relative_to(Path("./pkgpanda")) for f in Path("./pkgpanda").glob('docker/**/*') if f.is_file()]
if set(expected_dcos_builder_files) != set(dcos_builder_files):
raise Exception('Expected ./pkgpanda/docker/dcos-builder to contain {} but it had {}'.format(
expected_dcos_builder_files, dcos_builder_files))
setup(
name='dcos_image',
version='0.1',
description='DC/OS cluster configuration, assembly, and maintenance code',
url='https://dcos.io',
author='Mesosphere, Inc.',
author_email='help@dcos.io',
license='apache2',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
packages=[
'dcos_installer',
'gen',
'gen.build_deploy',
'pkgpanda',
'pkgpanda.build',
'pkgpanda.http',
'release',
'release.storage'],
install_requires=[
        # DCOS-21656 - `botocore` requires a version below 2.7.0, while the
        # `analytics-python` package installs version 2.7.0
'python-dateutil>=2.1,<2.7.0',
'aiohttp==0.22.5',
'analytics-python',
'coloredlogs',
'Flask',
'flask-compress',
'urllib3==1.24.2',
'chardet',
'PyJWT',
# Pins taken from 'azure==2.0.0rc4'
'msrest==0.4.17',
'msrestazure==0.4.15',
'azure-common==1.1.4',
'azure-storage==0.32.0',
'azure-mgmt-network==0.30.0rc4',
'azure-mgmt-resource==0.30.0rc4',
'botocore',
'boto3',
'checksumdir',
'coloredlogs',
'docopt',
'passlib',
'py',
'pytest',
'pyyaml',
'responses',
'requests==2.20.1',
'retrying',
'schema',
'wheel==0.33.1',
'keyring==9.1', # FIXME: pin keyring to prevent dbus dep
'teamcity-messages'],
entry_points={
'console_scripts': [
'release=release:main',
'pkgpanda=pkgpanda.cli:main',
'mkpanda=pkgpanda.build.cli:main',
'dcos_installer=dcos_installer.cli:main',
],
},
package_data={
'gen': [
'ip-detect/aws.sh',
'ip-detect/aws6.sh',
'ip-detect/aws_public.sh',
'ip-detect/azure.sh',
'ip-detect/azure6.sh',
'ip-detect/vagrant.sh',
'ip-detect/vagrant6.sh',
'fault-domain-detect/cloud.sh',
'fault-domain-detect/aws.sh',
'fault-domain-detect/azure.sh',
'cloud-config.yaml',
'cloud-config-windows.yaml',
'dcos-config.yaml',
'dcos-config-windows.yaml',
'dcos-metadata.yaml',
'dcos-services.yaml',
'dcos-services-windows.yaml',
'aws/dcos-config.yaml',
'aws/templates/aws.html',
'aws/templates/cloudformation.json',
'azure/cloud-config.yaml',
'azure/cloud-config-windows.yaml',
'azure/azuredeploy-parameters.json',
'azure/templates/acs.json',
'azure/templates/azure.html',
'azure/templates/azuredeploy.json',
'build_deploy/bash/dcos_generate_config.sh.in',
'build_deploy/bash/Dockerfile.in',
'build_deploy/bash/installer_internal_wrapper.in',
'build_deploy/bash/dcos-launch.spec',
'coreos-aws/cloud-config.yaml',
'coreos/cloud-config.yaml'
] + get_advanced_templates(),
'pkgpanda': [str(f) for f in expected_dcos_builder_files],
},
zip_safe=False
)
| 34.156716
| 119
| 0.58357
|
4a7efd88767fb918ffc36be328a839f48925530e
| 29,650
|
py
|
Python
|
great_expectations/util.py
|
jaidparmar/great_expectations
|
dac57e3638a0ca2c7adc62a8c0cee8525ea706a5
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/util.py
|
jaidparmar/great_expectations
|
dac57e3638a0ca2c7adc62a8c0cee8525ea706a5
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/util.py
|
jaidparmar/great_expectations
|
dac57e3638a0ca2c7adc62a8c0cee8525ea706a5
|
[
"Apache-2.0"
] | null | null | null |
import copy
import importlib
import json
import logging
import os
import time
from collections import OrderedDict
from functools import wraps
from gc import get_referrers
from inspect import (
ArgInfo,
BoundArguments,
Parameter,
Signature,
currentframe,
getargvalues,
getclosurevars,
getmodule,
signature,
)
from pathlib import Path
from types import CodeType, FrameType, ModuleType
from typing import Any, Callable, Optional, Union
import black
from pkg_resources import Distribution
from great_expectations.core.expectation_suite import expectationSuiteSchema
from great_expectations.exceptions import (
PluginClassNotFoundError,
PluginModuleNotFoundError,
)
try:
# This library moved in python 3.8
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
# Fallback for python < 3.8
import importlib_metadata
logger = logging.getLogger(__name__)
def measure_execution_time(func: Callable = None) -> Callable:
@wraps(func)
def compute_delta_t(*args, **kwargs) -> Callable:
time_begin: int = int(round(time.time() * 1000))
try:
return func(*args, **kwargs)
finally:
time_end: int = int(round(time.time() * 1000))
delta_t: int = time_end - time_begin
bound_args: BoundArguments = signature(func).bind(*args, **kwargs)
call_args: OrderedDict = bound_args.arguments
print(
f"Total execution time of function {func.__name__}({str(dict(call_args))}): {delta_t} ms."
)
return compute_delta_t
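# --- Added illustration (not part of the original module) ---
# A minimal usage sketch: decorating a function with measure_execution_time prints
# the wall-clock duration in milliseconds, together with the bound call arguments,
# every time the function is invoked (e.g. _example_timed_sum(1000000)).
@measure_execution_time
def _example_timed_sum(n: int) -> int:
    return sum(range(n))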
# noinspection SpellCheckingInspection
def get_project_distribution() -> Union[Distribution, None]:
    distr: Distribution
for distr in importlib_metadata.distributions():
relative_path: Path
try:
relative_path = Path(__file__).relative_to(distr.locate_file(""))
except ValueError:
pass
else:
if relative_path in distr.files:
return distr
return None
# Returns the object reference to the currently running function (i.e., the immediate function under execution).
def get_currently_executing_function() -> Callable:
cf: FrameType = currentframe()
fb: FrameType = cf.f_back
fc: CodeType = fb.f_code
func_obj: Callable = [
referer
for referer in get_referrers(fc)
if getattr(referer, "__code__", None) is fc
and getclosurevars(referer).nonlocals.items() <= fb.f_locals.items()
][0]
return func_obj
# noinspection SpellCheckingInspection
def get_currently_executing_function_call_arguments(
include_module_name: bool = False, include_caller_names: bool = False, **kwargs
) -> dict:
cf: FrameType = currentframe()
fb: FrameType = cf.f_back
argvs: ArgInfo = getargvalues(fb)
fc: CodeType = fb.f_code
cur_func_obj: Callable = [
referer
for referer in get_referrers(fc)
if getattr(referer, "__code__", None) is fc
and getclosurevars(referer).nonlocals.items() <= fb.f_locals.items()
][0]
cur_mod = getmodule(cur_func_obj)
sig: Signature = signature(cur_func_obj)
params: dict = {}
var_positional: dict = {}
var_keyword: dict = {}
for key, param in sig.parameters.items():
val: Any = argvs.locals[key]
params[key] = val
if param.kind == Parameter.VAR_POSITIONAL:
var_positional[key] = val
elif param.kind == Parameter.VAR_KEYWORD:
var_keyword[key] = val
bound_args: BoundArguments = sig.bind(**params)
call_args: OrderedDict = bound_args.arguments
call_args_dict: dict = dict(call_args)
for key, value in var_positional.items():
call_args_dict[key] = value
for key, value in var_keyword.items():
call_args_dict.pop(key)
call_args_dict.update(value)
if include_module_name:
call_args_dict.update({"module_name": cur_mod.__name__})
if not include_caller_names:
if call_args.get("cls"):
call_args_dict.pop("cls", None)
if call_args.get("self"):
call_args_dict.pop("self", None)
call_args_dict.update(**kwargs)
return call_args_dict
def verify_dynamic_loading_support(module_name: str, package_name: str = None) -> None:
"""
:param module_name: a possibly-relative name of a module
:param package_name: the name of a package, to which the given module belongs
"""
try:
module_spec: importlib.machinery.ModuleSpec = importlib.util.find_spec(
module_name, package=package_name
)
except ModuleNotFoundError:
module_spec = None
if not module_spec:
if not package_name:
package_name = ""
message: str = f"""No module named "{package_name + module_name}" could be found in the repository. Please \
make sure that the file, corresponding to this package and module, exists and that dynamic loading of code modules, \
templates, and assets is supported in your execution environment. This error is unrecoverable.
"""
raise FileNotFoundError(message)
def import_library_module(module_name: str) -> Union[ModuleType, None]:
"""
:param module_name: a fully-qualified name of a module (e.g., "great_expectations.dataset.sqlalchemy_dataset")
:return: raw source code of the module (if can be retrieved)
"""
module_obj: Union[ModuleType, None]
try:
module_obj = importlib.import_module(module_name)
except ImportError:
module_obj = None
return module_obj
def is_library_loadable(library_name: str) -> bool:
module_obj: Union[ModuleType, None] = import_library_module(
module_name=library_name
)
return module_obj is not None
def load_class(class_name, module_name):
try:
verify_dynamic_loading_support(module_name=module_name)
except FileNotFoundError:
raise PluginModuleNotFoundError(module_name)
module_obj: Union[ModuleType, None] = import_library_module(module_name=module_name)
if module_obj is None:
raise PluginModuleNotFoundError(module_name)
try:
klass_ = getattr(module_obj, class_name)
except AttributeError:
raise PluginClassNotFoundError(module_name=module_name, class_name=class_name)
return klass_
def _convert_to_dataset_class(df, dataset_class, expectation_suite=None, profiler=None):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
Args:
df: the DataFrame object to convert
dataset_class: the class to which to convert the existing DataFrame
expectation_suite: the expectation suite that should be attached to the resulting dataset
profiler: the profiler to use to generate baseline expectations, if any
Returns:
A new Dataset object
"""
if expectation_suite is not None:
# Create a dataset of the new class type, and manually initialize expectations according to
# the provided expectation suite
new_df = dataset_class.from_dataset(df)
new_df._initialize_expectations(expectation_suite)
else:
# Instantiate the new Dataset with default expectations
new_df = dataset_class.from_dataset(df)
if profiler is not None:
new_df.profile(profiler)
return new_df
def _load_and_convert_to_dataset_class(
df, class_name, module_name, expectation_suite=None, profiler=None
):
"""
Convert a (pandas) dataframe to a great_expectations dataset, with (optional) expectation_suite
Args:
df: the DataFrame object to convert
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
expectation_suite: the expectation suite that should be attached to the resulting dataset
profiler: the profiler to use to generate baseline expectations, if any
Returns:
A new Dataset object
"""
verify_dynamic_loading_support(module_name=module_name)
dataset_class = load_class(class_name, module_name)
return _convert_to_dataset_class(df, dataset_class, expectation_suite, profiler)
def read_csv(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_csv and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_csv(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
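# --- Added illustration (not part of the original module) ---
# A minimal usage sketch; the file name and column name are illustrative
# assumptions. With the defaults, the returned object is a PandasDataset that
# behaves like a DataFrame but also exposes expect_* methods.
def _example_read_csv_dataset():
    ge_df = read_csv("my_data.csv")
    return ge_df.expect_column_to_exist("some_column")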
def read_json(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
accessor_func=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_json and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
accessor_func (Callable): functions to transform the json object in the file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
if accessor_func is not None:
json_obj = json.load(open(filename, "rb"))
json_obj = accessor_func(json_obj)
df = pd.read_json(json.dumps(json_obj), *args, **kwargs)
else:
df = pd.read_json(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
def read_excel(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_excel and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset or ordered dict of great_expectations datasets,
if multiple worksheets are imported
"""
import pandas as pd
df = pd.read_excel(filename, *args, **kwargs)
if dataset_class is None:
verify_dynamic_loading_support(module_name=module_name)
dataset_class = load_class(class_name=class_name, module_name=module_name)
if isinstance(df, dict):
for key in df:
df[key] = _convert_to_dataset_class(
df=df[key],
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
df = _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
return df
def read_table(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_table and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_table(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
def read_feather(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_feather and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_feather(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
def read_parquet(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_parquet and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_parquet(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
def from_pandas(
pandas_df,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
):
"""Read a Pandas data frame and return a great_expectations dataset.
Args:
pandas_df (Pandas df): Pandas data frame
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string) = None: path to great_expectations expectation suite file
profiler (profiler class) = None: The profiler that should
be run on the dataset to establish a baseline expectation suite.
Returns:
great_expectations dataset
"""
if dataset_class is not None:
return _convert_to_dataset_class(
df=pandas_df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=pandas_df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
def read_pickle(
filename,
class_name="PandasDataset",
module_name="great_expectations.dataset",
dataset_class=None,
expectation_suite=None,
profiler=None,
*args,
**kwargs,
):
"""Read a file using Pandas read_pickle and return a great_expectations dataset.
Args:
filename (string): path to file to read
class_name (str): class to which to convert resulting Pandas df
module_name (str): dataset module from which to try to dynamically load the relevant module
dataset_class (Dataset): If specified, the class to which to convert the resulting Dataset object;
if not specified, try to load the class named via the class_name and module_name parameters
expectation_suite (string): path to great_expectations expectation suite file
profiler (Profiler class): profiler to use when creating the dataset (default is None)
Returns:
great_expectations dataset
"""
import pandas as pd
df = pd.read_pickle(filename, *args, **kwargs)
if dataset_class is not None:
return _convert_to_dataset_class(
df=df,
dataset_class=dataset_class,
expectation_suite=expectation_suite,
profiler=profiler,
)
else:
return _load_and_convert_to_dataset_class(
df=df,
class_name=class_name,
module_name=module_name,
expectation_suite=expectation_suite,
profiler=profiler,
)
def validate(
data_asset,
expectation_suite=None,
data_asset_name=None,
expectation_suite_name=None,
data_context=None,
data_asset_class_name=None,
data_asset_module_name="great_expectations.dataset",
data_asset_class=None,
*args,
**kwargs,
):
"""Validate the provided data asset. Validate can accept an optional data_asset_name to apply, data_context to use
to fetch an expectation_suite if one is not provided, and data_asset_class_name/data_asset_module_name or
data_asset_class to use to provide custom expectations.
Args:
data_asset: the asset to validate
expectation_suite: the suite to use, or None to fetch one using a DataContext
data_asset_name: the name of the data asset to use
expectation_suite_name: the name of the expectation_suite to use
        data_context: data context to use to fetch an expectation suite, or the path from which to obtain one
data_asset_class_name: the name of a class to dynamically load a DataAsset class
data_asset_module_name: the name of the module to dynamically load a DataAsset class
data_asset_class: a class to use. overrides data_asset_class_name/ data_asset_module_name if provided
*args:
**kwargs:
Returns:
"""
# Get an expectation suite if not provided
if expectation_suite is None and data_context is None:
raise ValueError(
"Either an expectation suite or a DataContext is required for validation."
)
if expectation_suite is None:
logger.info("Using expectation suite from DataContext.")
# Allow data_context to be a string, and try loading it from path in that case
if isinstance(data_context, str):
from great_expectations.data_context import DataContext
data_context = DataContext(data_context)
expectation_suite = data_context.get_expectation_suite(
expectation_suite_name=expectation_suite_name
)
else:
if isinstance(expectation_suite, dict):
expectation_suite = expectationSuiteSchema.load(expectation_suite)
if data_asset_name is not None:
raise ValueError(
"When providing an expectation suite, data_asset_name cannot also be provided."
)
if expectation_suite_name is not None:
raise ValueError(
"When providing an expectation suite, expectation_suite_name cannot also be provided."
)
logger.info(
"Validating data_asset_name %s with expectation_suite_name %s"
% (data_asset_name, expectation_suite.expectation_suite_name)
)
# If the object is already a DataAsset type, then this is purely a convenience method
# and no conversion is needed; try to run validate on the given object
if data_asset_class_name is None and data_asset_class is None:
return data_asset.validate(
expectation_suite=expectation_suite,
data_context=data_context,
*args,
**kwargs,
)
# Otherwise, try to convert and validate the dataset
if data_asset_class is None:
verify_dynamic_loading_support(module_name=data_asset_module_name)
data_asset_class = load_class(data_asset_class_name, data_asset_module_name)
import pandas as pd
from great_expectations.dataset import Dataset, PandasDataset
if data_asset_class is None:
# Guess the GE data_asset_type based on the type of the data_asset
if isinstance(data_asset, pd.DataFrame):
data_asset_class = PandasDataset
# Add other data_asset_type conditions here as needed
# Otherwise, we will convert for the user to a subclass of the
# existing class to enable new expectations, but only for datasets
if not isinstance(data_asset, (Dataset, pd.DataFrame)):
raise ValueError(
"The validate util method only supports dataset validations, including custom subclasses. For other data "
"asset types, use the object's own validate method."
)
if not issubclass(type(data_asset), data_asset_class):
if isinstance(data_asset, pd.DataFrame) and issubclass(
data_asset_class, PandasDataset
):
pass # This is a special type of allowed coercion
else:
raise ValueError(
"The validate util method only supports validation for subtypes of the provided data_asset_type."
)
data_asset_ = _convert_to_dataset_class(
data_asset, dataset_class=data_asset_class, expectation_suite=expectation_suite
)
return data_asset_.validate(*args, data_context=data_context, **kwargs)
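# Example (illustrative sketch, not part of this module): validating a pandas
# DataFrame against a suite fetched from an on-disk DataContext. The context
# path and suite name below are hypothetical placeholders.
#
#   import pandas as pd
#   df = pd.DataFrame({"col": [1, 2, 3]})
#   result = validate(
#       data_asset=df,
#       data_context="great_expectations/",    # loaded via DataContext(<path>)
#       expectation_suite_name="my_suite",
#   )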
# https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python
def gen_directory_tree_str(startpath):
"""Print the structure of directory as a tree:
Ex:
project_dir0/
AAA/
BBB/
aaa.txt
bbb.txt
#Note: files and directories are sorted alphabetically, so that this method can be used for testing.
"""
output_str = ""
tuples = list(os.walk(startpath))
tuples.sort()
for root, dirs, files in tuples:
level = root.replace(startpath, "").count(os.sep)
indent = " " * 4 * level
output_str += "{}{}/\n".format(indent, os.path.basename(root))
subindent = " " * 4 * (level + 1)
files.sort()
for f in files:
output_str += "{}{}\n".format(subindent, f)
return output_str
def lint_code(code):
"""Lint strings of code passed in."""
black_file_mode = black.FileMode()
if not isinstance(code, str):
raise TypeError
try:
linted_code = black.format_file_contents(code, fast=True, mode=black_file_mode)
return linted_code
except (black.NothingChanged, RuntimeError):
return code
def filter_properties_dict(
properties: dict,
keep_fields: Optional[list] = None,
delete_fields: Optional[list] = None,
clean_empty: Optional[bool] = True,
inplace: Optional[bool] = False,
) -> Optional[dict]:
"""Filter the entries of the source dictionary according to directives concerning the existing keys and values.
Args:
properties: source dictionary to be filtered according to the supplied filtering directives
keep_fields: list of keys that must be retained, with the understanding that all other entries will be deleted
delete_fields: list of keys that must be deleted, with the understanding that all other entries will be retained
        clean_empty: If True, then in addition to the other filtering directives, delete entries whose values are falsy (numeric values, including zero, are kept)
inplace: If True, then modify the source properties dictionary; otherwise, make a copy for filtering purposes
Returns:
The (possibly) filtered properties dictionary (or None if no entries remain after filtering is performed)
"""
if keep_fields and delete_fields:
raise ValueError(
"Only one of keep_fields and delete_fields filtering directives can be specified."
)
if not inplace:
properties = copy.deepcopy(properties)
keys_for_deletion: list = []
if keep_fields:
keys_for_deletion.extend(
[key for key, value in properties.items() if key not in keep_fields]
)
if delete_fields:
keys_for_deletion.extend(
[key for key, value in properties.items() if key in delete_fields]
)
if clean_empty:
keys_for_deletion.extend(
[
key
for key, value in properties.items()
if not (
(keep_fields and key in keep_fields)
or is_numeric(value=value)
or value
)
]
)
keys_for_deletion = list(set(keys_for_deletion))
for key in keys_for_deletion:
del properties[key]
if inplace:
return None
return properties
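# Example (illustrative sketch): delete_fields removes the listed keys, keep_fields
# retains only the listed keys, and clean_empty additionally drops falsy values
# (numeric values such as 0 are kept because is_numeric() treats them as meaningful).
#
#   props = {"a": 1, "b": None, "c": 0, "d": "x"}
#   filter_properties_dict(props, delete_fields=["d"])      # -> {"a": 1, "c": 0}
#   filter_properties_dict(props, keep_fields=["a", "b"])   # -> {"a": 1, "b": None}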
def is_numeric(value: Any) -> bool:
return value is not None and (is_int(value) or is_float(value))
def is_int(value: Any) -> bool:
try:
num: int = int(value)
except (TypeError, ValueError):
return False
return True
def is_float(value: Any) -> bool:
try:
num: float = float(value)
except (TypeError, ValueError):
return False
return True
def get_context():
from great_expectations.data_context.data_context_v3 import DataContextV3
return DataContextV3()
| 34.51688 | 120 | 0.677504 |
84fc5ea5c2e9d002eb569aea68fce38273ca34ec | 1,888 | py | Python | vk/constants.py | fossabot/vk.py | 94d5c719eb8da6d778d2be208038c447971d5cff | ["MIT"] | null | null | null | vk/constants.py | fossabot/vk.py | 94d5c719eb8da6d778d2be208038c447971d5cff | ["MIT"] | null | null | null | vk/constants.py | fossabot/vk.py | 94d5c719eb8da6d778d2be208038c447971d5cff | ["MIT"] | null | null | null |
"""
A file which contains all project constants.
"""
from vk.utils.json import AbstractJsonLibrary
from vk.utils.json import JsonLibrary
API_VERSION: str = "5.101" # current api version https://vk.com/dev/versions
API_LINK: str = "https://api.vk.com/method/" # link to access API
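# Pick the fastest available JSON backend: prefer orjson, then ujson, and fall
# back to the standard-library json module when neither is installed.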
try:
import orjson # noqa
except ImportError:
orjson = None
try:
import ujson # noqa
except ImportError:
ujson = None
if not (ujson or orjson):
import json
else:
json = None
_JSONLIB: AbstractJsonLibrary = [lib for lib in [orjson, ujson, json] if lib][0] # noqa
JSON_LIBRARY = JsonLibrary(_JSONLIB)
def default_rules() -> dict:
"""
    Build and return dict of default handlers rules.
    :return: mapping of rule name to rule class.
"""
from vk.bot_framework.rules.rules import (
Commands,
Text,
Payload,
ChatAction,
DataCheck,
MessageCountArgs,
MessageArgsValidate,
InPersonalMessages,
InChat,
FromBot,
WithReplyMessage,
WithFwdMessages,
CountFwdMessages,
Regex,
)
_default_rules: dict = {
"commands": Commands,
"text": Text,
"payload": Payload,
"chat_action": ChatAction,
"data_check": DataCheck,
"count_args": MessageCountArgs,
"have_args": MessageArgsValidate,
"in_chat": InChat,
"in_pm": InPersonalMessages,
"from_bot": FromBot,
"with_reply_message": WithReplyMessage,
"with_fwd_messages": WithFwdMessages,
"count_fwd_messages": CountFwdMessages,
"regex": Regex,
}
return _default_rules
def default_extensions() -> dict:
"""
    Build and return dict of default dispatcher extensions.
    :return: mapping of extension name to extension class.
"""
from vk.bot_framework.extensions import Polling
_default_extensions: dict = {"polling": Polling}
return _default_extensions
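# Example (illustrative sketch): looking up a registered rule class by name.
#
#   rules = default_rules()
#   commands_rule_cls = rules["commands"]   # -> Commands rule class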
| 23.6 | 88 | 0.637712 |
932cae6feac9139ad3b25923cd557894522fed02 | 6,034 | py | Python | examples/mnist/tests/test_generate_mnist_dataset.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | ["Apache-2.0"] | null | null | null | examples/mnist/tests/test_generate_mnist_dataset.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | ["Apache-2.0"] | null | null | null | examples/mnist/tests/test_generate_mnist_dataset.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
import pytest
import torch # pylint: disable=unused-import
import examples.mnist.pytorch_example as pytorch_example
import examples.mnist.tf_example as tf_example
from examples.mnist.generate_petastorm_mnist import download_mnist_data, \
mnist_data_to_petastorm_dataset
from petastorm.reader import Reader
from petastorm.workers_pool.dummy_pool import DummyPool
logging.basicConfig(level=logging.INFO)
# Set test image sizes and the number of mock images per train/test split
MOCK_IMAGE_SIZE = (28, 28)
MOCK_IMAGE_3DIM_SIZE = (28, 28, 1)
SMALL_MOCK_IMAGE_COUNT = {
'train': 30,
'test': 5
}
LARGE_MOCK_IMAGE_COUNT = {
'train': 600,
'test': 100
}
class MockDataObj(object):
""" Wraps a mock image array and provide a needed getdata() interface function. """
def __init__(self, a):
self.a = a
def getdata(self):
return self.a
def _mock_mnist_data(mock_spec):
"""
    Creates a mock data dictionary with train and test sets, each containing the number of
    mock pairs requested in ``mock_spec``: ``(random image, random digit)``.
"""
bogus_data = {
'train': [],
'test': []
}
for dset, data in bogus_data.items():
for _ in range(mock_spec[dset]):
pair = (
MockDataObj(np.random.randint(0, 255, size=MOCK_IMAGE_SIZE, dtype=np.uint8)), np.random.randint(0, 9))
data.append(pair)
return bogus_data
@pytest.fixture(scope="session")
def small_mock_mnist_data():
return _mock_mnist_data(SMALL_MOCK_IMAGE_COUNT)
@pytest.fixture(scope="session")
def large_mock_mnist_data():
return _mock_mnist_data(LARGE_MOCK_IMAGE_COUNT)
@pytest.fixture(scope="session")
def generate_mnist_dataset(small_mock_mnist_data, tmpdir_factory):
# Using parquet_files_count to speed up the test
path = tmpdir_factory.mktemp('data').strpath
dataset_url = 'file://{}'.format(path)
mnist_data_to_petastorm_dataset(path, dataset_url, mnist_data=small_mock_mnist_data,
spark_master='local[1]', parquet_files_count=1)
return path
def test_image_to_numpy(small_mock_mnist_data):
log = logging.getLogger('test_image_to_numpy')
""" Show output of image object reshaped as numpy array """
im = small_mock_mnist_data['train'][0]
log.debug(im)
log.debug(im[1])
assert 0 <= im[1] <= 9
log.debug(im[0].getdata())
assert im[0].getdata().shape == MOCK_IMAGE_SIZE
np.set_printoptions(linewidth=200)
reshaped = np.array(list(im[0].getdata()), dtype=np.uint8).reshape(MOCK_IMAGE_3DIM_SIZE)
log.debug(reshaped)
assert reshaped.shape == MOCK_IMAGE_3DIM_SIZE
def test_mnist_download(tmpdir):
""" Demonstrates that MNIST download works, using only the 'test' data. Assumes data does not change often. """
o = download_mnist_data(tmpdir, train=False)
assert 10000 == len(o)
assert o[0][1] == 7
assert o[len(o) - 1][1] == 6
def test_generate_mnist_dataset(generate_mnist_dataset):
train_path = os.path.join(generate_mnist_dataset, 'train')
assert os.path.exists(train_path)
assert os.path.exists(os.path.join(train_path, '_common_metadata'))
assert os.path.exists(os.path.join(train_path, '_metadata'))
test_path = os.path.join(generate_mnist_dataset, 'test')
assert os.path.exists(test_path)
assert os.path.exists(os.path.join(test_path, '_common_metadata'))
assert os.path.exists(os.path.join(test_path, '_metadata'))
def test_read_mnist_dataset(generate_mnist_dataset):
# Verify both datasets via a reader
for dset in SMALL_MOCK_IMAGE_COUNT.keys():
with Reader('file://{}/{}'.format(generate_mnist_dataset, dset), reader_pool=DummyPool()) as reader:
assert len(reader) == SMALL_MOCK_IMAGE_COUNT[dset]
def test_full_pytorch_example(large_mock_mnist_data, tmpdir):
# First, generate mock dataset
dataset_url = 'file://{}'.format(tmpdir)
mnist_data_to_petastorm_dataset(tmpdir, dataset_url, mnist_data=large_mock_mnist_data,
spark_master='local[1]', parquet_files_count=1)
    # Next, run a round of training using the pytorch adapting data loader
from petastorm.pytorch import DataLoader
torch.manual_seed(1)
device = torch.device('cpu')
model = pytorch_example.Net().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
with DataLoader(Reader('{}/train'.format(dataset_url), reader_pool=DummyPool(), num_epochs=1),
batch_size=32, transform=pytorch_example._transform_row) as train_loader:
pytorch_example.train(model, device, train_loader, 10, optimizer, 1)
with DataLoader(Reader('{}/test'.format(dataset_url), reader_pool=DummyPool(), num_epochs=1),
batch_size=100, transform=pytorch_example._transform_row) as test_loader:
pytorch_example.test(model, device, test_loader)
def test_full_tf_example(large_mock_mnist_data, tmpdir):
# First, generate mock dataset
dataset_url = 'file://{}'.format(tmpdir)
mnist_data_to_petastorm_dataset(tmpdir, dataset_url, mnist_data=large_mock_mnist_data,
spark_master='local[1]', parquet_files_count=1)
# Tensorflow train and test
tf_example.train_and_test(
dataset_url=dataset_url,
training_iterations=10,
batch_size=10,
evaluation_interval=10,
)
| 34.878613 | 115 | 0.708982 |
55fa6cc2808021f59d7c485685c587139a47a63d | 3,466 | py | Python | src/scraper/helpers.py | open-austin/Odyssey-Court-Records-to-JSON | 2fc8bfaceece02ed34aeaa387a686f7a1a43ed3b | ["MIT"] | 5 | 2022-02-06T23:26:20.000Z | 2022-03-23T05:50:00.000Z | src/scraper/helpers.py | open-austin/Odyssey-Court-Records-to-JSON | 2fc8bfaceece02ed34aeaa387a686f7a1a43ed3b | ["MIT"] | 11 | 2022-02-07T00:25:36.000Z | 2022-03-31T23:07:52.000Z | src/scraper/helpers.py | open-austin/Odyssey-Court-Records-to-JSON | 2fc8bfaceece02ed34aeaa387a686f7a1a43ed3b | ["MIT"] | 1 | 2022-03-13T22:29:40.000Z | 2022-03-13T22:29:40.000Z |
import os, sys
import requests
from time import sleep
from logging import Logger
from typing import Dict, Optional, Tuple, Literal
from enum import Enum
def write_debug_and_quit(
page_text: str, logger: Logger, verification_text: Optional[str] = None
) -> None:
logger.error(
(
f"{verification_text} could not be found in page."
if verification_text
else "Failed to load page."
)
+ f" Aborting. Writing /data/debug.html with response. May not be HTML."
)
with open(os.path.join("data", "debug.html"), "w") as file_handle:
file_handle.write(page_text)
sys.exit(1)
# helper function to make form data
def create_search_form_data(
date: str, JO_id: str, hidden_values: Dict[str, str], odyssey_version: int
) -> Dict[str, str]:
form_data = {}
form_data.update(hidden_values)
if odyssey_version < 2017:
form_data.update(
{
"SearchBy": "3",
"cboJudOffc": JO_id,
"DateSettingOnAfter": date,
"DateSettingOnBefore": date,
"SearchType": "JUDOFFC", # Search by Judicial Officer
"SearchMode": "JUDOFFC",
"CaseCategories": "CR", # "CR,CV,FAM,PR" criminal, civil, family, probate and mental health - these are the options
}
)
else:
form_data.update(
{
"SearchCriteria.SelectedHearingType": "Criminal Hearing Types",
"SearchCriteria.SearchByType": "JudicialOfficer",
"SearchCriteria.SelectedJudicialOfficer": JO_id,
"SearchCriteria.DateFrom": date,
"SearchCriteria.DateTo": date,
}
)
return form_data
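# Example (illustrative sketch; the hidden values and judicial-officer id below
# are hypothetical): building the POST body for a pre-2017 Odyssey search.
#
#   form_data = create_search_form_data(
#       date="01/03/2022",
#       JO_id="12345",
#       hidden_values={"__VIEWSTATE": "..."},
#       odyssey_version=2016,
#   )
#   # form_data now contains the SearchBy / cboJudOffc / DateSetting* keys.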
class HTTPMethod(Enum):
POST: int = 1
GET: int = 2
def request_page_with_retry(
session: requests.Session,
url: str,
logger: Logger,
verification_text: Optional[str] = None,
http_method: Literal[HTTPMethod.POST, HTTPMethod.GET] = HTTPMethod.POST,
params: Dict[str, str] = {},
data: Optional[Dict[str, str]] = None,
max_retries: int = 5,
    ms_wait: int = 200,
) -> str:
response = None
for i in range(max_retries):
sleep(ms_wait / 1000 * (i + 1))
failed = False
try:
if http_method == HTTPMethod.POST:
if not data:
response = session.post(url, params=params)
else:
response = session.post(url, data=data, params=params)
elif http_method == HTTPMethod.GET:
if not data:
response = session.get(url, params=params)
else:
response = session.get(url, data=data, params=params)
response.raise_for_status()
if verification_text:
if verification_text not in response.text:
failed = True
logger.error(
f"Verification text {verification_text} not in response"
)
except requests.RequestException as e:
logger.exception(f"Failed to get url {url}, try {i}")
failed = True
        if not failed:
            return response.text
    # All retries failed: dump the last response (if any) for debugging and abort.
    write_debug_and_quit(
        verification_text=verification_text,
        page_text=response.text if response is not None else "",
        logger=logger,
    )
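# Example (illustrative sketch; the URL, marker text and logger are hypothetical):
#
#   session = requests.Session()
#   html = request_page_with_retry(
#       session=session,
#       url="https://example.com/Search.aspx",
#       logger=logger,
#       verification_text="Search Results",
#       http_method=HTTPMethod.GET,
#   )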
| 33.326923 | 132 | 0.565782 |
c3cc4a8dcf9d9e5ada33c2a69d610435fbb8377d | 177 | py | Python | initpylib/templates_common/_PLEASE_PYPROJECT_NAME_/__PLEASE_PYPROJECT_NAME_.py | kirin123kirin/initpylib_capi | 48442f10fbbbbd6d3caad2517a3cbccfe541ca37 | ["MIT"] | null | null | null | initpylib/templates_common/_PLEASE_PYPROJECT_NAME_/__PLEASE_PYPROJECT_NAME_.py | kirin123kirin/initpylib_capi | 48442f10fbbbbd6d3caad2517a3cbccfe541ca37 | ["MIT"] | null | null | null | initpylib/templates_common/_PLEASE_PYPROJECT_NAME_/__PLEASE_PYPROJECT_NAME_.py | kirin123kirin/initpylib_capi | 48442f10fbbbbd6d3caad2517a3cbccfe541ca37 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def _PLEASE_PYPROJECT_NAME_(word=None):
print(word)
if __name__ == "__main__":
_PLEASE_PYPROJECT_NAME_("hello world!")
| 17.7 | 43 | 0.677966 |
efb0ae99c05f0b0f05a9ef761ff56a696b5fa9f1 | 2,516 | py | Python | test/functional/p2p_blocksonly.py | estxcoin/estcore | 4398b1d944373fe25668469966fa2660da454279 | ["MIT"] | 1 | 2019-09-17T07:53:52.000Z | 2019-09-17T07:53:52.000Z | test/functional/p2p_blocksonly.py | estxcoin/estcore | 4398b1d944373fe25668469966fa2660da454279 | ["MIT"] | null | null | null | test/functional/p2p_blocksonly.py | estxcoin/estcore | 4398b1d944373fe25668469966fa2660da454279 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p blocksonly"""
from test_framework.messages import msg_tx, CTransaction, FromHex
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.estxconfig import INITIAL_BLOCK_REWARD
class P2PBlocksOnly(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-blocksonly"]]
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info('Check that txs from p2p are rejected')
prevtx = self.nodes[0].getblock(self.nodes[0].getblockhash(1), 2)['tx'][0]
rawtx = self.nodes[0].createrawtransaction(
inputs=[{
'txid': prevtx['txid'],
'vout': 0
}],
outputs=[{
self.nodes[0].get_deterministic_priv_key().address: INITIAL_BLOCK_REWARD - 0.00125
}],
)
sigtx = self.nodes[0].signrawtransactionwithkey(
hexstring=rawtx,
privkeys=[self.nodes[0].get_deterministic_priv_key().key],
prevtxs=[{
'txid': prevtx['txid'],
'vout': 0,
'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
}],
)['hex']
assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']):
self.nodes[0].p2p.send_message(msg_tx(FromHex(CTransaction(), sigtx)))
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
self.log.info('Check that txs from rpc are not rejected and relayed to other peers')
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
txid = self.nodes[0].testmempoolaccept([sigtx])[0]['txid']
with self.nodes[0].assert_debug_log(['received getdata for: tx {} peer=0'.format(txid)]):
self.nodes[0].sendrawtransaction(sigtx)
self.nodes[0].p2p.wait_for_tx(txid)
assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
if __name__ == '__main__':
P2PBlocksOnly().main()
| 41.933333 | 98 | 0.639507 |
6a40171717a5a3a99b4de2bede01d53c0f2ad5e5 | 1,241 | py | Python | stream_alert/rules_engine/main.py | yutiansut/streamalert | 7d198a3273781f66465420e90886a3ce53ec7559 | ["Apache-2.0"] | 7 | 2018-12-26T14:38:08.000Z | 2022-03-09T13:21:00.000Z | stream_alert/rules_engine/main.py | revaniki/streamalert | 7d198a3273781f66465420e90886a3ce53ec7559 | ["Apache-2.0"] | 14 | 2018-05-09T19:18:15.000Z | 2021-06-02T02:34:09.000Z | stream_alert/rules_engine/main.py | revaniki/streamalert | 7d198a3273781f66465420e90886a3ce53ec7559 | ["Apache-2.0"] | 1 | 2018-12-06T20:51:58.000Z | 2018-12-06T20:51:58.000Z |
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import # Suppresses RuntimeWarning import error in Lambda
import json
from stream_alert.rules_engine import RulesEngine
from stream_alert.shared import logger
def handler(event, _):
"""Main Lambda handler function"""
try:
records = []
for record in event.get('Records', []):
body = json.loads(record['body'])
if isinstance(body, list):
records.extend(body)
else:
records.append(body)
RulesEngine().run(records)
except Exception:
logger.get_logger(__name__).exception('Invocation event: %s', json.dumps(event))
raise
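# Example of the SQS-shaped event this handler expects (illustrative sketch;
# the record bodies are hypothetical JSON-encoded payloads):
#
#   event = {
#       "Records": [
#           {"body": "[{\"key\": \"value\"}]"},   # list bodies are flattened
#           {"body": "{\"key\": \"value\"}"},     # dict bodies are appended
#       ]
#   }
#   handler(event, None)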
| 32.657895 | 90 | 0.704271 |
1261f6eee4f34733967bedf692e3168f6260c260 | 3,284 | py | Python | circuitPython/examples/internet-data-display/page2_ui.py | BRTSG-FOSS/pico-bteve | 1697b9a972ad5e9c2cecca6d560aa16cab725a61 | ["MIT"] | 1 | 2022-01-29T03:16:57.000Z | 2022-01-29T03:16:57.000Z | circuitPython/examples/internet-data-display/page2_ui.py | BRTSG-FOSS/pico-brteve | 1697b9a972ad5e9c2cecca6d560aa16cab725a61 | ["MIT"] | 15 | 2021-09-22T08:36:08.000Z | 2022-01-26T08:51:42.000Z | circuitPython/examples/internet-data-display/page2_ui.py | BRTSG-FOSS/pico-bteve | 1697b9a972ad5e9c2cecca6d560aa16cab725a61 | ["MIT"] | null | null | null |
from brteve.brt_eve_common import align4
from brteve.brt_eve_bt817_8 import BrtEve
class Page2_UI:
def __init__(self, eve: BrtEve) -> None:
self.eve=eve
self.CMD_PRIV = 1
self.CMD_NEXT = 2
self.CMD_BACK = 3
self._oldtag = 0
def start_load_image(self):
eve=self.eve
# start drawing
eve.ClearColorRGB(255, 255, 255)
eve.Clear()
eve.ColorRGB(255, 255, 255)
eve.VertexFormat(4)
self.eve.cmd_loadimage(0, 0)
def write_image_buffer(self, buff):
self.eve.cc(align4(buff))
def message(self, title, info):
eve=self.eve
eve.ClearColorRGB(255, 255, 255)
eve.Clear()
eve.ColorRGB(255, 255, 255)
eve.VertexFormat(4)
tx=eve.lcd_width/2 - len(title) * 5
ty=eve.lcd_height/2
eve.ColorRGB(0, 0, 0)
eve.cmd_text(tx, ty, 30, 0, title)
eve.cmd_text(tx, ty + 50, 25, 0, info)
eve.swap()
def draw_image(self, img, title):
eve=self.eve
        if img != '':  # show a local image from the sdcard
eve.ClearColorRGB(255, 255, 255)
eve.Clear()
eve.ColorRGB(255, 255, 255)
eve.VertexFormat(4)
eve.cmd_loadimage(0, 0)
eve.load(open(img, "rb"))
# validate image
eve.flush()
rp=eve.eve_write_pointer()
eve.cmd_getprops()
eve.flush()
w=eve.rd32(eve.RAM_CMD + rp+4*2)
h=eve.rd32(eve.RAM_CMD + rp+4*3)
if w<0 or w > eve.lcd_width:
print("Image invalid")
return 0
if h<0 or h > eve.lcd_height:
print("Image invalid")
return 0
x=eve.lcd_width/2-w/2
y=eve.lcd_height/2-h/2
eve.Begin(eve.BITMAPS)
eve.Vertex2f(x, y)
tx = x + w/2 - len(title) * 7
ty = y+ h + 10
eve.ColorRGB(0, 0, 0)
eve.cmd_text(tx, ty, 30, 0, title)
# control button
w=100
h=(int)(eve.lcd_height / 2)
eve.cmd_text(w/2, eve.lcd_height/2, 30, 0, "<")
eve.cmd_text((int)(eve.lcd_width - w/2), eve.lcd_height/2, 30, 0, ">")
eve.ColorA(0)
eve.Tag(self.CMD_PRIV)
eve.cmd_button(0, (int)(eve.lcd_height / 2 - h/2), w, h, 25, 0, "Previous")
eve.cmd_track(0, (int)(eve.lcd_height / 2 - h/2), w, h, self.CMD_PRIV)
eve.Tag(self.CMD_NEXT)
eve.cmd_button(eve.lcd_width - w, (int)(eve.lcd_height / 2 - h/2), w, h, 25, 0, "Next")
eve.cmd_track(eve.lcd_width - w, (int)(eve.lcd_height / 2 - h/2), w, h, self.CMD_NEXT)
w=100
h=50
eve.ColorA(200)
eve.cmd_fgcolor(0xb9b900)
eve.ColorRGB(255, 255, 255)
eve.Tag(self.CMD_BACK)
eve.cmd_button(2, 2, w, h, 20, 0, "Back")
eve.cmd_button(2, 2, w, h, 20, 0, "Back")
eve.swap()
eve.flush()
return 1
def get_comand(self):
eve=self.eve
tag = eve.rd32(eve.REG_TOUCH_TAG) & 0xFF
if tag == 0:
tag = eve.rd32(eve.REG_TRACKER) & 0xFF
if tag == 1:
return self.CMD_PRIV
if tag == 2:
return self.CMD_NEXT
if tag == 3:
return self.CMD_BACK
return 0
| 26.918033 | 95 | 0.529233 |
03af17b9230d8ed5914beeb754cb241d79b84636 | 2,511 | py | Python | Classes/Super/Camera.py | crablab/cs1830_project | af0767a5860e18f5c7d58464704f186552a90ee6 | ["MIT"] | null | null | null | Classes/Super/Camera.py | crablab/cs1830_project | af0767a5860e18f5c7d58464704f186552a90ee6 | ["MIT"] | null | null | null | Classes/Super/Camera.py | crablab/cs1830_project | af0767a5860e18f5c7d58464704f186552a90ee6 | ["MIT"] | null | null | null |
from Classes.Base.Vector import Vector
import configparser
config = configparser.ConfigParser()
config.read_file(open('Classes/config'))
import uuid
import time
class Camera:
def __init__(self, origin, dim):
self.idClass = 1
self.idObject = uuid.uuid4()
self.origin = origin
self.dim = dim
self.dimCanv=Vector(int(config['CANVAS']['CANVAS_WIDTH']),int(config['CANVAS']['CANVAS_HEIGHT']))
self.zoomIn=False
self.zoomOut=False
self.moveLeft=False
self.moveRight=False
self.moveUp=False
self.moveDown=False
self.maxZoomDist=int(config['CAMERA']['CAM_MAX_ZOOM_DIST'])
self.minZoomDist = int(config['CAMERA']['CAM_MIN_ZOOM_DIST'])
self.moveSensitivity=int(config['CAMERA']['CAM_MOVE_SENSITIVITY'])
self.zoomSensitivity=float(config['CAMERA']['CAM_ZOOM_SENSITIVITY'])
self.currentTime=time.time()
def move(self,playerId,player_list):
for player in player_list:
if playerId == player.idObject:
pos = player.particle.pos.copy()
self.currentTime=time.time()
if self.moveUp==True and config['DEVELOPER']['GOD_MODE']=='True':
self.origin.add(Vector(0,-self.moveSensitivity))
if self.moveDown==True and config['DEVELOPER']['GOD_MODE']=='True':
self.origin.add(Vector(0,self.moveSensitivity))
if self.moveLeft == True and config['DEVELOPER']['GOD_MODE']=='True':
self.origin.add(Vector(-self.moveSensitivity,0))
if self.moveRight == True and config['DEVELOPER']['GOD_MODE']=='True':
self.origin.add(Vector(self.moveSensitivity,0))
def zoom(self):
if self.zoomOut == True and ((self.dim.x<self.maxZoomDist and self.dim.y<self.maxZoomDist)or config['DEVELOPER']['GOD_MODE']=='True'):
self.dim.add(self.dim.copy().multiply(self.zoomSensitivity))
if self.zoomIn == True and ((self.dim.x>self.minZoomDist and self.dim.y>self.minZoomDist)or config['DEVELOPER']['GOD_MODE']=='True'):
self.dim.add(self.dim.copy().multiply(-self.zoomSensitivity))
def ratioToCam(self):
return(self.dimCanv.copy().divideVector(self.dim))
def ratioToCanv(self):
return (self.dim.copy().divideVector(self.dimCanv))
def get(self):
return(self.origin, self.dim.x)
def recieve(self,other):
self.currentTime=other.currentTime
self.origin=other.origin
self.dim=other.dim
| 37.477612 | 142 | 0.649542 |
9917370a9755f546072515c081158a22e0ef3ffb | 828 | py | Python | python/test_numpy.py | Halo9Pan/experiment | 856facc1a6699e098e95fc4feb5485f878a2ba20 | ["MIT"] | null | null | null | python/test_numpy.py | Halo9Pan/experiment | 856facc1a6699e098e95fc4feb5485f878a2ba20 | ["MIT"] | null | null | null | python/test_numpy.py | Halo9Pan/experiment | 856facc1a6699e098e95fc4feb5485f878a2ba20 | ["MIT"] | null | null | null |
import numpy as np
import time
N = 6000
M = 10000
k_list = [64, 80, 96, 104, 112, 120, 128, 144, 160, 176, 192, 200, 208, 224, 240, 256, 384]
def get_gflops(M, N, K):
return M*N*(2.0*K-1.0) / 1000**3
np.show_config()
for K in k_list:
a = np.array(np.random.random((M, N)), dtype=np.double, order='C', copy=False)
b = np.array(np.random.random((N, K)), dtype=np.double, order='C', copy=False)
A = np.matrix(a, dtype=np.double, copy=False)
B = np.matrix(b, dtype=np.double, copy=False)
C = A*B
start = time.time()
C = A*B
C = A*B
C = A*B
C = A*B
C = A*B
end = time.time()
tm = (end-start) / 5.0
print ('{0:4}, {1:9.7}, {2:9.7}'.format(K, tm, get_gflops(M, N, K) / tm))
| 23.657143 | 93 | 0.492754 |
7d18dd3203b7119834318c4470153b6b81e4c9b8 | 1,840 | py | Python | generators/name.py | vickio/compgen | 7bb9a473622e53df18501b577dca4a33fc83922c | ["MIT"] | 2 | 2018-11-24T05:52:48.000Z | 2018-11-29T20:46:18.000Z | generators/name.py | vickio/compgen | 7bb9a473622e53df18501b577dca4a33fc83922c | ["MIT"] | null | null | null | generators/name.py | vickio/compgen | 7bb9a473622e53df18501b577dca4a33fc83922c | ["MIT"] | 2 | 2018-11-23T12:33:07.000Z | 2018-11-27T02:50:06.000Z |
from random import choice
from string import Template
from . import BaseGenerator
class Name(BaseGenerator):
def __init__(self, company):
self.company = company
self.data = self._load_json('name.json')
self.templates = self.data.pop('templates')
self.nouns = self._load_txt('nouns.txt')
self.adjectives = self._load_txt('adjectives.txt')
self.founder_data = self._load_json('founder.json')
def generate(self):
template = Template(self._choose(self.templates))
elements = {}
for key, options in self.data.items():
elements[key] = self._choose(options)
for noun in ['noun', 'noun2']:
elements[noun] = choice(self.nouns)
if not elements[noun].isupper():
elements[noun] = elements[noun].title()
elements['adjective'] = choice(self.adjectives).title()
elements['adjective2'] = choice(self.adjectives).title()
fname, lname = self.company.founder.split(' ')
fake = self.company._fake
elements['lname'] = lname
elements['lname2'] = self._choose(self.founder_data['last_name'])
elements['lname3'] = self._choose(self.founder_data['last_name'])
elements['fname'] = fname
elements['place'] = choice([self.company.city, self.company.state_name])
elements['fakeword'] = fake.word().title()
if len(elements['fakeword']) <= 3:
elements['fakeword'] = elements['fakeword'].upper()
if self.company.founder_gender == 'male':
elements['family'] = elements['family_male']
else:
elements['family'] = elements['family_female']
return template.substitute(elements)
| 34.716981 | 80 | 0.584239 |
79c43126ab265ebcf6a340757400bd518535df16 | 2,665 | py | Python | geonet/migrations/0001_initial.py | bbengfort/kahu | 57a2ba417d545a57a987b3620e46e56f023134d6 | ["MIT"] | 1 | 2018-08-27T10:07:06.000Z | 2018-08-27T10:07:06.000Z | geonet/migrations/0001_initial.py | bbengfort/kahu | 57a2ba417d545a57a987b3620e46e56f023134d6 | ["MIT"] | 22 | 2018-06-09T14:16:36.000Z | 2018-06-15T10:56:58.000Z | geonet/migrations/0001_initial.py | bbengfort/kahu | 57a2ba417d545a57a987b3620e46e56f023134d6 | ["MIT"] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-11 20:20
import django.contrib.postgres.fields.jsonb
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BotoCache',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('operation', models.CharField(help_text='unique name of the boto operation being cached', max_length=255, unique=True)),
],
options={
'db_table': 'boto_cache',
'get_latest_by': 'modified',
},
),
migrations.CreateModel(
name='RegionCache',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('region', models.CharField(help_text='name of the region the request was made to', max_length=255)),
('cache', django.contrib.postgres.fields.jsonb.JSONField(blank=True, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder, help_text='the actual cached data from the Boto3 response', null=True)),
('error', models.CharField(blank=True, default='', editable=False, help_text='any error or exception raised during the operation', max_length=255, null=True)),
('operation', models.ForeignKey(help_text='the associated operation the data is being stored for', on_delete=django.db.models.deletion.CASCADE, related_name='regions', to='geonet.BotoCache')),
],
options={
'db_table': 'boto_region_cache',
'get_latest_by': 'modified',
},
),
migrations.AlterUniqueTogether(
name='regioncache',
unique_together={('region', 'operation')},
),
]
| 50.283019 | 229 | 0.646904 |
c5da5c5cf1a064417a34aeba023bbfa24ba768c2 | 2,868 | py | Python | plotting/plotting/status.py | eric-erki/GISportal | 407764334b3ba50da4429fc170b98e20468ff8a0 | ["Apache-2.0"] | 55 | 2015-03-20T23:54:17.000Z | 2022-01-22T04:33:06.000Z | plotting/plotting/status.py | eric-erki/GISportal | 407764334b3ba50da4429fc170b98e20468ff8a0 | ["Apache-2.0"] | 27 | 2016-03-14T15:44:05.000Z | 2021-09-03T10:23:06.000Z | plotting/plotting/status.py | eric-erki/GISportal | 407764334b3ba50da4429fc170b98e20468ff8a0 | ["Apache-2.0"] | 24 | 2015-03-11T14:59:24.000Z | 2022-03-31T10:47:17.000Z |
import json
from plotting.debug import debug
# Home rolled enums as Python 2.7 does not have them.
class Enum(set):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
# Valid plot status values.
Plot_status = Enum(["initialising", "extracting", "plotting", "complete", "failed"])
def read_status(dirname, my_hash):
'''
Reads a JSON status file whose name is defined by dirname and my_hash.
'''
status = None
file_path = dirname + "/" + my_hash + "-status.json"
try:
with open(file_path, 'r') as status_file:
status = json.load(status_file)
except IOError as err:
if err.errno == 2:
debug(2, u"Status file {} not found".format(file_path))
else:
raise
return status
# END read_status
def update_status(dirname, my_hash, plot_status, message="", percentage=0, traceback="", base_url="", minutes_remaining=-1):
'''
Updates a JSON status file whose name is defined by dirname and my_hash.
'''
initial_status = dict(
percentage = 0,
state = plot_status,
message = message,
completed = False,
traceback= traceback,
job_id = my_hash,
minutes_remaining = -1
)
# Read status file, create if not there.
file_path = dirname + "/" + my_hash + "-status.json"
try:
with open(file_path, 'r') as status_file:
if plot_status == Plot_status.initialising:
status = initial_status
else:
status = json.load(status_file)
except IOError as err:
if err.errno == 2:
debug(2, u"Status file {} not found".format(file_path))
# It does not exist yet so create the initial JSON
status = initial_status
else:
raise
# Update the status information.
status["message"] = message
status["traceback"] = traceback
status["state"] = plot_status
if plot_status == Plot_status.complete:
status["completed"] = True
status['percentage'] = 100
status['minutes_remaining'] = 0
status['filename'] = dirname + "/" + my_hash + "-plot.html"
status['csv'] = dirname + "/" + my_hash + ".zip"
if base_url:
status['csv_url'] = base_url + "/" + my_hash + ".zip"
elif plot_status == Plot_status.failed:
status["completed"] = True
status['percentage'] = 100
status['minutes_remaining'] = 0
status['filename'] = None
status['csv'] = None
else:
status["completed"] = False
status['percentage'] = percentage
status['minutes_remaining'] = minutes_remaining
status['filename'] = None
status['csv'] = None
debug(4, u"Status: {}".format(status))
# Write it back to the file.
with open(file_path, 'w') as status_file:
json.dump(status, status_file)
return status
# END update_status
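# Example (illustrative sketch; the directory and hash below are hypothetical):
#
#   update_status("/tmp/plots", "abc123", Plot_status.extracting,
#                 message="Extracting data", percentage=25)
#   status = read_status("/tmp/plots", "abc123")
#   # status["state"] == "extracting" and status["completed"] is False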
| 29.265306 | 124 | 0.621339 |
e55a022d40ea10ce099eb702250581bd0db311b7 | 980 | py | Python | google/appengine/tools/augment_mimetypes.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | ["Apache-2.0"] | 1,463 | 2015-04-07T09:41:28.000Z | 2022-03-30T02:05:19.000Z | server/lib/google/appengine/tools/augment_mimetypes.py | Palsoso/GoAgent-Always-Available | 86c2fee6474890ff26b5a66b74a1306f7f31988b | ["WTFPL", "Apache-2.0"] | 73 | 2015-04-08T05:08:52.000Z | 2019-06-05T06:38:21.000Z | server/lib/google/appengine/tools/augment_mimetypes.py | Palsoso/GoAgent-Always-Available | 86c2fee6474890ff26b5a66b74a1306f7f31988b | ["WTFPL", "Apache-2.0"] | 698 | 2015-04-28T12:02:00.000Z | 2022-03-19T23:53:55.000Z |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Augment the mimetypes provided by Python."""
import mimetypes
def init():
mimetypes.add_type('application/dart', '.dart')
mimetypes.add_type('text/css', '.gss')
mimetypes.add_type('text/html', '.ng')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
mimetypes.add_type('application/font-woff2', '.woff2')
| 33.793103 | 74 | 0.735714 |
2ac585a6b47e0b35f12ec69e8771e73076858e6a | 1,078 | py | Python | resources/graphics/makeframes.py | paulscottrobson/string-trainer | 2c48c892d81d6dd700f091d6c0fadbf5df3d70fc | ["MIT"] | null | null | null | resources/graphics/makeframes.py | paulscottrobson/string-trainer | 2c48c892d81d6dd700f091d6c0fadbf5df3d70fc | ["MIT"] | null | null | null | resources/graphics/makeframes.py | paulscottrobson/string-trainer | 2c48c892d81d6dd700f091d6c0fadbf5df3d70fc | ["MIT"] | null | null | null |
#
# Notebutton graphics creator.
#
from PIL import Image,ImageDraw
height = 50
roundWidth = 40
y3d = 8
widthGroup = [ 0,10,25,40,66,75,100,150,200,250,300 ]
for width in widthGroup:
for offset3D in range(1,2):
xl = 100+roundWidth/2
xr = xl + width
yc = 100
im = Image.new("RGBA",(600,200),0x00000000)
draw = ImageDraw.Draw(im)
if (offset3D != 0):
c1 = 0xFF808080
y = yc + y3d
draw.ellipse((xl-roundWidth/2,y-height/2,xl+roundWidth/2,y+height/2),fill = c1)
draw.ellipse((xr-roundWidth/2,y-height/2,xr+roundWidth/2,y+height/2),fill = c1)
draw.rectangle((xl,y-height/2,xr,y+height/2),fill = c1)
c1 = 0xFFFFFFFF
y = yc
draw.ellipse((xl-roundWidth/2,y-height/2,xl+roundWidth/2,y+height/2),fill = c1)
draw.ellipse((xr-roundWidth/2,y-height/2,xr+roundWidth/2,y+height/2),fill = c1)
draw.rectangle((xl,y-height/2,xr,y+height/2),fill = c1)
name = "source/notebutton_{0}_{1}.png".format("up" if offset3D != 0 else "down",width)
im2 = im.crop((xl-roundWidth/2-2,yc-height/2-2,xr+roundWidth/2+2,yc+height/2+y3d+2))
im2.save(name)
#print(name)
| 33.6875 | 88 | 0.675325 |
00afcbffdebb2adbb65d2e05ae834e5c6b41e309 | 29,457 | py | Python | dbn/models.py | CurrenWong/deep-belief-network | f6ca118c187816dee753e0004149c1e55737a20a | ["MIT"] | null | null | null | dbn/models.py | CurrenWong/deep-belief-network | f6ca118c187816dee753e0004149c1e55737a20a | ["MIT"] | null | null | null | dbn/models.py | CurrenWong/deep-belief-network | f6ca118c187816dee753e0004149c1e55737a20a | ["MIT"] | null | null | null |
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.stats import truncnorm
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, RegressorMixin
from .activations import SigmoidActivationFunction, ReLUActivationFunction
from .utils import batch_generator
class BaseModel(object):
def save(self, save_path):
import pickle
with open(save_path, 'wb') as fp:
pickle.dump(self, fp)
@classmethod
def load(cls, load_path):
import pickle
with open(load_path, 'rb') as fp:
return pickle.load(fp)
class BinaryRBM(BaseEstimator, TransformerMixin, BaseModel):
"""
This class implements a Binary Restricted Boltzmann machine.
"""
def __init__(self,
n_hidden_units=100,
activation_function='sigmoid',
optimization_algorithm='sgd',
learning_rate=1e-3,
n_epochs=10,
contrastive_divergence_iter=1,
batch_size=32,
verbose=True):
self.n_hidden_units = n_hidden_units
self.activation_function = activation_function
self.optimization_algorithm = optimization_algorithm
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.contrastive_divergence_iter = contrastive_divergence_iter
self.batch_size = batch_size
self.verbose = verbose
def fit(self, X):
"""
Fit a model given data.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
self.n_visible_units = X.shape[1]
if self.activation_function == 'sigmoid':
self.W = np.random.randn(
self.n_hidden_units, self.n_visible_units) / np.sqrt(self.n_visible_units)
self.c = np.random.randn(
self.n_hidden_units) / np.sqrt(self.n_visible_units)
self.b = np.random.randn(
self.n_visible_units) / np.sqrt(self.n_visible_units)
self._activation_function_class = SigmoidActivationFunction
elif self.activation_function == 'relu':
self.W = truncnorm.rvs(-0.2, 0.2, size=[self.n_hidden_units, self.n_visible_units]) / np.sqrt(
self.n_visible_units)
self.c = np.full(self.n_hidden_units, 0.1) / \
np.sqrt(self.n_visible_units)
self.b = np.full(self.n_visible_units, 0.1) / \
np.sqrt(self.n_visible_units)
self._activation_function_class = ReLUActivationFunction
else:
raise ValueError("Invalid activation function.")
if self.optimization_algorithm == 'sgd':
self._stochastic_gradient_descent(X)
else:
raise ValueError("Invalid optimization algorithm.")
return self
def transform(self, X):
"""
Transforms data using the fitted model.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
if len(X.shape) == 1: # It is a single sample
return self._compute_hidden_units(X)
transformed_data = self._compute_hidden_units_matrix(X)
return transformed_data
def _reconstruct(self, transformed_data):
"""
Reconstruct visible units given the hidden layer output.
:param transformed_data: array-like, shape = (n_samples, n_features)
:return:
"""
return self._compute_visible_units_matrix(transformed_data)
def _stochastic_gradient_descent(self, _data):
"""
        Performs the stochastic gradient descent optimization algorithm.
:param _data: array-like, shape = (n_samples, n_features)
:return:
"""
accum_delta_W = np.zeros(self.W.shape)
accum_delta_b = np.zeros(self.b.shape)
accum_delta_c = np.zeros(self.c.shape)
for iteration in range(1, self.n_epochs + 1):
idx = np.random.permutation(len(_data))
data = _data[idx]
for batch in batch_generator(self.batch_size, data):
accum_delta_W[:] = .0
accum_delta_b[:] = .0
accum_delta_c[:] = .0
for sample in batch:
delta_W, delta_b, delta_c = self._contrastive_divergence(
sample)
accum_delta_W += delta_W
accum_delta_b += delta_b
accum_delta_c += delta_c
self.W += self.learning_rate * \
(accum_delta_W / self.batch_size)
self.b += self.learning_rate * \
(accum_delta_b / self.batch_size)
self.c += self.learning_rate * \
(accum_delta_c / self.batch_size)
if self.verbose:
error = self._compute_reconstruction_error(data)
print(">> Epoch %d finished \tRBM Reconstruction error %f" %
(iteration, error))
def _contrastive_divergence(self, vector_visible_units):
"""
Computes gradients using Contrastive Divergence method.
:param vector_visible_units: array-like, shape = (n_features, )
:return:
"""
v_0 = vector_visible_units
v_t = np.array(v_0)
# Sampling
for t in range(self.contrastive_divergence_iter):
h_t = self._sample_hidden_units(v_t)
v_t = self._compute_visible_units(h_t)
# Computing deltas
v_k = v_t
h_0 = self._compute_hidden_units(v_0)
h_k = self._compute_hidden_units(v_k)
delta_W = np.outer(h_0, v_0) - np.outer(h_k, v_k)
delta_b = v_0 - v_k
delta_c = h_0 - h_k
return delta_W, delta_b, delta_c
def _sample_hidden_units(self, vector_visible_units):
"""
Computes hidden unit activations by sampling from a binomial distribution.
:param vector_visible_units: array-like, shape = (n_features, )
:return:
"""
hidden_units = self._compute_hidden_units(vector_visible_units)
return (np.random.random_sample(len(hidden_units)) < hidden_units).astype(np.int64)
def _sample_visible_units(self, vector_hidden_units):
"""
Computes visible unit activations by sampling from a binomial distribution.
:param vector_hidden_units: array-like, shape = (n_features, )
:return:
"""
visible_units = self._compute_visible_units(vector_hidden_units)
return (np.random.random_sample(len(visible_units)) < visible_units).astype(np.int64)
def _compute_hidden_units(self, vector_visible_units):
"""
Computes hidden unit outputs.
:param vector_visible_units: array-like, shape = (n_features, )
:return:
"""
v = np.expand_dims(vector_visible_units, 0)
h = np.squeeze(self._compute_hidden_units_matrix(v))
return np.array([h]) if not h.shape else h
def _compute_hidden_units_matrix(self, matrix_visible_units):
"""
Computes hidden unit outputs.
:param matrix_visible_units: array-like, shape = (n_samples, n_features)
:return:
"""
return np.transpose(self._activation_function_class.function(
np.dot(self.W, np.transpose(matrix_visible_units)) + self.c[:, np.newaxis]))
def _compute_visible_units(self, vector_hidden_units):
"""
Computes visible (or input) unit outputs.
:param vector_hidden_units: array-like, shape = (n_features, )
:return:
"""
h = np.expand_dims(vector_hidden_units, 0)
v = np.squeeze(self._compute_visible_units_matrix(h))
return np.array([v]) if not v.shape else v
def _compute_visible_units_matrix(self, matrix_hidden_units):
"""
Computes visible (or input) unit outputs.
:param matrix_hidden_units: array-like, shape = (n_samples, n_features)
:return:
"""
return self._activation_function_class.function(np.dot(matrix_hidden_units, self.W) + self.b[np.newaxis, :])
def _compute_free_energy(self, vector_visible_units):
"""
Computes the RBM free energy.
:param vector_visible_units: array-like, shape = (n_features, )
:return:
"""
v = vector_visible_units
return - np.dot(self.b, v) - np.sum(np.log(1 + np.exp(np.dot(self.W, v) + self.c)))
def _compute_reconstruction_error(self, data):
"""
Computes the reconstruction error of the data.
:param data: array-like, shape = (n_samples, n_features)
:return:
"""
data_transformed = self.transform(data)
data_reconstructed = self._reconstruct(data_transformed)
return np.mean(np.sum((data_reconstructed - data) ** 2, 1))
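# Example (illustrative sketch): fitting a BinaryRBM on a small random binary
# matrix and projecting it into the hidden-unit space. Shapes are hypothetical.
#
#   X = np.random.randint(0, 2, size=(64, 20)).astype(np.float64)
#   rbm = BinaryRBM(n_hidden_units=8, n_epochs=5, verbose=False)
#   hidden = rbm.fit(X).transform(X)   # shape (64, 8)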
class UnsupervisedDBN(BaseEstimator, TransformerMixin, BaseModel):
"""
    This class implements an unsupervised Deep Belief Network.
"""
def __init__(self,
hidden_layers_structure=[100, 100],
activation_function='sigmoid',
optimization_algorithm='sgd',
learning_rate_rbm=1e-3,
n_epochs_rbm=10,
contrastive_divergence_iter=1,
batch_size=32,
verbose=True):
self.hidden_layers_structure = hidden_layers_structure
self.activation_function = activation_function
self.optimization_algorithm = optimization_algorithm
self.learning_rate_rbm = learning_rate_rbm
self.n_epochs_rbm = n_epochs_rbm
self.contrastive_divergence_iter = contrastive_divergence_iter
self.batch_size = batch_size
self.rbm_layers = None
self.verbose = verbose
self.rbm_class = BinaryRBM
def fit(self, X, y=None):
"""
Fits a model given data.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
# Initialize rbm layers
self.rbm_layers = list()
if type(self.learning_rate_rbm) == list and len(self.learning_rate_rbm) > 1:
            # Set a different learning_rate for each layer
mark = 0
for n_hidden_units in self.hidden_layers_structure:
rbm = self.rbm_class(
n_hidden_units=n_hidden_units,
activation_function=self.activation_function,
optimization_algorithm=self.optimization_algorithm,
learning_rate=self.learning_rate_rbm[mark],
n_epochs=self.n_epochs_rbm,
contrastive_divergence_iter=self.contrastive_divergence_iter,
batch_size=self.batch_size,
verbose=self.verbose)
mark += 1
self.rbm_layers.append(rbm)
else:
mark = 0
for n_hidden_units in self.hidden_layers_structure:
rbm = self.rbm_class(
n_hidden_units=n_hidden_units,
activation_function=self.activation_function,
optimization_algorithm=self.optimization_algorithm,
learning_rate=self.learning_rate_rbm,
n_epochs=self.n_epochs_rbm,
contrastive_divergence_iter=self.contrastive_divergence_iter,
batch_size=self.batch_size,
verbose=self.verbose)
mark += 1
self.rbm_layers.append(rbm)
# Fit RBM
if self.verbose:
print("[START] Pre-training step:")
input_data = X
for rbm in self.rbm_layers:
rbm.fit(input_data)
input_data = rbm.transform(input_data)
if self.verbose:
print("[END] Pre-training step")
return self
def transform(self, X):
"""
Transforms data using the fitted model.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
input_data = X
for rbm in self.rbm_layers:
input_data = rbm.transform(input_data)
return input_data
class AbstractSupervisedDBN(BaseEstimator, BaseModel):
"""
Abstract class for supervised Deep Belief Network.
"""
__metaclass__ = ABCMeta
def __init__(self,
unsupervised_dbn_class,
hidden_layers_structure=[100, 100],
activation_function='sigmoid',
optimization_algorithm='sgd',
learning_rate=1e-3,
learning_rate_rbm=1e-3,
n_iter_backprop=100,
l2_regularization=1.0,
n_epochs_rbm=10,
contrastive_divergence_iter=1,
batch_size=32,
dropout_p=0, # float between 0 and 1. Fraction of the input units to drop
verbose=True):
self.unsupervised_dbn = unsupervised_dbn_class(
hidden_layers_structure=hidden_layers_structure,
activation_function=activation_function,
optimization_algorithm=optimization_algorithm,
learning_rate_rbm=learning_rate_rbm,
n_epochs_rbm=n_epochs_rbm,
contrastive_divergence_iter=contrastive_divergence_iter,
batch_size=batch_size,
verbose=verbose)
self.unsupervised_dbn_class = unsupervised_dbn_class
self.n_iter_backprop = n_iter_backprop
self.l2_regularization = l2_regularization
self.learning_rate = learning_rate
self.batch_size = batch_size
self.dropout_p = dropout_p
self.p = 1 - self.dropout_p
self.verbose = verbose
def fit(self, X, y=None, pre_train=True):
"""
Fits a model given data.
:param X: array-like, shape = (n_samples, n_features)
:param y : array-like, shape = (n_samples, )
:param pre_train: bool
:return:
"""
if pre_train:
self.pre_train(X)
self._fine_tuning(X, y)
return self
def predict(self, X):
"""
Predicts the target given data.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
if len(X.shape) == 1: # It is a single sample
X = np.expand_dims(X, 0)
transformed_data = self.transform(X)
predicted_data = self._compute_output_units_matrix(transformed_data)
return predicted_data
def pre_train(self, X):
"""
Apply unsupervised network pre-training.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
self.unsupervised_dbn.fit(X)
return self
def transform(self, *args):
return self.unsupervised_dbn.transform(*args)
@abstractmethod
def _transform_labels_to_network_format(self, labels):
return
@abstractmethod
def _compute_output_units_matrix(self, matrix_visible_units):
return
@abstractmethod
def _determine_num_output_neurons(self, labels):
return
@abstractmethod
def _stochastic_gradient_descent(self, data, labels):
return
@abstractmethod
def _fine_tuning(self, data, _labels):
return
class NumPyAbstractSupervisedDBN(AbstractSupervisedDBN):
"""
Abstract class for supervised Deep Belief Network in NumPy
"""
__metaclass__ = ABCMeta
def __init__(self, **kwargs):
super(NumPyAbstractSupervisedDBN, self).__init__(
UnsupervisedDBN, **kwargs)
def _compute_activations(self, sample):
"""
Compute output values of all layers.
:param sample: array-like, shape = (n_features, )
:return:
"""
input_data = sample
if self.dropout_p > 0:
r = np.random.binomial(1, self.p, len(input_data))
input_data *= r
layers_activation = list()
for rbm in self.unsupervised_dbn.rbm_layers:
input_data = rbm.transform(input_data)
if self.dropout_p > 0:
r = np.random.binomial(1, self.p, len(input_data))
input_data *= r
layers_activation.append(input_data)
# Computing activation of output layer
input_data = self._compute_output_units(input_data)
layers_activation.append(input_data)
return layers_activation
def _stochastic_gradient_descent(self, _data, _labels):
"""
        Performs the stochastic gradient descent optimization algorithm.
:param _data: array-like, shape = (n_samples, n_features)
:param _labels: array-like, shape = (n_samples, targets)
:return:
"""
if self.verbose:
matrix_error = np.zeros([len(_data), self.num_classes])
num_samples = len(_data)
accum_delta_W = [np.zeros(rbm.W.shape)
for rbm in self.unsupervised_dbn.rbm_layers]
accum_delta_W.append(np.zeros(self.W.shape))
accum_delta_bias = [np.zeros(rbm.c.shape)
for rbm in self.unsupervised_dbn.rbm_layers]
accum_delta_bias.append(np.zeros(self.b.shape))
for iteration in range(1, self.n_iter_backprop + 1):
idx = np.random.permutation(len(_data))
data = _data[idx]
labels = _labels[idx]
i = 0
for batch_data, batch_labels in batch_generator(self.batch_size, data, labels):
# Clear arrays
for arr1, arr2 in zip(accum_delta_W, accum_delta_bias):
arr1[:], arr2[:] = .0, .0
for sample, label in zip(batch_data, batch_labels):
delta_W, delta_bias, predicted = self._backpropagation(
sample, label)
for layer in range(len(self.unsupervised_dbn.rbm_layers) + 1):
accum_delta_W[layer] += delta_W[layer]
accum_delta_bias[layer] += delta_bias[layer]
if self.verbose:
loss = self._compute_loss(predicted, label)
matrix_error[i, :] = loss
i += 1
layer = 0
for rbm in self.unsupervised_dbn.rbm_layers:
# Updating parameters of hidden layers
rbm.W = (1 - (
self.learning_rate * self.l2_regularization) / num_samples) * rbm.W - self.learning_rate * (
accum_delta_W[layer] / self.batch_size)
rbm.c -= self.learning_rate * \
(accum_delta_bias[layer] / self.batch_size)
layer += 1
# Updating parameters of output layer
self.W = (1 - (
self.learning_rate * self.l2_regularization) / num_samples) * self.W - self.learning_rate * (
accum_delta_W[layer] / self.batch_size)
self.b -= self.learning_rate * \
(accum_delta_bias[layer] / self.batch_size)
if self.verbose:
error = np.mean(np.sum(matrix_error, 1))
print(">> Epoch %d finished \tANN training loss %.10f" %
(iteration, error))
def _backpropagation(self, input_vector, label):
"""
Performs Backpropagation algorithm for computing gradients.
:param input_vector: array-like, shape = (n_features, )
:param label: array-like, shape = (n_targets, )
:return:
"""
x, y = input_vector, label
deltas = list()
list_layer_weights = list()
for rbm in self.unsupervised_dbn.rbm_layers:
list_layer_weights.append(rbm.W)
list_layer_weights.append(self.W)
# Forward pass
layers_activation = self._compute_activations(input_vector)
# Backward pass: computing deltas
activation_output_layer = layers_activation[-1]
delta_output_layer = self._compute_output_layer_delta(
y, activation_output_layer)
deltas.append(delta_output_layer)
layer_idx = list(range(len(self.unsupervised_dbn.rbm_layers)))
layer_idx.reverse()
delta_previous_layer = delta_output_layer
for layer in layer_idx:
neuron_activations = layers_activation[layer]
W = list_layer_weights[layer + 1]
delta = np.dot(delta_previous_layer, W) * self.unsupervised_dbn.rbm_layers[
layer]._activation_function_class.prime(neuron_activations)
deltas.append(delta)
delta_previous_layer = delta
deltas.reverse()
# Computing gradients
layers_activation.pop()
layers_activation.insert(0, input_vector)
layer_gradient_weights, layer_gradient_bias = list(), list()
for layer in range(len(list_layer_weights)):
neuron_activations = layers_activation[layer]
delta = deltas[layer]
gradient_W = np.outer(delta, neuron_activations)
layer_gradient_weights.append(gradient_W)
layer_gradient_bias.append(delta)
return layer_gradient_weights, layer_gradient_bias, activation_output_layer
def _fine_tuning(self, data, _labels):
"""
Entry point of the fine tuning procedure.
:param data: array-like, shape = (n_samples, n_features)
:param _labels: array-like, shape = (n_samples, targets)
:return:
"""
self.num_classes = self._determine_num_output_neurons(_labels)
n_hidden_units_previous_layer = self.unsupervised_dbn.rbm_layers[-1].n_hidden_units
self.W = np.random.randn(self.num_classes, n_hidden_units_previous_layer) / np.sqrt(
n_hidden_units_previous_layer)
self.b = np.random.randn(self.num_classes) / \
np.sqrt(n_hidden_units_previous_layer)
labels = self._transform_labels_to_network_format(_labels)
# Scaling up weights obtained from pretraining
for rbm in self.unsupervised_dbn.rbm_layers:
rbm.W /= self.p
rbm.c /= self.p
if self.verbose:
print("[START] Fine tuning step:")
if self.unsupervised_dbn.optimization_algorithm == 'sgd':
self._stochastic_gradient_descent(data, labels)
else:
raise ValueError("Invalid optimization algorithm.")
# Scaling down weights obtained from pretraining
for rbm in self.unsupervised_dbn.rbm_layers:
rbm.W *= self.p
rbm.c *= self.p
if self.verbose:
print("[END] Fine tuning step")
@abstractmethod
def _compute_loss(self, predicted, label):
return
@abstractmethod
def _compute_output_layer_delta(self, label, predicted):
return
class SupervisedDBNClassification(NumPyAbstractSupervisedDBN, ClassifierMixin):
"""
This class implements a Deep Belief Network for classification problems.
    It appends a Softmax Linear Classifier as the output layer.
"""
def _transform_labels_to_network_format(self, labels):
"""
        Converts labels given as single integers (or strings) to one-hot row vectors. For instance, in a three-class
        problem the labels are mapped as label_1: [1 0 0], label_2: [0 1 0], label_3: [0 0 1].
:param labels: array-like, shape = (n_samples, )
:return:
"""
new_labels = np.zeros([len(labels), self.num_classes])
self.label_to_idx_map, self.idx_to_label_map = dict(), dict()
idx = 0
for i, label in enumerate(labels):
if label not in self.label_to_idx_map:
self.label_to_idx_map[label] = idx
self.idx_to_label_map[idx] = label
idx += 1
new_labels[i][self.label_to_idx_map[label]] = 1
return new_labels
def _transform_network_format_to_labels(self, indexes):
"""
Converts network output to original labels.
:param indexes: array-like, shape = (n_samples, )
:return:
"""
return list(map(lambda idx: self.idx_to_label_map[idx], indexes))
def _compute_output_units(self, vector_visible_units):
"""
Compute activations of output units.
:param vector_visible_units: array-like, shape = (n_features, )
:return:
"""
v = vector_visible_units
scores = np.dot(self.W, v) + self.b
# get unnormalized probabilities
exp_scores = np.exp(scores)
# normalize them for each example
return exp_scores / np.sum(exp_scores)
def _compute_output_units_matrix(self, matrix_visible_units):
"""
Compute activations of output units.
:param matrix_visible_units: shape = (n_samples, n_features)
:return:
"""
matrix_scores = np.transpose(np.dot(self.W, np.transpose(
matrix_visible_units)) + self.b[:, np.newaxis])
exp_scores = np.exp(matrix_scores)
return exp_scores / np.expand_dims(np.sum(exp_scores, axis=1), 1)
def _compute_output_layer_delta(self, label, predicted):
"""
Compute deltas of the output layer, using cross-entropy cost function.
        :param label: array-like, shape = (n_classes, )
        :param predicted: array-like, shape = (n_classes, )
:return:
"""
dscores = np.array(predicted)
dscores[np.where(label == 1)] -= 1
return dscores
def predict_proba(self, X):
"""
Predicts probability distribution of classes for each sample in the given data.
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
return super(SupervisedDBNClassification, self).predict(X)
def predict_proba_dict(self, X):
"""
Predicts probability distribution of classes for each sample in the given data.
Returns a list of dictionaries, one per sample. Each dict contains {label_1: prob_1, ..., label_j: prob_j}
:param X: array-like, shape = (n_samples, n_features)
:return:
"""
if len(X.shape) == 1: # It is a single sample
X = np.expand_dims(X, 0)
predicted_probs = self.predict_proba(X)
result = []
num_of_data, num_of_labels = predicted_probs.shape
for i in range(num_of_data):
# key : label
# value : predicted probability
dict_prob = {}
for j in range(num_of_labels):
dict_prob[self.idx_to_label_map[j]] = predicted_probs[i][j]
result.append(dict_prob)
return result
def predict(self, X):
probs = self.predict_proba(X)
indexes = np.argmax(probs, axis=1)
return self._transform_network_format_to_labels(indexes)
def _determine_num_output_neurons(self, labels):
"""
Given labels, compute the needed number of output units.
:param labels: shape = (n_samples, )
:return:
"""
return len(np.unique(labels))
def _compute_loss(self, probs, label):
"""
Computes categorical cross-entropy loss
:param probs:
:param label:
:return:
"""
return -np.log(probs[np.where(label == 1)])
class SupervisedDBNRegression(NumPyAbstractSupervisedDBN, RegressorMixin):
"""
This class implements a Deep Belief Network for regression problems.
"""
def _transform_labels_to_network_format(self, labels):
"""
Returns the same labels since regression case does not need to convert anything.
:param labels: array-like, shape = (n_samples, targets)
:return:
"""
return labels
def _compute_output_units(self, vector_visible_units):
"""
Compute activations of output units.
:param vector_visible_units: array-like, shape = (n_features, )
:return:
"""
v = vector_visible_units
return np.dot(self.W, v) + self.b
def _compute_output_units_matrix(self, matrix_visible_units):
"""
Compute activations of output units.
:param matrix_visible_units: shape = (n_samples, n_features)
:return:
"""
return np.transpose(np.dot(self.W, np.transpose(matrix_visible_units)) + self.b[:, np.newaxis])
def _compute_output_layer_delta(self, label, predicted):
"""
Compute deltas of the output layer for the regression case, using common (one-half) squared-error cost function.
        :param label: array-like, shape = (n_targets, )
        :param predicted: array-like, shape = (n_targets, )
:return:
"""
return -(label - predicted)
def _determine_num_output_neurons(self, labels):
"""
Given labels, compute the needed number of output units.
:param labels: shape = (n_samples, n_targets)
:return:
"""
if len(labels.shape) == 1:
return 1
else:
return labels.shape[1]
def _compute_loss(self, predicted, label):
"""
Computes Mean squared error loss.
:param predicted:
:param label:
:return:
"""
error = predicted - label
return error * error
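# --- Editor's illustrative sketch (not part of the original library) ---
# The classifier above one-hot encodes labels and applies a softmax to the output scores.
# The self-contained helper below reproduces those two steps with plain NumPy so the
# conventions are easy to verify in isolation; all names are hypothetical, and the sorted
# label->index map is only for determinism (the class itself assigns indices in order of
# first appearance).
def _demo_one_hot_and_softmax():
    import numpy as np
    labels = ['cat', 'dog', 'cat', 'bird']
    classes = sorted(set(labels))                    # ['bird', 'cat', 'dog']
    one_hot = np.zeros((len(labels), len(classes)))
    for i, label in enumerate(labels):
        one_hot[i, classes.index(label)] = 1.0       # e.g. 'cat' -> [0 1 0]
    scores = np.array([2.0, 1.0, 0.1])
    exp_scores = np.exp(scores - scores.max())       # shift for numerical stability
    softmax = exp_scores / exp_scores.sum()          # entries sum to 1
    return one_hot, softmax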
| 37.717029
| 120
| 0.607767
|
72f230128b35d42d251f2f26bf132592048ab623
| 8,507
|
py
|
Python
|
script_ROC_all8.py
|
Majeed7/L1LR
|
de6523d3881f52493f24da6386231d6315351dea
|
[
"MIT"
] | 4
|
2021-05-13T16:50:44.000Z
|
2022-02-08T10:48:13.000Z
|
script_ROC_all8.py
|
Majeed7/L1LR
|
de6523d3881f52493f24da6386231d6315351dea
|
[
"MIT"
] | null | null | null |
script_ROC_all8.py
|
Majeed7/L1LR
|
de6523d3881f52493f24da6386231d6315351dea
|
[
"MIT"
] | null | null | null |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
from sklearn.datasets import load_svmlight_file
from sklearn.metrics import auc, roc_curve
plt.rcParams['figure.dpi'] = 300
plt.rcParams["font.family"] = "Arial"
plt.rcParams['font.size'] = 24
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
if __name__ == "__main__":
method_names = ['Gauss-Seidel','Shooting','Gauss-Southwell','Grafting','SubGradient','Max-K SubGradient','epsL1','Log-Barrier','SmoothL1 (short-cut)','SmoothL1 (continuation)','EM','SQP','ProjectionL1','InteriorPoint','Orthant-Wise','Pattern-Search','Projected SubGradient', 'sklearn', 'Proposed Method']
selected_methods = ['Gauss-Seidel','Shooting','Log-Barrier','ProjectionL1','InteriorPoint', 'sklearn', 'Proposed Method']
## matlab results
score_a1a = 1.0 - sio.loadmat("./results/a1a.mat")["score"]
score_a9a = 1.0 - sio.loadmat("./results/a9a.mat")["score"]
score_splice = 1.0 - sio.loadmat("./results/splice.mat")["score"]
score_ijcnn1 = 1.0 - sio.loadmat("./results/ijcnn1.mat")["score"]
score_liver = 1.0 - sio.loadmat("./results/liver-disorders.mat")["score"]
score_madelon = 1.0 - sio.loadmat("./results/madelon.mat")["score"]
score_leu = 1.0 - sio.loadmat("./results/leu.mat")["score"]
score_gisette = 1.0 - sio.loadmat("./results/gisette.mat")["score"]
print(score_a1a.shape)
print(score_a9a.shape)
print(score_splice.shape)
print(score_ijcnn1.shape)
print(score_liver.shape)
print(score_leu.shape)
    print(score_madelon.shape)
    print(score_gisette.shape)
## add our python results
score1 = sio.loadmat("./results/ijcnn1_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/ijcnn1_ours.mat")["score_our"]
score_ijcnn1 = np.vstack([score_ijcnn1, score1, score2])
print(score_ijcnn1.shape)
score1 = sio.loadmat("./results/a1a_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/a1a_ours.mat")["score_our"]
score_a1a = np.vstack([score_a1a, score1, score2])
print(score_a1a.shape)
score1 = sio.loadmat("./results/a9a_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/a9a_ours.mat")["score_our"]
score_a9a = np.vstack([score_a9a, score1, score2])
print(score_a9a.shape)
score1 = sio.loadmat("./results/leu_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/leu_ours.mat")["score_our"]
score_leu = np.vstack([score_leu, score1, score2])
print(score_leu.shape)
score1 = sio.loadmat("./results/splice_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/splice_ours.mat")["score_our"]
score_splice = np.vstack([score_splice, score1, score2])
print(score_splice.shape)
score1 = sio.loadmat("./results/liver-disorders_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/liver-disorders_ours.mat")["score_our"]
score_liver = np.vstack([score_liver, score1, score2])
print(score_liver.shape)
score1 = sio.loadmat("./results/madelon_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/madelon_ours.mat")["score_our"]
score_madelon = np.vstack([score_madelon, score1, score2])
print(score_madelon.shape)
score1 = sio.loadmat("./results/gisette_ours.mat")["score_sk"]
score2 = sio.loadmat("./results/gisette_ours.mat")["score_our"]
score_gisette = np.vstack([score_gisette, score1, score2])
print(score_gisette.shape)
fig, ax = plt.subplots(4, 2,sharex=True,sharey=True,figsize=(16, 22))
fig.tight_layout()
lw=3.0
line_styles = ['-.','--','-',':']
_, ytrue = load_svmlight_file('./datasets/leu.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_leu[k])
roc_auc = auc(fpr, tpr)
ax[3,0].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[3,0].set_title('leukemia')
ax[3,0].legend(loc="lower right", prop={'size': 16})
ax[3,0].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/liver-disorders.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_liver[k])
roc_auc = auc(fpr, tpr)
ax[1,0].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[1,0].set_title('liver-disorders')
ax[1,0].legend(loc="lower right", prop={'size': 16})
ax[1,0].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/madelon.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_madelon[k])
roc_auc = auc(fpr, tpr)
ax[0,1].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[0,1].set_title('madelon')
ax[0,1].legend(loc="lower right", prop={'size': 16})
ax[0,1].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/splice.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_splice[k])
roc_auc = auc(fpr, tpr)
ax[0,0].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[0,0].set_title('splice')
ax[0,0].legend(loc="lower right", prop={'size': 16})
ax[0,0].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/ijcnn1.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_ijcnn1[k])
roc_auc = auc(fpr, tpr)
ax[1,1].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[1,1].set_title('ijcnn1')
ax[1,1].legend(loc="lower right", prop={'size': 16})
ax[1,1].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/a1a.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_a1a[k])
roc_auc = auc(fpr, tpr)
ax[2,0].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[2,0].set_title('a1a')
ax[2,0].legend(loc="lower right", prop={'size': 16})
ax[2,0].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/a9a.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_a9a[k])
roc_auc = auc(fpr, tpr)
ax[2,1].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[2,1].set_title('a9a')
ax[2,1].legend(loc="lower right", prop={'size': 16})
ax[2,1].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
_, ytrue = load_svmlight_file('./datasets/gisette_scale.t')
ytrue[ytrue==-1] = 0
for k, name in enumerate(method_names):
if name not in selected_methods:
continue
fpr, tpr, _ = roc_curve(ytrue, score_gisette[k])
roc_auc = auc(fpr, tpr)
ax[3,1].plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})', linestyle=line_styles[k % len(line_styles)])
ax[3,1].set_title('gisette')
ax[3,1].legend(loc="lower right", prop={'size': 16})
ax[3,1].plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
fig.text(0.5, 0.001, 'False Positive Rate', ha='center',fontsize=28)
fig.text(0.001, 0.5, 'True Positive Rate', va='center', rotation='vertical',fontsize=28)
plt.setp(ax, xlim=[0.0, 1.0], ylim=[0.0, 1.05])
    plt.savefig('./Chart_ROC_all8.png', format='png', bbox_inches='tight')
    plt.savefig('./Chart_ROC_all8.eps', format='eps', bbox_inches='tight')
plt.show()
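# --- Editor's illustrative sketch (not used by the script above) ---
# The eight per-dataset blocks differ only in the test file path, the score matrix,
# the subplot and the title, so they could be driven by a single helper such as the
# one below; the parameter names are hypothetical and simply mirror the variables
# already defined in the script.
def plot_dataset_roc(ax_panel, test_path, scores, title,
                     method_names, selected_methods, line_styles, lw=3.0):
    _, ytrue = load_svmlight_file(test_path)
    ytrue[ytrue == -1] = 0
    for k, name in enumerate(method_names):
        if name not in selected_methods:
            continue
        fpr, tpr, _ = roc_curve(ytrue, scores[k])
        roc_auc = auc(fpr, tpr)
        ax_panel.plot(fpr, tpr, lw=lw, label=f'{name} (area = {roc_auc:.3f})',
                      linestyle=line_styles[k % len(line_styles)])
    ax_panel.set_title(title)
    ax_panel.legend(loc="lower right", prop={'size': 16})
    ax_panel.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')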
| 42.964646
| 308
| 0.630187
|
4cbd38a2e018585b35ec403d0bd2827b307eafef
| 2,954
|
py
|
Python
|
scripts/OrbitalDFTU_Searcher.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 67
|
2015-01-31T07:44:55.000Z
|
2022-03-21T21:43:34.000Z
|
scripts/OrbitalDFTU_Searcher.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 13
|
2016-06-03T19:07:51.000Z
|
2022-03-31T04:20:40.000Z
|
scripts/OrbitalDFTU_Searcher.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 37
|
2015-01-22T15:37:23.000Z
|
2022-03-21T15:38:10.000Z
|
#!/usr/bin/env python
import os
import argparse
import logging
import pychemia
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Orbital DFT+U Searcher Manager')
parser.add_argument('-host', type=str, help='Hostname or IP of the Mongo Server (default: localhost)',
required=False, metavar='hostname')
parser.add_argument('-dbname', type=str, help='Name of Database', required=True, metavar='name')
parser.add_argument('-ssl', help='Use SSL (default: False)', action='store_true')
parser.add_argument('-generation_size', type=int, help='Generation Size (default: 32)', metavar='N', default=32)
parser.add_argument('-abinit_input', type=str, help='Path to Abinit input file', metavar='path',
default='abinit.in')
parser.add_argument('-new', help='Create new database (default: False)', action='store_true')
parser.add_argument('-debug', help='Activate debug mode (default: False)', action='store_true')
parser.add_argument('-clean', help='Clean database before start (default: False)', action='store_true')
args = parser.parse_args()
if args.debug:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
logging.basicConfig(level=loglevel)
logger = logging.getLogger('pychemia')
logger.addHandler(logging.NullHandler())
logger.setLevel(loglevel)
if args.host is None:
host = 'localhost'
else:
host = args.host
print("Host: %s" % host)
print("Database Name: %s" % args.dbname)
print("SSL: %s" % args.ssl)
print("Create new database: %s" % args.new)
print("Generation Size: %s" % args.generation_size)
print("Abinit input: %s" % args.abinit_input)
if not os.path.exists(args.abinit_input):
raise ValueError("ERROR: File %s not found" % args.abinit_input)
if args.new:
print("Creating new database")
admin_name = input('Admin Username:')
admin_passwd = input('Admin Password:')
user = input('Username:')
if user == '':
user = None
passwd = None
else:
passwd = input('Password:')
if args.ssl is None:
ssl = False
else:
ssl = args.ssl
if args.new:
pcdb = pychemia.db.create_database(name=args.dbname, admin_name=admin_name, admin_passwd=admin_passwd,
user_name=user, user_passwd=passwd, host=host, ssl=args.ssl)
else:
dbsettings = {'host': host, 'name': args.dbname, 'user': user, 'passwd': passwd}
pcdb = pychemia.db.get_database(dbsettings)
if args.clean:
pcdb.clean()
popu = pychemia.population.orbitaldftu.OrbitalDFTU(pcdb, input_path=args.abinit_input)
searcher = pychemia.searcher.FireFly(popu, generation_size=args.generation_size)
print("Starting search...")
searcher.run()
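# Example invocation (editor's illustrative note; the database name and input path are
# placeholders, not values taken from the repository):
#   python OrbitalDFTU_Searcher.py -dbname dftu_search -abinit_input abinit.in -generation_size 16
# With -new the script first prompts for admin and user credentials and creates the database.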
| 36.02439
| 116
| 0.634733
|
5a2d512aa04de90482885b223683fafd85ad42db
| 4,227
|
py
|
Python
|
som_ae/somae_train.py
|
zutotonno/SOM-VAE
|
76130589a47955f55af414a6e21c8d35d293a211
|
[
"MIT"
] | null | null | null |
som_ae/somae_train.py
|
zutotonno/SOM-VAE
|
76130589a47955f55af414a6e21c8d35d293a211
|
[
"MIT"
] | null | null | null |
som_ae/somae_train.py
|
zutotonno/SOM-VAE
|
76130589a47955f55af414a6e21c8d35d293a211
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
import tensorflow as tf
from somae_model import SOMAE
import pandas as pd
# data_set = "/home/aritacco/SOM_AE/SOM-VAE/data/training_dataset.obj"
dataset_train = pd.read_csv('../data/HAPT_Dataset/Train/X_train.txt', sep=' ', header=None)
dataset_test = pd.read_csv('../data/HAPT_Dataset/Test/X_test.txt', sep=' ', header=None)
def get_data_generator():
"""Creates a data generator for the training.
    This factory takes no arguments; it closes over the module-level
    data_train/labels_train and data_val/labels_val arrays defined below.
Returns:
generator: Data generator for the batches."""
def batch_generator(mode="train", batch_size=100):
"""Generator for the data batches.
Args:
mode (str): Mode in ['train', 'val'] that decides which data set the generator
samples from (default: 'train').
batch_size (int): The size of the batches (default: 100).
Yields:
np.array: Data batch.
"""
assert mode in ["train", "val"], "The mode should be in {train, val}."
if mode=="train":
images = data_train.copy()
labels = labels_train.copy()
elif mode=="val":
images = data_val.copy()
labels = labels_val.copy()
while True:
indices = np.random.permutation(np.arange(len(images)))
images = images[indices]
labels = labels[indices]
for i in range(len(images)//batch_size):
yield images[i*batch_size:(i+1)*batch_size]
return batch_generator
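# Usage sketch (editor's addition): get_data_generator() returns the batch_generator
# factory, so training batches are drawn roughly like this (the names mirror the
# module-level arrays defined below; nothing here is executed):
#   train_gen = get_data_generator()(mode="train", batch_size=100)
#   x_batch = next(train_gen)   # shape: (batch_size, n_features)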
# with open(data_set, 'rb') as som_dump:
# _dataset = pickle.load(som_dump)
# dataset = _dataset['datasetNorm'].astype('float32')
# data = dataset.reshape(-1,288*3,1)
data_train = np.array(dataset_train)
labels_train = np.array(dataset_train)
data_val = np.array(dataset_test)
labels_val = np.array(dataset_test)
# numSamples = data.shape[0]
# numTrainSamples = int(numSamples*0.75)
# data_train = data[:numTrainSamples]
# labels_train = data[:numTrainSamples]
# data_val = data[numTrainSamples:numTrainSamples+10000]
# labels_val = data[numTrainSamples:numTrainSamples+10000]
input_length = data_train.shape[1]
input_channels = 1
latent_dim = 64
som_dim=[4,4]
encoder_hidden_size= 16
learning_rate = 0.0005
alpha = 1.0
beta = 0.9
gamma = 1.8
tau = 1.4
decay_factor = 0.9
batch_size =100
x = tf.placeholder(tf.float32, shape=[None, input_length, input_channels])
data_generator = get_data_generator()
# from keras.layers import Input, Dense
# from keras.models import Model
# encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# # with tf.device('/gpu:0'):
# input_img = tf.keras.Input(shape=(784,))
# encoded = tf.keras.layers.Dense(encoding_dim, activation='relu')(input_img)
# decoded = tf.keras.layers.Dense(784, activation='sigmoid')(encoded)
# autoencoder = tf.keras.models.Model(input_img, decoded)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Use a single session that actually carries the GPU-growth option; previously a
# configured session was created and then immediately shadowed by an unconfigured one.
with tf.Session(config=config) as sess:
model = SOMAE(inputs=x, latent_dim=latent_dim, encoder_hidden_size=encoder_hidden_size, som_dim=som_dim, learning_rate=learning_rate, decay_factor=decay_factor,
input_length=input_length, input_channels=input_channels, alpha=alpha, beta=beta, gamma=gamma,
tau=tau)
# print(model.encoder().summary())
# print(model.decoder().summary())
input_data = tf.keras.Input(shape=(input_length, input_channels, ), name='enc_input')
h_1 = tf.keras.layers.LSTM(latent_dim, activation="relu", name='input2hid')(input_data)
encoded_data = tf.keras.layers.Dense(encoder_hidden_size*latent_dim, activation="relu", name='hid2enc')(h_1)
nearest_neuron_layer = tf.keras.layers.Lambda(model.winning_unit)(encoded_data)
h_2 = tf.keras.layers.Dense(latent_dim, activation="relu",name='enc2hid')(nearest_neuron_layer)
decoded = tf.keras.layers.Dense(input_length*input_channels, activation="linear", name='hid2dec')(h_2)
autoencoder = tf.keras.models.Model(inputs=input_data, outputs=decoded)
print('OK')
| 35.822034
| 164
| 0.688432
|
18917d030b1a7b090dd6b8a26789f93b921718d6
| 7,125
|
py
|
Python
|
core/self_supervision_summarization_cat_batch.py
|
hbdat/eccv20_Multi_Task_Procedure_Learning
|
9212f321e12cafb12030b982b4873fc7ddec8799
|
[
"MIT"
] | 2
|
2020-09-08T19:53:17.000Z
|
2021-01-20T04:57:09.000Z
|
core/self_supervision_summarization_cat_batch.py
|
hbdat/eccv20_Multi_Task_Procedure_Learning
|
9212f321e12cafb12030b982b4873fc7ddec8799
|
[
"MIT"
] | 7
|
2021-02-02T12:29:56.000Z
|
2021-12-09T17:55:41.000Z
|
core/self_supervision_summarization_cat_batch.py
|
hbdat/eccv20_Multi_Task_Procedure_Learning
|
9212f321e12cafb12030b982b4873fc7ddec8799
|
[
"MIT"
] | 3
|
2021-02-10T13:31:49.000Z
|
2021-05-31T07:08:33.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 17:09:50 2019
@author: Warmachine
"""
import numpy as np
import sklearn
import torch
import scipy
import pdb
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.externals import joblib
class SelfSupervisionSummarization:
def __init__(self,M,repNum,dim = 512):
print("!!!!!!!!! CAT BATCH New !!!!!!!!!")
self.M = M
self.dim = dim
self.flush()
#self.kmeans = KMeans(n_clusters=M, init='k-means++', max_iter=1000, n_init=50, random_state=0)
        self.kmeans = MiniBatchKMeans(n_clusters=M, init='k-means++', max_iter=1000, n_init=50, random_state=0, batch_size=256*20)  # each video has around 256 frames and there are ~12 tasks, so a batch_size of 256*20 covers every category, especially since videos are loaded in alternation
self.repNum = repNum
self.reps = {}
self.assignments = {}
def save(self,path):
joblib.dump(self.kmeans, path)
def load(self,path):
self.kmeans = joblib.load(path)
def flush(self):
self.dict_fbar_seg = {} #data buffer that hold all the aggregate attention feature of each video segment
self.dict_labels = {}
self.dict_video_lens = {}
self.dict_video_idx = {}
self.v_counter = {}
def add_video(self,fbar_seg,video_name,cat):
assert fbar_seg.size(0)==1
video_name = video_name[0]
if len(fbar_seg.size()) == 3: #[1,n,d]
fbar_seg = fbar_seg[0]
if fbar_seg.size(-1) != self.dim:
fbar_seg = torch.transpose(fbar_seg,1,0)
assert fbar_seg.size(-1) == self.dim
if cat not in self.dict_fbar_seg:
self.dict_fbar_seg[cat]= []
self.dict_labels[cat]= []
self.dict_video_lens[cat] = []
self.dict_video_idx[cat]= {}
self.v_counter[cat] = 0
self.dict_fbar_seg[cat].append(fbar_seg.cpu().numpy())
self.dict_video_lens[cat].append(fbar_seg.size(0))
if video_name in self.dict_video_idx[cat]:
raise Exception('Reload a video twice {}'.format(video_name))
self.dict_video_idx[cat][video_name] = self.v_counter[cat]
self.v_counter[cat] += 1
def get_key_step_label(self,video_name,cat):
assert len(video_name)==1
video_name = video_name[0]
idx = self.dict_video_idx[cat][video_name]
return self.dict_labels[cat][idx]
def foward(self):
for cat in self.dict_fbar_seg:
all_fbar = np.concatenate(self.dict_fbar_seg[cat],axis = 0)
Y = all_fbar
# Apply kmeans to data to get centroids
if Y.shape[0] > self.M:
self.kmeans.fit(Y)
else:
print("Skip cat {} that cannot be fitted".format(cat))
X = self.kmeans.cluster_centers_ # X is the M x d array of M centers in d-dimension
# Compute similarity between X and Y
S = -scipy.spatial.distance.cdist(X, Y, metric='euclidean')
# Run subset selection
# repNum: number of representative centers
# reps: representative centers
# assignments: assignments of segments to representative centers
self.reps[cat], self.assignments[cat] = self.run_ss(S,self.repNum)
all_keystep_labels = self.reps[cat][self.assignments[cat]]
# pdb.set_trace()
assert len(self.dict_labels[cat]) == 0
accum_len = 0
for l in self.dict_video_lens[cat]:
step_key_label = all_keystep_labels[accum_len:accum_len+l]
assert step_key_label.shape[0] == l
## format back to torch
step_key_label = torch.from_numpy(step_key_label[np.newaxis])
## format back to torch
self.dict_labels[cat].append(step_key_label)
accum_len += l
return None, None
def predict(self,fbar,cat):
if len(fbar.size()) == 3: #[1,n,d]
fbar = fbar[0]
if fbar.size(-1) != self.dim:
fbar = torch.transpose(fbar,1,0)
assert fbar.size(-1) == self.dim
fbar= fbar.cpu().numpy()
Y = fbar
# Get centroid without applying kmeans
X = self.kmeans.cluster_centers_ # X is the M x d array of M centers in d-dimension
# Compute similarity between X and Y
S = -scipy.spatial.distance.cdist(X, Y, metric='euclidean')
# Run subset selection
# repNum: number of representative centers
# reps: representative centers
# assignments: assignments of segments to representative centers
cost, assgn = self.ss_cost(S, self.reps[cat])
keystep_labels = self.reps[cat][assgn]
return torch.from_numpy(self.reps[cat]), torch.from_numpy(keystep_labels[np.newaxis])
################# Ehsan code #################
    # Function that takes the similarity matrix between k-means centers and segment features
    # and returns the set of representative centers and assignments to representatives
# S: similarity matrix between X and Y
# repNum: number of representatives from X
def run_ss(self,S,repNum):
N = S.shape[0]
active_set = np.empty(0)
remaining_set = np.array(list(set(range(N)) - set(active_set)))
cost1 = -float('inf')
best_cost = -float('inf')
assignment = np.array([0, N])
for iter in range(repNum):
for i in range(len(remaining_set)):
element = remaining_set[i]
[cost2, assignment2] = self.ss_cost(S, np.append(active_set,element).astype(int))
if (cost2 > best_cost):
best_cost = cost2
best_index = element
best_assignment = assignment2
if (best_cost > cost1):
active_set = np.append(active_set, best_index)
remaining_set = np.array(list( set(range(N)) - set(active_set) ))
cost1 = best_cost
assignment = best_assignment
else:
break
return active_set.astype(int), assignment.astype(int)
# Function to compute the best assignment for a given active set
# S: similarity matrix between X and Y
# aset: subset of indices from X
def ss_cost(self,S, aset):
N = S.shape[0]
#[v, assgn] = torch.max(S[aset,:],0)
v = np.ndarray.max(S[aset,:], 0)
assgn = np.ndarray.argmax(S[aset,:], 0)
#cost = sum(v).detach().numpy()
cost = sum(v)
return cost, assgn
################# Ehsan code #################
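# --- Editor's illustrative sketch (not part of the original module) ---
# ss_cost() above scores a candidate set of representatives by assigning every segment
# to its most similar selected center and summing those similarities. The tiny
# self-contained example below reproduces that scoring rule on a toy similarity matrix;
# all names are hypothetical.
def _demo_subset_selection_cost():
    import numpy as np
    S = np.array([[0.9, 0.1, 0.2],
                  [0.2, 0.8, 0.7]])          # 2 candidate centers x 3 segments
    aset = np.array([0, 1])                  # indices of the selected centers
    best = S[aset, :].max(axis=0)            # best similarity per segment: [0.9, 0.8, 0.7]
    assignments = S[aset, :].argmax(axis=0)  # which center each segment goes to: [0, 1, 1]
    return best.sum(), assignments           # cost = 2.4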
| 37.698413
| 280
| 0.556912
|
b1f95f237c6b77d2d814bfefa7ace3a320fd30b4
| 2,020
|
py
|
Python
|
7-assets/past-student-repos/Whiteboard-Pairing-master/BSTFromArray/model_solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Whiteboard-Pairing-master/BSTFromArray/model_solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Whiteboard-Pairing-master/BSTFromArray/model_solution.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
import math
def create_minimal_BST(sorted_array):
return create_minimal_BST_helper(sorted_array, 0, len(sorted_array) - 1)
def create_minimal_BST_helper(sorted_array, left, right):
if right < left:
return None
mid = math.floor((left + right) / 2)
node = BinaryTreeNode(sorted_array[mid])
node.left = create_minimal_BST_helper(sorted_array, left, mid - 1)
node.right = create_minimal_BST_helper(sorted_array, mid + 1, right)
return node
class BinaryTreeNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Helper function to validate that the created tree is a valid BST
def is_BST(root):
node_and_bounds_stack = []
node_and_bounds_stack.append({"node": root, "lower_bound": -math.inf, "upper_bound": math.inf})
while node_and_bounds_stack != []:
node_and_bounds = node_and_bounds_stack.pop()
node = node_and_bounds["node"]
lower_bound = node_and_bounds["lower_bound"]
upper_bound = node_and_bounds["upper_bound"]
if node.value <= lower_bound or node.value >= upper_bound:
return False
if node.left != None:
node_and_bounds_stack.append({"node": node.left, "lower_bound": lower_bound, "upper_bound": node.value})
if node.right != None:
node_and_bounds_stack.append({"node": node.right, "lower_bound": node.value, "upper_bound": upper_bound})
return True
# Helper function to check the max height of a BST
def max_depth(node):
if node == None: return 0
return 1 + max(max_depth(node.left), max_depth(node.right))
# Some tests
sorted_array = [1, 2, 3, 4, 5, 6, 7]
bst = create_minimal_BST(sorted_array)
print(is_BST(bst)) # should print True
print(max_depth(bst)) # should print 3
sorted_array = [4, 10, 11, 18, 42, 43, 47, 49, 55, 67, 79, 89, 90, 95, 98, 100]
bst = create_minimal_BST(sorted_array)
print(is_BST(bst)) # should print True
print(max_depth(bst)) # should print 5
| 31.076923
| 117
| 0.679208
|
06f7c6efa27850f78bb5173faadb684fdaa9a35c
| 595
|
py
|
Python
|
wagtailsharing/checks.py
|
fabrique/wagtail-sharing
|
7943029c8c8a412da6bd0918c8e0a2fd78f76210
|
[
"CC0-1.0"
] | 43
|
2017-01-10T17:26:02.000Z
|
2022-03-23T08:09:50.000Z
|
wagtailsharing/checks.py
|
fabrique/wagtail-sharing
|
7943029c8c8a412da6bd0918c8e0a2fd78f76210
|
[
"CC0-1.0"
] | 25
|
2017-01-13T20:27:56.000Z
|
2022-03-10T15:55:51.000Z
|
wagtailsharing/checks.py
|
fabrique/wagtail-sharing
|
7943029c8c8a412da6bd0918c8e0a2fd78f76210
|
[
"CC0-1.0"
] | 12
|
2017-01-31T20:47:50.000Z
|
2021-11-03T09:38:06.000Z
|
from django.apps import apps
from django.core.checks import Error, register
@register()
def modeladmin_installed_check(app_configs, **kwargs):
errors = []
MODELADMIN_APP = "wagtail.contrib.modeladmin"
if not apps.is_installed(MODELADMIN_APP):
error_hint = "Is '{}' in settings.INSTALLED_APPS?".format(
MODELADMIN_APP
)
errors.append(
Error(
"wagtail-sharing requires the Wagtail ModelAdmin app.",
hint=error_hint,
id="wagtailsharing.E001",
)
)
return errors
| 24.791667
| 71
| 0.605042
|
a0f0a01da72ea5567d5c573efd99daf952d027b6
| 384
|
py
|
Python
|
bmlfs/examples/k_means.py
|
nguyentritai2906/bmlfs
|
cf5128065289caae9f38a29bbb762bcf2fe7fd3a
|
[
"MIT"
] | null | null | null |
bmlfs/examples/k_means.py
|
nguyentritai2906/bmlfs
|
cf5128065289caae9f38a29bbb762bcf2fe7fd3a
|
[
"MIT"
] | null | null | null |
bmlfs/examples/k_means.py
|
nguyentritai2906/bmlfs
|
cf5128065289caae9f38a29bbb762bcf2fe7fd3a
|
[
"MIT"
] | null | null | null |
from sklearn import datasets
from bmlfs.unsupervised_learning import KMeans
from bmlfs.utils import Plot
def main():
X, y = datasets.make_blobs()
classifier = KMeans(k=3)
y_pred = classifier.predict(X)
p = Plot()
p.plot_in_2d(X, y_pred, title="K-Means Clustering")
p.plot_in_2d(X, y, title="Groundtruth Clustering")
if __name__ == "__main__":
main()
| 20.210526
| 55
| 0.690104
|
054596bd1c94edc76b12f49c95534fb3ec97b048
| 622
|
py
|
Python
|
saleor/shipping/utils.py
|
ammogcoder/saleor
|
e4160652b68de002b51708a775050c95bfd1a3a5
|
[
"BSD-3-Clause"
] | 1
|
2018-03-17T02:41:15.000Z
|
2018-03-17T02:41:15.000Z
|
saleor/shipping/utils.py
|
ammogcoder/saleor
|
e4160652b68de002b51708a775050c95bfd1a3a5
|
[
"BSD-3-Clause"
] | 86
|
2018-03-08T14:19:19.000Z
|
2018-05-12T14:55:16.000Z
|
saleor/shipping/utils.py
|
ammogcoder/saleor
|
e4160652b68de002b51708a775050c95bfd1a3a5
|
[
"BSD-3-Clause"
] | 2
|
2018-03-05T12:29:10.000Z
|
2018-09-28T12:40:52.000Z
|
from prices import PriceRange
from .models import ShippingMethodCountry
def get_shipment_options(country_code):
shipping_methods_qs = ShippingMethodCountry.objects.select_related(
'shipping_method')
shipping_methods = shipping_methods_qs.filter(country_code=country_code)
if not shipping_methods.exists():
shipping_methods = shipping_methods_qs.filter(country_code='')
if shipping_methods:
shipping_methods = shipping_methods.values_list('price', flat=True)
return PriceRange(
min_price=min(shipping_methods), max_price=max(shipping_methods))
return None
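# Usage sketch (editor's addition; the country code is illustrative):
#   price_range = get_shipment_options('PL')
#   if price_range is not None:
#       print(price_range.min_price, price_range.max_price)
# A None result means no shipping methods are configured either for the country or as
# the country-agnostic fallback.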
| 36.588235
| 77
| 0.763666
|
4aba4e4b33271e6f867460a257c527185adba79c
| 17
|
py
|
Python
|
projects/pycountry/test.py
|
quinn-dougherty/python-on-nix
|
910d3f6554acd4a4ef0425ebccd31104dccb283c
|
[
"Unlicense"
] | 25
|
2021-10-30T19:54:59.000Z
|
2022-03-29T06:11:02.000Z
|
projects/pycountry/test.py
|
quinn-dougherty/python-on-nix
|
910d3f6554acd4a4ef0425ebccd31104dccb283c
|
[
"Unlicense"
] | 21
|
2021-10-19T01:09:38.000Z
|
2022-03-24T16:08:53.000Z
|
projects/pycountry/test.py
|
quinn-dougherty/python-on-nix
|
910d3f6554acd4a4ef0425ebccd31104dccb283c
|
[
"Unlicense"
] | 3
|
2022-01-25T20:25:13.000Z
|
2022-03-08T02:58:50.000Z
|
import pycountry
| 8.5
| 16
| 0.882353
|
65c2301a3c797e0440d0905ff47deaadab035ee4
| 3,561
|
py
|
Python
|
numpy_demo/distutils/fcompiler/ibm.py
|
mpmkp2020/numpy_demo
|
796262e06c84b7e9aa446b244a3faf3891d9ece1
|
[
"BSD-3-Clause"
] | null | null | null |
numpy_demo/distutils/fcompiler/ibm.py
|
mpmkp2020/numpy_demo
|
796262e06c84b7e9aa446b244a3faf3891d9ece1
|
[
"BSD-3-Clause"
] | null | null | null |
numpy_demo/distutils/fcompiler/ibm.py
|
mpmkp2020/numpy_demo
|
796262e06c84b7e9aa446b244a3faf3891d9ece1
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import re
import sys
import subprocess
from numpy_demo.distutils.fcompiler import FCompiler
from numpy_demo.distutils.exec_command import find_executable
from numpy_demo.distutils.misc_util import make_temp_file
from distutils import log
compilers = ['IBMFCompiler']
class IBMFCompiler(FCompiler):
compiler_type = 'ibm'
description = 'IBM XL Fortran Compiler'
version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
#IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
executables = {
'version_cmd' : ["<F77>", "-qversion"],
'compiler_f77' : ["xlf"],
'compiler_fix' : ["xlf90", "-qfixed"],
'compiler_f90' : ["xlf90"],
'linker_so' : ["xlf95"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_version(self,*args,**kwds):
version = FCompiler.get_version(self,*args,**kwds)
if version is None and sys.platform.startswith('aix'):
# use lslpp to find out xlf version
lslpp = find_executable('lslpp')
xlf = find_executable('xlf')
if os.path.exists(xlf) and os.path.exists(lslpp):
try:
o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
except (OSError, subprocess.CalledProcessError):
pass
else:
m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
if m: version = m.group('version')
xlf_dir = '/etc/opt/ibmcmp/xlf'
if version is None and os.path.isdir(xlf_dir):
# linux:
# If the output of xlf does not contain version info
# (that's the case with xlf 8.1, for instance) then
# let's try another method:
l = sorted(os.listdir(xlf_dir))
l.reverse()
l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
if l:
from distutils.version import LooseVersion
self.version = version = LooseVersion(l[0])
return version
def get_flags(self):
return ['-qextname']
def get_flags_debug(self):
return ['-g']
def get_flags_linker_so(self):
opt = []
if sys.platform=='darwin':
opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
else:
opt.append('-bshared')
version = self.get_version(ok_status=[0, 40])
if version is not None:
if sys.platform.startswith('aix'):
xlf_cfg = '/etc/xlf.cfg'
else:
xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
log.info('Creating '+new_cfg)
with open(xlf_cfg, 'r') as fi:
crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
for line in fi:
m = crt1_match(line)
if m:
fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
else:
fo.write(line)
fo.close()
opt.append('-F'+new_cfg)
return opt
def get_flags_opt(self):
return ['-O3']
if __name__ == '__main__':
from numpy_demo.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler(compiler='ibm').get_version())
| 36.336735
| 137
| 0.550126
|
d7125fd37625311ba75775ad492e00fec4e10d85
| 1,016
|
py
|
Python
|
OMDB.py
|
Hydrayt777/OMDb-Bot
|
79273f188e326adab628a3c4aa48957ba9c82bc8
|
[
"MIT"
] | null | null | null |
OMDB.py
|
Hydrayt777/OMDb-Bot
|
79273f188e326adab628a3c4aa48957ba9c82bc8
|
[
"MIT"
] | null | null | null |
OMDB.py
|
Hydrayt777/OMDb-Bot
|
79273f188e326adab628a3c4aa48957ba9c82bc8
|
[
"MIT"
] | null | null | null |
#sheriyenna
import requests
from info import API_KEY
user = {"User-Agent":"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36 Edg/87.0.664.57"}
def get_movie_info(query):
try:
url = f'http://www.omdbapi.com/?apikey={API_KEY}&t={query}'
resp = requests.get(url, headers=user).json()
poster=resp['Poster']
id=resp['imdbID']
text=f"""📀 𝖳𝗂𝗍𝗅𝖾 : <b><u>{resp['Title']}</u></b>
⏱️ 𝖱𝗎𝗇𝗍𝗂𝗆𝖾 : <b>{resp['Runtime']}</b>
🌟 𝖱𝖺𝗍𝗂𝗇𝗀 : <b>{resp['imdbRating']}/10</b>
🗳️ 𝖵𝗈𝗍𝖾𝗌 : <b>{resp['imdbVotes']}</b>
📆 𝖱𝖾𝗅𝖾𝖺𝗌𝖾 : <b>{resp['Released']}</b>
🎭 𝖦𝖾𝗇𝗋𝖾 : <b>{resp['Genre']}</b>
🎙 𝖫𝖺𝗇𝗀𝗎𝖺𝗀𝖾 : <b>{resp['Language']}</b>
🌐 𝖢𝗈𝗎𝗇𝗍𝗋𝗒 : <b>{resp['Country']}</b>
🎥 𝖣𝗂𝗋𝖾𝖼𝗍𝗈𝗋𝗌 : <b>{resp['Director']}</b>
📝 𝖶𝗋𝗂𝗍𝖾𝗋𝗌 : <b>{resp['Writer']}</b>
🔆 𝖲𝗍𝖺𝗋𝗌 : <b>{resp['Actors']}</b>
🗒 𝖯𝗅𝗈𝗍 : <code>{resp['Plot']}</code>"""
        # Return the assembled caption plus the poster URL and IMDb id; previously the
        # values were built inside the try block but never returned.
        return text, poster, id
    except Exception as error:
print(error)
| 29.028571
| 174
| 0.564961
|
51114255ff318fd0c777cec01507e2d188978c03
| 905
|
py
|
Python
|
treeio/core/trash/urls.py
|
Andrea-MariaDB-2/treeio
|
f50ab9bae93f7a0a062b5316485a7bbeb4b6ac4e
|
[
"MIT"
] | 242
|
2015-01-01T15:08:23.000Z
|
2022-01-19T21:14:24.000Z
|
treeio/core/trash/urls.py
|
J4CODE/treeio
|
bae3115f4015aad2cbc5ab45572232ceec990495
|
[
"MIT"
] | 52
|
2015-01-05T09:13:17.000Z
|
2018-12-26T14:52:43.000Z
|
treeio/core/trash/urls.py
|
J4CODE/treeio
|
bae3115f4015aad2cbc5ab45572232ceec990495
|
[
"MIT"
] | 99
|
2015-01-09T23:28:14.000Z
|
2021-12-30T09:19:51.000Z
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Core module Administration panel URLs
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('treeio.core.trash.views',
url(r'^(\.(?P<response_format>\w+))?/?$',
'index', name='core_trash'),
url(r'^index(\.(?P<response_format>\w+))?/?$',
'index', name='core_trash_index'),
# Actions
url(r'^delete/(?P<object_id>\d+)(\.(?P<response_format>\w+))?/?$',
'object_delete', name='core_trash_object_delete'),
url(r'^untrash/(?P<object_id>\d+)(\.(?P<response_format>\w+))?/?$',
'object_untrash', name='core_trash_object_untrash'),
)
| 37.708333
| 90
| 0.496133
|
b5cd030c68aff6a3458acbea6ccf34897629d672
| 614
|
py
|
Python
|
live_notes/exp/nb_lesson82.py
|
timdavidlee/fastai_dl_p2_2019
|
760a28d4dc320848ba28dfd7146ff6bab51499e9
|
[
"MIT"
] | null | null | null |
live_notes/exp/nb_lesson82.py
|
timdavidlee/fastai_dl_p2_2019
|
760a28d4dc320848ba28dfd7146ff6bab51499e9
|
[
"MIT"
] | null | null | null |
live_notes/exp/nb_lesson82.py
|
timdavidlee/fastai_dl_p2_2019
|
760a28d4dc320848ba28dfd7146ff6bab51499e9
|
[
"MIT"
] | null | null | null |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/lesson82.ipynb
# standard libraries
from pathlib import Path
from IPython.core.debugger import set_trace
import pickle, gzip, math, torch, matplotlib as mpl
import matplotlib.pyplot as plt
# datasets
from fastai import datasets
# basic pytorch
from torch import tensor
MNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl'
def near(a,b):
return torch.allclose(a, b, rtol=1e-3, atol=1e-5)
def test_near(a, b):
    # `test` is not defined in this file; it appears to come from the previously exported
    # notebook module in the fastai course layout (e.g. exp/nb_lesson81.py), which is
    # expected to be star-imported alongside this one.
    test(a, b, near)
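# Sanity-check sketch (editor's addition): with rtol=1e-3 and atol=1e-5, values that
# differ by ~1e-4 on a unit scale are treated as "near", e.g.
#   near(tensor([1.0000]), tensor([1.0001]))   # -> True
#   near(tensor([1.0]), tensor([1.1]))         # -> False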
| 23.615385
| 56
| 0.623779
|
92b35e1e0f94463fe35b18ddd8a172dd1ece07f3
| 3,754
|
py
|
Python
|
travis_pypi_setup.py
|
mitodl/epithet
|
4f95054fbdfbae0e9d6db2e3309993d00a8a6867
|
[
"MIT"
] | 6
|
2017-05-25T18:30:44.000Z
|
2019-08-13T20:39:34.000Z
|
travis_pypi_setup.py
|
mitodl/epithet
|
4f95054fbdfbae0e9d6db2e3309993d00a8a6867
|
[
"MIT"
] | 2
|
2021-03-25T21:39:52.000Z
|
2021-11-15T17:46:46.000Z
|
travis_pypi_setup.py
|
mitodl/epithet
|
4f95054fbdfbae0e9d6db2e3309993d00a8a6867
|
[
"MIT"
] | 1
|
2019-08-13T20:39:39.000Z
|
2019-08-13T20:39:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file
"""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'phildini/epithet'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key, with work-around for keys using
incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Update the deploy section of the .travis.yml file
to use the given encrypted password.
"""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
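# Typical invocation (editor's illustrative note; the repo slug is a placeholder):
#   python travis_pypi_setup.py --repo youruser/yourrepo
# The script fetches the repository's Travis public key, RSA-encrypts the PyPI password
# with PKCS#1 v1.5, and writes the base64-encoded result into the deploy.password.secure
# field of .travis.yml.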
| 30.520325
| 79
| 0.70032
|
d6ce734893d926e7e0d155223ff70c8f29c03196
| 268
|
py
|
Python
|
niyopolymers/niyopolymers/doctype/interview_round_feedback/interview_round_feedback.py
|
venku31/niyopolymers
|
f150ee591d2ea10720d8e98c5f6abf7c6e2edb2d
|
[
"MIT"
] | null | null | null |
niyopolymers/niyopolymers/doctype/interview_round_feedback/interview_round_feedback.py
|
venku31/niyopolymers
|
f150ee591d2ea10720d8e98c5f6abf7c6e2edb2d
|
[
"MIT"
] | null | null | null |
niyopolymers/niyopolymers/doctype/interview_round_feedback/interview_round_feedback.py
|
venku31/niyopolymers
|
f150ee591d2ea10720d8e98c5f6abf7c6e2edb2d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Atriina and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class InterviewRoundFeedback(Document):
pass
| 24.363636
| 49
| 0.783582
|
1f62db1216ca574207b9b8e070e4691f02426f64
| 131,162
|
py
|
Python
|
tests/hwsim/test_fst_module.py
|
waittrue/wireless
|
3c64f015dc62aec4da0b696f45cc4bcf41594c5d
|
[
"Unlicense"
] | 1
|
2016-04-22T19:32:57.000Z
|
2016-04-22T19:32:57.000Z
|
tests/hwsim/test_fst_module.py
|
Acidburn0zzz/third_party-hostap
|
0542463c4de76fde6e8164f75b3a52ce0ddd8087
|
[
"Unlicense"
] | null | null | null |
tests/hwsim/test_fst_module.py
|
Acidburn0zzz/third_party-hostap
|
0542463c4de76fde6e8164f75b3a52ce0ddd8087
|
[
"Unlicense"
] | null | null | null |
# FST functionality tests
# Copyright (c) 2015, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import struct
import subprocess
import time
import os
import re
import hwsim_utils
from hwsim import HWSimRadio
import hostapd
from wpasupplicant import WpaSupplicant
import fst_test_common
import fst_module_aux
from utils import alloc_fail, HwsimSkip
#enum - bad parameter types
bad_param_none = 0
bad_param_session_add_no_params = 1
bad_param_group_id = 2
bad_param_session_set_no_params = 3
bad_param_session_set_unknown_param = 4
bad_param_session_id = 5
bad_param_old_iface = 6
bad_param_new_iface = 7
bad_param_negative_llt = 8
bad_param_zero_llt = 9
bad_param_llt_too_big = 10
bad_param_llt_nan = 11
bad_param_peer_addr = 12
bad_param_session_initiate_no_params = 13
bad_param_session_initiate_bad_session_id = 14
bad_param_session_initiate_with_no_new_iface_set = 15
bad_param_session_initiate_with_bad_peer_addr_set = 16
bad_param_session_initiate_request_with_bad_stie = 17
bad_param_session_initiate_response_with_reject = 18
bad_param_session_initiate_response_with_bad_stie = 19
bad_param_session_initiate_response_with_zero_llt = 20
bad_param_session_initiate_stt_no_response = 21
bad_param_session_initiate_concurrent_setup_request = 22
bad_param_session_transfer_no_params = 23
bad_param_session_transfer_bad_session_id = 24
bad_param_session_transfer_setup_skipped = 25
bad_param_session_teardown_no_params = 26
bad_param_session_teardown_bad_session_id = 27
bad_param_session_teardown_setup_skipped = 28
bad_param_session_teardown_bad_fsts_id = 29
bad_param_names = ("None",
"No params passed to session add",
"Group ID",
"No params passed to session set",
"Unknown param passed to session set",
"Session ID",
"Old interface name",
"New interface name",
"Negative LLT",
"Zero LLT",
"LLT too big",
"LLT is not a number",
"Peer address",
"No params passed to session initiate",
"Session ID",
"No new_iface was set",
"Peer address",
"Request with bad st ie",
"Response with reject",
"Response with bad st ie",
"Response with zero llt",
"No response, STT",
"Concurrent setup request",
"No params passed to session transfer",
"Session ID",
"Session setup skipped",
"No params passed to session teardown",
"Bad session",
"Session setup skipped",
"Bad fsts_id")
def fst_start_session(apdev, test_params, bad_param_type, start_on_ap,
peer_addr = None):
"""This function makes the necessary preparations and the adds and sets a
session using either correct or incorrect parameters depending on the value
of bad_param_type. If the call ends as expected (with session being
successfully added and set in case of correct parameters or with the
expected exception in case of incorrect parameters), the function silently
exits. Otherwise, it throws an exception thus failing the test."""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
bad_parameter_detected = False
exception_already_raised = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if start_on_ap:
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
else:
initiator = sta1
responder = ap1
new_iface = sta2.ifname()
new_peer_addr = sta2.get_actual_peer_addr()
initiator.add_peer(responder, peer_addr, new_peer_addr)
group_id = None
if bad_param_type == bad_param_group_id:
group_id = '-1'
elif bad_param_type == bad_param_session_add_no_params:
group_id = ''
initiator.set_fst_parameters(group_id=group_id)
sid = initiator.add_session()
if bad_param_type == bad_param_session_set_no_params:
res = initiator.set_session_param(None)
if not res.startswith("OK"):
raise Exception("Session set operation failed")
elif bad_param_type == bad_param_session_set_unknown_param:
res = initiator.set_session_param("bad_param=1")
if not res.startswith("OK"):
raise Exception("Session set operation failed")
else:
if bad_param_type == bad_param_session_initiate_with_no_new_iface_set:
new_iface = None
elif bad_param_type == bad_param_new_iface:
new_iface = 'wlan12'
old_iface = None if bad_param_type != bad_param_old_iface else 'wlan12'
llt = None
if bad_param_type == bad_param_negative_llt:
llt = '-1'
elif bad_param_type == bad_param_zero_llt:
llt = '0'
elif bad_param_type == bad_param_llt_too_big:
llt = '4294967296' #0x100000000
elif bad_param_type == bad_param_llt_nan:
llt = 'nan'
elif bad_param_type == bad_param_session_id:
sid = '-1'
initiator.set_fst_parameters(llt=llt)
initiator.configure_session(sid, new_iface, old_iface)
except Exception, e:
if e.args[0].startswith("Cannot add FST session with groupid"):
if bad_param_type == bad_param_group_id or bad_param_type == bad_param_session_add_no_params:
bad_parameter_detected = True
elif e.args[0].startswith("Cannot set FST session new_ifname:"):
if bad_param_type == bad_param_new_iface:
bad_parameter_detected = True
elif e.args[0].startswith("Session set operation failed"):
if (bad_param_type == bad_param_session_set_no_params or
bad_param_type == bad_param_session_set_unknown_param):
bad_parameter_detected = True
elif e.args[0].startswith("Cannot set FST session old_ifname:"):
if (bad_param_type == bad_param_old_iface or
bad_param_type == bad_param_session_id or
bad_param_type == bad_param_session_set_no_params):
bad_parameter_detected = True
elif e.args[0].startswith("Cannot set FST session llt:"):
if (bad_param_type == bad_param_negative_llt or
bad_param_type == bad_param_llt_too_big or
bad_param_type == bad_param_llt_nan):
bad_parameter_detected = True
elif e.args[0].startswith("Cannot set FST session peer address:"):
if bad_param_type == bad_param_peer_addr:
bad_parameter_detected = True
if not bad_parameter_detected:
# The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Bad parameter was detected (%s)" % bad_param_names[bad_param_type])
else:
if bad_param_type == bad_param_none or bad_param_type == bad_param_zero_llt:
logger.info("Success. Session added and set")
else:
exception_text = ""
if bad_param_type == bad_param_peer_addr:
exception_text = "Failure. Bad parameter was not detected (Peer address == %s)" % ap1.get_new_peer_addr()
else:
exception_text = "Failure. Bad parameter was not detected (%s)" % bad_param_names[bad_param_type]
raise Exception(exception_text)
else:
print "Failure. Unexpected exception"
def fst_initiate_session(apdev, test_params, bad_param_type, init_on_ap):
"""This function makes the necessary preparations and then adds, sets and
initiates a session using either correct or incorrect parameters at each
stage depending on the value of bad_param_type. If the call ends as expected
(with session being successfully added, set and initiated in case of correct
parameters or with the expected exception in case of incorrect parameters),
the function silently exits. Otherwise it throws an exception thus failing
the test."""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
bad_parameter_detected = False
exception_already_raised = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
# This call makes sure FstHostapd singleton object is created and, as a
# result, the global control interface is registered (this is done from
# the constructor).
ap1.get_global_instance()
if init_on_ap:
initiator = ap1
responder = sta1
new_iface = ap2.ifname() if bad_param_type != bad_param_session_initiate_with_no_new_iface_set else None
new_peer_addr = ap2.get_actual_peer_addr()
resp_newif = sta2.ifname()
else:
initiator = sta1
responder = ap1
new_iface = sta2.ifname() if bad_param_type != bad_param_session_initiate_with_no_new_iface_set else None
new_peer_addr = sta2.get_actual_peer_addr()
resp_newif = ap2.ifname()
peeraddr = None if bad_param_type != bad_param_session_initiate_with_bad_peer_addr_set else '10:DE:AD:DE:AD:11'
initiator.add_peer(responder, peeraddr, new_peer_addr)
if bad_param_type == bad_param_session_initiate_response_with_zero_llt:
initiator.set_fst_parameters(llt='0')
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
if bad_param_type == bad_param_session_initiate_no_params:
sid = ''
elif bad_param_type == bad_param_session_initiate_bad_session_id:
sid = '-1'
if bad_param_type == bad_param_session_initiate_request_with_bad_stie:
actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
initiator.send_test_session_setup_request(str(actual_fsts_id), "bad_new_band")
responder.wait_for_session_event(5)
elif bad_param_type == bad_param_session_initiate_response_with_reject:
initiator.send_session_setup_request(sid)
initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
setup_event = responder.wait_for_session_event(5, [],
['EVENT_FST_SETUP'])
if not 'id' in setup_event:
raise Exception("No session id in FST setup event")
responder.send_session_setup_response(str(setup_event['id']),
"reject")
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
if event['new_state'] != "INITIAL" or event['reason'] != "REASON_REJECT":
raise Exception("Response with reject not handled as expected")
bad_parameter_detected = True
elif bad_param_type == bad_param_session_initiate_response_with_bad_stie:
initiator.send_session_setup_request(sid)
initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
responder.wait_for_session_event(5, [], ['EVENT_FST_SETUP'])
actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
responder.send_test_session_setup_response(str(actual_fsts_id),
"accept", "bad_new_band")
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
if event['new_state'] != "INITIAL" or event['reason'] != "REASON_ERROR_PARAMS":
raise Exception("Response with bad STIE not handled as expected")
bad_parameter_detected = True
elif bad_param_type == bad_param_session_initiate_response_with_zero_llt:
initiator.initiate_session(sid, "accept")
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
if event['new_state'] != "TRANSITION_DONE":
raise Exception("Response reception for a session with llt=0 not handled as expected")
bad_parameter_detected = True
elif bad_param_type == bad_param_session_initiate_stt_no_response:
initiator.send_session_setup_request(sid)
initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
responder.wait_for_session_event(5, [], ['EVENT_FST_SETUP'])
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
if event['new_state'] != "INITIAL" or event['reason'] != "REASON_STT":
raise Exception("No response scenario not handled as expected")
bad_parameter_detected = True
elif bad_param_type == bad_param_session_initiate_concurrent_setup_request:
responder.add_peer(initiator)
resp_sid = responder.add_session()
responder.configure_session(resp_sid, resp_newif)
initiator.send_session_setup_request(sid)
actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
responder.send_test_session_setup_request(str(actual_fsts_id))
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
initiator_addr = initiator.get_own_mac_address()
responder_addr = responder.get_own_mac_address()
if initiator_addr < responder_addr:
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
if event['new_state'] != "INITIAL" or event['reason'] != "REASON_SETUP":
raise Exception("Concurrent setup scenario not handled as expected")
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SETUP"])
# The incoming setup request received by the initiator has
# priority over the one sent previously by the initiator itself
# because the initiator's MAC address is numerically lower than
# the one of the responder. Thus, the initiator should generate
# an FST_SETUP event.
else:
event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
if event['new_state'] != "INITIAL" or event['reason'] != "REASON_STT":
raise Exception("Concurrent setup scenario not handled as expected")
# The incoming setup request was dropped at the initiator
# because its MAC address is numerically bigger than the one of
                # the responder. Thus, the initiator continues to wait for a
# setup response until the STT event fires.
bad_parameter_detected = True
else:
initiator.initiate_session(sid, "accept")
except Exception, e:
if e.args[0].startswith("Cannot initiate fst session"):
if bad_param_type != bad_param_none:
bad_parameter_detected = True
elif e.args[0].startswith("No FST-EVENT-SESSION received"):
if bad_param_type == bad_param_session_initiate_request_with_bad_stie:
bad_parameter_detected = True
if not bad_parameter_detected:
            # The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Bad parameter was detected (%s)" % bad_param_names[bad_param_type])
else:
if bad_param_type == bad_param_none:
logger.info("Success. Session initiated")
else:
raise Exception("Failure. Bad parameter was not detected (%s)" % bad_param_names[bad_param_type])
else:
print "Failure. Unexpected exception"
def fst_transfer_session(apdev, test_params, bad_param_type, init_on_ap,
rsn=False):
"""This function makes the necessary preparations and then adds, sets,
initiates and attempts to transfer a session using either correct or
incorrect parameters at each stage depending on the value of bad_param_type.
    If the call ends as expected, the function silently exits. Otherwise, it
throws an exception thus failing the test."""
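    # A typical invocation, mirroring the test_fst_*_transfer_session* wrappers
    # further below:
    #   fst_transfer_session(apdev, test_params, bad_param_none, True)
    # runs a successful AP-initiated session transfer.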
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev, rsn=rsn)
bad_parameter_detected = False
exception_already_raised = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2, rsn=rsn)
# This call makes sure FstHostapd singleton object is created and, as a
# result, the global control interface is registered (this is done from
# the constructor).
ap1.get_global_instance()
if init_on_ap:
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
else:
initiator = sta1
responder = ap1
new_iface = sta2.ifname()
new_peer_addr = sta2.get_actual_peer_addr()
initiator.add_peer(responder, new_peer_addr = new_peer_addr)
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
if bad_param_type != bad_param_session_transfer_setup_skipped:
initiator.initiate_session(sid, "accept")
if bad_param_type == bad_param_session_transfer_no_params:
sid = ''
elif bad_param_type == bad_param_session_transfer_bad_session_id:
sid = '-1'
initiator.transfer_session(sid)
except Exception, e:
if e.args[0].startswith("Cannot transfer fst session"):
if bad_param_type != bad_param_none:
bad_parameter_detected = True
if not bad_parameter_detected:
# The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Bad parameter was detected (%s)" % bad_param_names[bad_param_type])
else:
if bad_param_type == bad_param_none:
logger.info("Success. Session transferred")
else:
raise Exception("Failure. Bad parameter was not detected (%s)" % bad_param_names[bad_param_type])
else:
print "Failure. Unexpected exception"
def fst_tear_down_session(apdev, test_params, bad_param_type, init_on_ap):
"""This function makes the necessary preparations and then adds, sets, and
initiates a session. It then issues a tear down command using either
correct or incorrect parameters at each stage. If the call ends as expected,
the function silently exits. Otherwise, it throws an exception thus failing
the test."""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
bad_parameter_detected = False
exception_already_raised = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
# This call makes sure FstHostapd singleton object is created and, as a
# result, the global control interface is registered (this is done from
# the constructor).
ap1.get_global_instance()
if init_on_ap:
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
else:
initiator = sta1
responder = ap1
new_iface = sta2.ifname()
new_peer_addr = sta2.get_actual_peer_addr()
initiator.add_peer(responder, new_peer_addr = new_peer_addr)
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
if bad_param_type != bad_param_session_teardown_setup_skipped:
initiator.initiate_session(sid, "accept")
if bad_param_type == bad_param_session_teardown_bad_fsts_id:
initiator.send_test_tear_down('-1')
responder.wait_for_session_event(5)
else:
if bad_param_type == bad_param_session_teardown_no_params:
sid = ''
elif bad_param_type == bad_param_session_teardown_bad_session_id:
sid = '-1'
initiator.teardown_session(sid)
except Exception, e:
if e.args[0].startswith("Cannot tear down fst session"):
if (bad_param_type == bad_param_session_teardown_no_params or
bad_param_type == bad_param_session_teardown_bad_session_id or
bad_param_type == bad_param_session_teardown_setup_skipped):
bad_parameter_detected = True
elif e.args[0].startswith("No FST-EVENT-SESSION received"):
if bad_param_type == bad_param_session_teardown_bad_fsts_id:
bad_parameter_detected = True
if not bad_parameter_detected:
# The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Bad parameter was detected (%s)" % bad_param_names[bad_param_type])
else:
if bad_param_type == bad_param_none:
logger.info("Success. Session torn down")
else:
raise Exception("Failure. Bad parameter was not detected (%s)" % bad_param_names[bad_param_type])
else:
print "Failure. Unexpected exception"
#enum - remove session scenarios
remove_scenario_no_params = 0
remove_scenario_bad_session_id = 1
remove_scenario_non_established_session = 2
remove_scenario_established_session = 3
remove_scenario_names = ("No params",
"Bad session id",
"Remove non-established session",
"Remove established session")
def fst_remove_session(apdev, test_params, remove_session_scenario, init_on_ap):
"""This function attempts to remove a session at various stages of its
formation, depending on the value of remove_session_scenario. If the call
ends as expected, the function silently exits. Otherwise, it throws an
exception thus failing the test."""
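    # e.g. fst_remove_session(apdev, test_params,
    #                         remove_scenario_established_session, True)
    # removes a fully established AP-initiated session (see the
    # test_fst_*_remove_session* wrappers below).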
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
bad_parameter_detected = False
exception_already_raised = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
# This call makes sure FstHostapd singleton object is created and, as a
# result, the global control interface is registered (this is done from
# the constructor).
ap1.get_global_instance()
if init_on_ap:
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
else:
initiator = sta1
responder = ap1
new_iface = sta2.ifname()
new_peer_addr = sta2.get_actual_peer_addr()
initiator.add_peer(responder, new_peer_addr = new_peer_addr)
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
if remove_session_scenario != remove_scenario_no_params:
if remove_session_scenario != remove_scenario_non_established_session:
initiator.initiate_session(sid, "accept")
if remove_session_scenario == remove_scenario_no_params:
sid = ''
elif remove_session_scenario == remove_scenario_bad_session_id:
sid = '-1'
initiator.remove_session(sid)
except Exception, e:
if e.args[0].startswith("Cannot remove fst session"):
if (remove_session_scenario == remove_scenario_no_params or
remove_session_scenario == remove_scenario_bad_session_id):
bad_parameter_detected = True
elif e.args[0].startswith("No FST-EVENT-SESSION received"):
if remove_session_scenario == remove_scenario_non_established_session:
bad_parameter_detected = True
if not bad_parameter_detected:
            # The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Remove scenario ended as expected (%s)" % remove_scenario_names[remove_session_scenario])
else:
if remove_session_scenario == remove_scenario_established_session:
logger.info("Success. Session removed")
else:
raise Exception("Failure. Remove scenario ended in an unexpected way (%s)" % remove_scenario_names[remove_session_scenario])
else:
print "Failure. Unexpected exception"
#enum - frame types
frame_type_session_request = 0
frame_type_session_response = 1
frame_type_ack_request = 2
frame_type_ack_response = 3
frame_type_tear_down = 4
frame_type_names = ("Session request",
"Session Response",
"Ack request",
"Ack response",
"Tear down")
def fst_send_unexpected_frame(apdev, test_params, frame_type, send_from_ap, additional_param = ''):
"""This function creates two pairs of APs and stations, makes them connect
and then causes one side to send an unexpected FST frame of the specified
type to the other. The other side should then identify and ignore the
frame."""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
exception_already_raised = False
frame_receive_timeout = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
# This call makes sure FstHostapd singleton object is created and, as a
# result, the global control interface is registered (this is done from
# the constructor).
ap1.get_global_instance()
if send_from_ap:
sender = ap1
receiver = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
else:
sender = sta1
receiver = ap1
new_iface = sta2.ifname()
new_peer_addr = sta2.get_actual_peer_addr()
sender.add_peer(receiver, new_peer_addr = new_peer_addr)
sid=sender.add_session()
sender.configure_session(sid, new_iface)
if frame_type == frame_type_session_request:
sender.send_session_setup_request(sid)
event = receiver.wait_for_session_event(5)
if event['type'] != 'EVENT_FST_SETUP':
raise Exception("Unexpected indication: " + event['type'])
elif frame_type == frame_type_session_response:
#fsts_id doesn't matter, no actual session exists
sender.send_test_session_setup_response('0', additional_param)
receiver.wait_for_session_event(5)
elif frame_type == frame_type_ack_request:
#fsts_id doesn't matter, no actual session exists
sender.send_test_ack_request('0')
receiver.wait_for_session_event(5)
elif frame_type == frame_type_ack_response:
#fsts_id doesn't matter, no actual session exists
sender.send_test_ack_response('0')
receiver.wait_for_session_event(5)
elif frame_type == frame_type_tear_down:
#fsts_id doesn't matter, no actual session exists
sender.send_test_tear_down('0')
receiver.wait_for_session_event(5)
except Exception, e:
if e.args[0].startswith("No FST-EVENT-SESSION received"):
if frame_type != frame_type_session_request:
frame_receive_timeout = True
else:
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if frame_receive_timeout:
logger.info("Success. Frame was ignored (%s)" % frame_type_names[frame_type])
else:
if frame_type == frame_type_session_request:
logger.info("Success. Frame received, session created")
else:
raise Exception("Failure. Frame was not ignored (%s)" % frame_type_names[frame_type])
else:
print "Failure. Unexpected exception"
#enum - bad session transfer scenarios
bad_scenario_none = 0
bad_scenario_ack_req_session_not_set_up = 1
bad_scenario_ack_req_session_not_established_init_side = 2
bad_scenario_ack_req_session_not_established_resp_side = 3
bad_scenario_ack_req_bad_fsts_id = 4
bad_scenario_ack_resp_session_not_set_up = 5
bad_scenario_ack_resp_session_not_established_init_side = 6
bad_scenario_ack_resp_session_not_established_resp_side = 7
bad_scenario_ack_resp_no_ack_req = 8
bad_scenario_ack_resp_bad_fsts_id = 9
bad_scenario_names = ("None",
"Ack request received before the session was set up",
"Ack request received on the initiator side before session was established",
"Ack request received on the responder side before session was established",
"Ack request received with bad fsts_id",
"Ack response received before the session was set up",
"Ack response received on the initiator side before session was established",
"Ack response received on the responder side before session was established",
"Ack response received before ack request was sent",
"Ack response received with bad fsts_id")
def fst_bad_transfer(apdev, test_params, bad_scenario_type, init_on_ap):
"""This function makes the necessary preparations and then adds and sets a
    session. It then initiates it (unless instructed otherwise) and attempts
    to send one of the frames involved in the session transfer protocol,
    skipping or distorting one of the stages according to the value of the
    bad_scenario_type parameter."""
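    # e.g. fst_bad_transfer(apdev, test_params,
    #                       bad_scenario_ack_req_bad_fsts_id, True)
    # sends an Ack request with a bad fsts_id over an established AP-initiated
    # session (see the test_fst_*_ack_* wrappers below).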
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
bad_parameter_detected = False
exception_already_raised = False
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
# This call makes sure FstHostapd singleton object is created and, as a
# result, the global control interface is registered (this is done from
# the constructor).
ap1.get_global_instance()
if init_on_ap:
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
else:
initiator = sta1
responder = ap1
new_iface = sta2.ifname()
new_peer_addr = sta2.get_actual_peer_addr()
initiator.add_peer(responder, new_peer_addr = new_peer_addr)
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
if (bad_scenario_type != bad_scenario_ack_req_session_not_set_up and
bad_scenario_type != bad_scenario_ack_resp_session_not_set_up):
if (bad_scenario_type != bad_scenario_ack_req_session_not_established_init_side and
bad_scenario_type != bad_scenario_ack_resp_session_not_established_init_side and
bad_scenario_type != bad_scenario_ack_req_session_not_established_resp_side and
bad_scenario_type != bad_scenario_ack_resp_session_not_established_resp_side):
response = "accept"
else:
response = ''
initiator.initiate_session(sid, response)
if bad_scenario_type == bad_scenario_ack_req_session_not_set_up:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_request('0')
initiator.wait_for_session_event(5)
# We want to send the unexpected frame to the side that already has
# a session created
elif bad_scenario_type == bad_scenario_ack_resp_session_not_set_up:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_response('0')
initiator.wait_for_session_event(5)
# We want to send the unexpected frame to the side that already has
# a session created
elif bad_scenario_type == bad_scenario_ack_req_session_not_established_init_side:
#fsts_id doesn't matter, no actual session exists
initiator.send_test_ack_request('0')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_req_session_not_established_resp_side:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_request('0')
initiator.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_session_not_established_init_side:
#fsts_id doesn't matter, no actual session exists
initiator.send_test_ack_response('0')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_session_not_established_resp_side:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_response('0')
initiator.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_req_bad_fsts_id:
initiator.send_test_ack_request('-1')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_bad_fsts_id:
initiator.send_test_ack_response('-1')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_no_ack_req:
actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
initiator.send_test_ack_response(str(actual_fsts_id))
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
else:
raise Exception("Unknown bad scenario identifier")
except Exception, e:
if e.args[0].startswith("No FST-EVENT-SESSION received"):
bad_parameter_detected = True
if not bad_parameter_detected:
# The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Bad scenario was handled correctly (%s)" % bad_scenario_names[bad_scenario_type])
else:
raise Exception("Failure. Bad scenario was handled incorrectly (%s)" % bad_scenario_names[bad_scenario_type])
else:
print "Failure. Unexpected exception"
def test_fst_sta_connect_to_non_fst_ap(dev, apdev, test_params):
"""FST STA connecting to non-FST AP"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g",
key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (orig_sta1_mbies.startswith("FAIL") or
orig_sta2_mbies.startswith("FAIL") or
not res_sta1_mbies.startswith("FAIL") or
not res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs have not been removed on the stations")
except Exception, e:
logger.info(e)
raise
finally:
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_sta_connect_to_fst_ap(dev, apdev, test_params):
"""FST STA connecting to FST AP"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
orig_sta2_mbies = sta2.get_local_mbies()
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
time.sleep(2)
res_sta2_mbies = sta2.get_local_mbies()
if res_sta2_mbies == orig_sta2_mbies:
raise Exception("Failure. MB IEs have not been updated")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ap_connect_to_fst_sta(dev, apdev, test_params):
"""FST AP connecting to FST STA"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
orig_ap_mbies = ap1.get_local_mbies()
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
time.sleep(2)
res_ap_mbies = ap1.get_local_mbies()
if res_ap_mbies != orig_ap_mbies:
raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ap_connect_to_non_fst_sta(dev, apdev, test_params):
"""FST AP connecting to non-FST STA"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
orig_ap_mbies = ap2.get_local_mbies()
vals = dev[0].scan(None, fst_test_common.fst_test_def_freq_g)
fst_module_aux.external_sta_connect(dev[0], ap2, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_g)
time.sleep(2)
res_ap_mbies = ap2.get_local_mbies()
if res_ap_mbies != orig_ap_mbies:
raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
except Exception, e:
logger.info(e)
raise
finally:
fst_module_aux.disconnect_external_sta(dev[0], ap2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_second_sta_connect_to_non_fst_ap(dev, apdev, test_params):
"""FST STA 2nd connecting to non-FST AP"""
fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
time.sleep(2)
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (orig_sta1_mbies.startswith("FAIL") or
orig_sta2_mbies.startswith("FAIL") or
not res_sta1_mbies.startswith("FAIL") or
not res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs have not been removed on the stations")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_second_sta_connect_to_fst_ap(dev, apdev, test_params):
"""FST STA 2nd connecting to FST AP"""
fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (not orig_sta1_mbies.startswith("FAIL") or
not orig_sta2_mbies.startswith("FAIL") or
not res_sta1_mbies.startswith("FAIL") or
not res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs should have stayed non-present on the stations")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_1_of_2_stas_from_non_fst_ap(dev, apdev, test_params):
"""FST disconnect 1 of 2 STAs from non-FST AP"""
fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
sta2.disconnect_from_external_ap()
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (not orig_sta1_mbies.startswith("FAIL") or
not orig_sta2_mbies.startswith("FAIL") or
res_sta1_mbies.startswith("FAIL") or
res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs haven't reappeared on the stations")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_1_of_2_stas_from_fst_ap(dev, apdev, test_params):
"""FST disconnect 1 of 2 STAs from FST AP"""
fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
sta1.disconnect()
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (not orig_sta1_mbies.startswith("FAIL") or
not orig_sta2_mbies.startswith("FAIL") or
not res_sta1_mbies.startswith("FAIL") or
not res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs should have stayed non-present on the stations")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_2_of_2_stas_from_non_fst_ap(dev, apdev, test_params):
"""FST disconnect 2 of 2 STAs from non-FST AP"""
fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
sta1.disconnect()
time.sleep(2)
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
sta2.disconnect_from_external_ap()
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (not orig_sta1_mbies.startswith("FAIL") or
not orig_sta2_mbies.startswith("FAIL") or
res_sta1_mbies.startswith("FAIL") or
res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs haven't reappeared on the stations")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_2_of_2_stas_from_fst_ap(dev, apdev, test_params):
"""FST disconnect 2 of 2 STAs from FST AP"""
fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
try:
vals = sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
vals = sta2.scan()
freq = vals['freq']
sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
time.sleep(2)
sta2.disconnect_from_external_ap()
time.sleep(2)
orig_sta1_mbies = sta1.get_local_mbies()
orig_sta2_mbies = sta2.get_local_mbies()
sta1.disconnect()
time.sleep(2)
res_sta1_mbies = sta1.get_local_mbies()
res_sta2_mbies = sta2.get_local_mbies()
if (orig_sta1_mbies.startswith("FAIL") or
orig_sta2_mbies.startswith("FAIL") or
res_sta1_mbies.startswith("FAIL") or
res_sta2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs should have stayed present on both stations")
# Mandatory part of 8.4.2.140 Multi-band element is 24 bytes = 48 hex chars
basic_sta1_mbies = res_sta1_mbies[0:48] + res_sta1_mbies[60:108]
basic_sta2_mbies = res_sta2_mbies[0:48] + res_sta2_mbies[60:108]
if (basic_sta1_mbies != basic_sta2_mbies):
raise Exception("Failure. Basic MB IEs should have become identical on both stations")
addr_sta1_str = sta1.get_own_mac_address().replace(":", "")
addr_sta2_str = sta2.get_own_mac_address().replace(":", "")
# Mandatory part of 8.4.2.140 Multi-band element is followed by STA MAC Address field (6 bytes = 12 hex chars)
addr_sta1_mbie1 = res_sta1_mbies[48:60]
addr_sta1_mbie2 = res_sta1_mbies[108:120]
addr_sta2_mbie1 = res_sta2_mbies[48:60]
addr_sta2_mbie2 = res_sta2_mbies[108:120]
if (addr_sta1_mbie1 != addr_sta1_mbie2 or
addr_sta1_mbie1 != addr_sta2_str or
addr_sta2_mbie1 != addr_sta2_mbie2 or
addr_sta2_mbie1 != addr_sta1_str):
raise Exception("Failure. STA Address in MB IEs should have been same as the other STA's")
except Exception, e:
logger.info(e)
raise
finally:
sta1.disconnect()
sta2.disconnect_from_external_ap()
fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_non_fst_sta(dev, apdev, test_params):
"""FST disconnect non-FST STA"""
ap1, ap2, fst_sta1, fst_sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
external_sta_connected = False
try:
vals = fst_sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
fst_sta1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
vals = dev[0].scan(None, fst_test_common.fst_test_def_freq_g)
fst_module_aux.external_sta_connect(dev[0], ap2, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_g)
external_sta_connected = True
time.sleep(2)
fst_sta1.disconnect()
time.sleep(2)
orig_ap_mbies = ap2.get_local_mbies()
fst_module_aux.disconnect_external_sta(dev[0], ap2)
external_sta_connected = False
time.sleep(2)
res_ap_mbies = ap2.get_local_mbies()
if res_ap_mbies != orig_ap_mbies:
raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
except Exception, e:
logger.info(e)
raise
finally:
fst_sta1.disconnect()
if external_sta_connected:
fst_module_aux.disconnect_external_sta(dev[0], ap2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, fst_sta1, fst_sta2)
def test_fst_disconnect_fst_sta(dev, apdev, test_params):
"""FST disconnect FST STA"""
ap1, ap2, fst_sta1, fst_sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    external_sta_connected = False
try:
vals = fst_sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
fst_sta1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
vals = dev[0].scan(None, fst_test_common.fst_test_def_freq_g)
fst_module_aux.external_sta_connect(dev[0], ap2, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_g)
external_sta_connected = True
time.sleep(2)
fst_module_aux.disconnect_external_sta(dev[0], ap2)
external_sta_connected = False
time.sleep(2)
orig_ap_mbies = ap2.get_local_mbies()
fst_sta1.disconnect()
time.sleep(2)
res_ap_mbies = ap2.get_local_mbies()
if res_ap_mbies != orig_ap_mbies:
raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
except Exception, e:
logger.info(e)
raise
finally:
fst_sta1.disconnect()
if external_sta_connected:
fst_module_aux.disconnect_external_sta(dev[0], ap2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, fst_sta1, fst_sta2)
def test_fst_dynamic_iface_attach(dev, apdev, test_params):
"""FST dynamic interface attach"""
ap1 = fst_module_aux.FstAP(apdev[0]['ifname'], 'fst_11a', 'a',
fst_test_common.fst_test_def_chan_a,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt)
ap1.start()
ap2 = fst_module_aux.FstAP(apdev[1]['ifname'], 'fst_11g', 'b',
fst_test_common.fst_test_def_chan_g,
'', '', '')
ap2.start()
sta1 = fst_module_aux.FstSTA('wlan5',
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt)
sta1.start()
sta2 = fst_module_aux.FstSTA('wlan6', '', '', '')
sta2.start()
try:
orig_sta2_mbies = sta2.get_local_mbies()
orig_ap2_mbies = ap2.get_local_mbies()
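        # The two numeric arguments are presumably the per-interface llt and
        # priority values, matching the (iface, group, llt, pri) argument order
        # used for the duplicate FST-ATTACH attempt in test_fst_ap_ctrl_iface.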
sta2.send_iface_attach_request(sta2.ifname(),
fst_test_common.fst_test_def_group,
'52', '27')
event = sta2.wait_for_iface_event(5)
if event['event_type'] != 'attached':
raise Exception("Failure. Iface was not properly attached")
ap2.send_iface_attach_request(ap2.ifname(),
fst_test_common.fst_test_def_group,
'102', '77')
event = ap2.wait_for_iface_event(5)
if event['event_type'] != 'attached':
raise Exception("Failure. Iface was not properly attached")
time.sleep(2)
res_sta2_mbies = sta2.get_local_mbies()
res_ap2_mbies = ap2.get_local_mbies()
sta2.send_iface_detach_request(sta2.ifname())
event = sta2.wait_for_iface_event(5)
if event['event_type'] != 'detached':
raise Exception("Failure. Iface was not properly detached")
ap2.send_iface_detach_request(ap2.ifname())
event = ap2.wait_for_iface_event(5)
if event['event_type'] != 'detached':
raise Exception("Failure. Iface was not properly detached")
if (not orig_sta2_mbies.startswith("FAIL") or
not orig_ap2_mbies.startswith("FAIL") or
res_sta2_mbies.startswith("FAIL") or
res_ap2_mbies.startswith("FAIL")):
raise Exception("Failure. MB IEs should have appeared on the station and on the AP")
except Exception, e:
logger.info(e)
raise
finally:
ap1.stop()
ap2.stop()
sta1.stop()
sta2.stop()
# AP side FST module tests
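# Each wrapper below delegates to one of the generic helpers above with a
# specific bad_param_*/scenario constant; init_on_ap=True selects the AP side
# of the connected pair as the FST session initiator.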
def test_fst_ap_start_session(dev, apdev, test_params):
"""FST AP start session"""
fst_start_session(apdev, test_params, bad_param_none, True)
def test_fst_ap_start_session_no_add_params(dev, apdev, test_params):
"""FST AP start session - no add params"""
fst_start_session(apdev, test_params, bad_param_session_add_no_params, True)
def test_fst_ap_start_session_bad_group_id(dev, apdev, test_params):
"""FST AP start session - bad group id"""
fst_start_session(apdev, test_params, bad_param_group_id, True)
def test_fst_ap_start_session_no_set_params(dev, apdev, test_params):
"""FST AP start session - no set params"""
fst_start_session(apdev, test_params, bad_param_session_set_no_params, True)
def test_fst_ap_start_session_set_unknown_param(dev, apdev, test_params):
"""FST AP start session - set unknown param"""
fst_start_session(apdev, test_params, bad_param_session_set_unknown_param,
True)
def test_fst_ap_start_session_bad_session_id(dev, apdev, test_params):
"""FST AP start session - bad session id"""
fst_start_session(apdev, test_params, bad_param_session_id, True)
def test_fst_ap_start_session_bad_new_iface(dev, apdev, test_params):
"""FST AP start session - bad new iface"""
fst_start_session(apdev, test_params, bad_param_new_iface, True)
def test_fst_ap_start_session_bad_old_iface(dev, apdev, test_params):
"""FST AP start session - bad old iface"""
fst_start_session(apdev, test_params, bad_param_old_iface, True)
def test_fst_ap_start_session_negative_llt(dev, apdev, test_params):
"""FST AP start session - negative llt"""
fst_start_session(apdev, test_params, bad_param_negative_llt, True)
def test_fst_ap_start_session_zero_llt(dev, apdev, test_params):
"""FST AP start session - zero llt"""
fst_start_session(apdev, test_params, bad_param_zero_llt, True)
def test_fst_ap_start_session_llt_too_big(dev, apdev, test_params):
"""FST AP start session - llt too large"""
fst_start_session(apdev, test_params, bad_param_llt_too_big, True)
def test_fst_ap_start_session_invalid_peer_addr(dev, apdev, test_params):
"""FST AP start session - invalid peer address"""
fst_start_session(apdev, test_params, bad_param_peer_addr, True,
'GG:GG:GG:GG:GG:GG')
def test_fst_ap_start_session_multicast_peer_addr(dev, apdev, test_params):
"""FST AP start session - multicast peer address"""
fst_start_session(apdev, test_params, bad_param_peer_addr, True,
'01:00:11:22:33:44')
def test_fst_ap_start_session_broadcast_peer_addr(dev, apdev, test_params):
"""FST AP start session - broadcast peer address"""
fst_start_session(apdev, test_params, bad_param_peer_addr, True,
'FF:FF:FF:FF:FF:FF')
def test_fst_ap_initiate_session(dev, apdev, test_params):
"""FST AP initiate session"""
fst_initiate_session(apdev, test_params, bad_param_none, True)
def test_fst_ap_initiate_session_no_params(dev, apdev, test_params):
"""FST AP initiate session - no params"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_no_params, True)
def test_fst_ap_initiate_session_invalid_session_id(dev, apdev, test_params):
"""FST AP initiate session - invalid session id"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_bad_session_id, True)
def test_fst_ap_initiate_session_no_new_iface(dev, apdev, test_params):
"""FST AP initiate session - no new iface"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_with_no_new_iface_set, True)
def test_fst_ap_initiate_session_bad_peer_addr(dev, apdev, test_params):
"""FST AP initiate session - bad peer address"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_with_bad_peer_addr_set,
True)
def test_fst_ap_initiate_session_request_with_bad_stie(dev, apdev, test_params):
"""FST AP initiate session - request with bad stie"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_request_with_bad_stie, True)
def test_fst_ap_initiate_session_response_with_reject(dev, apdev, test_params):
"""FST AP initiate session - response with reject"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_response_with_reject, True)
def test_fst_ap_initiate_session_response_with_bad_stie(dev, apdev,
test_params):
"""FST AP initiate session - response with bad stie"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_response_with_bad_stie,
True)
def test_fst_ap_initiate_session_response_with_zero_llt(dev, apdev,
test_params):
"""FST AP initiate session - zero llt"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_response_with_zero_llt,
True)
def test_fst_ap_initiate_session_stt_no_response(dev, apdev, test_params):
"""FST AP initiate session - stt no response"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_stt_no_response, True)
def test_fst_ap_initiate_session_concurrent_setup_request(dev, apdev,
test_params):
"""FST AP initiate session - concurrent setup request"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_concurrent_setup_request,
True)
def test_fst_ap_session_request_with_no_session(dev, apdev, test_params):
"""FST AP session request with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_session_request,
True)
def test_fst_ap_session_response_accept_with_no_session(dev, apdev,
test_params):
"""FST AP session response accept with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_session_response,
True, "accept")
def test_fst_ap_session_response_reject_with_no_session(dev, apdev,
test_params):
"""FST AP session response reject with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_session_response,
True, "reject")
def test_fst_ap_ack_request_with_no_session(dev, apdev, test_params):
"""FST AP ack request with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_ack_request, True)
def test_fst_ap_ack_response_with_no_session(dev, apdev, test_params):
"""FST AP ack response with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_ack_response, True)
def test_fst_ap_tear_down_response_with_no_session(dev, apdev, test_params):
"""FST AP tear down response with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_tear_down, True)
def test_fst_ap_transfer_session(dev, apdev, test_params):
"""FST AP transfer session"""
fst_transfer_session(apdev, test_params, bad_param_none, True)
def test_fst_ap_transfer_session_no_params(dev, apdev, test_params):
"""FST AP transfer session - no params"""
fst_transfer_session(apdev, test_params,
bad_param_session_transfer_no_params, True)
def test_fst_ap_transfer_session_bad_session_id(dev, apdev, test_params):
"""FST AP transfer session - bad session id"""
fst_transfer_session(apdev, test_params,
bad_param_session_transfer_bad_session_id, True)
def test_fst_ap_transfer_session_setup_skipped(dev, apdev, test_params):
"""FST AP transfer session - setup skipped"""
fst_transfer_session(apdev, test_params,
bad_param_session_transfer_setup_skipped, True)
def test_fst_ap_ack_request_with_session_not_set_up(dev, apdev, test_params):
"""FST AP ack request with session not set up"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_req_session_not_set_up, True)
def test_fst_ap_ack_request_with_session_not_established_init_side(dev, apdev,
test_params):
"""FST AP ack request with session not established init side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_req_session_not_established_init_side,
True)
def test_fst_ap_ack_request_with_session_not_established_resp_side(dev, apdev,
test_params):
"""FST AP ack request with session not established resp side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_req_session_not_established_resp_side,
True)
def test_fst_ap_ack_request_with_bad_fsts_id(dev, apdev, test_params):
"""FST AP ack request with bad fsts id"""
fst_bad_transfer(apdev, test_params, bad_scenario_ack_req_bad_fsts_id, True)
def test_fst_ap_ack_response_with_session_not_set_up(dev, apdev, test_params):
"""FST AP ack response with session not set up"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_resp_session_not_set_up, True)
def test_fst_ap_ack_response_with_session_not_established_init_side(dev, apdev, test_params):
"""FST AP ack response with session not established init side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_resp_session_not_established_init_side,
True)
def test_fst_ap_ack_response_with_session_not_established_resp_side(dev, apdev, test_params):
"""FST AP ack response with session not established resp side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_resp_session_not_established_resp_side,
True)
def test_fst_ap_ack_response_with_no_ack_request(dev, apdev, test_params):
"""FST AP ack response with no ack request"""
fst_bad_transfer(apdev, test_params, bad_scenario_ack_resp_no_ack_req, True)
def test_fst_ap_tear_down_session(dev, apdev, test_params):
"""FST AP tear down session"""
fst_tear_down_session(apdev, test_params, bad_param_none, True)
def test_fst_ap_tear_down_session_no_params(dev, apdev, test_params):
"""FST AP tear down session - no params"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_no_params, True)
def test_fst_ap_tear_down_session_bad_session_id(dev, apdev, test_params):
"""FST AP tear down session - bad session id"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_bad_session_id, True)
def test_fst_ap_tear_down_session_setup_skipped(dev, apdev, test_params):
"""FST AP tear down session - setup skipped"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_setup_skipped, True)
def test_fst_ap_tear_down_session_bad_fsts_id(dev, apdev, test_params):
"""FST AP tear down session - bad fsts id"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_bad_fsts_id, True)
def test_fst_ap_remove_session_not_established(dev, apdev, test_params):
"""FST AP remove session - not established"""
fst_remove_session(apdev, test_params,
remove_scenario_non_established_session, True)
def test_fst_ap_remove_session_established(dev, apdev, test_params):
"""FST AP remove session - established"""
fst_remove_session(apdev, test_params,
remove_scenario_established_session, True)
def test_fst_ap_remove_session_no_params(dev, apdev, test_params):
"""FST AP remove session - no params"""
fst_remove_session(apdev, test_params, remove_scenario_no_params, True)
def test_fst_ap_remove_session_bad_session_id(dev, apdev, test_params):
"""FST AP remove session - bad session id"""
fst_remove_session(apdev, test_params, remove_scenario_bad_session_id, True)
def test_fst_ap_ctrl_iface(dev, apdev, test_params):
"""FST control interface behavior"""
hglobal = hostapd.HostapdGlobal()
start_num_groups = 0
res = hglobal.request("FST-MANAGER LIST_GROUPS")
del hglobal
if "FAIL" not in res:
start_num_groups = len(res.splitlines())
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
initiator = ap1
responder = sta1
initiator.add_peer(responder, None)
initiator.set_fst_parameters(group_id=None)
sid = initiator.add_session()
res = initiator.get_session_params(sid)
logger.info("Initial session params:\n" + str(res))
if res['state'] != 'INITIAL':
raise Exception("Unexpected state: " + res['state'])
initiator.set_fst_parameters(llt=None)
initiator.configure_session(sid, ap2.ifname(), None)
res = initiator.get_session_params(sid)
logger.info("Session params after configuration:\n" + str(res))
res = initiator.iface_peers(initiator.ifname())
logger.info("Interface peers: " + str(res))
if len(res) != 1:
raise Exception("Unexpected number of peers")
res = initiator.get_peer_mbies(initiator.ifname(),
initiator.get_new_peer_addr())
logger.info("Peer MB IEs: " + str(res))
res = initiator.list_ifaces()
logger.info("Interfaces: " + str(res))
if len(res) != 2:
raise Exception("Unexpected number of interfaces")
res = initiator.list_groups()
logger.info("Groups: " + str(res))
if len(res) != 1 + start_num_groups:
raise Exception("Unexpected number of groups")
tests = [ "LIST_IFACES unknown",
"LIST_IFACES unknown2",
"SESSION_GET 12345678",
"SESSION_SET " + sid + " unknown=foo",
"SESSION_RESPOND 12345678 foo",
"SESSION_RESPOND " + sid,
"SESSION_RESPOND " + sid + " foo",
"TEST_REQUEST foo",
"TEST_REQUEST SEND_SETUP_REQUEST",
"TEST_REQUEST SEND_SETUP_REQUEST foo",
"TEST_REQUEST SEND_SETUP_RESPONSE",
"TEST_REQUEST SEND_SETUP_RESPONSE foo",
"TEST_REQUEST SEND_ACK_REQUEST",
"TEST_REQUEST SEND_ACK_REQUEST foo",
"TEST_REQUEST SEND_ACK_RESPONSE",
"TEST_REQUEST SEND_ACK_RESPONSE foo",
"TEST_REQUEST SEND_TEAR_DOWN",
"TEST_REQUEST SEND_TEAR_DOWN foo",
"TEST_REQUEST GET_FSTS_ID",
"TEST_REQUEST GET_FSTS_ID foo",
"TEST_REQUEST GET_LOCAL_MBIES",
"TEST_REQUEST GET_LOCAL_MBIES foo",
"GET_PEER_MBIES",
"GET_PEER_MBIES ",
"GET_PEER_MBIES unknown",
"GET_PEER_MBIES unknown unknown",
"GET_PEER_MBIES unknown " + initiator.get_new_peer_addr(),
"GET_PEER_MBIES " + initiator.ifname() + " 01:ff:ff:ff:ff:ff",
"GET_PEER_MBIES " + initiator.ifname() + " 00:ff:ff:ff:ff:ff",
"GET_PEER_MBIES " + initiator.ifname() + " 00:00:00:00:00:00",
"IFACE_PEERS",
"IFACE_PEERS ",
"IFACE_PEERS unknown",
"IFACE_PEERS unknown unknown",
"IFACE_PEERS " + initiator.fst_group,
"IFACE_PEERS " + initiator.fst_group + " unknown" ]
for t in tests:
if "FAIL" not in initiator.grequest("FST-MANAGER " + t):
raise Exception("Unexpected response for invalid FST-MANAGER command " + t)
if "UNKNOWN FST COMMAND" not in initiator.grequest("FST-MANAGER unknown"):
raise Exception("Unexpected response for unknown FST-MANAGER command")
tests = [ "FST-DETACH", "FST-DETACH ", "FST-DETACH unknown",
"FST-ATTACH", "FST-ATTACH ", "FST-ATTACH unknown",
"FST-ATTACH unknown unknown" ]
for t in tests:
if "FAIL" not in initiator.grequest(t):
raise Exception("Unexpected response for invalid command " + t)
try:
            # Trying to add the same interface again needs to fail.
ap1.send_iface_attach_request(ap1.iface, ap1.fst_group,
ap1.fst_llt, ap1.fst_pri)
raise Exception("Duplicate FST-ATTACH succeeded")
except Exception, e:
if not str(e).startswith("Cannot attach"):
raise
try:
ap1.get_fsts_id_by_sid("123")
except Exception, e:
if not str(e).startswith("Cannot get fsts_id for sid"):
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ap_start_session_oom(dev, apdev, test_params):
"""FST AP setup failing due to OOM"""
ap1 = fst_module_aux.FstAP(apdev[0]['ifname'], 'fst_11a', 'a',
fst_test_common.fst_test_def_chan_a,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt)
ap1.start()
with alloc_fail(ap1, 1, "fst_iface_create"):
ap2_started = False
try:
ap2 = fst_module_aux.FstAP(apdev[1]['ifname'], 'fst_11g', 'b',
fst_test_common.fst_test_def_chan_g,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_high,
fst_test_common.fst_test_def_llt)
try:
# This will fail in fst_iface_create() OOM
ap2.start()
except:
pass
finally:
ap1.stop()
try:
ap2.stop()
except:
pass
# STA side FST module tests
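# Same wrappers as the AP-side tests above, but with init_on_ap=False so that
# the station acts as the FST session initiator.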
def test_fst_sta_start_session(dev, apdev, test_params):
"""FST STA start session"""
fst_start_session(apdev, test_params, bad_param_none, False)
def test_fst_sta_start_session_no_add_params(dev, apdev, test_params):
"""FST STA start session - no add params"""
fst_start_session(apdev, test_params, bad_param_session_add_no_params,
False)
def test_fst_sta_start_session_bad_group_id(dev, apdev, test_params):
"""FST STA start session - bad group id"""
fst_start_session(apdev, test_params, bad_param_group_id, False)
def test_fst_sta_start_session_no_set_params(dev, apdev, test_params):
"""FST STA start session - no set params"""
fst_start_session(apdev, test_params, bad_param_session_set_no_params,
False)
def test_fst_sta_start_session_set_unknown_param(dev, apdev, test_params):
"""FST STA start session - set unknown param"""
fst_start_session(apdev, test_params, bad_param_session_set_unknown_param,
False)
def test_fst_sta_start_session_bad_session_id(dev, apdev, test_params):
"""FST STA start session - bad session id"""
fst_start_session(apdev, test_params, bad_param_session_id, False)
def test_fst_sta_start_session_bad_new_iface(dev, apdev, test_params):
"""FST STA start session - bad new iface"""
fst_start_session(apdev, test_params, bad_param_new_iface, False)
def test_fst_sta_start_session_bad_old_iface(dev, apdev, test_params):
"""FST STA start session - bad old iface"""
fst_start_session(apdev, test_params, bad_param_old_iface, False)
def test_fst_sta_start_session_negative_llt(dev, apdev, test_params):
"""FST STA start session - negative llt"""
fst_start_session(apdev, test_params, bad_param_negative_llt, False)
def test_fst_sta_start_session_zero_llt(dev, apdev, test_params):
"""FST STA start session - zero llt"""
fst_start_session(apdev, test_params, bad_param_zero_llt, False)
def test_fst_sta_start_session_llt_too_big(dev, apdev, test_params):
"""FST STA start session - llt too large"""
fst_start_session(apdev, test_params, bad_param_llt_too_big, False)
def test_fst_sta_start_session_invalid_peer_addr(dev, apdev, test_params):
"""FST STA start session - invalid peer address"""
fst_start_session(apdev, test_params, bad_param_peer_addr, False,
'GG:GG:GG:GG:GG:GG')
def test_fst_sta_start_session_multicast_peer_addr(dev, apdev, test_params):
"""FST STA start session - multicast peer address"""
fst_start_session(apdev, test_params, bad_param_peer_addr, False,
'11:00:11:22:33:44')
def test_fst_sta_start_session_broadcast_peer_addr(dev, apdev, test_params):
"""FST STA start session - broadcast peer addr"""
fst_start_session(apdev, test_params, bad_param_peer_addr, False,
'FF:FF:FF:FF:FF:FF')
def test_fst_sta_initiate_session(dev, apdev, test_params):
"""FST STA initiate session"""
fst_initiate_session(apdev, test_params, bad_param_none, False)
def test_fst_sta_initiate_session_no_params(dev, apdev, test_params):
"""FST STA initiate session - no params"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_no_params, False)
def test_fst_sta_initiate_session_invalid_session_id(dev, apdev, test_params):
"""FST STA initiate session - invalid session id"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_bad_session_id, False)
def test_fst_sta_initiate_session_no_new_iface(dev, apdev, test_params):
"""FST STA initiate session - no new iface"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_with_no_new_iface_set,
False)
def test_fst_sta_initiate_session_bad_peer_addr(dev, apdev, test_params):
"""FST STA initiate session - bad peer address"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_with_bad_peer_addr_set,
False)
def test_fst_sta_initiate_session_request_with_bad_stie(dev, apdev,
test_params):
"""FST STA initiate session - request with bad stie"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_request_with_bad_stie,
False)
def test_fst_sta_initiate_session_response_with_reject(dev, apdev, test_params):
"""FST STA initiate session - response with reject"""
fst_initiate_session(apdev, test_params, bad_param_session_initiate_response_with_reject, False)
def test_fst_sta_initiate_session_response_with_bad_stie(dev, apdev, test_params):
"""FST STA initiate session - response with bad stie"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_response_with_bad_stie,
False)
def test_fst_sta_initiate_session_response_with_zero_llt(dev, apdev,
test_params):
"""FST STA initiate session - response with zero llt"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_response_with_zero_llt,
False)
def test_fst_sta_initiate_session_stt_no_response(dev, apdev, test_params):
"""FST STA initiate session - stt no response"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_stt_no_response, False)
def test_fst_sta_initiate_session_concurrent_setup_request(dev, apdev,
test_params):
"""FST STA initiate session - concurrent setup request"""
fst_initiate_session(apdev, test_params,
bad_param_session_initiate_concurrent_setup_request,
False)
def test_fst_sta_session_request_with_no_session(dev, apdev, test_params):
"""FST STA session request with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_session_request,
False)
def test_fst_sta_session_response_accept_with_no_session(dev, apdev,
test_params):
"""FST STA session response accept with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_session_response,
False, "accept")
def test_fst_sta_session_response_reject_with_no_session(dev, apdev,
test_params):
"""FST STA session response reject with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_session_response,
False, "reject")
def test_fst_sta_ack_request_with_no_session(dev, apdev, test_params):
"""FST STA ack request with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_ack_request, False)
def test_fst_sta_ack_response_with_no_session(dev, apdev, test_params):
"""FST STA ack response with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_ack_response,
False)
def test_fst_sta_tear_down_response_with_no_session(dev, apdev, test_params):
"""FST STA tear down response with no session"""
fst_send_unexpected_frame(apdev, test_params, frame_type_tear_down, False)
def test_fst_sta_transfer_session(dev, apdev, test_params):
"""FST STA transfer session"""
fst_transfer_session(apdev, test_params, bad_param_none, False)
def test_fst_sta_transfer_session_no_params(dev, apdev, test_params):
"""FST STA transfer session - no params"""
fst_transfer_session(apdev, test_params,
bad_param_session_transfer_no_params, False)
def test_fst_sta_transfer_session_bad_session_id(dev, apdev, test_params):
"""FST STA transfer session - bad session id"""
fst_transfer_session(apdev, test_params,
bad_param_session_transfer_bad_session_id, False)
def test_fst_sta_transfer_session_setup_skipped(dev, apdev, test_params):
"""FST STA transfer session - setup skipped"""
fst_transfer_session(apdev, test_params,
bad_param_session_transfer_setup_skipped, False)
def test_fst_sta_ack_request_with_session_not_set_up(dev, apdev, test_params):
"""FST STA ack request with session not set up"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_req_session_not_set_up, False)
def test_fst_sta_ack_request_with_session_not_established_init_side(dev, apdev, test_params):
"""FST STA ack request with session not established init side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_req_session_not_established_init_side,
False)
def test_fst_sta_ack_request_with_session_not_established_resp_side(dev, apdev, test_params):
"""FST STA ack request with session not established resp side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_req_session_not_established_resp_side,
False)
def test_fst_sta_ack_request_with_bad_fsts_id(dev, apdev, test_params):
"""FST STA ack request with bad fsts id"""
fst_bad_transfer(apdev, test_params, bad_scenario_ack_req_bad_fsts_id,
False)
def test_fst_sta_ack_response_with_session_not_set_up(dev, apdev, test_params):
"""FST STA ack response with session not set up"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_resp_session_not_set_up, False)
def test_fst_sta_ack_response_with_session_not_established_init_side(dev, apdev, test_params):
"""FST STA ack response with session not established init side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_resp_session_not_established_init_side,
False)
def test_fst_sta_ack_response_with_session_not_established_resp_side(dev, apdev, test_params):
"""FST STA ack response with session not established resp side"""
fst_bad_transfer(apdev, test_params,
bad_scenario_ack_resp_session_not_established_resp_side,
False)
def test_fst_sta_ack_response_with_no_ack_request(dev, apdev, test_params):
"""FST STA ack response with no ack request"""
fst_bad_transfer(apdev, test_params, bad_scenario_ack_resp_no_ack_req,
False)
def test_fst_sta_tear_down_session(dev, apdev, test_params):
"""FST STA tear down session"""
fst_tear_down_session(apdev, test_params, bad_param_none, False)
def test_fst_sta_tear_down_session_no_params(dev, apdev, test_params):
"""FST STA tear down session - no params"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_no_params, False)
def test_fst_sta_tear_down_session_bad_session_id(dev, apdev, test_params):
"""FST STA tear down session - bad session id"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_bad_session_id, False)
def test_fst_sta_tear_down_session_setup_skipped(dev, apdev, test_params):
"""FST STA tear down session - setup skipped"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_setup_skipped, False)
def test_fst_sta_tear_down_session_bad_fsts_id(dev, apdev, test_params):
"""FST STA tear down session - bad fsts id"""
fst_tear_down_session(apdev, test_params,
bad_param_session_teardown_bad_fsts_id, False)
def test_fst_sta_remove_session_not_established(dev, apdev, test_params):
"""FST STA tear down session - not established"""
fst_remove_session(apdev, test_params,
remove_scenario_non_established_session, False)
def test_fst_sta_remove_session_established(dev, apdev, test_params):
"""FST STA remove session - established"""
fst_remove_session(apdev, test_params,
remove_scenario_established_session, False)
def test_fst_sta_remove_session_no_params(dev, apdev, test_params):
"""FST STA remove session - no params"""
fst_remove_session(apdev, test_params, remove_scenario_no_params, False)
def test_fst_sta_remove_session_bad_session_id(dev, apdev, test_params):
"""FST STA remove session - bad session id"""
fst_remove_session(apdev, test_params, remove_scenario_bad_session_id,
False)
def test_fst_rsn_ap_transfer_session(dev, apdev, test_params):
"""FST RSN AP transfer session"""
fst_transfer_session(apdev, test_params, bad_param_none, True, rsn=True)
MGMT_SUBTYPE_ACTION = 13
ACTION_CATEG_FST = 18
FST_ACTION_SETUP_REQUEST = 0
FST_ACTION_SETUP_RESPONSE = 1
FST_ACTION_TEAR_DOWN = 2
FST_ACTION_ACK_REQUEST = 3
FST_ACTION_ACK_RESPONSE = 4
FST_ACTION_ON_CHANNEL_TUNNEL = 5
def hostapd_tx_and_status(hapd, msg):
hapd.set("ext_mgmt_frame_handling", "1")
hapd.mgmt_tx(msg)
ev = hapd.wait_event([ "MGMT-TX-STATUS" ], timeout=1)
if ev is None or "ok=1" not in ev:
raise Exception("No ACK")
hapd.set("ext_mgmt_frame_handling", "0")
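# A minimal sketch of how the raw FST Action payloads used below are built:
# the first octet is the Action category (FST), the second is the FST Action
# code, and any remaining octets form the action-specific body.
# fst_action_payload() is a hypothetical helper shown only for illustration;
# the tests below keep calling struct.pack() directly.
def fst_action_payload(action, body=b''):
    return struct.pack("<BB", ACTION_CATEG_FST, action) + body

# Example: the shortest possible Setup Response frame (no STIE at all):
# msg['payload'] = fst_action_payload(FST_ACTION_SETUP_RESPONSE)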
def test_fst_proto(dev, apdev, test_params):
"""FST protocol testing"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
hapd = ap1.get_instance()
sta = sta1.get_instance()
dst = sta.own_addr()
src = apdev[0]['bssid']
msg = {}
msg['fc'] = MGMT_SUBTYPE_ACTION << 4
msg['da'] = dst
msg['sa'] = src
msg['bssid'] = src
# unknown FST Action (255) received!
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST, 255)
hostapd_tx_and_status(hapd, msg)
# FST Request dropped: too short
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST)
hostapd_tx_and_status(hapd, msg)
# FST Request dropped: invalid STIE (EID)
msg['payload'] = struct.pack("<BBBLBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST, 0, 0,
163, 11, 0, 0, 0, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Request dropped: invalid STIE (Len)
msg['payload'] = struct.pack("<BBBLBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST, 0, 0,
164, 10, 0, 0, 0, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Request dropped: new and old band IDs are the same
msg['payload'] = struct.pack("<BBBLBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST, 0, 0,
164, 11, 0, 0, 0, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
ifaces = sta1.list_ifaces()
id = int(ifaces[0]['name'].split('|')[1])
# FST Request dropped: new iface not found (new_band_id mismatch)
msg['payload'] = struct.pack("<BBBLBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST, 0, 0,
164, 11, 0, 0, id + 1, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Action 'Setup Response' dropped: no session in progress found
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE)
hostapd_tx_and_status(hapd, msg)
# Create session
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
resp_newif = sta2.ifname()
peeraddr = None
initiator.add_peer(responder, peeraddr, new_peer_addr)
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
initiator.initiate_session(sid, "accept")
# FST Response dropped due to wrong state: SETUP_COMPLETION
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE)
hostapd_tx_and_status(hapd, msg)
# Too short FST Tear Down dropped
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_TEAR_DOWN)
hostapd_tx_and_status(hapd, msg)
# tear down for wrong FST Setup ID (0)
msg['payload'] = struct.pack("<BBL", ACTION_CATEG_FST,
FST_ACTION_TEAR_DOWN, 0)
hostapd_tx_and_status(hapd, msg)
# Ack received on wrong interface
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_ACK_REQUEST)
hostapd_tx_and_status(hapd, msg)
# Ack Response in inappropriate session state (SETUP_COMPLETION)
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_ACK_RESPONSE)
hostapd_tx_and_status(hapd, msg)
# Unsupported FST Action frame (On channel tunnel)
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_ON_CHANNEL_TUNNEL)
hostapd_tx_and_status(hapd, msg)
# FST Request dropped: new iface not found (new_band_id match)
# FST Request dropped due to MAC comparison
msg['payload'] = struct.pack("<BBBLBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST, 0, 0,
164, 11, 0, 0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
hapd2 = ap2.get_instance()
dst2 = sta2.get_instance().own_addr()
src2 = apdev[1]['bssid']
msg2 = {}
msg2['fc'] = MGMT_SUBTYPE_ACTION << 4
msg2['da'] = dst2
msg2['sa'] = src2
msg2['bssid'] = src2
# FST Response dropped: wlan6 is not the old iface
msg2['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE)
hostapd_tx_and_status(hapd2, msg2)
sta.dump_monitor()
group = ap1.fst_group
ap1.send_iface_detach_request(ap1.iface)
sta.flush_scan_cache()
sta.request("REASSOCIATE")
sta.wait_connected()
# FST Request dropped due to no interface connection
msg['payload'] = struct.pack("<BBBLBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_REQUEST, 0, 0,
164, 11, 0, 0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
try:
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
except:
pass
def test_fst_setup_response_proto(dev, apdev, test_params):
"""FST protocol testing for Setup Response"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
hapd = ap1.get_instance()
sta = sta1.get_instance()
dst = sta.own_addr()
src = apdev[0]['bssid']
sta1.add_peer(ap1, None, sta2.get_actual_peer_addr())
sta1.set_fst_parameters(llt='0')
sid = sta1.add_session()
sta1.configure_session(sid, sta2.ifname())
sta1.initiate_session(sid, "")
msg = {}
msg['fc'] = MGMT_SUBTYPE_ACTION << 4
msg['da'] = dst
msg['sa'] = src
msg['bssid'] = src
# Too short FST Response dropped
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE)
hostapd_tx_and_status(hapd, msg)
# FST Response dropped: invalid STIE (EID)
dialog_token = 1
status_code = 0
id = 0
msg['payload'] = struct.pack("<BBBBBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE, dialog_token,
status_code,
163, 11, 0, 0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Response dropped: invalid STIE (Len)
dialog_token = 1
status_code = 0
id = 0
msg['payload'] = struct.pack("<BBBBBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE, dialog_token,
status_code,
164, 10, 0, 0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Response dropped due to wrong dialog token
dialog_token = 123
status_code = 0
id = 0
msg['payload'] = struct.pack("<BBBBBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE, dialog_token,
status_code,
164, 11, 0, 0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Response dropped due to wrong FST Session ID
dialog_token = 1
status_code = 0
id = 1
msg['payload'] = struct.pack("<BBBBBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE, dialog_token,
status_code,
164, 11, int(sid) + 123456,
0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
# FST Response with non-zero status code
dialog_token = 1
status_code = 1
id = 1
msg['payload'] = struct.pack("<BBBBBBLBBBBBBB", ACTION_CATEG_FST,
FST_ACTION_SETUP_RESPONSE, dialog_token,
status_code,
164, 11, int(sid), 0, id, 0, 0, 0, 0, 0)
hostapd_tx_and_status(hapd, msg)
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ack_response_proto(dev, apdev, test_params):
"""FST protocol testing for Ack Response"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
hapd = ap2.get_instance()
sta = sta2.get_instance()
dst = sta.own_addr()
src = apdev[1]['bssid']
sta1.add_peer(ap1, None, sta2.get_actual_peer_addr())
sta1.set_fst_parameters(llt='0')
sid = sta1.add_session()
sta1.configure_session(sid, sta2.ifname())
s = sta1.grequest("FST-MANAGER SESSION_INITIATE "+ sid)
if not s.startswith('OK'):
raise Exception("Cannot initiate fst session: %s" % s)
ev = sta1.peer_obj.wait_gevent([ "FST-EVENT-SESSION" ], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION received")
event = fst_module_aux.parse_fst_session_event(ev)
        if event is None:
            raise Exception("Unrecognized FST event: " + ev)
if event['type'] != 'EVENT_FST_SETUP':
raise Exception("Expected FST_SETUP event, got: " + event['type'])
ev = sta1.peer_obj.wait_gevent(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION received")
event = fst_module_aux.parse_fst_session_event(ev)
        if event is None:
            raise Exception("Unrecognized FST event: " + ev)
if event['type'] != 'EVENT_FST_SESSION_STATE':
raise Exception("Expected EVENT_FST_SESSION_STATE event, got: " + event['type'])
if event['new_state'] != "SETUP_COMPLETION":
raise Exception("Expected new state SETUP_COMPLETION, got: " + event['new_state'])
hapd.set("ext_mgmt_frame_handling", "1")
s = sta1.peer_obj.grequest("FST-MANAGER SESSION_RESPOND "+ event['id'] + " accept")
if not s.startswith('OK'):
raise Exception("Error session_respond: %s" % s)
req = hapd.mgmt_rx()
if req is None:
raise Exception("No Ack Request seen")
msg = {}
msg['fc'] = MGMT_SUBTYPE_ACTION << 4
msg['da'] = dst
msg['sa'] = src
msg['bssid'] = src
# Too short FST Ack Response dropped
msg['payload'] = struct.pack("<BB", ACTION_CATEG_FST,
FST_ACTION_ACK_RESPONSE)
hapd.mgmt_tx(msg)
ev = hapd.wait_event([ "MGMT-TX-STATUS" ], timeout=1)
if ev is None or "ok=1" not in ev:
raise Exception("No ACK")
        # Ack Response for wrong FST Setup ID
msg['payload'] = struct.pack("<BBBL", ACTION_CATEG_FST,
FST_ACTION_ACK_RESPONSE,
0, int(sid) + 123456)
hostapd_tx_and_status(hapd, msg)
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ap_config_oom(dev, apdev, test_params):
"""FST AP configuration and OOM"""
ap1 = fst_module_aux.FstAP(apdev[0]['ifname'], 'fst_11a', 'a',
fst_test_common.fst_test_def_chan_a,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low)
hapd = ap1.start(return_early=True)
with alloc_fail(hapd, 1, "fst_group_create"):
res = ap1.grequest("FST-ATTACH %s %s" % (ap1.iface, ap1.fst_group))
if not res.startswith("FAIL"):
raise Exception("FST-ATTACH succeeded unexpectedly")
with alloc_fail(hapd, 1, "fst_iface_create"):
res = ap1.grequest("FST-ATTACH %s %s" % (ap1.iface, ap1.fst_group))
if not res.startswith("FAIL"):
raise Exception("FST-ATTACH succeeded unexpectedly")
with alloc_fail(hapd, 1, "fst_group_create_mb_ie"):
res = ap1.grequest("FST-ATTACH %s %s" % (ap1.iface, ap1.fst_group))
# This is allowed to complete currently
ap1.stop()
def test_fst_send_oom(dev, apdev, test_params):
"""FST send action OOM"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
hapd = ap1.get_instance()
sta = sta1.get_instance()
dst = sta.own_addr()
src = apdev[0]['bssid']
# Create session
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
resp_newif = sta2.ifname()
peeraddr = None
initiator.add_peer(responder, peeraddr, new_peer_addr)
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
with alloc_fail(hapd, 1, "fst_session_send_action"):
res = initiator.grequest("FST-MANAGER SESSION_INITIATE " + sid)
if not res.startswith("FAIL"):
raise Exception("Unexpected SESSION_INITIATE result")
res = initiator.grequest("FST-MANAGER SESSION_INITIATE " + sid)
if not res.startswith("OK"):
raise Exception("SESSION_INITIATE failed")
tests = [ "", "foo", sid, sid + " foo", sid + " foo=bar" ]
for t in tests:
res = initiator.grequest("FST-MANAGER SESSION_SET " + t)
if not res.startswith("FAIL"):
raise Exception("Invalid SESSION_SET accepted")
with alloc_fail(hapd, 1, "fst_session_send_action"):
res = initiator.grequest("FST-MANAGER SESSION_TEARDOWN " + sid)
if not res.startswith("FAIL"):
raise Exception("Unexpected SESSION_TEARDOWN result")
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_session_oom(dev, apdev, test_params):
"""FST session create OOM"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
hapd = ap1.get_instance()
sta = sta1.get_instance()
dst = sta.own_addr()
src = apdev[0]['bssid']
# Create session
initiator = ap1
responder = sta1
new_iface = ap2.ifname()
new_peer_addr = ap2.get_actual_peer_addr()
resp_newif = sta2.ifname()
peeraddr = None
initiator.add_peer(responder, peeraddr, new_peer_addr)
with alloc_fail(hapd, 1, "fst_session_create"):
sid = initiator.grequest("FST-MANAGER SESSION_ADD " + initiator.fst_group)
if not sid.startswith("FAIL"):
raise Exception("Unexpected SESSION_ADD success")
sid = initiator.add_session()
initiator.configure_session(sid, new_iface)
with alloc_fail(sta, 1, "fst_session_create"):
res = initiator.grequest("FST-MANAGER SESSION_INITIATE " + sid)
if not res.startswith("OK"):
raise Exception("Unexpected SESSION_INITIATE result")
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_attach_zero_llt(dev, apdev):
"""FST attach with llt=0"""
sta1 = fst_module_aux.FstSTA('wlan5', fst_test_common.fst_test_def_group,
"100", "0")
sta1.start()
sta1.stop()
def test_fst_session_respond_fail(dev, apdev, test_params):
"""FST-MANAGER SESSION_RESPOND failure"""
ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
try:
fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
sta1.add_peer(ap1, None, sta2.get_actual_peer_addr())
sid = sta1.add_session()
sta1.configure_session(sid, sta2.ifname())
sta1.send_session_setup_request(sid)
sta1.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
ev = ap1.wait_for_session_event(5, [], ['EVENT_FST_SETUP'])
if not 'id' in ev:
raise Exception("No session id in FST setup event")
# Disconnect STA to make SESSION_RESPOND fail due to no peer found
sta = sta1.get_instance()
sta.request("DISCONNECT")
sta.wait_disconnected()
req = "FST-MANAGER SESSION_RESPOND %s reject" % ev['id']
s = ap1.grequest(req)
if not s.startswith("FAIL"):
raise Exception("SESSION_RESPOND succeeded unexpectedly")
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def fst_session_set(dev, sid, param, value):
cmd = "FST-MANAGER SESSION_SET %s %s=%s" % (sid, param, value)
if "OK" not in dev.global_request(cmd):
raise Exception(cmd + " failed")
def fst_session_set_ap(dev, sid, param, value):
cmd = "FST-MANAGER SESSION_SET %s %s=%s" % (sid, param, value)
if "OK" not in dev.request(cmd):
raise Exception(cmd + " failed")
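# The setup tests below repeat the same four SESSION_SET calls for every
# session. A hypothetical convenience wrapper (not used by the original
# tests) would simply bundle the calls already defined above:
def fst_session_set_all(dev, sid, old_ifname, old_peer, new_ifname, new_peer):
    fst_session_set(dev, sid, "old_ifname", old_ifname)
    fst_session_set(dev, sid, "old_peer_addr", old_peer)
    fst_session_set(dev, sid, "new_ifname", new_ifname)
    fst_session_set(dev, sid, "new_peer_addr", new_peer)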
def fst_attach_ap(dev, ifname, group):
cmd = "FST-ATTACH %s %s" % (ifname, group)
if "OK" not in dev.request(cmd):
raise Exception("FST-ATTACH (AP) failed")
ev = dev.wait_event(['FST-EVENT-IFACE'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-IFACE attached (AP)")
for t in [ "attached", "ifname=" + ifname, "group=" + group ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-IFACE data (AP): " + ev)
def fst_attach_sta(dev, ifname, group):
if "OK" not in dev.global_request("FST-ATTACH %s %s" % (ifname, group)):
raise Exception("FST-ATTACH (STA) failed")
ev = dev.wait_global_event(['FST-EVENT-IFACE'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-IFACE attached (STA)")
for t in [ "attached", "ifname=" + ifname, "group=" + group ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-IFACE data (STA): " + ev)
def fst_detach_ap(dev, ifname, group):
if "OK" not in dev.request("FST-DETACH " + ifname):
raise Exception("FST-DETACH (AP) failed for " + ifname)
ev = dev.wait_event(['FST-EVENT-IFACE'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-IFACE detached (AP) for " + ifname)
for t in [ "detached", "ifname=" + ifname, "group=" + group ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-IFACE data (AP): " + ev)
def fst_detach_sta(dev, ifname, group):
dev.dump_monitor()
if "OK" not in dev.global_request("FST-DETACH " + ifname):
raise Exception("FST-DETACH (STA) failed for " + ifname)
ev = dev.wait_global_event(['FST-EVENT-IFACE'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-IFACE detached (STA) for " + ifname)
for t in [ "detached", "ifname=" + ifname, "group=" + group ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-IFACE data (STA): " + ev)
def fst_wait_event_peer_ap(dev, event, ifname, addr):
ev = dev.wait_event(['FST-EVENT-PEER'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-PEER connected (AP)")
for t in [ " " + event + " ", "ifname=" + ifname, "peer_addr=" + addr ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-PEER data (AP): " + ev)
def fst_wait_event_peer_sta(dev, event, ifname, addr):
ev = dev.wait_global_event(['FST-EVENT-PEER'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-PEER connected (STA)")
for t in [ " " + event + " ", "ifname=" + ifname, "peer_addr=" + addr ]:
if t not in ev:
raise Exception("Unexpected FST-EVENT-PEER data (STA): " + ev)
def fst_setup_req(dev, hglobal, freq, dst, req, stie, mbie="", no_wait=False):
act = req + stie + mbie
dev.request("MGMT_TX %s %s freq=%d action=%s" % (dst, dst, freq, act))
ev = dev.wait_event(['MGMT-TX-STATUS'], timeout=5)
if ev is None or "result=SUCCESS" not in ev:
raise Exception("FST Action frame not ACKed")
if no_wait:
return
while True:
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=SETUP_COMPLETION" in ev:
break
def fst_start_and_connect(apdev, group, sgroup):
hglobal = hostapd.HostapdGlobal()
if "OK" not in hglobal.request("FST-MANAGER TEST_REQUEST IS_SUPPORTED"):
raise HwsimSkip("No FST testing support")
params = { "ssid": "fst_11a", "hw_mode": "a", "channel": "36",
"country_code": "US" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
fst_attach_ap(hglobal, apdev[0]['ifname'], group)
cmd = "FST-ATTACH %s %s" % (apdev[0]['ifname'], group)
if "FAIL" not in hglobal.request(cmd):
raise Exception("Duplicated FST-ATTACH (AP) accepted")
params = { "ssid": "fst_11g", "hw_mode": "g", "channel": "1",
"country_code": "US" }
hapd2 = hostapd.add_ap(apdev[1]['ifname'], params)
fst_attach_ap(hglobal, apdev[1]['ifname'], group)
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
fst_attach_sta(wpas, wpas.ifname, sgroup)
wpas.interface_add("wlan6", set_ifname=False)
wpas2 = WpaSupplicant(ifname="wlan6")
fst_attach_sta(wpas, wpas2.ifname, sgroup)
wpas.connect("fst_11a", key_mgmt="NONE", scan_freq="5180",
wait_connect=False)
wpas.wait_connected()
fst_wait_event_peer_sta(wpas, "connected", wpas.ifname, apdev[0]['bssid'])
fst_wait_event_peer_ap(hglobal, "connected", apdev[0]['ifname'],
wpas.own_addr())
wpas2.connect("fst_11g", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
wpas2.wait_connected()
fst_wait_event_peer_sta(wpas, "connected", wpas2.ifname, apdev[1]['bssid'])
fst_wait_event_peer_ap(hglobal, "connected", apdev[1]['ifname'],
wpas2.own_addr())
return hglobal, wpas, wpas2, hapd, hapd2
def test_fst_test_setup(dev, apdev, test_params):
"""FST setup using separate commands"""
try:
_test_fst_test_setup(dev, apdev, test_params)
finally:
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def _test_fst_test_setup(dev, apdev, test_params):
group = "fstg0b"
sgroup = "fstg1b"
hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
sid = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
if "FAIL" in sid:
raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
fst_session_set(wpas, sid, "old_ifname", wpas.ifname)
fst_session_set(wpas, sid, "old_peer_addr", apdev[0]['bssid'])
fst_session_set(wpas, sid, "new_ifname", wpas2.ifname)
fst_session_set(wpas, sid, "new_peer_addr", apdev[1]['bssid'])
if "OK" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
while True:
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=SETUP_COMPLETION" in ev:
            f = re.search(r"session_id=(\d+)", ev)
if f is None:
raise Exception("No session_id in FST-EVENT-SESSION")
sid_ap = f.group(1)
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "OK" not in hglobal.request(cmd):
raise Exception("FST-MANAGER SESSION_RESPOND failed on AP")
break
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION")
if "new_state=SETUP_COMPLETION" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION")
if "event_type=EVENT_FST_ESTABLISHED" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
cmd = "FST-MANAGER SESSION_REMOVE " + sid
if "OK" not in wpas.global_request(cmd):
raise Exception("FST-MANAGER SESSION_REMOVE failed")
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION")
if "new_state=INITIAL" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data (STA): " + ev)
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=INITIAL" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data (AP): " + ev)
if "FAIL" not in wpas.global_request(cmd):
raise Exception("Duplicated FST-MANAGER SESSION_REMOVE accepted")
hglobal.request("FST-MANAGER SESSION_REMOVE " + sid_ap)
wpas.request("DISCONNECT")
wpas.wait_disconnected()
fst_wait_event_peer_sta(wpas, "disconnected", wpas.ifname,
apdev[0]['bssid'])
fst_wait_event_peer_ap(hglobal, "disconnected", apdev[0]['ifname'],
wpas.own_addr())
wpas2.request("DISCONNECT")
wpas2.wait_disconnected()
fst_wait_event_peer_sta(wpas, "disconnected", wpas2.ifname,
apdev[1]['bssid'])
fst_wait_event_peer_ap(hglobal, "disconnected", apdev[1]['ifname'],
wpas2.own_addr())
fst_detach_ap(hglobal, apdev[0]['ifname'], group)
if "FAIL" not in hglobal.request("FST-DETACH " + apdev[0]['ifname']):
raise Exception("Duplicated FST-DETACH (AP) accepted")
hapd.disable()
fst_detach_ap(hglobal, apdev[1]['ifname'], group)
hapd2.disable()
fst_detach_sta(wpas, wpas.ifname, sgroup)
fst_detach_sta(wpas, wpas2.ifname, sgroup)
def test_fst_setup_mbie_diff(dev, apdev, test_params):
"""FST setup and different MBIE in FST Setup Request"""
try:
_test_fst_setup_mbie_diff(dev, apdev, test_params)
finally:
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def _test_fst_setup_mbie_diff(dev, apdev, test_params):
group = "fstg0c"
sgroup = "fstg1c"
hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
# FST Setup Request: Category, FST Action, Dialog Token (non-zero),
# LLT (32 bits, see 10.32), Session Transition (see 8.4.2.147),
# Multi-band element (optional, see 8.4.2.140)
# Session Transition: EID, Len, FSTS ID(4), Session Control,
# New Band (Band ID, Setup, Operation), Old Band (Band ID, Setup, Operation)
# Multi-band element: EID, Len, Multi-band Control, Band ID,
# Operating Class, Channel Number, BSSID (6), Beacon Interval (2),
# TSF Offset (8), Multi-band Connection Capability, FSTSessionTimeOut,
# STA MAC Address (6, optional), Pairwise Cipher Suite Count (2, optional),
# Pairwise Cipher Suite List (4xm, optional)
# MBIE with the non-matching STA MAC Address:
req = "1200011a060000"
stie = "a40b0100000000020001040001"
mbie = "9e1c0c0200010200000004000000000000000000000000ff0200000006ff"
fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)
# MBIE without the STA MAC Address:
req = "1200011a060000"
stie = "a40b0100000000020001040001"
mbie = "9e16040200010200000004000000000000000000000000ff"
fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)
# MBIE with unsupported STA Role:
req = "1200011a060000"
stie = "a40b0100000000020001040001"
mbie = "9e16070200010200000004000000000000000000000000ff"
fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)
# MBIE with unsupported Band ID:
req = "1200011a060000"
stie = "a40b0100000000020001040001"
mbie = "9e1604ff00010200000004000000000000000000000000ff"
fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie)
# FST Setup Request without MBIE (different FSTS ID):
req = "1200011a060000"
stie = "a40b0200000000020001040001"
fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie)
# MBIE update OOM on AP
req = "1200011a060000"
stie = "a40b0100000000020001040001"
mbie = "9e16040200010200000004000000000000000000000000ff"
with alloc_fail(hapd, 1, "mb_ies_by_info"):
fst_setup_req(wpas, hglobal, 5180, apdev[0]['bssid'], req, stie, mbie,
no_wait=True)
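# Sketch of how the Session Transition IE hex blobs in the test above decode,
# assuming the little-endian layout used by test_fst_proto() ("<BBBLBBL..."):
# req "1200011a060000" is category 0x12 (FST), action 0x00 (Setup Request),
# dialog token 0x01 and a 32-bit LLT of 0x0000061a. decode_stie() is a
# hypothetical helper added only for illustration.
def decode_stie(stie_hex):
    st = bytes.fromhex(stie_hex)
    return {
        'eid': st[0],                                   # 164 = Session Transition
        'len': st[1],                                   # 11
        'fsts_id': struct.unpack_from("<L", st, 2)[0],
        'session_control': st[6],
        'new_band': tuple(st[7:10]),                    # Band ID, Setup, Operation
        'old_band': tuple(st[10:13]),                   # Band ID, Setup, Operation
    }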
def test_fst_many_setup(dev, apdev, test_params):
"""FST setup multiple times"""
try:
_test_fst_many_setup(dev, apdev, test_params)
finally:
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def _test_fst_many_setup(dev, apdev, test_params):
group = "fstg0d"
sgroup = "fstg1d"
hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
sid = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
if "FAIL" in sid:
raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
fst_session_set(wpas, sid, "old_ifname", wpas.ifname)
fst_session_set(wpas, sid, "old_peer_addr", apdev[0]['bssid'])
fst_session_set(wpas, sid, "new_ifname", wpas2.ifname)
fst_session_set(wpas, sid, "new_peer_addr", apdev[1]['bssid'])
for i in range(257):
if "OK" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
while True:
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=SETUP_COMPLETION" in ev:
                f = re.search(r"session_id=(\d+)", ev)
if f is None:
raise Exception("No session_id in FST-EVENT-SESSION")
sid_ap = f.group(1)
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "OK" not in hglobal.request(cmd):
raise Exception("FST-MANAGER SESSION_RESPOND failed on AP")
break
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (STA)")
if "new_state=SETUP_COMPLETION" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (STA)")
if "event_type=EVENT_FST_ESTABLISHED" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data: " + ev)
if "OK" not in wpas.global_request("FST-MANAGER SESSION_TEARDOWN " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
if i == 0:
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_TEARDOWN " + sid):
raise Exception("Duplicate FST-MANAGER SESSION_TEARDOWN accepted")
ev = wpas.wait_global_event(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (STA teardown -->initial)")
if "new_state=INITIAL" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data (STA): " + ev)
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP teardown -->initial)")
if "new_state=INITIAL" not in ev:
raise Exception("Unexpected FST-EVENT-SESSION data (AP): " + ev)
if "OK" not in hglobal.request("FST-MANAGER SESSION_REMOVE " + sid_ap):
raise Exception("FST-MANAGER SESSION_REMOVE (AP) failed")
if "OK" not in wpas.global_request("FST-MANAGER SESSION_REMOVE " + sid):
raise Exception("FST-MANAGER SESSION_REMOVE failed")
wpas.request("DISCONNECT")
wpas.wait_disconnected()
fst_wait_event_peer_sta(wpas, "disconnected", wpas.ifname,
apdev[0]['bssid'])
fst_wait_event_peer_ap(hglobal, "disconnected", apdev[0]['ifname'],
wpas.own_addr())
wpas2.request("DISCONNECT")
wpas2.wait_disconnected()
fst_wait_event_peer_sta(wpas, "disconnected", wpas2.ifname,
apdev[1]['bssid'])
fst_wait_event_peer_ap(hglobal, "disconnected", apdev[1]['ifname'],
wpas2.own_addr())
fst_detach_ap(hglobal, apdev[0]['ifname'], group)
fst_detach_ap(hglobal, apdev[1]['ifname'], group)
hapd.disable()
hapd2.disable()
fst_detach_sta(wpas, wpas.ifname, sgroup)
fst_detach_sta(wpas, wpas2.ifname, sgroup)
def test_fst_attach_wpas_error(dev, apdev, test_params):
"""FST attach errors in wpa_supplicant"""
if "OK" not in dev[0].global_request("FST-MANAGER TEST_REQUEST IS_SUPPORTED"):
raise HwsimSkip("No FST testing support")
group = "fstg0"
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
fst_attach_sta(wpas, wpas.ifname, group)
if "FAIL" not in wpas.global_request("FST-ATTACH %s %s" % (wpas.ifname,
group)):
raise Exception("Duplicated FST-ATTACH accepted")
if "FAIL" not in wpas.global_request("FST-ATTACH %s %s" % ("foofoo",
group)):
raise Exception("FST-ATTACH for unknown interface accepted")
def test_fst_session_initiate_errors(dev, apdev, test_params):
"""FST SESSION_INITIATE error cases"""
try:
_test_fst_session_initiate_errors(dev, apdev, test_params)
finally:
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def _test_fst_session_initiate_errors(dev, apdev, test_params):
group = "fstg0"
sgroup = "fstg1"
hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
sid = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
if "FAIL" in sid:
raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
# No old peer MAC address
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "old_peer_addr", "00:ff:ff:ff:ff:ff")
# No new peer MAC address
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "new_peer_addr", "00:ff:ff:ff:ff:fe")
# No old interface defined
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "old_ifname", wpas.ifname)
# No new interface defined
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "new_ifname", wpas.ifname)
# Same interface set as old and new
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "new_ifname", wpas2.ifname)
# The preset old peer address is not connected
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "old_peer_addr", apdev[0]['bssid'])
# The preset new peer address is not connected
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Invalid FST-MANAGER SESSION_INITIATE accepted")
fst_session_set(wpas, sid, "new_peer_addr", apdev[1]['bssid'])
# Initiate session setup
if "OK" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
# Session in progress
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("Duplicated FST-MANAGER SESSION_INITIATE accepted")
sid2 = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
if "FAIL" in sid:
raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
fst_session_set(wpas, sid2, "old_ifname", wpas.ifname)
fst_session_set(wpas, sid2, "old_peer_addr", apdev[0]['bssid'])
fst_session_set(wpas, sid2, "new_ifname", wpas2.ifname)
fst_session_set(wpas, sid2, "new_peer_addr", apdev[1]['bssid'])
# There is another session in progress (old)
if "FAIL" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid2):
raise Exception("Duplicated FST-MANAGER SESSION_INITIATE accepted")
if "OK" not in wpas.global_request("FST-MANAGER SESSION_REMOVE " + sid):
raise Exception("FST-MANAGER SESSION_REMOVE failed")
while True:
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=SETUP_COMPLETION" in ev:
            f = re.search(r"session_id=(\d+)", ev)
if f is None:
raise Exception("No session_id in FST-EVENT-SESSION")
sid_ap = f.group(1)
break
if "OK" not in hglobal.request("FST-MANAGER SESSION_REMOVE " + sid_ap):
raise Exception("FST-MANAGER SESSION_REMOVE (AP) failed")
if "OK" not in wpas.global_request("FST-MANAGER SESSION_REMOVE " + sid2):
raise Exception("FST-MANAGER SESSION_REMOVE failed")
def test_fst_session_respond_errors(dev, apdev, test_params):
"""FST SESSION_RESPOND error cases"""
try:
_test_fst_session_respond_errors(dev, apdev, test_params)
finally:
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
dev[1].flush_scan_cache()
def _test_fst_session_respond_errors(dev, apdev, test_params):
group = "fstg0b"
sgroup = "fstg1b"
hglobal, wpas, wpas2, hapd, hapd2 = fst_start_and_connect(apdev, group, sgroup)
sid = wpas.global_request("FST-MANAGER SESSION_ADD " + sgroup).strip()
if "FAIL" in sid:
raise Exception("FST-MANAGER SESSION_ADD (STA) failed")
fst_session_set(wpas, sid, "old_ifname", wpas.ifname)
fst_session_set(wpas, sid, "old_peer_addr", apdev[0]['bssid'])
fst_session_set(wpas, sid, "new_ifname", wpas2.ifname)
fst_session_set(wpas, sid, "new_peer_addr", apdev[1]['bssid'])
if "OK" not in wpas.global_request("FST-MANAGER SESSION_INITIATE " + sid):
raise Exception("FST-MANAGER SESSION_INITIATE failed")
while True:
ev = hglobal.wait_event(['FST-EVENT-SESSION'], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION (AP)")
if "new_state=SETUP_COMPLETION" in ev:
            f = re.search(r"session_id=(\d+)", ev)
if f is None:
raise Exception("No session_id in FST-EVENT-SESSION")
sid_ap = f.group(1)
break
# The preset peer address is not in the peer list
fst_session_set_ap(hglobal, sid_ap, "old_peer_addr", "00:00:00:00:00:01")
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "FAIL" not in hglobal.request(cmd):
raise Exception("Invalid FST-MANAGER SESSION_RESPOND accepted")
# Same interface set as old and new
fst_session_set_ap(hglobal, sid_ap, "old_peer_addr", wpas.own_addr())
fst_session_set_ap(hglobal, sid_ap, "old_ifname", apdev[1]['ifname'])
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "FAIL" not in hglobal.request(cmd):
raise Exception("Invalid FST-MANAGER SESSION_RESPOND accepted")
# valid command
fst_session_set_ap(hglobal, sid_ap, "old_ifname", apdev[0]['ifname'])
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "OK" not in hglobal.request(cmd):
raise Exception("FST-MANAGER SESSION_RESPOND failed")
# incorrect state
cmd = "FST-MANAGER SESSION_RESPOND %s accept" % sid_ap
if "FAIL" not in hglobal.request(cmd):
raise Exception("Invalid FST-MANAGER SESSION_RESPOND accepted")
cmd = "FST-MANAGER SESSION_REMOVE " + sid
if "OK" not in wpas.global_request(cmd):
raise Exception("FST-MANAGER SESSION_REMOVE (STA) failed")
cmd = "FST-MANAGER SESSION_REMOVE %s" % sid_ap
if "OK" not in hglobal.request(cmd):
raise Exception("FST-MANAGER SESSION_REMOVE (AP) failed")
| 46.577415 | 144 | 0.646849 |
4f84ac9fdc7da38f8f3809747bc7bc54b164effa | 1,603 | py | Python | bulk_resize.py | timothyyu/au_utils | 6d1f1095b7f5de823a329ca9beb787c72aaea53b | ["BSD-3-Clause"] | 1 | 2019-02-01T05:09:37.000Z | 2019-02-01T05:09:37.000Z | bulk_resize.py | timothyyu/au_utils | 6d1f1095b7f5de823a329ca9beb787c72aaea53b | ["BSD-3-Clause"] | null | null | null | bulk_resize.py | timothyyu/au_utils | 6d1f1095b7f5de823a329ca9beb787c72aaea53b | ["BSD-3-Clause"] | null | null | null |
# https://kishstats.com/python/2018/04/27/bulk-image-resizing-python.html
import os
import argparse
from PIL import Image
DEFAULT_SIZE = (320, 180)
def resize_image(input_dir, infile, output_dir="resized", size=DEFAULT_SIZE):
outfile = os.path.splitext(infile)[0] + "_resized"
extension = os.path.splitext(infile)[1]
try:
img = Image.open(input_dir + '/' + infile)
img = img.resize((size[0], size[1]), Image.LANCZOS)
new_file = output_dir + "/" + outfile + extension
img.save(new_file)
except IOError:
print("unable to resize image {}".format(infile))
if __name__ == "__main__":
dir = os.getcwd()
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_dir', help='Full Input Path')
parser.add_argument('-o', '--output_dir', help='Full Output Path')
parser.add_argument('-w', '--width', help='Resized Width')
parser.add_argument('-t', '--height', help='Resized Height')
args = parser.parse_args()
if args.input_dir:
input_dir = args.input_dir
else:
input_dir = dir + '/images'
if args.output_dir:
output_dir = args.output_dir
else:
output_dir = dir + '/resized'
if args.width and args.height:
size = (int(args.width), int(args.height))
else:
size = DEFAULT_SIZE
if not os.path.exists(os.path.join(dir, output_dir)):
os.mkdir(output_dir)
try:
for file in os.listdir(input_dir):
resize_image(input_dir, file, output_dir, size=size)
except OSError:
print('file not found')
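# Example use (paths and sizes below are placeholders, not part of the script):
#   python bulk_resize.py -i ./images -o ./resized -w 640 -t 360
# or programmatically:
#   resize_image("images", "photo.jpg", output_dir="resized", size=(640, 360))
# With no arguments the script falls back to ./images, ./resized and the
# 320x180 DEFAULT_SIZE defined above.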
| 27.637931 | 77 | 0.632564 |
5ee678e4ad93e4b750bbc05af602d20fceb99baf | 993 | py | Python | inference.py | sem-onyalo/dnn-training-model | f08d4aee4e87c3a72f9883e5232a628f0fe98cbc | ["MIT"] | null | null | null | inference.py | sem-onyalo/dnn-training-model | f08d4aee4e87c3a72f9883e5232a628f0fe98cbc | ["MIT"] | null | null | null | inference.py | sem-onyalo/dnn-training-model | f08d4aee4e87c3a72f9883e5232a628f0fe98cbc | ["MIT"] | null | null | null |
from data import Data
from matplotlib import pyplot
from tensorflow.keras.models import load_model
class Inference:
def __init__(self, data:Data, modelPath:str, latentDim:int) -> None:
self.data = data
self.modelPath = modelPath
self.latentDim = latentDim
self.samples = 100
self.evalDirectoryName = 'eval'
def run(self):
model = load_model(self.modelPath)
input = self.data.generateLatentPointsAndOrderedLabels(self.latentDim, self.samples)
output = model.predict(input)
self.plotImageSamples(output)
def plotImageSamples(self, images, n=10):
scaledImages = (images + 1) / 2.0 # scale from -1,1 to 0,1
for i in range(n * n):
pyplot.subplot(n, n, i + 1)
pyplot.axis('off')
pyplot.imshow(scaledImages[i, :, :, 0], cmap='gray_r')
filename = f'{self.evalDirectoryName}/generated_samples.png'
pyplot.savefig(filename)
pyplot.close()
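# Hedged usage sketch: the constructor arguments are placeholders, and Data is
# assumed to expose generateLatentPointsAndOrderedLabels() as called in run().
#   inference = Inference(Data(), "generator.h5", latentDim=100)
#   inference.run()  # writes eval/generated_samples.png
# The (images + 1) / 2.0 step above maps generator outputs from [-1, 1]
# (tanh range) into [0, 1] for plotting.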
| 34.241379 | 92 | 0.638469 |
6d50741dc7df1cb199b08995ec154c74e1444699 | 916 | py | Python | beginning-game-development/Chapter 7/7-7.py | CrtomirJuren/pygame-projects | f710f36050bfe3ece866bbda7d570caa1e037d7a | ["MIT"] | 43 | 2015-09-20T02:05:48.000Z | 2022-03-01T22:00:43.000Z | beginning-game-development/Chapter 7/7-7.py | CrtomirJuren/pygame-projects | f710f36050bfe3ece866bbda7d570caa1e037d7a | ["MIT"] | null | null | null | beginning-game-development/Chapter 7/7-7.py | CrtomirJuren/pygame-projects | f710f36050bfe3ece866bbda7d570caa1e037d7a | ["MIT"] | 40 | 2015-05-19T06:51:13.000Z | 2022-03-27T18:11:16.000Z |
class StateMachine(object):
def __init__(self):
self.states = {} # Stores the states
self.active_state = None # The currently active state
def add_state(self, state):
# Add a state to the internal dictionary
self.states[state.name] = state
def think(self):
# Only continue if there is an active state
if self.active_state is None:
return
# Perform the actions of the active state, and check conditions
self.active_state.do_actions()
new_state_name = self.active_state.check_conditions()
if new_state_name is not None:
self.set_state(new_state_name)
def set_state(self, new_state_name):
# Change states and perform any exit / entry actions
if self.active_state is not None:
self.active_state.exit_actions()
self.active_state = self.states[new_state_name]
self.active_state.entry_actions()
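# A minimal sketch of a state object the machine above expects: anything with
# a .name attribute plus the four hook methods used by think() and set_state().
# The class and names below are illustrative, not taken from the original
# chapter.
class IdleState(object):
    def __init__(self):
        self.name = "idle"
    def do_actions(self):
        pass              # per-frame work while this state is active
    def check_conditions(self):
        return None       # return another state's name to trigger a transition
    def entry_actions(self):
        pass
    def exit_actions(self):
        pass

# machine = StateMachine()
# machine.add_state(IdleState())
# machine.set_state("idle")
# machine.think()  # call once per frame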
| 26.941176 | 68 | 0.686681 |
84fa4ee95d10ab807258b45abe5cd882e3b89e9b | 6,984 | py | Python | log_caspase/model_21.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null | log_caspase/model_21.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null | log_caspase/model_21.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C3ub')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('Xiap', ['C3A'])
Monomer('C8A', ['C3pro'])
Monomer('C3pro', ['C8A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C3ub_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('Xiap_0', 5250.0)
Parameter('C8A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Parameter('Fadd_0', 130000.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C3ub_obs', C3ub())
Observable('C3A_obs', C3A())
Observable('Xiap_obs', Xiap())
Observable('C8A_obs', C8A())
Observable('C3pro_obs', C3pro())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Observable('Fadd_obs', Fadd())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=None) + C3pro(C8A=None) | C8A(C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=1) % C3pro(C8A=1) >> C8A(C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C3ub(), C3ub_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(Xiap(C3A=None), Xiap_0)
Initial(C8A(C3pro=None), C8A_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
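# Hedged sketch (not part of the exported model): after importing this module,
# the model can be run with PySB's ScipyOdeSimulator, e.g. to inspect the
# cleaved-PARP trajectory. The time span below is an arbitrary example.
#   from pysb.simulator import ScipyOdeSimulator
#   import numpy as np
#   tspan = np.linspace(0, 20000, 101)
#   traj = ScipyOdeSimulator(model, tspan=tspan).run()
#   print(traj.observables['ParpC_obs'][-1])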
| 69.84 | 296 | 0.818013 |
08ca4d48422249a173591dfaafab7c2366822d51 | 819 | py | Python | config.py | dansam100/git2svn-sync | a9781f75e7d10074837e7ead50ba448545d4cc6c | ["MIT"] | null | null | null | config.py | dansam100/git2svn-sync | a9781f75e7d10074837e7ead50ba448545d4cc6c | ["MIT"] | null | null | null | config.py | dansam100/git2svn-sync | a9781f75e7d10074837e7ead50ba448545d4cc6c | ["MIT"] | null | null | null |
import os
DUMMY_SVN = False
TRACE_LOGS = False
SWITCH_USER_FOR_COMMITS = False
USE_PATCH_TOOL = True
USE_SVN_PATCH_FORMAT = False
root_dir = os.getcwd().replace("\\", "/")
patches_dir = f"{root_dir}/patches/"
git_url = "C:/work/git_repo"
git_branch = "master"
git_pull_timeout = 60000
svn_url = 'C:/work/svnroot'
svn_branch = "trunk"
svn_remote_url = "https://svn.company.com/svn/branches/trunk"
print(f"Sys args: TRACE_LOGS={TRACE_LOGS}, USE_PATCH_TOOL={USE_PATCH_TOOL}")
print(f"SVN args: SWITCH_USER_FOR_COMMITS={SWITCH_USER_FOR_COMMITS}, DUMMY_SVN={DUMMY_SVN}, USE_SVN_PATCH_FORMAT={USE_SVN_PATCH_FORMAT}")
print(f"Git repo: {git_url}, branch: {git_branch}, pull timeout: {git_pull_timeout}")
print(f"SVN repo: {svn_url}, branch: {svn_branch} remote: {svn_remote_url}")
print("\n\n")
| 31.5 | 138 | 0.733822 |
145b7fb27b2c4c622fb2eaa86b3fb1b8b57376b1 | 18,679 | py | Python | infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py | DmytroLiaskovskyi/incubator-dlab | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | ["Apache-2.0"] | null | null | null | infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py | DmytroLiaskovskyi/incubator-dlab | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | ["Apache-2.0"] | null | null | null | infrastructure-provisioning/src/general/lib/os/debian/ssn_lib.py | DmytroLiaskovskyi/incubator-dlab | af995e98b3b3cf526fb9741a3e5117dd1e04f3aa | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
from fabric.api import *
import crypt
import yaml
from dlab.fab import *
from dlab.meta_lib import *
import os
import json
import traceback
import sys
def ensure_docker_daemon(dlab_path, os_user, region):
try:
if not exists(dlab_path + 'tmp/docker_daemon_ensured'):
docker_version = os.environ['ssn_docker_version']
sudo('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -')
sudo('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) \
stable"')
sudo('apt-get update')
sudo('apt-cache policy docker-ce')
sudo('apt-get install -y docker-ce={}~ce-0~ubuntu'.format(docker_version))
sudo('usermod -a -G docker ' + os_user)
sudo('update-rc.d docker defaults')
sudo('update-rc.d docker enable')
sudo('touch ' + dlab_path + 'tmp/docker_daemon_ensured')
return True
except:
return False
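# Most helpers in this module follow the same idempotency pattern as
# ensure_docker_daemon() above: do the work only if a marker file under
# <dlab_path>/tmp/ is missing, then create the marker so re-runs are no-ops.
# A stripped-down sketch of the pattern (names are illustrative):
#   def ensure_something(dlab_path):
#       if not exists(dlab_path + 'tmp/something_ensured'):
#           sudo('apt-get -y install something')
#           sudo('touch ' + dlab_path + 'tmp/something_ensured')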
def ensure_nginx(dlab_path):
try:
if not exists(dlab_path + 'tmp/nginx_ensured'):
sudo('apt-get -y install nginx')
sudo('service nginx restart')
sudo('update-rc.d nginx defaults')
sudo('update-rc.d nginx enable')
sudo('touch ' + dlab_path + 'tmp/nginx_ensured')
except Exception as err:
traceback.print_exc()
print('Failed to ensure Nginx: ', str(err))
sys.exit(1)
def ensure_jenkins(dlab_path):
try:
if not exists(dlab_path + 'tmp/jenkins_ensured'):
sudo('wget -q -O - https://pkg.jenkins.io/debian/jenkins-ci.org.key | apt-key add -')
sudo('echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list')
sudo('apt-get -y update')
sudo('apt-get -y install openjdk-8-jdk')
sudo('apt-get -y install jenkins')
sudo('touch ' + dlab_path + 'tmp/jenkins_ensured')
except Exception as err:
traceback.print_exc()
print('Failed to ensure Jenkins: ', str(err))
sys.exit(1)
def configure_jenkins(dlab_path, os_user, config, tag_resource_id):
try:
if not exists(dlab_path + 'tmp/jenkins_configured'):
sudo('echo \'JENKINS_ARGS="--prefix=/jenkins --httpPort=8070"\' >> /etc/default/jenkins')
sudo('rm -rf /var/lib/jenkins/*')
sudo('mkdir -p /var/lib/jenkins/jobs/')
sudo('chown -R ' + os_user + ':' + os_user + ' /var/lib/jenkins/')
put('/root/templates/jenkins_jobs/*', '/var/lib/jenkins/jobs/')
sudo("find /var/lib/jenkins/jobs/ -type f | xargs sed -i \'s/OS_USR/{}/g; s/SBN/{}/g; s/CTUN/{}/g; s/SGI/{}/g; s/VPC/{}/g; s/SNI/{}/g; s/AKEY/{}/g\'".format(os_user, config['service_base_name'], tag_resource_id, config['security_group_id'], config['vpc_id'], config['subnet_id'], config['admin_key']))
sudo('chown -R jenkins:jenkins /var/lib/jenkins')
sudo('/etc/init.d/jenkins stop; sleep 5')
sudo('sysv-rc-conf jenkins on')
sudo('service jenkins start')
sudo('touch ' + dlab_path + '/tmp/jenkins_configured')
sudo('echo "jenkins ALL = NOPASSWD:ALL" >> /etc/sudoers')
except Exception as err:
traceback.print_exc()
print('Failed to configure Jenkins: ', str(err))
sys.exit(1)
def configure_nginx(config, dlab_path, hostname):
try:
random_file_part = id_generator(size=20)
if not exists("/etc/nginx/conf.d/nginx_proxy.conf"):
sudo('rm -f /etc/nginx/conf.d/*')
put(config['nginx_template_dir'] + 'nginx_proxy.conf', '/tmp/nginx_proxy.conf')
sudo("sed -i 's|SSN_HOSTNAME|" + hostname + "|' /tmp/nginx_proxy.conf")
sudo('mv /tmp/nginx_proxy.conf ' + dlab_path + 'tmp/')
sudo('\cp ' + dlab_path + 'tmp/nginx_proxy.conf /etc/nginx/conf.d/')
sudo('mkdir -p /etc/nginx/locations')
sudo('rm -f /etc/nginx/sites-enabled/default')
except Exception as err:
traceback.print_exc()
print('Failed to configure Nginx: ', str(err))
sys.exit(1)
try:
if not exists("/etc/nginx/locations/proxy_location_jenkins.conf"):
nginx_password = id_generator()
template_file = config['nginx_template_dir'] + 'proxy_location_jenkins_template.conf'
with open("/tmp/%s-tmpproxy_location_jenkins_template.conf" % random_file_part, 'w') as out:
with open(template_file) as tpl:
for line in tpl:
out.write(line)
put("/tmp/%s-tmpproxy_location_jenkins_template.conf" % random_file_part, '/tmp/proxy_location_jenkins.conf')
sudo('mv /tmp/proxy_location_jenkins.conf ' + os.environ['ssn_dlab_path'] + 'tmp/')
sudo('\cp ' + os.environ['ssn_dlab_path'] + 'tmp/proxy_location_jenkins.conf /etc/nginx/locations/')
sudo("echo 'engineer:" + crypt.crypt(nginx_password, id_generator()) + "' > /etc/nginx/htpasswd")
with open('jenkins_creds.txt', 'w+') as f:
f.write("Jenkins credentials: engineer / " + nginx_password)
except:
return False
try:
sudo('service nginx reload')
return True
except:
return False
def ensure_supervisor():
try:
if not exists(os.environ['ssn_dlab_path'] + 'tmp/superv_ensured'):
sudo('apt-get -y install supervisor')
sudo('update-rc.d supervisor defaults')
sudo('update-rc.d supervisor enable')
sudo('touch ' + os.environ['ssn_dlab_path'] + 'tmp/superv_ensured')
except Exception as err:
traceback.print_exc()
print('Failed to install Supervisor: ', str(err))
sys.exit(1)
def ensure_mongo():
try:
if not exists(os.environ['ssn_dlab_path'] + 'tmp/mongo_ensured'):
sudo('apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927')
sudo('ver=`lsb_release -cs`; echo "deb http://repo.mongodb.org/apt/ubuntu $ver/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list; apt-get update')
sudo('apt-get -y --allow-unauthenticated install mongodb-org')
sudo('systemctl enable mongod.service')
sudo('touch ' + os.environ['ssn_dlab_path'] + 'tmp/mongo_ensured')
except Exception as err:
traceback.print_exc()
print('Failed to install MongoDB: ', str(err))
sys.exit(1)
def start_ss(keyfile, host_string, dlab_conf_dir, web_path,
os_user, mongo_passwd, keystore_passwd, cloud_provider,
service_base_name, tag_resource_id, billing_tag, account_id, billing_bucket,
aws_job_enabled, dlab_path, billing_enabled, cloud_params,
authentication_file, offer_number, currency,
locale, region_info, ldap_login, tenant_id,
application_id, hostname, data_lake_name, subscription_id,
validate_permission_scope, dlab_id, usage_date, product,
usage_type, usage, cost, resource_id, tags, billing_dataset_name, report_path=''):
try:
if not exists(os.environ['ssn_dlab_path'] + 'tmp/ss_started'):
java_path = sudo("update-alternatives --query java | grep 'Value: ' | grep -o '/.*/jre'")
supervisor_conf = '/etc/supervisor/conf.d/supervisor_svc.conf'
local('sed -i "s|MONGO_PASSWORD|{}|g" /root/templates/ssn.yml'.format(mongo_passwd))
local('sed -i "s|KEYSTORE_PASSWORD|{}|g" /root/templates/ssn.yml'.format(keystore_passwd))
local('sed -i "s|CLOUD_PROVIDER|{}|g" /root/templates/ssn.yml'.format(cloud_provider))
local('sed -i "s|\${JRE_HOME}|' + java_path + '|g" /root/templates/ssn.yml')
sudo('sed -i "s|KEYNAME|{}|g" {}/webapp/provisioning-service/conf/provisioning.yml'.
format(os.environ['conf_key_name'], dlab_path))
put('/root/templates/ssn.yml', '/tmp/ssn.yml')
sudo('mv /tmp/ssn.yml ' + os.environ['ssn_dlab_path'] + 'conf/')
put('/root/templates/proxy_location_webapp_template.conf', '/tmp/proxy_location_webapp_template.conf')
sudo('mv /tmp/proxy_location_webapp_template.conf ' + os.environ['ssn_dlab_path'] + 'tmp/')
if cloud_provider == 'gcp':
conf_parameter_name = '--spring.config.location='
with open('/root/templates/supervisor_svc.conf', 'r') as f:
text = f.read()
text = text.replace('WEB_CONF', dlab_conf_dir).replace('OS_USR', os_user)\
.replace('CONF_PARAMETER_NAME', conf_parameter_name)
with open('/root/templates/supervisor_svc.conf', 'w') as f:
f.write(text)
            elif cloud_provider in ('aws', 'azure'):
conf_parameter_name = '--conf '
with open('/root/templates/supervisor_svc.conf', 'r') as f:
text = f.read()
text = text.replace('WEB_CONF', dlab_conf_dir).replace('OS_USR', os_user)\
.replace('CONF_PARAMETER_NAME', conf_parameter_name)
with open('/root/templates/supervisor_svc.conf', 'w') as f:
f.write(text)
put('/root/templates/supervisor_svc.conf', '/tmp/supervisor_svc.conf')
sudo('mv /tmp/supervisor_svc.conf ' + os.environ['ssn_dlab_path'] + 'tmp/')
sudo('cp ' + os.environ['ssn_dlab_path'] +
'tmp/proxy_location_webapp_template.conf /etc/nginx/locations/proxy_location_webapp.conf')
sudo('cp ' + os.environ['ssn_dlab_path'] + 'tmp/supervisor_svc.conf {}'.format(supervisor_conf))
sudo('sed -i \'s=WEB_APP_DIR={}=\' {}'.format(web_path, supervisor_conf))
try:
sudo('mkdir -p /var/log/application')
run('mkdir -p /tmp/yml_tmp/')
for service in ['self-service', 'provisioning-service', 'billing']:
jar = sudo('cd {0}{1}/lib/; find {1}*.jar -type f'.format(web_path, service))
sudo('ln -s {0}{2}/lib/{1} {0}{2}/{2}.jar '.format(web_path, jar, service))
sudo('cp {0}/webapp/{1}/conf/*.yml /tmp/yml_tmp/'.format(dlab_path, service))
# Replacing Keycloak and cloud parameters
for item in json.loads(cloud_params):
if "KEYCLOAK_" in item['key']:
sudo('sed -i "s|{0}|{1}|g" /tmp/yml_tmp/self-service.yml'.format(
item['key'], item['value']))
sudo('sed -i "s|{0}|{1}|g" /tmp/yml_tmp/provisioning.yml'.format(
item['key'], item['value']))
sudo('sed -i "s|SERVICE_BASE_NAME|{0}|g" /tmp/yml_tmp/self-service.yml'.format(service_base_name))
sudo('sed -i "s|OPERATION_SYSTEM|debian|g" /tmp/yml_tmp/self-service.yml')
if cloud_provider == 'azure':
sudo('sed -i "s|<LOGIN_USE_LDAP>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(ldap_login))
sudo('sed -i "s|<LOGIN_TENANT_ID>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(tenant_id))
sudo('sed -i "s|<LOGIN_APPLICATION_ID>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(application_id))
sudo('sed -i "s|<DLAB_SUBSCRIPTION_ID>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(subscription_id))
sudo('sed -i "s|<MANAGEMENT_API_AUTH_FILE>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(
authentication_file))
sudo('sed -i "s|<VALIDATE_PERMISSION_SCOPE>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(
validate_permission_scope))
sudo('sed -i "s|<LOGIN_APPLICATION_REDIRECT_URL>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(
hostname))
sudo('sed -i "s|<LOGIN_PAGE>|{0}|g" /tmp/yml_tmp/self-service.yml'.format(hostname))
# if os.environ['azure_datalake_enable'] == 'true':
# permission_scope = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/providers/Microsoft.Authorization/'.format(
# subscription_id, service_base_name, data_lake_name)
# else:
# permission_scope = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Authorization/'.format(
# subscription_id, service_base_name
# )
sudo('mv /tmp/yml_tmp/* ' + dlab_conf_dir)
sudo('rmdir /tmp/yml_tmp/')
except:
append_result("Unable to upload webapp jars")
sys.exit(1)
if billing_enabled:
local('scp -i {} /root/scripts/configure_billing.py {}:/tmp/configure_billing.py'.format(keyfile,
host_string))
params = '--cloud_provider {} ' \
'--infrastructure_tag {} ' \
'--tag_resource_id {} ' \
'--billing_tag {} ' \
'--account_id {} ' \
'--billing_bucket {} ' \
'--aws_job_enabled {} ' \
'--report_path "{}" ' \
'--mongo_password {} ' \
'--dlab_dir {} ' \
'--authentication_file "{}" ' \
'--offer_number {} ' \
'--currency {} ' \
'--locale {} ' \
'--region_info {} ' \
'--dlab_id {} ' \
'--usage_date {} ' \
'--product {} ' \
'--usage_type {} ' \
'--usage {} ' \
'--cost {} ' \
'--resource_id {} ' \
'--tags {} ' \
'--billing_dataset_name "{}" '.\
format(cloud_provider,
service_base_name,
tag_resource_id,
billing_tag,
account_id,
billing_bucket,
aws_job_enabled,
report_path,
mongo_passwd,
dlab_path,
authentication_file,
offer_number,
currency,
locale,
region_info,
dlab_id,
usage_date,
product,
usage_type,
usage,
cost,
resource_id,
tags,
billing_dataset_name)
sudo('python /tmp/configure_billing.py {}'.format(params))
try:
sudo('keytool -genkeypair -alias dlab -keyalg RSA -validity 730 -storepass {1} -keypass {1} \
-keystore /home/{0}/keys/dlab.keystore.jks -keysize 2048 -dname "CN=localhost"'.format(os_user, keystore_passwd))
sudo('keytool -exportcert -alias dlab -storepass {1} -file /home/{0}/keys/dlab.crt \
-keystore /home/{0}/keys/dlab.keystore.jks'.format(os_user, keystore_passwd))
sudo('keytool -importcert -trustcacerts -alias dlab -file /home/{0}/keys/dlab.crt -noprompt \
-storepass changeit -keystore {1}/lib/security/cacerts'.format(os_user, java_path))
except:
append_result("Unable to generate cert and copy to java keystore")
sys.exit(1)
sudo('service supervisor start')
sudo('service nginx restart')
sudo('service supervisor restart')
sudo('touch ' + os.environ['ssn_dlab_path'] + 'tmp/ss_started')
except Exception as err:
traceback.print_exc()
print('Failed to start Self-service: ', str(err))
sys.exit(1)
def install_build_dep():
try:
if not exists('{}tmp/build_dep_ensured'.format(os.environ['ssn_dlab_path'])):
maven_version = '3.5.4'
sudo('apt-get install -y openjdk-8-jdk git wget unzip')
with cd('/opt/'):
sudo('wget http://mirrors.sonic.net/apache/maven/maven-{0}/{1}/binaries/apache-maven-{1}-bin.zip'.format(
maven_version.split('.')[0], maven_version))
sudo('unzip apache-maven-{}-bin.zip'.format(maven_version))
sudo('mv apache-maven-{} maven'.format(maven_version))
sudo('bash -c "curl --silent --location https://deb.nodesource.com/setup_12.x | bash -"')
sudo('apt-get install -y nodejs')
sudo('npm config set unsafe-perm=true')
sudo('touch {}tmp/build_dep_ensured'.format(os.environ['ssn_dlab_path']))
except Exception as err:
traceback.print_exc()
print('Failed to install build dependencies for UI: ', str(err))
sys.exit(1)
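# --- Illustrative sketch only (not part of the original DLab sources): one way the
# --- helpers above might be chained from a Fabric task; the dlab_path value is a
# --- hypothetical placeholder, and each helper already guards itself with a marker file.
def example_provision_ssn(dlab_path='/opt/dlab/'):
    ensure_nginx(dlab_path)
    ensure_jenkins(dlab_path)
    ensure_supervisor()
    ensure_mongo()
    install_build_dep()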
| 53.829971 | 313 | 0.546924 |
89de3551bbf751e74c9a229abac7290f4b051d84 | 3,634 | py | Python | tests/modules/test_molotov.py | yurifrl/taurus | 7e56c4c50142a8294b1a950fb780ce5c1333c245 | ["Apache-2.0"] | 1 | 2019-12-05T14:57:58.000Z | 2019-12-05T14:57:58.000Z | tests/modules/test_molotov.py | hiroksarker/taurus | 0982f64b7cf36669dc88c6286b880d2b9ad2a514 | ["Apache-2.0"] | null | null | null | tests/modules/test_molotov.py | hiroksarker/taurus | 0982f64b7cf36669dc88c6286b880d2b9ad2a514 | ["Apache-2.0"] | null | null | null |
import sys
import time
import unittest
from os.path import exists, join
from bzt import ToolError
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules.molotov import MolotovExecutor, MolotovReportReader
from bzt.utils import EXE_SUFFIX, is_windows
from tests import BZTestCase, ExecutorTestCase, RESOURCES_DIR, close_reader_file, ROOT_LOGGER
TOOL_NAME = 'molotov-mock' + EXE_SUFFIX
TOOL_PATH = join(RESOURCES_DIR, "molotov", TOOL_NAME)
LOADTEST_PY = join(RESOURCES_DIR, "molotov", "loadtest.py")
class TestMolotov(ExecutorTestCase):
EXECUTOR = MolotovExecutor
def tearDown(self):
if self.obj.reader:
close_reader_file(self.obj.reader.ldjson_reader)
super(TestMolotov, self).tearDown()
def test_mocked(self):
self.obj.settings.merge({
"path": TOOL_PATH})
self.obj.execution.merge({
"ramp-up": "10s",
"hold-for": "20s",
"scenario": {
"script": LOADTEST_PY}})
self.obj.prepare()
self.obj.get_widget()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
def test_no_tool(self):
self.obj.settings.merge({
"path": '*'})
self.obj.execution.merge({
"scenario": {
"script": LOADTEST_PY}})
self.assertRaises(ToolError, self.obj.prepare)
def test_diagnostics(self):
self.obj.settings.merge({
"path": TOOL_PATH})
self.obj.execution.merge({
"iterations": 1,
"scenario": {
"script": LOADTEST_PY}})
self.obj.prepare()
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
self.obj.shutdown()
self.obj.post_process()
self.assertIsNotNone(self.obj.get_error_diagnostics())
def test_resource_files(self):
self.obj.execution.merge({
"scenario": {
"script": LOADTEST_PY}})
resources = self.obj.get_resource_files()
self.assertEqual(resources, [LOADTEST_PY])
@unittest.skipUnless(sys.version_info >= (3, 5), "enabled only on 3.5+")
@unittest.skipIf(is_windows(), "disabled on windows")
def test_full(self):
self.configure({"execution": {
"concurrency": 3,
"processes": 2,
"hold-for": "5s",
"iterations": 10,
"scenario": {
"script": LOADTEST_PY}}})
self.obj.prepare()
self.obj.get_widget()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertNotEquals(self.obj.process, None)
self.assertTrue(exists(self.obj.report_file_name))
class TestReportReader(BZTestCase):
def test_read(self):
log_path = join(RESOURCES_DIR, "molotov", "molotov-report.csv")
obj = MolotovReportReader(log_path, ROOT_LOGGER)
points = list(obj.datapoints(True))
self.assertEqual(len(points), 3)
for datapoint in points:
self.assertTrue(datapoint['ts'] > 1500000000)
self.assertEqual(points[-1][DataPoint.CUMULATIVE][''][KPISet.SUCCESSES], 10)
self.assertEqual(points[-1][DataPoint.CUMULATIVE][''][KPISet.FAILURES], 2)
| 33.33945 | 93 | 0.606494 |
38527ecd9abcb1b362d75e6fc85ed8ce134a3450 | 11,471 | py | Python | distributed/process.py | sodre/distributed | 0b68318112b13d70a9cdd741e5db00da2ec6a8f5 | ["BSD-3-Clause"] | 1 | 2021-06-24T09:12:47.000Z | 2021-06-24T09:12:47.000Z | distributed/process.py | sodre/distributed | 0b68318112b13d70a9cdd741e5db00da2ec6a8f5 | ["BSD-3-Clause"] | null | null | null | distributed/process.py | sodre/distributed | 0b68318112b13d70a9cdd741e5db00da2ec6a8f5 | ["BSD-3-Clause"] | 1 | 2020-11-25T04:42:07.000Z | 2020-11-25T04:42:07.000Z |
import atexit
from datetime import timedelta
import logging
import os
from queue import Queue as PyQueue
import re
import threading
import weakref
import dask
from .utils import mp_context
from tornado import gen
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
logger = logging.getLogger(__name__)
def _loop_add_callback(loop, func, *args):
"""
Helper to silence "IOLoop is closing" exception on IOLoop.add_callback.
"""
try:
loop.add_callback(func, *args)
except RuntimeError as exc:
if not re.search("IOLoop is clos(ed|ing)", str(exc)):
raise
def _call_and_set_future(loop, future, func, *args, **kwargs):
try:
res = func(*args, **kwargs)
except Exception as exc:
# Tornado futures are not thread-safe, need to
# set_result() / set_exc_info() from the loop's thread
_loop_add_callback(loop, future.set_exception, exc)
else:
_loop_add_callback(loop, future.set_result, res)
class _ProcessState(object):
is_alive = False
pid = None
exitcode = None
class AsyncProcess(object):
"""
A coroutine-compatible multiprocessing.Process-alike.
All normally blocking methods are wrapped in Tornado coroutines.
"""
def __init__(self, loop=None, target=None, name=None, args=(), kwargs={}):
if not callable(target):
raise TypeError("`target` needs to be callable, not %r" % (type(target),))
self._state = _ProcessState()
self._loop = loop or IOLoop.current(instance=False)
# _keep_child_alive is the write side of a pipe, which, when it is
# closed, causes the read side of the pipe to unblock for reading. Note
# that it is never closed directly. The write side is closed by the
# kernel when our process exits, or possibly by the garbage collector
# closing the file descriptor when the last reference to
# _keep_child_alive goes away. We can take advantage of this fact to
# monitor from the child and exit when the parent goes away unexpectedly
# (for example due to SIGKILL). This variable is otherwise unused except
# for the assignment here.
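        # (Annotation added for clarity, not original code.) A stripped-down sketch
        # of the same parent-death-detection idea, using hypothetical names:
        #   r, w = mp_context.Pipe(duplex=False)
        #   # in the child, r.recv() raises EOFError once every handle to `w` is
        #   # gone - i.e. when the parent dies and the kernel closes its write end.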
parent_alive_pipe, self._keep_child_alive = mp_context.Pipe(duplex=False)
self._process = mp_context.Process(
target=self._run,
name=name,
args=(
target,
args,
kwargs,
parent_alive_pipe,
self._keep_child_alive,
dask.config.global_config,
),
)
_dangling.add(self._process)
self._name = self._process.name
self._watch_q = PyQueue()
self._exit_future = Future()
self._exit_callback = None
self._closed = False
self._start_threads()
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self._name)
def _check_closed(self):
if self._closed:
raise ValueError("invalid operation on closed AsyncProcess")
def _start_threads(self):
self._watch_message_thread = threading.Thread(
target=self._watch_message_queue,
name="AsyncProcess %s watch message queue" % self.name,
args=(
weakref.ref(self),
self._process,
self._loop,
self._state,
self._watch_q,
self._exit_future,
),
)
self._watch_message_thread.daemon = True
self._watch_message_thread.start()
def stop_thread(q):
q.put_nowait({"op": "stop"})
# We don't join the thread here as a finalizer can be called
# asynchronously from anywhere
self._finalizer = weakref.finalize(self, stop_thread, q=self._watch_q)
self._finalizer.atexit = False
def _on_exit(self, exitcode):
# Called from the event loop when the child process exited
self._process = None
if self._exit_callback is not None:
self._exit_callback(self)
self._exit_future.set_result(exitcode)
@classmethod
def _immediate_exit_when_closed(cls, parent_alive_pipe):
"""
Immediately exit the process when parent_alive_pipe is closed.
"""
def monitor_parent():
try:
# The parent_alive_pipe should be held open as long as the
# parent is alive and wants us to stay alive. Nothing writes to
# it, so the read will block indefinitely.
parent_alive_pipe.recv()
except EOFError:
# Parent process went away unexpectedly. Exit immediately. Could
                # consider other exiting approaches here. My initial preference
# is to unconditionally and immediately exit. If we're in this
# state it is possible that a "clean" process exit won't work
# anyway - if, for example, the system is getting bogged down
# due to the running out of memory, exiting sooner rather than
# later might be needed to restore normal system function.
                # If this is inappropriate for your use case, please file a
# bug.
os._exit(-1)
else:
# If we get here, something odd is going on. File descriptors
# got crossed?
raise RuntimeError("unexpected state: should be unreachable")
t = threading.Thread(target=monitor_parent)
t.daemon = True
t.start()
@staticmethod
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
@classmethod
def _run(
cls, target, args, kwargs, parent_alive_pipe, _keep_child_alive, inherit_config
):
# On Python 2 with the fork method, we inherit the _keep_child_alive fd,
# whether it is passed or not. Therefore, pass it unconditionally and
# close it here, so that there are no other references to the pipe lying
# around.
cls.reset_logger_locks()
_keep_child_alive.close()
# Child process entry point
cls._immediate_exit_when_closed(parent_alive_pipe)
threading.current_thread().name = "MainThread"
        # Update the global config, giving priority to the existing global config
dask.config.update(dask.config.global_config, inherit_config, priority="old")
target(*args, **kwargs)
@classmethod
def _watch_message_queue(cls, selfref, process, loop, state, q, exit_future):
# As multiprocessing.Process is not thread-safe, we run all
# blocking operations from this single loop and ship results
# back to the caller when needed.
r = repr(selfref())
name = selfref().name
def _start():
process.start()
thread = threading.Thread(
target=AsyncProcess._watch_process,
name="AsyncProcess %s watch process join" % name,
args=(selfref, process, state, q),
)
thread.daemon = True
thread.start()
state.is_alive = True
state.pid = process.pid
logger.debug("[%s] created process with pid %r" % (r, state.pid))
while True:
msg = q.get()
logger.debug("[%s] got message %r" % (r, msg))
op = msg["op"]
if op == "start":
_call_and_set_future(loop, msg["future"], _start)
elif op == "terminate":
_call_and_set_future(loop, msg["future"], process.terminate)
elif op == "stop":
break
else:
assert 0, msg
@classmethod
def _watch_process(cls, selfref, process, state, q):
r = repr(selfref())
process.join()
exitcode = process.exitcode
assert exitcode is not None
logger.debug("[%s] process %r exited with code %r", r, state.pid, exitcode)
state.is_alive = False
state.exitcode = exitcode
# Make sure the process is removed from the global list
# (see _children in multiprocessing/process.py)
# Then notify the Process object
self = selfref() # only keep self alive when required
try:
if self is not None:
_loop_add_callback(self._loop, self._on_exit, exitcode)
finally:
self = None # lose reference
def start(self):
"""
Start the child process.
This method is a coroutine.
"""
self._check_closed()
fut = Future()
self._watch_q.put_nowait({"op": "start", "future": fut})
return fut
def terminate(self):
"""
Terminate the child process.
This method is a coroutine.
"""
self._check_closed()
fut = Future()
self._watch_q.put_nowait({"op": "terminate", "future": fut})
return fut
@gen.coroutine
def join(self, timeout=None):
"""
Wait for the child process to exit.
This method is a coroutine.
"""
self._check_closed()
assert self._state.pid is not None, "can only join a started process"
if self._state.exitcode is not None:
return
if timeout is None:
yield self._exit_future
else:
try:
yield gen.with_timeout(timedelta(seconds=timeout), self._exit_future)
except gen.TimeoutError:
pass
def close(self):
"""
Stop helper thread and release resources. This method returns
immediately and does not ensure the child process has exited.
"""
if not self._closed:
self._finalizer()
self._process = None
self._closed = True
def set_exit_callback(self, func):
"""
Set a function to be called by the event loop when the process exits.
The function is called with the AsyncProcess as sole argument.
The function may be a coroutine function.
"""
# XXX should this be a property instead?
assert callable(func), "exit callback should be callable"
assert (
self._state.pid is None
), "cannot set exit callback when process already started"
self._exit_callback = func
def is_alive(self):
return self._state.is_alive
@property
def pid(self):
return self._state.pid
@property
def exitcode(self):
return self._state.exitcode
@property
def name(self):
return self._name
@property
def daemon(self):
return self._process.daemon
@daemon.setter
def daemon(self, value):
self._process.daemon = value
_dangling = weakref.WeakSet()
@atexit.register
def _cleanup_dangling():
for proc in list(_dangling):
if proc.is_alive():
try:
logger.info("reaping stray process %s" % (proc,))
proc.terminate()
except OSError:
pass
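# --- Illustrative usage sketch (not part of the original module). It assumes a
# --- running Tornado IOLoop; the child function and the timeout are made up.
def _example_child():
    print("hello from the child process")

@gen.coroutine
def _example_usage():
    proc = AsyncProcess(target=_example_child)
    yield proc.start()           # start() returns a Future resolved off-thread
    yield proc.join(timeout=5)   # wait (up to ~5 s) for the child to exit
    print("example child exitcode:", proc.exitcode)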
| 32.868195 | 87 | 0.599163 |
8a68a18f841e6b934a86c07c0bb5a88d66297552 | 4,825 | py | Python | demos/incompressible_flow/lid_driven_cavity/run_unif.py | marc-nguessan/mrpy | 6fb0bce485234a45bb863f71bc2bdf0a22014de3 | ["BSD-3-Clause"] | 2 | 2020-01-06T10:48:44.000Z | 2020-01-09T20:07:08.000Z | demos/incompressible_flow/lid_driven_cavity/run_unif.py | marc-nguessan/mrpy | 6fb0bce485234a45bb863f71bc2bdf0a22014de3 | ["BSD-3-Clause"] | 1 | 2020-01-09T20:08:50.000Z | 2020-01-09T20:11:20.000Z | demos/incompressible_flow/lid_driven_cavity/run_unif.py | marc-nguessan/mrpy | 6fb0bce485234a45bb863f71bc2bdf0a22014de3 | ["BSD-3-Clause"] | null | null | null |
"""...
"""
import sys, petsc4py
petsc4py.init(sys.argv)
import petsc4py.PETSc as petsc
import mpi4py.MPI as mpi
import numpy as np
import math
import importlib
import config as cfg
from mrpy.mr_utils import mesh
from mrpy.mr_utils import op
import mrpy.discretization.temporal as td
import mrpy.discretization.spatial as sd
#===============================================================================
#===============================================================================
#========================== INITIALISATION =====================================
#===============================================================================
output_module = importlib.import_module(cfg.output_module_name)
writer = output_module.OutputWriter("lid_driven_cavity")
writer.initialize()
tree_velocity_x = mesh.create_new_tree(cfg.dimension, cfg.min_level,
cfg.max_level, cfg.stencil_graduation, cfg.stencil_prediction, cfg.xmin,
cfg.xmax, cfg.ymin, cfg.ymax, cfg.zmin, cfg.zmax)
tree_velocity_y = mesh.create_new_tree(cfg.dimension, cfg.min_level,
cfg.max_level, cfg.stencil_graduation, cfg.stencil_prediction, cfg.xmin,
cfg.xmax, cfg.ymin, cfg.ymax, cfg.zmin, cfg.zmax)
tree_pressure = mesh.create_new_tree(cfg.dimension, cfg.min_level,
cfg.max_level, cfg.stencil_graduation, cfg.stencil_prediction, cfg.xmin,
cfg.xmax, cfg.ymin, cfg.ymax, cfg.zmin, cfg.zmax)
tree_vorticity = mesh.create_new_tree(cfg.dimension, cfg.min_level,
cfg.max_level, cfg.stencil_graduation, cfg.stencil_prediction, cfg.xmin,
cfg.xmax, cfg.ymin, cfg.ymax, cfg.zmin, cfg.zmax)
tree_velocity_x.tag = "u"
tree_velocity_y.tag = "v"
tree_pressure.tag = "p"
tree_vorticity.tag = "omega"
tree_velocity_x.bc = cfg.bc_dict[tree_velocity_x.tag]
tree_velocity_y.bc = cfg.bc_dict[tree_velocity_y.tag]
tree_pressure.bc = cfg.bc_dict[tree_pressure.tag]
mesh.listing_of_leaves(tree_velocity_x, tree_velocity_y, tree_pressure, tree_vorticity)
print("trees creation done")
time_integrator = importlib.import_module(cfg.class_scheme_name)
time_integrator = time_integrator.Scheme(tree_velocity_x=tree_velocity_x,
tree_velocity_y=tree_velocity_y, tree_pressure=tree_pressure)
time_integrator.uniform = True
time_integrator.compute_initial_values(tree_velocity_x=tree_velocity_x,
tree_velocity_y=tree_velocity_y, tree_pressure=tree_pressure)
time_integrator.setup_internal_variables(tree_velocity_x, tree_velocity_y, tree_pressure)
time_integrator.make_operators(tree_velocity_x, tree_velocity_y, tree_pressure)
time_integrator.make_ksps()
time_integrator.make_vorticity_x(tree_velocity_y)
time_integrator.make_vorticity_y(tree_velocity_x)
print("time integrator initiation done")
nsp = petsc.NullSpace().create(constant=True)
#nsp = None
print("2D lid-driven cavity")
print(" Grid informations :")
print(" - dt = " + repr(cfg.dt))
print(" - dx = " + repr((cfg.xmax-cfg.xmin) / 2**(cfg.max_level)))
print(" - dy = " + repr((cfg.ymax-cfg.ymin) / 2**(cfg.max_level)))
print("")
#===============================================================================
#========================== COMPUTATION LOOP ===================================
#===============================================================================
t = cfg.t_ini
#Used for the printing of the solutions
t_print = 0.
v_x = sd.Scalar(tree_velocity_x)
v_y = sd.Scalar(tree_velocity_y)
p = sd.Scalar(tree_pressure)
vorticity = sd.Scalar(tree_vorticity)
for it in range(int(cfg.nt)):
t_previous = t
t = time_integrator.next_time(t)
print("t = " + repr(t))
print("")
time_integrator.advance(v_x=v_x, v_y=v_y, p=p,
t_ini=t_previous, nsp=nsp)
#============================= Printing solutions ==============================
if t >= t_print:
vorticity.sc = (sd.add_scalars(
time_integrator.vorticity_x.apply(v_y),
sd.mul_num_scalar(-1., time_integrator.vorticity_y.apply(v_x)))).sc.copy()
vorticity = time_integrator.velocity_inverse_mass.apply(vorticity)
time_integrator.scalar_to_tree(v_x, tree_velocity_x)
time_integrator.scalar_to_tree(v_y, tree_velocity_y)
time_integrator.scalar_to_tree(p, tree_pressure)
time_integrator.scalar_to_tree(vorticity, tree_vorticity)
writer.write([tree_velocity_x, tree_velocity_y, tree_pressure, tree_vorticity],
output_file_name="lid_driven_cavity_t_" + repr(it).zfill(5),
time=t)
t_print = t_print + cfg.dt_print
#===============================================================================
#============================ TERMINATION ======================================
#===============================================================================
writer.close()
| 37.403101 | 89 | 0.618653 |
b04130d14d7ced55d93254a84524e86f31414c18 | 1,652 | py | Python | src/transformers/models/transfo_xl/__init__.py | nhatminh46vn/transformers | 9972a0279f45d81b29e4bdc2a906436a783bc2f7 | ["Apache-2.0"] | 6 | 2021-11-03T05:10:35.000Z | 2022-03-25T20:28:06.000Z | src/transformers/models/transfo_xl/__init__.py | juand-r/transformers | 912f6881d2b69f180522172a5283702bd8c41d9c | ["Apache-2.0"] | null | null | null | src/transformers/models/transfo_xl/__init__.py | juand-r/transformers | 912f6881d2b69f180522172a5283702bd8c41d9c | ["Apache-2.0"] | 3 | 2021-09-19T08:20:42.000Z | 2022-02-19T16:32:40.000Z |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...file_utils import is_tf_available, is_torch_available
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
if is_torch_available():
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
if is_tf_available():
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
| 36.711111 | 95 | 0.757869 |
e261ae3fda29791b10e1f4d18707ecdc859ab12b | 2,986 | py | Python | misc/nms.py | Luciano233/OCR_Japanease | 055bdd0cc8e4d053dfb471cd642b1616ba0938d1 | ["MIT"] | 1 | 2021-03-15T02:57:21.000Z | 2021-03-15T02:57:21.000Z | misc/nms.py | Luciano233/OCR_Japanease | 055bdd0cc8e4d053dfb471cd642b1616ba0938d1 | ["MIT"] | null | null | null | misc/nms.py | Luciano233/OCR_Japanease | 055bdd0cc8e4d053dfb471cd642b1616ba0938d1 | ["MIT"] | null | null | null |
import numpy as np
# Greedy non-maximum suppression: keep the highest-scoring boxes and drop any later
# box whose overlap with an already kept box exceeds overlapThresh.
def non_max_suppression(boxes, overlapThresh=0.3):
if len(boxes) == 0:
return []
sorted_box = sorted(boxes, key=lambda x:x.score())[::-1]
ignore_flg = [False] * len(sorted_box)
for i in range(len(sorted_box)):
if not ignore_flg[i]:
for j in range(i+1,len(sorted_box),1):
r1 = sorted_box[i]
r2 = sorted_box[j]
if r1.x1 <= r2.x2 and r2.x1 <= r1.x2 and r1.y1<= r2.y2 and r2.y1 <= r1.y2:
w = max(0, min(r1.x2,r2.x2) - max(r1.x1,r2.x1))
h = max(0, min(r1.y2,r2.y2) - max(r1.y1,r2.y1))
if w * h > (r2.x2-r2.x1)*(r2.y2-r2.y1)*overlapThresh:
ignore_flg[j] = True
return [sorted_box[i] for i in range(len(sorted_box)) if not ignore_flg[i]]
# Group detected boxes into text lines: columns for vertical text, rows for horizontal.
def column_wordlines(bbox, overlapThresh=0.1, overlapThresh_line=0.6):
def _1dim_non_suppression(ranges, overlapThresh):
if len(ranges) == 0:
return []
ignore_flg = [False] * len(ranges)
for i in range(len(ranges)):
if not ignore_flg[i]:
for j in range(i+1,len(ranges),1):
r1 = ranges[i]
r2 = ranges[j]
w = max(0, min(r1[1],r2[1]) - max(r1[0],r2[0]))
if w > (r2[1]-r2[0])*overlapThresh:
ignore_flg[j] = True
return [ranges[i] for i in range(len(ranges)) if not ignore_flg[i]]
box_range_x = [(b.x1,b.x2) for b in bbox]
box_range_y = [(b.y1,b.y2) for b in bbox]
cols = _1dim_non_suppression(box_range_x, overlapThresh)
rows = _1dim_non_suppression(box_range_y, overlapThresh)
stocked_flg = [False] * len(bbox)
lines = []
    if len(cols) < len(rows): # vertical text
for c in cols:
stocks = []
for i in range(len(bbox)):
if not stocked_flg[i]:
if c[0] < bbox[i].x2 and c[1] > bbox[i].x1:
w = max(0, min(c[1],bbox[i].x2) - max(c[0],bbox[i].x1))
if w > (bbox[i].x2-bbox[i].x1)*overlapThresh_line:
stocks.append(bbox[i])
stocked_flg[i] = True
lines.append(sorted(stocks, key=lambda x:x.y1))
lines = sorted(lines, key=lambda x: np.mean([y.x1 for y in x]))
    else: # horizontal text
for r in rows:
stocks = []
for i in range(len(bbox)):
if not stocked_flg[i]:
if r[0] < bbox[i].y2 and r[1] > bbox[i].y1:
h = max(0, min(r[1],bbox[i].y2) - max(r[0],bbox[i].y1))
if h >= (bbox[i].y2-bbox[i].y1)*overlapThresh_line:
stocks.append(bbox[i])
stocked_flg[i] = True
lines.append(sorted(stocks, key=lambda x:x.x1))
lines = sorted(lines, key=lambda x: np.mean([y.y1 for y in x]))
return lines
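# --- Illustrative only (not in the original file): the minimal box interface that
# --- non_max_suppression() expects - x1/y1/x2/y2 attributes plus a score() method.
# --- The DetBox class and the coordinates below are hypothetical.
class DetBox:
    def __init__(self, x1, y1, x2, y2, s):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
        self._s = s
    def score(self):
        return self._s

if __name__ == '__main__':
    boxes = [DetBox(0, 0, 10, 10, 0.9), DetBox(1, 1, 11, 11, 0.8), DetBox(50, 50, 60, 60, 0.7)]
    kept = non_max_suppression(boxes, overlapThresh=0.3)
    print(len(kept))  # the heavily-overlapping second box is suppressed -> 2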
| 41.472222 | 90 | 0.495311 |
f402c6ad026f4f5a8307796a183c392a47eed0ac | 694 | py | Python | meetings/models.py | mauricejulesm/meeting_plannerApp-Django3.0 | 4e99893ea1ce98d983d2f6d19dd621d7ea56a808 | ["MIT"] | null | null | null | meetings/models.py | mauricejulesm/meeting_plannerApp-Django3.0 | 4e99893ea1ce98d983d2f6d19dd621d7ea56a808 | ["MIT"] | null | null | null | meetings/models.py | mauricejulesm/meeting_plannerApp-Django3.0 | 4e99893ea1ce98d983d2f6d19dd621d7ea56a808 | ["MIT"] | null | null | null |
from datetime import time
from django.db import models
# Create your models here.
class Room(models.Model):
room_name = models.CharField(max_length=100)
room_number = models.IntegerField()
floor_number = models.IntegerField()
def __str__(self):
return f"{self.room_name} on {self.floor_number} floor"
class Meeting(models.Model):
title = models.CharField(max_length=200)
date = models.DateField()
start_time = models.TimeField(default=time(9))
duration = models.IntegerField(default=1)
room = models.ForeignKey(Room, on_delete=models.CASCADE)
def __str__(self):
return f"{self.title} at {self.start_time} in {self.room.room_name}"
| 27.76 | 76 | 0.714697 |
79a38f3502166233f1c08c6393e69c6b4727bda4 | 7,568 | py | Python | litex_boards/targets/versa_ecp5.py | jersey99/litex-boards | 98c80f0b2b07b8c7807105ec3d7d0c0896b80cac | ["BSD-2-Clause"] | 1 | 2021-05-29T21:57:17.000Z | 2021-05-29T21:57:17.000Z | litex_boards/targets/versa_ecp5.py | jersey99/litex-boards | 98c80f0b2b07b8c7807105ec3d7d0c0896b80cac | ["BSD-2-Clause"] | null | null | null | litex_boards/targets/versa_ecp5.py | jersey99/litex-boards | 98c80f0b2b07b8c7807105ec3d7d0c0896b80cac | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2018-2019 David Shah <dave@ds0.me>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import versa_ecp5
from litex.build.lattice.trellis import trellis_args, trellis_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MT41K64M16
from litedram.phy import ECP5DDRPHY
from liteeth.phy.ecp5rgmii import LiteEthPHYRGMII
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_init = ClockDomain()
self.clock_domains.cd_por = ClockDomain(reset_less=True)
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys2x = ClockDomain()
self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)
# # #
self.stop = Signal()
self.reset = Signal()
# Clk / Rst
clk100 = platform.request("clk100")
rst_n = platform.request("rst_n")
# Power on reset
por_count = Signal(16, reset=2**16-1)
por_done = Signal()
self.comb += self.cd_por.clk.eq(clk100)
self.comb += por_done.eq(por_count == 0)
self.sync.por += If(~por_done, por_count.eq(por_count - 1))
# PLL
self.submodules.pll = pll = ECP5PLL()
self.comb += pll.reset.eq(~por_done | ~rst_n | self.rst)
pll.register_clkin(clk100, 100e6)
pll.create_clkout(self.cd_sys2x_i, 2*sys_clk_freq)
pll.create_clkout(self.cd_init, 25e6)
self.specials += [
Instance("ECLKSYNCB",
i_ECLKI = self.cd_sys2x_i.clk,
i_STOP = self.stop,
o_ECLKO = self.cd_sys2x.clk),
Instance("CLKDIVF",
p_DIV = "2.0",
i_ALIGNWD = 0,
i_CLKI = self.cd_sys2x.clk,
i_RST = self.reset,
o_CDIVX = self.cd_sys.clk),
AsyncResetSynchronizer(self.cd_sys, ~pll.locked | self.reset),
AsyncResetSynchronizer(self.cd_sys2x, ~pll.locked | self.reset),
]
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(75e6), device="LFE5UM5G", with_ethernet=False, with_etherbone=False, eth_ip="192.168.1.50", eth_phy=0, toolchain="trellis", **kwargs):
platform = versa_ecp5.Platform(toolchain=toolchain, device=device)
# FIXME: adapt integrated rom size for Microwatt
if kwargs.get("cpu_type", None) == "microwatt":
kwargs["integrated_rom_size"] = 0xb000 if with_ethernet else 0x9000
        # SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Versa ECP5",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = ECP5DDRPHY(
platform.request("ddram"),
sys_clk_freq=sys_clk_freq)
self.add_csr("ddrphy")
self.comb += self.crg.stop.eq(self.ddrphy.init.stop)
self.comb += self.crg.reset.eq(self.ddrphy.init.reset)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT41K64M16(sys_clk_freq, "1:2"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x40000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# Ethernet / Etherbone ---------------------------------------------------------------------
if with_ethernet or with_etherbone:
self.submodules.ethphy = LiteEthPHYRGMII(
clock_pads = self.platform.request("eth_clocks", eth_phy),
pads = self.platform.request("eth", eth_phy))
self.add_csr("ethphy")
if with_ethernet:
self.add_ethernet(phy=self.ethphy)
if with_etherbone:
self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)
# Leds -------------------------------------------------------------------------------------
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
self.add_csr("leds")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Versa ECP5")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--toolchain", default="trellis", help="FPGA toolchain: trellis (default) or diamond")
parser.add_argument("--sys-clk-freq", default=75e6, help="System clock frequency (default: 75MHz)")
parser.add_argument("--device", default="LFE5UM5G", help="FPGA device (LFE5UM5G (default) or LFE5UM)")
ethopts = parser.add_mutually_exclusive_group()
ethopts.add_argument("--with-ethernet", action="store_true", help="Enable Ethernet support")
ethopts.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support")
parser.add_argument("--eth-ip", default="192.168.1.50", type=str, help="Ethernet/Etherbone IP address")
parser.add_argument("--eth-phy", default=0, type=int, help="Ethernet PHY: 0 (default) or 1")
builder_args(parser)
soc_sdram_args(parser)
trellis_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
device = args.device,
with_ethernet = args.with_ethernet,
with_etherbone = args.with_etherbone,
eth_ip = args.eth_ip,
eth_phy = args.eth_phy,
toolchain = args.toolchain,
**soc_sdram_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder_kargs = trellis_argdict(args) if args.toolchain == "trellis" else {}
builder.build(**builder_kargs, run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".svf"))
if __name__ == "__main__":
main()
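# --- Illustrative only: typical invocations of this target, using the flags that
# --- main() defines above (exact values depend on the local toolchain and board).
#   ./versa_ecp5.py --build
#   ./versa_ecp5.py --build --device LFE5UM --sys-clk-freq 60e6 --with-ethernet
#   ./versa_ecp5.py --load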
| 44 | 174 | 0.552061 |
7954d73f446c343c4c0fbf1e55bf7095f3d52925 | 5,588 | py | Python | ALGO_BreadthFirstSearch.py | divergent63/DataStructureAlgorithms | 1e378a31beb6ac1b333b560f0bfe2a3daeef7be2 | ["Apache-2.0"] | null | null | null | ALGO_BreadthFirstSearch.py | divergent63/DataStructureAlgorithms | 1e378a31beb6ac1b333b560f0bfe2a3daeef7be2 | ["Apache-2.0"] | null | null | null | ALGO_BreadthFirstSearch.py | divergent63/DataStructureAlgorithms | 1e378a31beb6ac1b333b560f0bfe2a3daeef7be2 | ["Apache-2.0"] | null | null | null |
# -*- coding:utf-8 -*-
import math
import DST_Graph
import DST_Queue
# Word ladder problem: connect words that differ by exactly one letter.
def BuildWordGraph(file):
with open(file) as f:
word_data = f.readlines()
buckets = {}
for line in word_data:
for i in range(len(line)-1):
bucket = line[:i] + '_' + line[i+1:]
if bucket not in buckets.keys():
buckets[bucket] = []
buckets[bucket].append(line[:-1])
else:
buckets[bucket].append(line[:-1])
word_graph = DST_Graph.Graph()
for word_key in buckets.keys():
for word1 in buckets[word_key]:
if word1 not in word_graph:
word_graph.AddNode(word1)
for word2 in buckets[word_key]:
if word2 not in word_graph:
word_graph.AddNode(word2)
if word1 != word2:
word_graph.AddEdge(word1, word2, weight=None)
return word_graph
def BreadthFirstSearch(word_graph, word_item):
# TODO: Wrong Answer
q = DST_Queue.queue_test()
# for idx, word_item in enumerate(word_graph):
q.push(word_item)
word_item.State = 0
word_item.Dis = 0
word_item.Pre = None
# SetState(word_item, 0)
# SetDistance(word_item, 0)
# SetPre(word_item, None)
while q.__sizeof__() > 0:
word_item.State = 1
next_node_all = q.pop().GetAdjIDs()
        for qnext_item in list(next_node_all): # get all adjacent nodes
word_graph.GetNode(qnext_item).State = 0
# SetState(qnext_item, 0)
        for qnext_item in list(next_node_all): # get all adjacent nodes
if word_graph.GetNode(qnext_item).State == 0: # 邻节点未探索
q.push(word_graph.GetNode(qnext_item))
word_graph.GetNode(qnext_item).State = 1
word_graph.GetNode(qnext_item).Dis += 1
word_graph.GetNode(qnext_item).Pre = word_item
# SetState(qnext_item, 1)
                # SetDistance(qnext_item, idx+1) # increase the distance by 1
                # SetPre(qnext_item, word_item) # set word_item as the predecessor of qnext_item
word_item = word_graph.GetNode(qnext_item)
word_item.State = 2
return word_graph
def TransversePath(node):
path_lst = [node.GetVID()]
while node.Pre is not None:
path_lst.append(node.Pre.GetVID())
node = node.Pre
return path_lst
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    # Returns a 2-D list, e.g. [[1,2],[4,5]]
def _DropNone(self, lst):
StayIdx = []
for i in range(len(lst)):
if lst[i] is not None:
StayIdx.append(i)
return [lst[StayIdx[i]] for i in range(len(StayIdx))]
def Print(self, pRoot):
# write code here
if pRoot is None:
return []
CurrentNode = pRoot
CurrentNodeIniDepth = [pRoot]
PrintLst = [CurrentNode.val]
results = []
NextDepthLst = []
# PrintLst.append([CurrentNode.key, CurrentNode.val])
while True:
for i in range(len(CurrentNodeIniDepth)):
if CurrentNodeIniDepth.count(None) == len(CurrentNodeIniDepth):
PrintLstWihtLines = []
for i in range(int(math.log2(len(PrintLst)))):
PrintLstInLine = self._DropNone(PrintLst[int(2**(i)-1):int(2**(i+1))-1])
PrintLstWihtLines.append(PrintLstInLine)
print(PrintLstWihtLines)
return PrintLstWihtLines
if CurrentNodeIniDepth[i]:
PrintLst.append(CurrentNodeIniDepth[i].left.val) if CurrentNodeIniDepth[
i].left is not None else PrintLst.append(
None)
PrintLst.append(CurrentNodeIniDepth[i].right.val) if CurrentNodeIniDepth[
i].right is not None else PrintLst.append(
None)
else:
PrintLst.append(None)
PrintLst.append(None)
# results.append(PrintLst)
for i in range(len(CurrentNodeIniDepth)):
if CurrentNodeIniDepth[i]:
NextDepthLst.append(CurrentNodeIniDepth[i].left) if CurrentNodeIniDepth[
i].left is not None else NextDepthLst.append(
None)
NextDepthLst.append(CurrentNodeIniDepth[i].right) if CurrentNodeIniDepth[
i].right is not None else NextDepthLst.append(
None)
else:
NextDepthLst.append(None)
NextDepthLst.append(None)
CurrentNodeIniDepth = NextDepthLst
NextDepthLst = []
if __name__ == '__main__':
# word_graph = BuildWordGraph('./datasets/fourletterwords.txt')
# BreadthFirstSearch(word_graph, word_graph.GetNode('ABOS'))
# print(TransversePath(word_graph.GetNode('ACID')))
# {8,6,10,5,7,9,11}
pRoot = TreeNode(8)
pRoot.left = TreeNode(6)
pRoot.right = TreeNode(10)
pRoot.left.left = TreeNode(5)
pRoot.left.right = TreeNode(7)
pRoot.right.left = TreeNode(9)
pRoot.right.right = TreeNode(11)
s = Solution()
s.Print(pRoot)
print()
| 34.073171 | 123 | 0.535612 |
a3f7a10ac372d192bc4af3cde260133fcf26f733 | 3,715 | py | Python | webdriver/tests/print/user_prompts.py | spao234/wpt | 4b9447991bcb28f37b45532caf7f8e8747f9ad41 | ["BSD-3-Clause"] | 1 | 2021-12-19T09:30:55.000Z | 2021-12-19T09:30:55.000Z | webdriver/tests/print/user_prompts.py | spao234/wpt | 4b9447991bcb28f37b45532caf7f8e8747f9ad41 | ["BSD-3-Clause"] | 6 | 2021-03-31T20:00:14.000Z | 2022-03-12T00:50:17.000Z | webdriver/tests/print/user_prompts.py | spao234/wpt | 4b9447991bcb28f37b45532caf7f8e8747f9ad41 | ["BSD-3-Clause"] | 1 | 2020-11-09T05:05:06.000Z | 2020-11-09T05:05:06.000Z |
# META: timeout=long
import base64
import pytest
from six import ensure_binary
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
from tests.support.inline import inline
from .printcmd import do_print, assert_pdf
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = do_print(session, {})
value = assert_success(response)
pdf = base64.decodestring(ensure_binary(value))
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert_pdf(pdf)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = do_print(session, {})
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = do_print(session, {})
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| 32.304348 | 90 | 0.756662 |
21035f14cc1c5d4a4de4b46b58c5d88b1cf13a2c | 3,826 | py | Python | 1dim_gaussian/model.py | MasazI/gan_basic | 37e23e1799616bafa18527aeffc1d3c8e7c5f2ef | ["MIT"] | 7 | 2017-04-03T14:08:52.000Z | 2021-03-15T02:55:04.000Z | 1dim_gaussian/model.py | MasazI/gan_basic | 37e23e1799616bafa18527aeffc1d3c8e7c5f2ef | ["MIT"] | 2 | 2017-06-06T21:31:20.000Z | 2017-10-15T07:40:18.000Z | 1dim_gaussian/model.py | MasazI/gan_basic | 37e23e1799616bafa18527aeffc1d3c8e7c5f2ef | ["MIT"] | 5 | 2017-10-15T09:27:53.000Z | 2021-02-26T06:53:42.000Z |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import model_part
class Discriminator():
def __init__(self, hidden_layer_dim, output_dim):
self.hidden_layer_dim = hidden_layer_dim
self.hidden_layer_dim2 = hidden_layer_dim - 1
self.output_dim = output_dim
def tiny_mlp(self, x):
fc1, w1, b1 = model_part.fc('fc1', x, [x.get_shape()[1], self.hidden_layer_dim], [self.hidden_layer_dim])
logits, w2, b2 = model_part.fc('fc2', fc1, [self.hidden_layer_dim, self.output_dim], [self.output_dim])
return logits, [w1, b1, w2, b2]
def mlp(self, x, reuse=False):
fc1, w1, b1 = model_part.fc('fc1', x, [x.get_shape()[1], self.hidden_layer_dim], [self.hidden_layer_dim], reuse=reuse)
fc2, w2, b2 = model_part.fc('fc2', fc1, [self.hidden_layer_dim, self.hidden_layer_dim2], [self.hidden_layer_dim2], reuse=reuse)
logits, w3, b3 = model_part.fc('fc3', fc2, [self.hidden_layer_dim2, self.output_dim], [self.output_dim], reuse=reuse)
return logits, [w1, b1, w2, b2, w3, b3]
def mlp_org(self, x):
# construct learnable parameters within local scope
w1 = tf.get_variable("w0", [x.get_shape()[1], 6], initializer=tf.random_normal_initializer())
b1 = tf.get_variable("b0", [6], initializer=tf.constant_initializer(0.0))
w2 = tf.get_variable("w1", [6, 5], initializer=tf.random_normal_initializer())
b2 = tf.get_variable("b1", [5], initializer=tf.constant_initializer(0.0))
w3 = tf.get_variable("w2", [5, self.output_dim], initializer=tf.random_normal_initializer())
b3 = tf.get_variable("b2", [self.output_dim], initializer=tf.constant_initializer(0.0))
# nn operators
fc1 = tf.nn.tanh(tf.matmul(x, w1) + b1)
fc2 = tf.nn.tanh(tf.matmul(fc1, w2) + b2)
fc3 = tf.nn.tanh(tf.matmul(fc2, w3) + b3)
return fc3, [w1, b1, w2, b2, w3, b3]
class Generator():
def __init__(self, hidden_layer_dim, output_dim):
self.hidden_layer_dim = hidden_layer_dim
self.hidden_layer_dim2 = hidden_layer_dim - 1
self.output_dim = output_dim
def tiny_mlp(self, x):
fc1, w1, b1 = model_part.fc('fc1', x, [x.get_shape()[1], self.hidden_layer_dim], [self.hidden_layer_dim])
logits, w2, b2 = model_part.fc('fc2', fc1, [self.hidden_layer_dim, self.output_dim], [self.output_dim])
return logits, [w1, b1, w2, b2]
def mlp(self, x, reuse=False):
fc1, w1, b1 = model_part.fc('fc1', x, [x.get_shape()[1], self.hidden_layer_dim], [self.hidden_layer_dim], reuse=reuse)
fc2, w2, b2 = model_part.fc('fc2', fc1, [self.hidden_layer_dim, self.hidden_layer_dim2], [self.hidden_layer_dim2], reuse=reuse)
logits, w3, b3 = model_part.fc('fc3', fc2, [self.hidden_layer_dim2, self.output_dim], [self.output_dim], reuse=reuse)
return logits, [w1, b1, w2, b2, w3, b3]
def mlp_org(self, x):
# construct learnable parameters within local scope
w1 = tf.get_variable("w0", [x.get_shape()[1], 6], initializer=tf.random_normal_initializer())
b1 = tf.get_variable("b0", [6], initializer=tf.constant_initializer(0.0))
w2 = tf.get_variable("w1", [6, 5], initializer=tf.random_normal_initializer())
b2 = tf.get_variable("b1", [5], initializer=tf.constant_initializer(0.0))
w3 = tf.get_variable("w2", [5, self.output_dim], initializer=tf.random_normal_initializer())
b3 = tf.get_variable("b2", [self.output_dim], initializer=tf.constant_initializer(0.0))
# nn operators
fc1 = tf.nn.tanh(tf.matmul(x, w1) + b1)
fc2 = tf.nn.tanh(tf.matmul(fc1, w2) + b2)
fc3 = tf.nn.tanh(tf.matmul(fc2, w3) + b3)
return fc3, [w1, b1, w2, b2, w3, b3]
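# --- Illustrative only (not in the original file): a rough TF1-style sketch of how
# --- the two classes might be wired together; model_part.fc's internals are not
# --- shown in this file, so this is indicative rather than tested code.
#   z = tf.placeholder(tf.float32, shape=[None, 1])   # latent sample fed to G
#   x = tf.placeholder(tf.float32, shape=[None, 1])   # real data sample fed to D
#   with tf.variable_scope("gen"):
#       g_out, g_vars = Generator(6, 1).mlp(z)
#   with tf.variable_scope("disc"):
#       d_real, d_vars = Discriminator(6, 1).mlp(x)
#   with tf.variable_scope("disc", reuse=True):
#       d_fake, _ = Discriminator(6, 1).mlp(g_out, reuse=True)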
| 53.887324 | 135 | 0.65839 |
11cdf217f5c9984858725334cd8aae25bbfe199b | 341 | py | Python | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/features.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | ["MIT"] | null | null | null | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/features.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | ["MIT"] | null | null | null | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/features.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | ["MIT"] | null | null | null |
from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql_psycopg2.features import \
DatabaseFeatures as Psycopg2DatabaseFeatures
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_3d_functions = True
supports_left_right_lookups = True
| 37.888889 | 77 | 0.826979 |
1be32bf821512c9f06894132200385e966b30103 | 4,621 | py | Python | PythonClient/computer_vision/qzc_cv_capture.py | aqiugroup/AirSim | b8ea885963d38f15a3176f9f0b4a4473ba21c4a6 | ["MIT"] | null | null | null | PythonClient/computer_vision/qzc_cv_capture.py | aqiugroup/AirSim | b8ea885963d38f15a3176f9f0b4a4473ba21c4a6 | ["MIT"] | null | null | null | PythonClient/computer_vision/qzc_cv_capture.py | aqiugroup/AirSim | b8ea885963d38f15a3176f9f0b4a4473ba21c4a6 | ["MIT"] | null | null | null |
# In settings.json first activate computer vision mode:
# https://github.com/Microsoft/AirSim/blob/master/docs/image_apis.md#computer-vision-mode
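# A hedged example of what that settings.json might look like for this script
# (illustrative values; see the linked doc for the authoritative format):
#   {
#       "SettingsVersion": 1.2,
#       "SimMode": "ComputerVision"
#   }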
import setup_path
import airsim
import pprint
import tempfile
import os
import sys
import time
file_path = "/Users/aqiu/Documents/AirSim/2022-03-07-02-02-06/airsim_rec.txt" # sys.argv[1]
cur_dir = file_path[:file_path.rfind(os.path.sep)] + os.path.sep
print(cur_dir)
pp = pprint.PrettyPrinter(indent=4)
client = airsim.VehicleClient()
client.confirmConnection()
# airsim.wait_key('Press any key to get camera parameters')
for camera_id in range(2):
camera_info = client.simGetCameraInfo(str(camera_id))
print("CameraInfo %d: %s" % (camera_id, pp.pprint(camera_info)))
tmp_dir = os.path.join(cur_dir, "airsim_drone")
print ("Saving images to %s" % tmp_dir)
try:
for n in range(3):
os.makedirs(os.path.join(tmp_dir, str(n)))
except OSError:
if not os.path.isdir(tmp_dir):
raise
fin = open(file_path, "r")
line = fin.readline().strip()
line = fin.readline().strip()
while (line):
parts = line.split("\t")
timestamp = parts[1] # ms
# timestamp = float(parts[1]) / 1000.0 # s
pos_x = float(parts[2])
    pos_y = float(parts[3])
pos_z = float(parts[4])
quat_w = float(parts[5])
quat_x = float(parts[6])
quat_y = float(parts[7])
quat_z = float(parts[8])
client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(pos_x, pos_y, pos_z), airsim.Quaternionr(quat_x,quat_y,quat_z,quat_w)), True)
time.sleep(0.1)
responses = client.simGetImages([
airsim.ImageRequest("0", airsim.ImageType.Scene),
# airsim.ImageRequest("0", airsim.ImageType.DepthPerspective, True, False),
airsim.ImageRequest("0", airsim.ImageType.DepthVis, True, False),
airsim.ImageRequest("0", airsim.ImageType.Segmentation)])
for i, response in enumerate(responses):
if response.pixels_as_float:
print("Type %d, size %d, pos %s" % (response.image_type, len(response.image_data_float), pprint.pformat(response.camera_position)))
airsim.write_pfm(os.path.normpath(os.path.join(tmp_dir, str(i), str(timestamp) + "_" + str(i) + '.pfm')), airsim.get_pfm_array(response))
else:
print("Type %d, size %d, pos %s" % (response.image_type, len(response.image_data_uint8), pprint.pformat(response.camera_position)))
airsim.write_file(os.path.normpath(os.path.join(tmp_dir, str(i), str(timestamp) + "_" + str(i) + '.png')), response.image_data_uint8)
pose = client.simGetVehiclePose()
pp.pprint(pose)
time.sleep(3)
line = fin.readline().strip()
# pp = pprint.PrettyPrinter(indent=4)
# client = airsim.VehicleClient()
# airsim.wait_key('Press any key to get camera parameters')
# for camera_id in range(2):
# for camera_id in range(2):
# camera_info = client.simGetCameraInfo(str(camera_id))
# print("CameraInfo %d: %s" % (camera_id, pp.pprint(camera_info)))
# airsim.wait_key('Press any key to get images')
# tmp_dir = os.path.join(tempfile.gettempdir(), "airsim_drone")
# print ("Saving images to %s" % tmp_dir)
# try:
# for n in range(3):
# os.makedirs(os.path.join(tmp_dir, str(n)))
# except OSError:
# if not os.path.isdir(tmp_dir):
# raise
# for x in range(50): # do few times
# #xn = 1 + x*5 # some random number
# client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(x, 0, -2), airsim.to_quaternion(0, 0, 0)), True)
# time.sleep(0.1)
# responses = client.simGetImages([
# airsim.ImageRequest("0", airsim.ImageType.Scene),
# airsim.ImageRequest("1", airsim.ImageType.Scene),
# airsim.ImageRequest("2", airsim.ImageType.Scene)])
# for i, response in enumerate(responses):
# if response.pixels_as_float:
# print("Type %d, size %d, pos %s" % (response.image_type, len(response.image_data_float), pprint.pformat(response.camera_position)))
# airsim.write_pfm(os.path.normpath(os.path.join(tmp_dir, str(x) + "_" + str(i) + '.pfm')), airsim.get_pfm_array(response))
# else:
# print("Type %d, size %d, pos %s" % (response.image_type, len(response.image_data_uint8), pprint.pformat(response.camera_position)))
# airsim.write_file(os.path.normpath(os.path.join(tmp_dir, str(i), str(x) + "_" + str(i) + '.png')), response.image_data_uint8)
# pose = client.simGetVehiclePose()
# pp.pprint(pose)
# time.sleep(3)
# currently reset() doesn't work in CV mode. Below is the workaround
client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(0, 0, 0), airsim.to_quaternion(0, 0, 0)), True)
| 37.877049
| 150
| 0.675611
|
e85c6e06858444be1865a9e74a2be9c94a7e00f2
| 46
|
py
|
Python
|
salon/api/serializers/__init__.py
|
amirRamirfatahi/beautstertest
|
53f6e515903fd11992fd1eb70760318b17896437
|
[
"MIT"
] | null | null | null |
salon/api/serializers/__init__.py
|
amirRamirfatahi/beautstertest
|
53f6e515903fd11992fd1eb70760318b17896437
|
[
"MIT"
] | 5
|
2021-03-30T13:08:19.000Z
|
2021-09-22T18:54:13.000Z
|
salon/api/serializers/__init__.py
|
amirRamirfatahi/beautstertest
|
53f6e515903fd11992fd1eb70760318b17896437
|
[
"MIT"
] | null | null | null |
from .salon_serializer import SalonSerializer
| 23
| 45
| 0.891304
|
6f62a23b1d438b27fecc10ff692c7af3351ff007
| 7,019
|
py
|
Python
|
plaid/model/credit_filter.py
|
hboshnak/plaid-python
|
69f0879b01ce7119d220a2a2b6e2f48570df609f
|
[
"MIT"
] | null | null | null |
plaid/model/credit_filter.py
|
hboshnak/plaid-python
|
69f0879b01ce7119d220a2a2b6e2f48570df609f
|
[
"MIT"
] | null | null | null |
plaid/model/credit_filter.py
|
hboshnak/plaid-python
|
69f0879b01ce7119d220a2a2b6e2f48570df609f
|
[
"MIT"
] | null | null | null |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.credit_account_subtypes import CreditAccountSubtypes
globals()['CreditAccountSubtypes'] = CreditAccountSubtypes
class CreditFilter(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account_subtypes': (CreditAccountSubtypes,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_subtypes': 'account_subtypes', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, account_subtypes, *args, **kwargs): # noqa: E501
"""CreditFilter - a model defined in OpenAPI
Args:
account_subtypes (CreditAccountSubtypes):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.account_subtypes = account_subtypes
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 38.994444
| 110
| 0.591395
|
fe7e6dd9aa21ae646052be9ca790a47424be55ec
| 3,973
|
py
|
Python
|
alipay/aop/api/request/AlipayOpenIotmbsRoomstateSyncRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayOpenIotmbsRoomstateSyncRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/AlipayOpenIotmbsRoomstateSyncRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenIotmbsRoomstateSyncModel import AlipayOpenIotmbsRoomstateSyncModel
class AlipayOpenIotmbsRoomstateSyncRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenIotmbsRoomstateSyncModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenIotmbsRoomstateSyncModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.iotmbs.roomstate.sync'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.4
| 148
| 0.644853
|
d1d221f085d4138ecedd2364200f61ba4713d47a
| 2,685
|
py
|
Python
|
docker_hoernchen/flask/gsheets.py
|
sroertgen/oerhoernchen20_docker
|
b7cd697b62f2865b24879b2ccbe8dcd65c512fb6
|
[
"CC0-1.0"
] | 2
|
2019-12-14T16:33:41.000Z
|
2020-01-14T09:34:58.000Z
|
docker_hoernchen/flask/gsheets.py
|
sroertgen/oerhoernchen20_docker
|
b7cd697b62f2865b24879b2ccbe8dcd65c512fb6
|
[
"CC0-1.0"
] | 6
|
2020-02-27T11:22:53.000Z
|
2022-02-13T02:27:14.000Z
|
docker_hoernchen/flask/gsheets.py
|
sroertgen/oerhoernchen20_docker
|
b7cd697b62f2865b24879b2ccbe8dcd65c512fb6
|
[
"CC0-1.0"
] | 2
|
2020-03-25T10:37:51.000Z
|
2020-04-22T09:41:59.000Z
|
from flask_restful import Resource, reqparse
import uuid
import requests
import json
import pprint
from elasticsearch import Elasticsearch
# Link to sheet: https://spreadsheets.google.com/feeds/list/1kntJWO9iP6rL6WFqKXNsINoa923LjoDfEz38_NA4-ao/od6/public/values?alt=json
# Setup gsheet link
gsheet_link = 'https://spreadsheets.google.com/feeds/list/1kntJWO9iP6rL6WFqKXNsINoa923LjoDfEz38_NA4-ao/od6/public/values?alt=json'
# setup elasticsearch
es = Elasticsearch(
['http://elasticsearch:9200'],
http_auth=('elastic', 'changethisinproduction'),
scheme="http",
port=80)
class Gsheet(Resource):
@classmethod
def get_gsheet(cls):
r = requests.get(gsheet_link)
return r.json()
@classmethod
def post_data_to_es(cls):
new_entries = []
data = cls.get_gsheet()
for item in data['feed']['entry']:
pprint.pprint(item)
entry = {}
            entry['id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, item['gsx$url']['$t']))  # str() so the id stays JSON/URL safe
entry['name'] = item['gsx$titel']['$t']
entry['about'] = item['gsx$beschreibung']['$t']
entry["author"] = ""
entry["publisher"] = ""
entry['inLanguage'] = item['gsx$sprache']['$t'].split(', ')
entry["accessibilityAPI"] = ""
entry["accessibilityControl"] = ""
entry["accessibilityFeature"] = ""
entry["accessibilityHazard"] = ""
entry['license'] = item['gsx$lizenz-urloptional']['$t']
entry["timeRequired"] = ""
entry["educationalRole"] = ""
entry["alignmentType"] = ""
entry['educationalFramework'] = item['gsx$fachgebiet']['$t'].split(', ')
entry["targetDescription"] = ""
entry["targetName"] = ""
entry["targetURL"] = ""
entry["educationalUse"] = ""
entry["typicalAgeRange"] = ""
entry["interactivityType"] = ""
entry['learningResourceType'] = item['gsx$art']['$t'].split(', ')
entry['date_published'] = item['gsx$jahroptional']['$t']
entry['url'] = item['gsx$url']['$t']
entry["thumbnail"] = ""
entry["tags"] = ""
entry["project"] = ""
entry["source"] = "GSheets"
entry["spider"] = ""
entry["date_scraped"] = ""
entry['tags'] = item['gsx$tags']['$t']
new_entries.append(entry)
# TODO improve deletion
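        # Current approach: drop the whole "gsheets" index and re-index every entry
        # below. Simple for a small sheet, but not atomic; a failure mid-way leaves
        # the index partially populated.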
es.indices.delete(index="gsheets", ignore_unavailable="true")
for item in new_entries:
res = es.index(index="gsheets", id=item['id'], body=item)
print(res)
def get(self):
try:
self.post_data_to_es()
return {'message': 'updated gsheets entries in es index'}
        except Exception:
return {'message': 'there was an error!'}, 500
| 32.743902
| 131
| 0.603724
|
ea2aeacc64707fc64db8e483b4cc667b5b213b8b
| 469
|
py
|
Python
|
SmartBadge/timedata.py
|
SmartBadge/SmartBadge
|
7bddc1ec230bcf5fa6185999b0b0c0e448528629
|
[
"MIT"
] | null | null | null |
SmartBadge/timedata.py
|
SmartBadge/SmartBadge
|
7bddc1ec230bcf5fa6185999b0b0c0e448528629
|
[
"MIT"
] | null | null | null |
SmartBadge/timedata.py
|
SmartBadge/SmartBadge
|
7bddc1ec230bcf5fa6185999b0b0c0e448528629
|
[
"MIT"
] | null | null | null |
import ujson
class TimeData(object):
def __init__(self, filename, subject, slot):
        with open(filename) as f:
            timedata = ujson.load(f)
self.title = timedata[subject]['class' + str(slot)]['title']
self.time = timedata[subject]['class' + str(slot)]['time']
self.location = timedata[subject]['class' + str(slot)]['location']
def get_time_data(self):
return "{} \"{}\" {}".format(self.title, self.time, self.location)
| 24.684211
| 74
| 0.603412
|
82e6c1b6afa5608d7f213c902116afec72f6181e
| 3,417
|
py
|
Python
|
homeassistant/components/mobile_app/helpers.py
|
zemerick1/home-assistant-1
|
2d2abc783151ea7a876c6ffd0cbc9e5062834c9a
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/mobile_app/helpers.py
|
zemerick1/home-assistant-1
|
2d2abc783151ea7a876c6ffd0cbc9e5062834c9a
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/mobile_app/helpers.py
|
zemerick1/home-assistant-1
|
2d2abc783151ea7a876c6ffd0cbc9e5062834c9a
|
[
"Apache-2.0"
] | null | null | null |
"""Helpers for mobile_app."""
import logging
import json
from typing import Callable, Dict, Tuple
from aiohttp.web import Response
from homeassistant.core import Context
from homeassistant.helpers.typing import HomeAssistantType
from .const import (ATTR_APP_DATA, ATTR_APP_ID, ATTR_APP_NAME,
ATTR_APP_VERSION, DATA_DELETED_IDS, ATTR_DEVICE_NAME,
ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION,
DATA_REGISTRATIONS, ATTR_SUPPORTS_ENCRYPTION,
CONF_USER_ID, DOMAIN)
_LOGGER = logging.getLogger(__name__)
def get_cipher() -> Tuple[int, Callable]:
"""Return decryption function and length of key.
Async friendly.
"""
from nacl.secret import SecretBox
from nacl.encoding import Base64Encoder
def decrypt(ciphertext, key):
"""Decrypt ciphertext using key."""
return SecretBox(key).decrypt(ciphertext, encoder=Base64Encoder)
return (SecretBox.KEY_SIZE, decrypt)
def _decrypt_payload(key: str, ciphertext: str) -> Dict[str, str]:
"""Decrypt encrypted payload."""
try:
keylen, decrypt = get_cipher()
except OSError:
_LOGGER.warning(
"Ignoring encrypted payload because libsodium not installed")
return None
if key is None:
_LOGGER.warning(
"Ignoring encrypted payload because no decryption key known")
return None
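    # Normalize the key to exactly keylen bytes: UTF-8 encode, truncate if too long,
    # and right-pad with NUL bytes if too short.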
key = key.encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b'\0')
try:
message = decrypt(ciphertext, key)
message = json.loads(message.decode("utf-8"))
_LOGGER.debug("Successfully decrypted mobile_app payload")
return message
except ValueError:
_LOGGER.warning("Ignoring encrypted payload because unable to decrypt")
return None
def registration_context(registration: Dict) -> Context:
"""Generate a context from a request."""
return Context(user_id=registration[CONF_USER_ID])
def empty_okay_response(headers: Dict = None, status: int = 200) -> Response:
"""Return a Response with empty JSON object and a 200."""
return Response(body='{}', status=status, content_type='application/json',
headers=headers)
def supports_encryption() -> bool:
"""Test if we support encryption."""
try:
import nacl # noqa pylint: disable=unused-import
return True
except OSError:
return False
def safe_registration(registration: Dict) -> Dict:
"""Return a registration without sensitive values."""
# Sensitive values: webhook_id, secret, cloudhook_url
return {
ATTR_APP_DATA: registration[ATTR_APP_DATA],
ATTR_APP_ID: registration[ATTR_APP_ID],
ATTR_APP_NAME: registration[ATTR_APP_NAME],
ATTR_APP_VERSION: registration[ATTR_APP_VERSION],
ATTR_DEVICE_NAME: registration[ATTR_DEVICE_NAME],
ATTR_MANUFACTURER: registration[ATTR_MANUFACTURER],
ATTR_MODEL: registration[ATTR_MODEL],
ATTR_OS_VERSION: registration[ATTR_OS_VERSION],
ATTR_SUPPORTS_ENCRYPTION: registration[ATTR_SUPPORTS_ENCRYPTION],
}
def savable_state(hass: HomeAssistantType) -> Dict:
"""Return a clean object containing things that should be saved."""
return {
DATA_DELETED_IDS: hass.data[DOMAIN][DATA_DELETED_IDS],
DATA_REGISTRATIONS: hass.data[DOMAIN][DATA_REGISTRATIONS]
}
| 32.855769
| 79
| 0.687445
|
bccafb05c62b2b658ee4691d6bdaa94bee99acd4
| 4,118
|
py
|
Python
|
lib/python-bitcoinlib/bitcoin/tests/test_transactions.py
|
petertodd/tx-flood-attack
|
772ec12cb503e721c29a24bc78c9cd64f843d015
|
[
"MIT"
] | 3
|
2016-07-29T02:13:51.000Z
|
2018-06-05T23:12:47.000Z
|
lib/python-bitcoinlib/bitcoin/tests/test_transactions.py
|
ezaruba/tx-flood-attack
|
772ec12cb503e721c29a24bc78c9cd64f843d015
|
[
"MIT"
] | null | null | null |
lib/python-bitcoinlib/bitcoin/tests/test_transactions.py
|
ezaruba/tx-flood-attack
|
772ec12cb503e721c29a24bc78c9cd64f843d015
|
[
"MIT"
] | 2
|
2015-01-17T22:54:44.000Z
|
2019-01-19T14:39:15.000Z
|
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import unittest
import os
from bitcoin.core import COutPoint, CTxIn, CTxOut, CTransaction, CheckTransaction, CheckTransactionError, lx, x, b2x, ValidationError
from bitcoin.core.scripteval import VerifyScript, SCRIPT_VERIFY_P2SH
from bitcoin.tests.test_scripteval import parse_script
def load_test_vectors(name):
with open(os.path.dirname(__file__) + '/data/' + name, 'r') as fd:
for test_case in json.load(fd):
# Comments designated by single length strings
if len(test_case) == 1:
continue
assert len(test_case) == 3
prevouts = {}
for json_prevout in test_case[0]:
assert len(json_prevout) == 3
n = json_prevout[1]
if n == -1:
n = 0xffffffff
prevout = COutPoint(lx(json_prevout[0]), n)
prevouts[prevout] = parse_script(json_prevout[2])
tx = CTransaction.deserialize(x(test_case[1]))
enforceP2SH = test_case[2]
yield (prevouts, tx, enforceP2SH)
class Test_COutPoint(unittest.TestCase):
def test_is_null(self):
self.assertTrue(COutPoint().is_null())
self.assertTrue(COutPoint(hash=b'\x00'*32,n=0xffffffff).is_null())
self.assertFalse(COutPoint(hash=b'\x00'*31 + b'\x01').is_null())
self.assertFalse(COutPoint(n=1).is_null())
def test_repr(self):
def T(outpoint, expected):
actual = repr(outpoint)
self.assertEqual(actual, expected)
T( COutPoint(),
'COutPoint()')
T( COutPoint(lx('4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b'), 0),
"COutPoint(lx('4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b'), 0)")
class Test_CTxIn(unittest.TestCase):
def test_is_final(self):
self.assertTrue(CTxIn().is_final())
self.assertTrue(CTxIn(nSequence=0xffffffff).is_final())
self.assertFalse(CTxIn(nSequence=0).is_final())
def test_repr(self):
def T(txin, expected):
actual = repr(txin)
self.assertEqual(actual, expected)
T( CTxIn(),
'CTxIn(COutPoint(), CScript([]), 0xffffffff)')
class Test_CTransaction(unittest.TestCase):
def test_is_coinbase(self):
tx = CTransaction()
self.assertFalse(tx.is_coinbase())
tx.vin.append(CTxIn())
# IsCoinBase() in reference client doesn't check if vout is empty
self.assertTrue(tx.is_coinbase())
tx.vin[0].prevout.n = 0
self.assertFalse(tx.is_coinbase())
tx.vin[0] = CTxIn()
tx.vin.append(CTxIn())
self.assertFalse(tx.is_coinbase())
def test_tx_valid(self):
for prevouts, tx, enforceP2SH in load_test_vectors('tx_valid.json'):
try:
CheckTransaction(tx)
except CheckTransactionError:
self.fail('tx failed CheckTransaction(): ' \
+ str((prevouts, b2x(tx.serialize()), enforceP2SH)))
continue
for i in range(len(tx.vin)):
flags = set()
if enforceP2SH:
flags.add(SCRIPT_VERIFY_P2SH)
VerifyScript(tx.vin[i].scriptSig, prevouts[tx.vin[i].prevout], tx, i, flags=flags)
def test_tx_invalid(self):
for prevouts, tx, enforceP2SH in load_test_vectors('tx_invalid.json'):
try:
CheckTransaction(tx)
except CheckTransactionError:
continue
with self.assertRaises(ValidationError):
for i in range(len(tx.vin)):
flags = set()
if enforceP2SH:
flags.add(SCRIPT_VERIFY_P2SH)
VerifyScript(tx.vin[i].scriptSig, prevouts[tx.vin[i].prevout], tx, i, flags=flags)
| 36.122807
| 133
| 0.609519
|
5207d092a56aa016945dd43165d33f64156c0d7a
| 2,419
|
py
|
Python
|
tests/test_quantum_volume.py
|
msohaibalam/forest-benchmarking
|
40f5fd5235803204b34fa8ba1ced4ef2e0f3098d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_quantum_volume.py
|
msohaibalam/forest-benchmarking
|
40f5fd5235803204b34fa8ba1ced4ef2e0f3098d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_quantum_volume.py
|
msohaibalam/forest-benchmarking
|
40f5fd5235803204b34fa8ba1ced4ef2e0f3098d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import warnings
from forest_benchmarking.quantum_volume import *
np.random.seed(1)
def test_ideal_sim_heavy_probs(qvm):
qvm.qam.random_seed = 1
depths = [2, 3]
# silence warning from too few circuits, since 100 circuits is too slow
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outcomes = measure_quantum_volume(qvm, num_circuits=80, num_shots=50, qubits=[0, 1, 2])
assert extract_quantum_volume_from_results(outcomes) == 8
target_probs = [0.788765, 0.852895]
probs = [outcomes[depth][0] for depth in depths]
np.testing.assert_allclose(probs, target_probs, atol=.02)
def test_qv_df_generation():
depths = [2, 3]
n_ckts = 100
df = generate_quantum_volume_experiments(depths, n_ckts)
df_depths = df["Depth"].values
ckts = df["Abstract Ckt"].values
assert len(df_depths) == len(depths)*n_ckts
assert all([len(ckt[0]) == depth for ckt, depth in zip(ckts, df_depths)])
assert all([len(ckt[0][0]) == depth for ckt, depth in zip(ckts, df_depths)])
assert all([ckt[1].shape == (depth, depth//2, 4, 4) for ckt, depth in zip(ckts, df_depths)])
def test_qv_data_acquisition(qvm):
depths = [2, 3]
n_ckts = 10
n_shots = 5
df = generate_quantum_volume_experiments(depths, n_ckts)
df = add_programs_to_dataframe(df, qvm)
df = acquire_quantum_volume_data(df, qvm, n_shots)
df_depths = df["Depth"].values
results = df["Results"].values
assert all([res.shape == (n_shots, depth) for res, depth in zip(results, df_depths)])
def test_qv_count_heavy_hitters(qvm):
depths = [2, 3]
n_ckts = 10
n_shots = 5
df = generate_quantum_volume_experiments(depths, n_ckts)
df = add_programs_to_dataframe(df, qvm)
df = acquire_quantum_volume_data(df, qvm, n_shots)
df = acquire_heavy_hitters(df)
num_hhs = df["Num HH Sampled"].values
assert all([0 <= num_hh <= n_shots for num_hh in num_hhs])
def test_qv_get_results_by_depth(qvm):
depths = [2, 3]
n_ckts = 10
n_shots = 5
df = generate_quantum_volume_experiments(depths, n_ckts)
df = add_programs_to_dataframe(df, qvm)
df = acquire_heavy_hitters(df)
df = acquire_quantum_volume_data(df, qvm, n_shots)
results = get_results_by_depth(df)
assert len(results.keys()) == len(depths)
assert [0 <= results[d][1] <= results[d][0] <= 1 for d in depths]
| 29.144578
| 96
| 0.686234
|
0fb70af5fb7994297bbf3711f2fb9f5cd49eb8de
| 2,609
|
py
|
Python
|
src/exhaustive_search/graph.py
|
ciarand/exhausting-search-homework
|
af9404abec70d6d92d69f1eb8237e6c8b74e7a39
|
[
"0BSD"
] | 1
|
2015-04-26T21:21:40.000Z
|
2015-04-26T21:21:40.000Z
|
src/exhaustive_search/graph.py
|
ciarand/exhausting-search-homework
|
af9404abec70d6d92d69f1eb8237e6c8b74e7a39
|
[
"0BSD"
] | null | null | null |
src/exhaustive_search/graph.py
|
ciarand/exhausting-search-homework
|
af9404abec70d6d92d69f1eb8237e6c8b74e7a39
|
[
"0BSD"
] | null | null | null |
""" Graph is provided to add some structure to the algorithms """
import random, math
from .point import Point
class Edge:
""" Edge represents a connection between two Points """
def __init__(self, left, right):
# guards
if not isinstance(left, Point):
raise TypeError("expected Point, got %s" % type(left))
if not isinstance(right, Point):
raise TypeError("expected Point, got %s" % type(right))
if left.x == right.x and left.y == right.y:
raise ValueError("x and y cannot be the same point")
# deterministically order so __eq__ and __hash__ checks are easy.
# Left.x is always >= than right.x
# When left.x == right.x, left.y is always >= right.y
if (left.x < right.x) or (left.x == right.x and left.y < right.y):
left, right = right, left
self.left = left
self.right = right
def weight(self):
""" Calculate the weight for this edge """
return math.sqrt(((self.left.y - self.right.y) ** 2) + ((self.left.x - self.right.x) ** 2))
def connected_to(self, candidate):
""" connected_to determines whether the given Point is a part of this
Edge """
return candidate == self.left or candidate == self.right
def __eq__(self, other):
return self.left == other.left and self.right == other.right
def __hash__(self):
return hash(''.join(str(v) for v in [self.left.x, self.left.y]))
def __str__(self):
return "Edge(left=%s, right=%s)" % (self.left, self.right)
class Graph:
""" Graph represents a collection of edges and points """
def __init__(self, points):
self.edges = set([])
self.points = set([])
for origin in points:
self.points.add(origin)
for dest in points:
if dest != origin:
self.edges.add(Edge(origin, dest))
def num_edges(self):
""" num_edges returns the number of edges in the graph """
return len(self.edges)
def num_points(self):
""" num_points returns the number of points in the graph """
return len(self.points)
def random_edge(self):
""" returns one of the edges in the graph, chosen pseudorandomly """
sample = random.sample(self.edges, 1)
return sample[0] if len(sample) != 0 else None
def random_point(self):
""" returns one of the edges in the graph, chosen pseudorandomly """
sample = random.sample(self.points, 1)
return sample[0] if len(sample) != 0 else None
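# Illustrative usage sketch (assumes Point exposes a Point(x, y) constructor):
#     points = [Point(0, 0), Point(3, 4), Point(6, 8)]
#     graph = Graph(points)
#     graph.num_points()            # -> 3
#     graph.num_edges()             # -> 3 (one Edge per unordered pair of points)
#     graph.random_edge().weight()  # Euclidean length of a pseudorandomly chosen edge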
| 32.6125
| 99
| 0.596014
|
b50d1f6d7ed1e9591d18473b1dfb133be4557120
| 2,521
|
py
|
Python
|
main.py
|
ashishsahu1/Space-Invader
|
be85178840ad103d98c9d56550cdc2a7f0b17b78
|
[
"MIT"
] | 1
|
2021-01-12T20:53:35.000Z
|
2021-01-12T20:53:35.000Z
|
main.py
|
ashishsahu1/Space-Invader
|
be85178840ad103d98c9d56550cdc2a7f0b17b78
|
[
"MIT"
] | null | null | null |
main.py
|
ashishsahu1/Space-Invader
|
be85178840ad103d98c9d56550cdc2a7f0b17b78
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.constants import KEYDOWN
import random
#initialise pygame module
pygame.init()
#creating screen (x,y)
screen = pygame.display.set_mode((800,600))
background = pygame.image.load('img/bg.png')
#title and icon
pygame.display.set_caption("Space Invader")
icon = pygame.image.load('img/icon2.png')
pygame.display.set_icon(icon)
bulletImg = pygame.image.load('img/bullet.png')
bulletX = 0
bulletY = 480
bulletXchange = 0
bulletYchange = 0.05
# bullet_state: 'ready' means the bullet can be fired, 'fire' means it is moving up the screen
bullet_state = 'ready'
def bullet(x,y):
print("bullet")
screen.blit(bulletImg, (x+16,y+10))
global bullet_state
bullet_state = "fire"
# def fire_bullet(x,y):
# global bullet_state
# bullet_state = 'fire'
# screen.blit(bulletImg, (x+16,y+16))
#enemy
enemyImg = pygame.image.load('img/enemy.png')
enemyX = random.randint(0,800)
enemyY = random.randint(50,150)
enemyXchange = 0.2
enemyYchange = 40
def enemy(enemyX,enemyY):
screen.blit(enemyImg,(enemyX,enemyY))
#player
playerImg = pygame.image.load('img/player.png')
playerX = 370
playerY = 480
playerXchange = 0
# Ychange = 0
def player(playerX,playerY):
screen.blit(playerImg,(playerX,playerY))
# Game loop
running = True
while running:
#screen colour
screen.fill((15, 48, 87))
#background
#screen.blit(background,(0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
#checking keystroke is left or right
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
playerXchange = -0.3
if event.key == pygame.K_RIGHT:
playerXchange = 0.3
if event.key == pygame.K_SPACE:
bullet(playerX,bulletY)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
playerXchange = 0
#player boundary check
playerX += playerXchange
if playerX >= 736:
playerX = 736
elif playerX <=0:
playerX = 0
    # enemy movement
enemyX += enemyXchange
if enemyX >= 736:
enemyXchange = -0.2
enemyY += enemyYchange
elif enemyX <=0:
enemyXchange = 0.2
enemyY += enemyYchange
    if bullet_state == "fire":
print("fire")
bullet(playerX,bulletY)
bulletY -= bulletYchange
# playerY += Ychange
player(playerX,playerY)
enemy(enemyX,enemyY)
pygame.display.update()
| 21.921739
| 73
| 0.630305
|
df4f6617ee0fc7bc1cfa3f066889734dfd88b046
| 10,202
|
py
|
Python
|
homeassistant/components/spaceapi/__init__.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/spaceapi/__init__.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/spaceapi/__init__.py
|
learn-home-automation/core
|
c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Support for the SpaceAPI."""
from contextlib import suppress
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_LOCATION,
ATTR_NAME,
ATTR_STATE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ADDRESS,
CONF_EMAIL,
CONF_ENTITY_ID,
CONF_SENSORS,
CONF_STATE,
CONF_URL,
)
import homeassistant.core as ha
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
ATTR_ADDRESS = "address"
ATTR_SPACEFED = "spacefed"
ATTR_CAM = "cam"
ATTR_STREAM = "stream"
ATTR_FEEDS = "feeds"
ATTR_CACHE = "cache"
ATTR_PROJECTS = "projects"
ATTR_RADIO_SHOW = "radio_show"
ATTR_LAT = "lat"
ATTR_LON = "lon"
ATTR_API = "api"
ATTR_CLOSE = "close"
ATTR_CONTACT = "contact"
ATTR_ISSUE_REPORT_CHANNELS = "issue_report_channels"
ATTR_LASTCHANGE = "lastchange"
ATTR_LOGO = "logo"
ATTR_OPEN = "open"
ATTR_SENSORS = "sensors"
ATTR_SPACE = "space"
ATTR_UNIT = "unit"
ATTR_URL = "url"
ATTR_VALUE = "value"
ATTR_SENSOR_LOCATION = "location"
CONF_CONTACT = "contact"
CONF_HUMIDITY = "humidity"
CONF_ICON_CLOSED = "icon_closed"
CONF_ICON_OPEN = "icon_open"
CONF_ICONS = "icons"
CONF_IRC = "irc"
CONF_ISSUE_REPORT_CHANNELS = "issue_report_channels"
CONF_LOCATION = "location"
CONF_SPACEFED = "spacefed"
CONF_SPACENET = "spacenet"
CONF_SPACESAML = "spacesaml"
CONF_SPACEPHONE = "spacephone"
CONF_CAM = "cam"
CONF_STREAM = "stream"
CONF_M4 = "m4"
CONF_MJPEG = "mjpeg"
CONF_USTREAM = "ustream"
CONF_FEEDS = "feeds"
CONF_FEED_BLOG = "blog"
CONF_FEED_WIKI = "wiki"
CONF_FEED_CALENDAR = "calendar"
CONF_FEED_FLICKER = "flicker"
CONF_FEED_TYPE = "type"
CONF_FEED_URL = "url"
CONF_CACHE = "cache"
CONF_CACHE_SCHEDULE = "schedule"
CONF_PROJECTS = "projects"
CONF_RADIO_SHOW = "radio_show"
CONF_RADIO_SHOW_NAME = "name"
CONF_RADIO_SHOW_URL = "url"
CONF_RADIO_SHOW_TYPE = "type"
CONF_RADIO_SHOW_START = "start"
CONF_RADIO_SHOW_END = "end"
CONF_LOGO = "logo"
CONF_PHONE = "phone"
CONF_SIP = "sip"
CONF_KEYMASTERS = "keymasters"
CONF_KEYMASTER_NAME = "name"
CONF_KEYMASTER_IRC_NICK = "irc_nick"
CONF_KEYMASTER_PHONE = "phone"
CONF_KEYMASTER_EMAIL = "email"
CONF_KEYMASTER_TWITTER = "twitter"
CONF_TWITTER = "twitter"
CONF_FACEBOOK = "facebook"
CONF_IDENTICA = "identica"
CONF_FOURSQUARE = "foursquare"
CONF_ML = "ml"
CONF_JABBER = "jabber"
CONF_ISSUE_MAIL = "issue_mail"
CONF_SPACE = "space"
CONF_TEMPERATURE = "temperature"
DATA_SPACEAPI = "data_spaceapi"
DOMAIN = "spaceapi"
ISSUE_REPORT_CHANNELS = [CONF_EMAIL, CONF_ISSUE_MAIL, CONF_ML, CONF_TWITTER]
SENSOR_TYPES = [CONF_HUMIDITY, CONF_TEMPERATURE]
SPACEAPI_VERSION = "0.13"
URL_API_SPACEAPI = "/api/spaceapi"
LOCATION_SCHEMA = vol.Schema({vol.Optional(CONF_ADDRESS): cv.string})
SPACEFED_SCHEMA = vol.Schema(
{
vol.Optional(CONF_SPACENET): cv.boolean,
vol.Optional(CONF_SPACESAML): cv.boolean,
vol.Optional(CONF_SPACEPHONE): cv.boolean,
}
)
STREAM_SCHEMA = vol.Schema(
{
vol.Optional(CONF_M4): cv.url,
vol.Optional(CONF_MJPEG): cv.url,
vol.Optional(CONF_USTREAM): cv.url,
}
)
FEED_SCHEMA = vol.Schema(
{vol.Optional(CONF_FEED_TYPE): cv.string, vol.Required(CONF_FEED_URL): cv.url}
)
FEEDS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FEED_BLOG): FEED_SCHEMA,
vol.Optional(CONF_FEED_WIKI): FEED_SCHEMA,
vol.Optional(CONF_FEED_CALENDAR): FEED_SCHEMA,
vol.Optional(CONF_FEED_FLICKER): FEED_SCHEMA,
}
)
CACHE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CACHE_SCHEDULE): cv.matches_regex(
r"(m.02|m.05|m.10|m.15|m.30|h.01|h.02|h.04|h.08|h.12|d.01)"
)
}
)
RADIO_SHOW_SCHEMA = vol.Schema(
{
vol.Required(CONF_RADIO_SHOW_NAME): cv.string,
vol.Required(CONF_RADIO_SHOW_URL): cv.url,
vol.Required(CONF_RADIO_SHOW_TYPE): cv.matches_regex(r"(mp3|ogg)"),
vol.Required(CONF_RADIO_SHOW_START): cv.string,
vol.Required(CONF_RADIO_SHOW_END): cv.string,
}
)
KEYMASTER_SCHEMA = vol.Schema(
{
vol.Optional(CONF_KEYMASTER_NAME): cv.string,
vol.Optional(CONF_KEYMASTER_IRC_NICK): cv.string,
vol.Optional(CONF_KEYMASTER_PHONE): cv.string,
vol.Optional(CONF_KEYMASTER_EMAIL): cv.string,
vol.Optional(CONF_KEYMASTER_TWITTER): cv.string,
}
)
CONTACT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_EMAIL): cv.string,
vol.Optional(CONF_IRC): cv.string,
vol.Optional(CONF_ML): cv.string,
vol.Optional(CONF_PHONE): cv.string,
vol.Optional(CONF_TWITTER): cv.string,
vol.Optional(CONF_SIP): cv.string,
vol.Optional(CONF_FACEBOOK): cv.string,
vol.Optional(CONF_IDENTICA): cv.string,
vol.Optional(CONF_FOURSQUARE): cv.string,
vol.Optional(CONF_JABBER): cv.string,
vol.Optional(CONF_ISSUE_MAIL): cv.string,
vol.Optional(CONF_KEYMASTERS): vol.All(
cv.ensure_list, [KEYMASTER_SCHEMA], vol.Length(min=1)
),
},
required=False,
)
STATE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Inclusive(CONF_ICON_CLOSED, CONF_ICONS): cv.url,
vol.Inclusive(CONF_ICON_OPEN, CONF_ICONS): cv.url,
},
required=False,
)
SENSOR_SCHEMA = vol.Schema(
{vol.In(SENSOR_TYPES): [cv.entity_id], cv.string: [cv.entity_id]}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTACT): CONTACT_SCHEMA,
vol.Required(CONF_ISSUE_REPORT_CHANNELS): vol.All(
cv.ensure_list, [vol.In(ISSUE_REPORT_CHANNELS)]
),
vol.Optional(CONF_LOCATION): LOCATION_SCHEMA,
vol.Required(CONF_LOGO): cv.url,
vol.Required(CONF_SPACE): cv.string,
vol.Required(CONF_STATE): STATE_SCHEMA,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_SENSORS): SENSOR_SCHEMA,
vol.Optional(CONF_SPACEFED): SPACEFED_SCHEMA,
vol.Optional(CONF_CAM): vol.All(
cv.ensure_list, [cv.url], vol.Length(min=1)
),
vol.Optional(CONF_STREAM): STREAM_SCHEMA,
vol.Optional(CONF_FEEDS): FEEDS_SCHEMA,
vol.Optional(CONF_CACHE): CACHE_SCHEMA,
vol.Optional(CONF_PROJECTS): vol.All(cv.ensure_list, [cv.url]),
vol.Optional(CONF_RADIO_SHOW): vol.All(
cv.ensure_list, [RADIO_SHOW_SCHEMA]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Register the SpaceAPI with the HTTP interface."""
hass.data[DATA_SPACEAPI] = config[DOMAIN]
hass.http.register_view(APISpaceApiView)
return True
class APISpaceApiView(HomeAssistantView):
"""View to provide details according to the SpaceAPI."""
url = URL_API_SPACEAPI
name = "api:spaceapi"
@staticmethod
def get_sensor_data(hass, spaceapi, sensor):
"""Get data from a sensor."""
if not (sensor_state := hass.states.get(sensor)):
return None
sensor_data = {ATTR_NAME: sensor_state.name, ATTR_VALUE: sensor_state.state}
if ATTR_SENSOR_LOCATION in sensor_state.attributes:
sensor_data[ATTR_LOCATION] = sensor_state.attributes[ATTR_SENSOR_LOCATION]
else:
sensor_data[ATTR_LOCATION] = spaceapi[CONF_SPACE]
# Some sensors don't have a unit of measurement
if ATTR_UNIT_OF_MEASUREMENT in sensor_state.attributes:
sensor_data[ATTR_UNIT] = sensor_state.attributes[ATTR_UNIT_OF_MEASUREMENT]
return sensor_data
@ha.callback
def get(self, request):
"""Get SpaceAPI data."""
hass = request.app["hass"]
spaceapi = dict(hass.data[DATA_SPACEAPI])
is_sensors = spaceapi.get("sensors")
location = {ATTR_LAT: hass.config.latitude, ATTR_LON: hass.config.longitude}
try:
location[ATTR_ADDRESS] = spaceapi[ATTR_LOCATION][CONF_ADDRESS]
except KeyError:
pass
except TypeError:
pass
state_entity = spaceapi["state"][ATTR_ENTITY_ID]
if (space_state := hass.states.get(state_entity)) is not None:
state = {
ATTR_OPEN: space_state.state != "off",
ATTR_LASTCHANGE: dt_util.as_timestamp(space_state.last_updated),
}
else:
state = {ATTR_OPEN: "null", ATTR_LASTCHANGE: 0}
with suppress(KeyError):
state[ATTR_ICON] = {
ATTR_OPEN: spaceapi["state"][CONF_ICON_OPEN],
ATTR_CLOSE: spaceapi["state"][CONF_ICON_CLOSED],
}
data = {
ATTR_API: SPACEAPI_VERSION,
ATTR_CONTACT: spaceapi[CONF_CONTACT],
ATTR_ISSUE_REPORT_CHANNELS: spaceapi[CONF_ISSUE_REPORT_CHANNELS],
ATTR_LOCATION: location,
ATTR_LOGO: spaceapi[CONF_LOGO],
ATTR_SPACE: spaceapi[CONF_SPACE],
ATTR_STATE: state,
ATTR_URL: spaceapi[CONF_URL],
}
with suppress(KeyError):
data[ATTR_CAM] = spaceapi[CONF_CAM]
with suppress(KeyError):
data[ATTR_SPACEFED] = spaceapi[CONF_SPACEFED]
with suppress(KeyError):
data[ATTR_STREAM] = spaceapi[CONF_STREAM]
with suppress(KeyError):
data[ATTR_FEEDS] = spaceapi[CONF_FEEDS]
with suppress(KeyError):
data[ATTR_CACHE] = spaceapi[CONF_CACHE]
with suppress(KeyError):
data[ATTR_PROJECTS] = spaceapi[CONF_PROJECTS]
with suppress(KeyError):
data[ATTR_RADIO_SHOW] = spaceapi[CONF_RADIO_SHOW]
if is_sensors is not None:
sensors = {}
for sensor_type in is_sensors:
sensors[sensor_type] = []
for sensor in spaceapi["sensors"][sensor_type]:
sensor_data = self.get_sensor_data(hass, spaceapi, sensor)
sensors[sensor_type].append(sensor_data)
data[ATTR_SENSORS] = sensors
return self.json(data)
| 30.183432
| 86
| 0.656048
|
bf0379b343888a2fdff27906cd8519c1689c9c83
| 4,788
|
py
|
Python
|
coreutils/fileutil.py
|
bcsr0009/pdtf
|
5d3cc7933ac07457f6b4b59f2d4d70e0de1ffaec
|
[
"MIT"
] | null | null | null |
coreutils/fileutil.py
|
bcsr0009/pdtf
|
5d3cc7933ac07457f6b4b59f2d4d70e0de1ffaec
|
[
"MIT"
] | null | null | null |
coreutils/fileutil.py
|
bcsr0009/pdtf
|
5d3cc7933ac07457f6b4b59f2d4d70e0de1ffaec
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import requests
import json
import paramiko
import time
import logging
import os
import difflib
import pdb
from coreutils.logdecorator import logwrap
@logwrap
def execute_command(ssh_connection_handler, cmd):
    '''
    Execute a command on the device over the given SSH connection and
    return the (stdin, stdout, stderr) streams.
    '''
stdin,stdout,stderr=ssh_connection_handler.exec_command(cmd)
return stdin,stdout,stderr
@logwrap
def build_show_config_from_stdout(stdout, file):
    '''
    Write the stdout of a device command to the given file, line by line.
    '''
with open(file, "w") as rcbsp:
for line in stdout.readlines():
rcbsp.write(line)
def get_running_config(ssh_connection_handler, config_file, cmd):
execute_command_result = execute_command(ssh_connection_handler, cmd)
build_show_config_from_stdout(execute_command_result[1], config_file)
@logwrap
def get_running_config_from_ncs_before_service_push(ssh_connection_handler, before_service_push_config_file, cmd):
'''
This method provides NCS running config before service create
'''
get_running_config(ssh_connection_handler, before_service_push_config_file, cmd)
@logwrap
def get_running_config_from_ncs_before_service_delete(ssh_connection_handler, before_service_delete_config_file, cmd):
'''
This method provides running config before service delete
'''
get_running_config(ssh_connection_handler, before_service_delete_config_file, cmd)
@logwrap
def get_running_config_from_device_before_service_push(ssh_connection_handler, before_service_push_config_file, cmd):
'''
This method provides running config before service create
'''
get_running_config(ssh_connection_handler, before_service_push_config_file, cmd)
@logwrap
def get_running_config_from_device_before_service_delete(ssh_connection_handler, before_service_delete_config_file, cmd):
'''
This method provides running config before service delete
'''
get_running_config(ssh_connection_handler, before_service_delete_config_file, cmd)
@logwrap
def get_running_config_from_device_after_service_push(ssh_connection_handler, after_service_push_config_file, cmd):
'''
This method provides running config after service create
'''
get_running_config(ssh_connection_handler, after_service_push_config_file, cmd)
@logwrap
def get_running_config_from_ncs_after_service_push(ssh_connection_handler, after_service_push_config_file, cmd):
'''
This method provides NCS running config after service create
'''
get_running_config(ssh_connection_handler, after_service_push_config_file, cmd)
@logwrap
def get_running_config_from_ncs_after_service_delete(ssh_connection_handler, after_service_delete_config_file, cmd):
'''
This method provides running config after service delete
'''
get_running_config(ssh_connection_handler, after_service_delete_config_file, cmd)
@logwrap
def get_running_config_from_device_after_service_delete(ssh_connection_handler, after_service_delete_config_file, cmd):
'''
This method provides running config after service delete
'''
get_running_config(ssh_connection_handler, after_service_delete_config_file, cmd)
@logwrap
def get_generated_config_from_service_create(before_service_push, after_service_push, generated_service_config):
    '''
    Derive the config generated by the service create: keep the lines present in the
    after config but absent from the before config.
    '''
with open(before_service_push, 'r') as before_service_push_file_obj, open(after_service_push, 'r') as after_service_push_file_obj:
before_service_push_file_content = before_service_push_file_obj.readlines()
after_service_push_file_content = after_service_push_file_obj.readlines()
with open(generated_service_config, 'w') as generated_service_config_obj:
for line in after_service_push_file_content:
if line in before_service_push_file_content:
continue
generated_service_config_obj.write(line)
@logwrap
def get_generated_config_from_service_delete(before_service_push, after_service_push, generated_service_config):
    '''
    Derive the config removed by the service delete: keep the lines present in the
    before config but absent from the after config.
    '''
with open(before_service_push, 'r') as before_service_push_file_obj, open(after_service_push, 'r') as after_service_push_file_obj:
before_service_push_file_content = before_service_push_file_obj.readlines()
after_service_push_file_content = after_service_push_file_obj.readlines()
with open(generated_service_config, 'w') as generated_service_config_obj:
for line in before_service_push_file_content:
if line in after_service_push_file_content:
continue
generated_service_config_obj.write(line)
| 38.926829
| 134
| 0.794277
|
a34bee5f601a595592d67466729c0ac4b0af96e4
| 6,084
|
py
|
Python
|
espresso/tools/simple_greedy_decoder.py
|
beat-buesser/espresso
|
bd6ba1f7745c90a2c3c8ff0a0d7332efeebcc808
|
[
"MIT"
] | 1
|
2021-01-08T02:51:16.000Z
|
2021-01-08T02:51:16.000Z
|
espresso/tools/simple_greedy_decoder.py
|
opendp/espresso
|
2017183c33bc3414dddcbc7850f2fa4284e6d944
|
[
"MIT"
] | null | null | null |
espresso/tools/simple_greedy_decoder.py
|
opendp/espresso
|
2017183c33bc3414dddcbc7850f2fa4284e6d944
|
[
"MIT"
] | 1
|
2021-09-10T15:35:58.000Z
|
2021-09-10T15:35:58.000Z
|
# Copyright (c) Yiming Wang
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
class SimpleGreedyDecoder(nn.Module):
def __init__(
self, models, dictionary, max_len_a=0, max_len_b=200,
temperature=1.0, eos=None, symbols_to_strip_from_output=None,
for_validation=True,
):
"""Decode given speech audios with the simple greedy search.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
dictionary (~fairseq.data.Dictionary): dictionary
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
for_validation (bool, optional): indicate whether the decoder is
used for validation. It affects how max_len is determined, and
whether a tensor of lprobs is returned. If true, target should be
not None
"""
super().__init__()
from fairseq.sequence_generator import EnsembleModel
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.pad = dictionary.pad()
self.unk = dictionary.unk()
self.eos = dictionary.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None else {self.eos}
)
self.vocab_size = len(dictionary)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.temperature = temperature
assert temperature > 0, "--temperature must be greater than 0"
self.model.eval()
self.for_validation = for_validation
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def decode(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate a batch of translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._decode(sample, **kwargs)
@torch.no_grad()
def _decode(self, sample: Dict[str, Dict[str, Tensor]], bos_token: Optional[int] = None):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
src_tokens = net_input["src_tokens"]
bsz, src_len = src_tokens.size()[:2]
# compute the encoder output
encoder_outs = self.model.forward_encoder(net_input)
target = sample["target"]
# target can only be None if not for validation
assert target is not None or not self.for_validation
max_encoder_output_length = encoder_outs[0].encoder_out.size(0)
# for validation, make the maximum decoding length equal to at least the
# length of target, and the length of encoder_out if possible; otherwise
# max_len is obtained from max_len_a/b
max_len = max(max_encoder_output_length, target.size(1)) \
if self.for_validation else \
min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
self.model.max_decoder_positions() - 1,
)
tokens = src_tokens.new(bsz, max_len + 2).long().fill_(self.pad)
tokens[:, 0] = self.eos if bos_token is None else bos_token
# lprobs is only used when target is not None (i.e., for validation)
lprobs = encoder_outs[0].encoder_out.new_full(
(bsz, target.size(1), self.vocab_size), -np.log(self.vocab_size),
) if self.for_validation else None
attn = None
for step in range(max_len + 1): # one extra step for EOS marker
is_eos = tokens[:, step].eq(self.eos)
if step > 0 and is_eos.sum() == is_eos.size(0):
# all predictions are finished (i.e., ended with eos)
tokens = tokens[:, :step + 1]
if attn is not None:
attn = attn[:, :, :step + 1]
break
log_probs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
temperature=self.temperature,
)
tokens[:, step + 1] = log_probs.argmax(-1)
if step > 0: # deal with finished predictions
# make log_probs uniform if the previous output token is EOS
# and add consecutive EOS to the end of prediction
log_probs[is_eos, :] = -np.log(log_probs.size(1))
tokens[is_eos, step + 1] = self.eos
if self.for_validation and step < target.size(1):
lprobs[:, step, :] = log_probs
# Record attention scores
if type(avg_attn_scores) is list:
avg_attn_scores = avg_attn_scores[0]
if avg_attn_scores is not None:
if attn is None:
attn = avg_attn_scores.new(bsz, max_encoder_output_length, max_len + 2)
attn[:, :, step + 1].copy_(avg_attn_scores)
return tokens, lprobs, attn
| 42.25
| 93
| 0.59977
|
80496d1c7089a23fe5e0a46e149a45417ea68e3d
| 58,606
|
py
|
Python
|
transformers/modeling_t5.py
|
UKPLab/StructAdapt
|
a4d17712de06b61064524e888ed05e12bce4fd32
|
[
"Apache-2.0"
] | 16
|
2021-09-09T02:09:41.000Z
|
2022-03-22T15:38:11.000Z
|
transformers/modeling_t5.py
|
UKPLab/StructAdapt
|
a4d17712de06b61064524e888ed05e12bce4fd32
|
[
"Apache-2.0"
] | 3
|
2021-11-08T04:40:28.000Z
|
2022-03-30T09:10:31.000Z
|
transformers/modeling_t5.py
|
UKPLab/StructAdapt
|
a4d17712de06b61064524e888ed05e12bce4fd32
|
[
"Apache-2.0"
] | 6
|
2021-11-01T09:45:53.000Z
|
2022-01-06T06:18:45.000Z
|
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from .configuration_t5 import T5Config
from .file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_callable,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from .utils import logging
from .gat_utils import GATConv
from .graph_utils import get_pytorch_graph
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
####################################################
# This dict contains shortcut names and associated urls
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
# See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
# elif scope_names[0] == 'scale':
# pointer = getattr(pointer, 'weight')
# elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':
# pointer = getattr(pointer, 'bias')
# elif scope_names[0] == 'squad':
# pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name))
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
# logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)
####################################################
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""Construct a layernorm module in the T5 style
        No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
# layer norm should always be calculated in float32
variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
x = x / torch.sqrt(variance + self.variance_epsilon)
if self.weight.dtype == torch.float16:
x = x.to(torch.float16)
return self.weight * x
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
h = self.wi(hidden_states)
h = F.relu(h)
h = self.dropout(h)
h = self.wo(h)
return h
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
self.DenseReluDense = T5DenseReluDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
norm_x = self.layer_norm(hidden_states)
y = self.DenseReluDense(norm_x)
layer_output = hidden_states + self.dropout(y)
return layer_output
class T5LayerAdapt(nn.Module):
def __init__(self, config):
super().__init__()
self.DenseReluDense = T5DenseReluDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
norm_x = self.layer_norm(hidden_states)
y = self.DenseReluDense(norm_x)
layer_output = hidden_states + self.dropout(y)
return layer_output
from .rgcn_custom import RGCNConv
from torch_geometric.nn import GCNConv, FastRGCNConv
class T5GNNAdapt(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = RGCNConv(config.d_model, config.d_ff, num_relations=2, root_weight=True)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.dropout_gnn = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states, graphs):
norm_x = self.layer_norm(hidden_states)
graph_batch = get_pytorch_graph(norm_x, graphs)
y = F.elu(self.conv(graph_batch.x, graph_batch.edge_index, edge_type=graph_batch.y))
y = self.dropout_gnn(y)
y = self.wo(y)
y = y.view_as(hidden_states)
layer_output = hidden_states + self.dropout(y)
return layer_output
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False, is_bidirectional=False):
super().__init__()
self.is_bidirectional = is_bidirectional
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.d_kv = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.d_kv
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.d_kv * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
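# Worked example (illustrative), with bidirectional=True, num_buckets=32, max_distance=128:
# the 32 buckets split into 16 for "key before query" and 16 for "key after query".
#   relative_position = -3   -> n = 3  -> bucket 3            (exact bucket, no sign offset)
#   relative_position = +3   -> n = -3 -> bucket 16 + 3 = 19  (sign adds num_buckets//2)
#   relative_position = -200 -> clamps to bucket 15 of its half, so all long-range offsets
#   share a bucket, which is what lets the bias generalize to longer sequences.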
def compute_bias(self, qlen, klen):
""" Compute binned relative position bias """
context_position = torch.arange(qlen, dtype=torch.long)[:, None]
memory_position = torch.arange(klen, dtype=torch.long)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=self.is_bidirectional,
num_buckets=self.relative_attention_num_buckets,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)
return values
def forward(
self,
input,
mask=None,
kv=None,
position_bias=None,
past_key_value=None,
head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
# past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head)
bs, qlen, dim = input.size()
if past_key_value is not None:
assert self.is_decoder is True, "Encoder cannot cache past key value states"
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
len(past_key_value)
)
real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length
else:
real_qlen = qlen
if kv is None:
klen = real_qlen
else:
klen = kv.size(1)
def shape(x):
""" projection """
return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)
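# shape(): (bs, seq_len, inner_dim) -> (bs, n_heads, seq_len, d_kv); unshape() is the inverse,
# merging the heads back into inner_dim = n_heads * d_kv before the output projection self.o.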
q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)
elif past_key_value is None:
k = v = kv
k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)
if past_key_value is not None:
if kv is None:
k_, v_ = past_key_value
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = past_key_value
if self.is_decoder and use_cache is True:
present_key_value_state = ((k, v),)
else:
present_key_value_state = (None,)
# (bs, n_heads, qlen, klen)
scores = torch.matmul(
q, k.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", q, k), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
raise ValueError("No position_bias provided and no weights to compute position_bias")
position_bias = self.compute_bias(real_qlen, klen)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -qlen:, :]
if mask is not None:
position_bias = position_bias + mask # (bs, n_heads, qlen, klen)
scores += position_bias
weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
context = self.o(context)
outputs = (context,) + present_key_value_state
if output_attentions:
outputs = outputs + (weights,)
if self.has_relative_attention_bias:
outputs = outputs + (position_bias,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
norm_x,
mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.EncDecAttention = T5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=True
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
kv,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
norm_x,
mask=attention_mask,
kv=kv,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))
self.layer.append(T5LayerFF(config))
if not self.is_decoder:
adapter_config = copy.deepcopy(config)
adapter_config.d_ff = config.adapter_dim
self.adapter = T5GNNAdapt(adapter_config)
else:
adapter_config = copy.deepcopy(config)
adapter_config.d_ff = config.adapter_dim
self.adapter = T5LayerAdapt(adapter_config)
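# Adapter placement (custom addition on top of the stock T5 block): encoder blocks get a
# graph-aware T5GNNAdapt that runs an RGCN over the token graph, decoder blocks get a plain
# T5LayerAdapt bottleneck; both reuse the T5 config with d_ff overridden by adapter_dim.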
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
graphs=None,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format(
expected_num_past_key_values,
"2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "",
len(past_key_value),
)
assert len(past_key_value) == expected_num_past_key_values, error_message
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
if self.is_decoder and encoder_hidden_states is not None:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
kv=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
head_mask=head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
####
# ADD Adapter
if not self.is_decoder:
hidden_states = self.adapter(hidden_states, graphs)
else:
hidden_states = self.adapter(hidden_states)
####
outputs = (hidden_states,)
# Add attentions if we output them
outputs = outputs + (present_key_value_state,) + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class T5PreTrainedModel(PreTrainedModel):
"""An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
""" Initialize the weights """
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
d_kv = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
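# Example (illustrative): with decoder_start_token_id = 0 and pad_token_id = 0,
#   labels            = [[100, 200, 300]]
#   shifted_input_ids = [[  0, 100, 200]]
# and any -100 entries in the shifted sequence are replaced by pad_token_id so the decoder
# never sees the ignore-index value.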
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
graphs=None,
):
# print(graphs)
# exit()
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format(
self
)
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is not None:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
head_mask=head_mask[i],
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
graphs=graphs,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
hidden_states, present_key_value_state = layer_outputs[:2]
if i == 0:
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[3 if output_attentions else 2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
T5_START_DOCSTRING = r"""
The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right
and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`.
See :meth:`transformers.PreTrainedTokenizer.encode` and
:meth:`transformers.PreTrainedTokenizer.__call__` for detail.
To know more on how to prepare :obj:`input_ids` for pretraining take a look at
`T5 Training <./t5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for
:obj:`decoder_input_ids` generation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at
`T5 Training <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both
unset, :obj:`decoder_input_ids` takes the value of :obj:`input_ids`.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`: `attentions`)
:obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of
hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input
(see :obj:`past_key_values`).
This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into
associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both
unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
head_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Example::
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5Model.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True)
>>> last_hidden_states = outputs.last_hidden_state
"""
if "decoder_past_key_value_states" in kwargs:
warnings.warn(
"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_value_states")
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
authorized_missing_keys = [r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
head_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
graphs=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the sequence-to-sequence language modeling loss.
Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`.
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size - 1]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
"""
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
if "decoder_past_key_value_states" in kwargs:
warnings.warn(
"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_value_states")
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
graphs=graphs
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# If decoding with past key value states, only the last tokens
# should be given as an input
if past_key_values is not None:
assert labels is None, "Decoder should not use cached key value states when training."
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
if decoder_inputs_embeds is not None:
decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim ** -0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"use_cache": use_cache,
}
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
# the batch dim of each cached key/value state is dim 0, so reorder along dim 0
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
| 43.508537 | 213 | 0.649217 |
03f0c43bca8da24f54eff1f9d372fb3376167825 | 4,264 | py | Python | dbaas/workflow/steps/util/metric_collector.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | ["BSD-3-Clause"] | null | null | null | dbaas/workflow/steps/util/metric_collector.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | ["BSD-3-Clause"] | null | null | null | dbaas/workflow/steps/util/metric_collector.py | didindinn/database-as-a-service | 747de31ff8546f7874ddd654af860e130afd17a0 | ["BSD-3-Clause"] | null | null | null |
from base import BaseInstanceStep
from dbaas_credentials.models import CredentialType
from util import get_credentials_for
from util import build_context_script
from util import exec_remote_command_host
import logging
LOG = logging.getLogger(__name__)
class MetricsCollector(BaseInstanceStep):
def __init__(self, instance):
super(MetricsCollector, self).__init__(instance)
self.credential = get_credentials_for(
self.environment, CredentialType.TELEGRAF)
self.collector_allowed = self.credential.get_parameter_by_name(
'collector_allowed')
self.kafka_topic = self.credential.get_parameter_by_name(
'kafka_topic')
@property
def is_valid(self):
return str(self.collector_allowed).lower() == 'true'
@property
def script_variables(self):
user = self.driver.get_metric_collector_user(self.credential.user)
password = self.driver.get_metric_collector_password(
self.credential.password)
create_telegraf_config = True
if self.instance.instance_type == self.instance.REDIS_SENTINEL:
if len(self.host.instances.all()) > 1:
create_telegraf_config = False
create_default_file = self.instance.instance_type in (
self.instance.MYSQL, self.instance.MONGODB, self.instance.REDIS,
self.instance.MYSQL_PERCONA)
variables = {
'HOSTNAME': self.host.hostname.split('.')[0],
'HOSTADDRESS': self.host.address,
'PORT': self.instance.port,
'USER': user,
'PASSWORD': password,
'MYSQL': self.instance.instance_type == self.instance.MYSQL,
'MONGODB': self.instance.instance_type == self.instance.MONGODB,
'REDIS': self.instance.instance_type == self.instance.REDIS,
'CREATE_TELEGRAF_CONFIG': create_telegraf_config,
'CREATE_DEFAULT_FILE': create_default_file,
'KAFKA_ENDPOINT': self.credential.endpoint,
'KAFKA_TOPIC': self.kafka_topic,
}
return variables
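# This dict is rendered into the plan's telegraf template by build_context_script().
# Illustrative result for a standalone MySQL instance (values made up for illustration;
# USER/PASSWORD/KAFKA_* come from the credential):
#   {'HOSTNAME': 'host01', 'HOSTADDRESS': '10.0.0.1', 'PORT': 3306,
#    'MYSQL': True, 'MONGODB': False, 'REDIS': False,
#    'CREATE_TELEGRAF_CONFIG': True, 'CREATE_DEFAULT_FILE': True, ...}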
def do(self):
raise NotImplementedError
def undo(self):
pass
def exec_script(self, script):
output = {}
return_code = exec_remote_command_host(self.host, script, output)
if return_code != 0:
raise EnvironmentError(str(output))
LOG.info("output: {}".format(output))
return output
class ConfigureTelegraf(MetricsCollector):
def __unicode__(self):
return "Configuring Telegraf..."
def do(self):
if not self.is_valid:
return
template_script = self.plan.script.metric_collector_template
script = build_context_script(self.script_variables, template_script)
return self.exec_script(script)
class InstallTelegraf(MetricsCollector):
def __unicode__(self):
return "Installing Telegraf..."
def do(self):
if not self.is_valid:
return
script = "yum install telegraf -y"
self.exec_script(script)
class RestartTelegraf(MetricsCollector):
def __unicode__(self):
return "Restarting Telegraf..."
def do(self):
if not self.is_valid:
return
script = "/etc/init.d/telegraf restart"
self.exec_script(script)
class StopTelegraf(MetricsCollector):
def __unicode__(self):
return "Stopping Telegraf..."
def do(self):
if not self.is_valid:
return
script = "/etc/init.d/telegraf stop"
self.exec_script(script)
class CreateMetricCollectorDatabaseUser(MetricsCollector):
def __unicode__(self):
return "Creating metric collector database user..."
def do(self):
if not self.is_valid:
return
if self.driver.check_instance_is_master(self.instance):
self.driver.create_metric_collector_user(
username=self.credential.user,
password=self.credential.password)
def undo(self):
if not self.is_valid:
return
if self.driver.check_instance_is_master(self.instance):
self.driver.remove_metric_collector_user(
username=self.credential.user)
| 32.30303 | 77 | 0.653143 |
f5effc2413e15ea411a06d72bf58d61216409a78 | 1,923 | py | Python | better/tdagent/algorithms/bcrp.py | bettertony/Better | edb58ac1a44692f4227d1c0f6cdde550eca13f4f | ["BSD-3-Clause"] | 1 | 2019-12-07T00:27:35.000Z | 2019-12-07T00:27:35.000Z | better/tdagent/algorithms/bcrp.py | bettertony/Better | edb58ac1a44692f4227d1c0f6cdde550eca13f4f | ["BSD-3-Clause"] | null | null | null | better/tdagent/algorithms/bcrp.py | bettertony/Better | edb58ac1a44692f4227d1c0f6cdde550eca13f4f | ["BSD-3-Clause"] | null | null | null |
from ..tdagent import TDAgent
from better.tdagent.algorithms.crp import CRP
import numpy as np
from scipy.optimize import minimize
class BCRP(CRP):
""" Best Constant Rebalanced Portfolio = Constant Rebalanced Portfolio constructed with hindsight. It is often used as benchmark.
Reference:
T. Cover. Universal Portfolios, 1991.
http://www-isl.stanford.edu/~cover/papers/paper93.pdf
"""
def __init__(self, last_b=None):
super(BCRP, self).__init__()
self.last_b = last_b
def get_weight(self, data):
""" Find weights which maximize return on X in hindsight! """
weights = opt_weights(data)
return weights
def decide_by_history(self, x, last_b):
if self.last_b is None:
from better.tools.trade import get_test_data
from better.tools.configprocess import preprocess_config
import json
with open("better/net_config.json") as file:
config = json.load(file)
config = preprocess_config(config)
data = get_test_data(config)
self.last_b = self.get_weight(data.T)
return self.last_b
def opt_weights(X, max_leverage=1):
x_0 = max_leverage * np.ones(X.shape[1]) / float(X.shape[1])
objective = lambda b: -np.prod(X.dot(b))
cons = ({'type': 'eq', 'fun': lambda b: max_leverage-np.sum(b)},)
bnds = [(0., max_leverage)]*len(x_0)
res = minimize(objective, x_0, bounds=bnds, constraints=cons, method='slsqp', options={'ftol': 1e-07})
return res.x
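# opt_weights() maximizes terminal wealth prod_t(X[t] . b) over the simplex
# {b >= 0, sum(b) = max_leverage}, i.e. the best constant-rebalanced portfolio in hindsight.
# Tiny illustrative example (not part of the original module):
#   X = np.array([[1.1, 0.9], [1.2, 0.8]])   # price relatives: 2 periods, 2 assets
#   opt_weights(X)                            # -> approx. [1.0, 0.0], all weight on asset 0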
if __name__ == '__main__':
from better.tools.backtest import get_test_data
from better.tools.configprocess import preprocess_config
import json
with open("better/net_config.json") as file:
config = json.load(file)
config = preprocess_config(config)
data = get_test_data(config)
bcrp = BCRP()
result = bcrp.get_weight(data.T)
| 33.736842 | 133 | 0.657826 |
7f9ab75b7f9e674a571b5904f06f61f00cf1b1b1 | 257 | py | Python | scripts/portal/enterMCave.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | ["MIT"] | 9 | 2021-04-26T11:59:29.000Z | 2021-12-20T13:15:27.000Z | scripts/portal/enterMCave.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | ["MIT"] | null | null | null | scripts/portal/enterMCave.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | ["MIT"] | 6 | 2021-07-14T06:32:05.000Z | 2022-02-06T02:32:56.000Z |
# 101020400
if sm.hasQuest(21201):
sm.warpInstanceIn(914021000, 1)
sm.addQRValue(21203, "0")
sm.setInstanceTime(15*60)
if sm.hasQuest(21302):
sm.warpInstanceIn(914022100, 0)
sm.setQRValue(21203, "1", False)
sm.setInstanceTime(20*60)
| 25.7 | 36 | 0.692607 |
a5a5a2ae1ddac59e1687d05ea6df51be3ab96fb8 | 31,175 | py | Python | st7789.py | Eureka1024/RP2040-MicroPython-Mouse | fd3c113c64965601732e8625057985b799a14052 | ["Apache-2.0"] | null | null | null | st7789.py | Eureka1024/RP2040-MicroPython-Mouse | fd3c113c64965601732e8625057985b799a14052 | ["Apache-2.0"] | null | null | null | st7789.py | Eureka1024/RP2040-MicroPython-Mouse | fd3c113c64965601732e8625057985b799a14052 | ["Apache-2.0"] | null | null | null |
"""
st7789 tft driver in MicroPython based on devbis' st7789py_mpy module from
https://github.com/devbis/st7789py_mpy.
I added support for display rotation, scrolling and drawing text using 8 and 16
bit wide bitmap fonts with heights that are multiples of 8. Included are 12
bitmap fonts derived from classic pc text mode fonts.
"""
import time
from micropython import const
import ustruct as struct
# commands
ST7789_NOP = const(0x00)
ST7789_SWRESET = const(0x01)
ST7789_RDDID = const(0x04)
ST7789_RDDST = const(0x09)
ST7789_SLPIN = const(0x10)
ST7789_SLPOUT = const(0x11)
ST7789_PTLON = const(0x12)
ST7789_NORON = const(0x13)
ST7789_INVOFF = const(0x20)
ST7789_INVON = const(0x21)
ST7789_DISPOFF = const(0x28)
ST7789_DISPON = const(0x29)
ST7789_CASET = const(0x2A)
ST7789_RASET = const(0x2B)
ST7789_RAMWR = const(0x2C)
ST7789_RAMRD = const(0x2E)
ST7789_PTLAR = const(0x30)
ST7789_VSCRDEF = const(0x33)
ST7789_COLMOD = const(0x3A)
ST7789_MADCTL = const(0x36)
ST7789_VSCSAD = const(0x37)
ST7789_MADCTL_MY = const(0x80)
ST7789_MADCTL_MX = const(0x40)
ST7789_MADCTL_MV = const(0x20)
ST7789_MADCTL_ML = const(0x10)
ST7789_MADCTL_BGR = const(0x08)
ST7789_MADCTL_MH = const(0x04)
ST7789_MADCTL_RGB = const(0x00)
ST7789_RDID1 = const(0xDA)
ST7789_RDID2 = const(0xDB)
ST7789_RDID3 = const(0xDC)
ST7789_RDID4 = const(0xDD)
COLOR_MODE_65K = const(0x50)
COLOR_MODE_262K = const(0x60)
COLOR_MODE_12BIT = const(0x03)
COLOR_MODE_16BIT = const(0x05)
COLOR_MODE_18BIT = const(0x06)
COLOR_MODE_16M = const(0x07)
# Color definitions
BLACK = const(0x0000)
BLUE = const(0x001F)
RED = const(0xF800)
GREEN = const(0x07E0)
CYAN = const(0x07FF)
MAGENTA = const(0xF81F)
YELLOW = const(0xFFE0)
WHITE = const(0xFFFF)
_ENCODE_PIXEL = ">H"
_ENCODE_POS = ">HH"
_DECODE_PIXEL = ">BBB"
_BUFFER_SIZE = const(256)
_BIT7 = const(0x80)
_BIT6 = const(0x40)
_BIT5 = const(0x20)
_BIT4 = const(0x10)
_BIT3 = const(0x08)
_BIT2 = const(0x04)
_BIT1 = const(0x02)
_BIT0 = const(0x01)
def color565(red, green=0, blue=0):
"""
Convert red, green and blue values (0-255) into a 16-bit 565 encoding.
"""
try:
red, green, blue = red # see if the first var is a tuple/list
except TypeError:
pass
return (red & 0xf8) << 8 | (green & 0xfc) << 3 | blue >> 3
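# Examples (5 bits red, 6 bits green, 5 bits blue):
#   color565(255, 0, 0)     == 0xF800  (RED)
#   color565((0, 255, 0))   == 0x07E0  (GREEN, tuple form)
#   color565(255, 255, 255) == 0xFFFF  (WHITE)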
def _encode_pos(x, y):
"""Encode a postion into bytes."""
return struct.pack(_ENCODE_POS, x, y)
def _encode_pixel(color):
"""Encode a pixel color into bytes."""
return struct.pack(_ENCODE_PIXEL, color)
class ST7789():
def __init__(self, spi, width, height, reset, dc, cs=None, backlight=None,
xstart=-1, ystart=-1, rotation=0):
"""
Initialize display.
"""
if (width, height) != (240, 240) and (width, height) != (135, 240):
raise ValueError(
"Unsupported display. Only 240x240 and 135x240 are supported."
)
self._display_width = self.width = width
self._display_height = self.height = height
self.spi = spi
self.reset = reset
self.dc = dc
self.cs = cs
self.backlight = backlight
self._rotation = rotation % 4
self.xstart = xstart
self.ystart = ystart
self.spi.write(bytes([0xff]))  # send one dummy 0xff byte (assumed intent; bytes(0xff) would emit 255 zero bytes)
self.hard_reset()
self.soft_reset()
self.sleep_mode(False)
self._set_color_mode(COLOR_MODE_65K|COLOR_MODE_16BIT)
time.sleep_ms(50)
self.rotation(self._rotation)
self.inversion_mode(True)
time.sleep_ms(10)
self.write(ST7789_NORON)
time.sleep_ms(10)
if backlight is not None:
backlight.value(1)
self.fill(0)
self.write(ST7789_DISPON)
time.sleep_ms(500)
def write(self, command=None, data=None):
"""SPI write to the device: commands and data."""
if self.cs:
self.cs.off()
if command is not None:
self.dc.off()
self.spi.write(bytes([command]))
if data is not None:
self.dc.on()
self.spi.write(data)
if self.cs:
self.cs.on()
def hard_reset(self):
"""
Hard reset display.
"""
if self.cs:
self.cs.off()
if self.reset:
self.reset.on()
time.sleep_ms(50)
if self.reset:
self.reset.off()
time.sleep_ms(50)
if self.reset:
self.reset.on()
time.sleep_ms(150)
if self.cs:
self.cs.on()
def soft_reset(self):
"""
Soft reset display.
"""
self.write(ST7789_SWRESET)
time.sleep_ms(150)
def sleep_mode(self, value):
"""
Enable or disable display sleep mode.
Args:
value (bool): if True enable sleep mode. if False disable sleep
mode
"""
if value:
self.write(ST7789_SLPIN)
else:
self.write(ST7789_SLPOUT)
def inversion_mode(self, value):
"""
Enable or disable display inversion mode.
Args:
value (bool): if True enable inversion mode. if False disable
inversion mode
"""
if value:
self.write(ST7789_INVON)
else:
self.write(ST7789_INVOFF)
def _set_color_mode(self, mode):
"""
Set display color mode.
Args:
mode (int): color mode
COLOR_MODE_65K, COLOR_MODE_262K, COLOR_MODE_12BIT,
COLOR_MODE_16BIT, COLOR_MODE_18BIT, COLOR_MODE_16M
"""
self.write(ST7789_COLMOD, bytes([mode & 0x77]))
def rotation(self, rotation):
"""
Set display rotation.
Args:
rotation (int): 0-Portrait, 1-Landscape, 2-Inverted Portrait,
3-Inverted Landscape
"""
self._rotation = rotation % 4
if self._rotation == 0: # Portrait
madctl = ST7789_MADCTL_RGB
self.width = self._display_width
self.height = self._display_height
if self._display_width == 135:
self.xstart = 52
self.ystart = 40
elif self._rotation == 1: # Landscape
madctl = ST7789_MADCTL_MX | ST7789_MADCTL_MV | ST7789_MADCTL_RGB
self.width = self._display_height
self.height = self._display_width
if self._display_width == 135:
self.xstart = 40
self.ystart = 53
elif self._rotation == 2: # Inverted Portrait
madctl = ST7789_MADCTL_MX | ST7789_MADCTL_MY | ST7789_MADCTL_RGB
self.width = self._display_width
self.height = self._display_height
if self._display_width == 135:
self.xstart = 53
self.ystart = 40
else: # Inverted Landscape
madctl = ST7789_MADCTL_MV | ST7789_MADCTL_MY | ST7789_MADCTL_RGB
self.width = self._display_height
self.height = self._display_width
if self._display_width == 135:
self.xstart = 40
self.ystart = 52
self.write(ST7789_MADCTL, bytes([madctl]))
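# Example (illustrative, `tft` being an initialized ST7789 instance): on a 135x240 panel,
# tft.rotation(1) swaps width/height to 240x135 (landscape) and applies the xstart/ystart
# offsets set above; rotation(0)/rotation(2) keep portrait, rotation(3) is inverted landscape.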
def _set_columns(self, start, end):
"""
Send CASET (column address set) command to display.
Args:
start (int): column start address
end (int): column end address
"""
if start <= end <= self.width:
self.write(ST7789_CASET, _encode_pos(
start+self.xstart, end + self.xstart))
def _set_rows(self, start, end):
"""
Send RASET (row address set) command to display.
Args:
start (int): row start address
end (int): row end address
"""
if start <= end <= self.height:
self.write(ST7789_RASET, _encode_pos(
start+self.ystart, end+self.ystart))
def set_window(self, x0, y0, x1, y1):
"""
Set window to column and row address.
Args:
x0 (int): column start address
y0 (int): row start address
x1 (int): column end address
y1 (int): row end address
"""
self._set_columns(x0, x1)
self._set_rows(y0, y1)
self.write(ST7789_RAMWR)
def vline(self, x, y, length, color):
"""
Draw vertical line at the given location and color.
Args:
x (int): x coordinate
y (int): y coordinate
length (int): length of line
color (int): 565 encoded color
"""
self.fill_rect(x, y, 1, length, color)
def hline(self, x, y, length, color):
"""
Draw horizontal line at the given location and color.
Args:
x (int): x coordinate
y (int): y coordinate
length (int): length of line
color (int): 565 encoded color
"""
self.fill_rect(x, y, length, 1, color)
def pixel(self, x, y, color):
"""
Draw a pixel at the given location and color.
Args:
x (int): x coordinate
y (int): y coordinate
color (int): 565 encoded color
"""
self.set_window(x, y, x, y)
self.write(None, _encode_pixel(color))
def blit_buffer(self, buffer, x, y, width, height):
"""
Copy buffer to display at the given location.
Args:
buffer (bytes): Data to copy to display
x (int): Top left corner x coordinate
y (int): Top left corner y coordinate
width (int): Width
height (int): Height
"""
self.set_window(x, y, x + width - 1, y + height - 1)
self.write(None, buffer)
def rect(self, x, y, w, h, color):
"""
Draw a rectangle at the given location, size and color.
Args:
x (int): Top left corner x coordinate
y (int): Top left corner y coordinate
w (int): Width in pixels
h (int): Height in pixels
color (int): 565 encoded color
"""
self.hline(x, y, w, color)
self.vline(x, y, h, color)
self.vline(x + w - 1, y, h, color)
self.hline(x, y + h - 1, w, color)
def fill_rect(self, x, y, width, height, color):
"""
Draw a rectangle at the given location, size and filled with color.
Args:
x (int): Top left corner x coordinate
y (int): Top left corner y coordinate
width (int): Width in pixels
height (int): Height in pixels
color (int): 565 encoded color
"""
self.set_window(x, y, x + width - 1, y + height - 1)
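        # Stream the fill as repeated pixel data in _BUFFER_SIZE-pixel chunks
        # so the whole rectangle never has to be materialised in RAM at once.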
chunks, rest = divmod(width * height, _BUFFER_SIZE)
pixel = _encode_pixel(color)
self.dc.on()
if chunks:
data = pixel * _BUFFER_SIZE
for _ in range(chunks):
self.write(None, data)
if rest:
self.write(None, pixel * rest)
def fill(self, color):
"""
Fill the entire FrameBuffer with the specified color.
Args:
color (int): 565 encoded color
"""
self.fill_rect(0, 0, self.width, self.height, color)
def line(self, x0, y0, x1, y1, color):
"""
Draw a single pixel wide line starting at x0, y0 and ending at x1, y1.
Args:
x0 (int): Start point x coordinate
y0 (int): Start point y coordinate
x1 (int): End point x coordinate
y1 (int): End point y coordinate
color (int): 565 encoded color
"""
steep = abs(y1 - y0) > abs(x1 - x0)
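        # Bresenham's line algorithm: when the line is steep, swap x/y so the
        # loop always advances along x; the accumulated error term decides
        # when to step in y.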
if steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
if x0 > x1:
x0, x1 = x1, x0
y0, y1 = y1, y0
dx = x1 - x0
dy = abs(y1 - y0)
err = dx // 2
if y0 < y1:
ystep = 1
else:
ystep = -1
while x0 <= x1:
if steep:
self.pixel(y0, x0, color)
else:
self.pixel(x0, y0, color)
err -= dy
if err < 0:
y0 += ystep
err += dx
x0 += 1
def vscrdef(self, tfa, vsa, bfa):
"""
Set Vertical Scrolling Definition.
        To scroll a 135x240 display these values should be 40, 240, 40.
        There are 40 lines above the display that are not shown, followed by
        240 lines that are shown, followed by 40 more lines that are not shown.
        You can write into those off-screen areas and scroll them into view by
        changing the Vertical Scroll Start Address (see vscsad below).
Args:
tfa (int): Top Fixed Area
vsa (int): Vertical Scrolling Area
bfa (int): Bottom Fixed Area
"""
        self.write(ST7789_VSCRDEF, struct.pack(">HHH", tfa, vsa, bfa))
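    # Minimal scrolling sketch (hedged: the `tft` instance and the 135x240
    # geometry are assumptions). The ST7789 frame memory is 320 lines tall,
    # so TFA + VSA + BFA should total 320:
    #
    #   tft.vscrdef(40, 240, 40)      # 40 fixed, 240 scrolling, 40 fixed
    #   for line in range(40, 280):
    #       tft.vscsad(line)          # move the scroll start down one line
    #       utime.sleep(0.01)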
def vscsad(self, vssa):
"""
Set Vertical Scroll Start Address of RAM.
Defines which line in the Frame Memory will be written as the first
line after the last line of the Top Fixed Area on the display
Example:
for line in range(40, 280, 1):
tft.vscsad(line)
utime.sleep(0.01)
Args:
vssa (int): Vertical Scrolling Start Address
"""
self.write(ST7789_VSCSAD, struct.pack(">H", vssa))
def _text8(self, font, text, x0, y0, color=WHITE, background=BLACK):
"""
Internal method to write characters with width of 8 and
heights of 8 or 16.
Args:
font (module): font module to use
text (str): text to write
x0 (int): column to start drawing at
y0 (int): row to start drawing at
color (int): 565 encoded color to use for characters
background (int): 565 encoded color to use for background
"""
for char in text:
ch = ord(char)
if (font.FIRST <= ch < font.LAST
and x0+font.WIDTH <= self.width
and y0+font.HEIGHT <= self.height):
if font.HEIGHT == 8:
passes = 1
size = 8
each = 0
else:
passes = 2
size = 16
each = 8
for line in range(passes):
idx = (ch-font.FIRST)*size+(each*line)
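                    # Unrolled bit-to-pixel expansion: each of the 8 glyph
                    # bytes in this pass becomes eight 565 pixels (64 pixels,
                    # one 8x8 blit), avoiding a slow inner Python loop.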
buffer = struct.pack('>64H',
color if font.FONT[idx] & _BIT7 else background,
color if font.FONT[idx] & _BIT6 else background,
color if font.FONT[idx] & _BIT5 else background,
color if font.FONT[idx] & _BIT4 else background,
color if font.FONT[idx] & _BIT3 else background,
color if font.FONT[idx] & _BIT2 else background,
color if font.FONT[idx] & _BIT1 else background,
color if font.FONT[idx] & _BIT0 else background,
color if font.FONT[idx+1] & _BIT7 else background,
color if font.FONT[idx+1] & _BIT6 else background,
color if font.FONT[idx+1] & _BIT5 else background,
color if font.FONT[idx+1] & _BIT4 else background,
color if font.FONT[idx+1] & _BIT3 else background,
color if font.FONT[idx+1] & _BIT2 else background,
color if font.FONT[idx+1] & _BIT1 else background,
color if font.FONT[idx+1] & _BIT0 else background,
color if font.FONT[idx+2] & _BIT7 else background,
color if font.FONT[idx+2] & _BIT6 else background,
color if font.FONT[idx+2] & _BIT5 else background,
color if font.FONT[idx+2] & _BIT4 else background,
color if font.FONT[idx+2] & _BIT3 else background,
color if font.FONT[idx+2] & _BIT2 else background,
color if font.FONT[idx+2] & _BIT1 else background,
color if font.FONT[idx+2] & _BIT0 else background,
color if font.FONT[idx+3] & _BIT7 else background,
color if font.FONT[idx+3] & _BIT6 else background,
color if font.FONT[idx+3] & _BIT5 else background,
color if font.FONT[idx+3] & _BIT4 else background,
color if font.FONT[idx+3] & _BIT3 else background,
color if font.FONT[idx+3] & _BIT2 else background,
color if font.FONT[idx+3] & _BIT1 else background,
color if font.FONT[idx+3] & _BIT0 else background,
color if font.FONT[idx+4] & _BIT7 else background,
color if font.FONT[idx+4] & _BIT6 else background,
color if font.FONT[idx+4] & _BIT5 else background,
color if font.FONT[idx+4] & _BIT4 else background,
color if font.FONT[idx+4] & _BIT3 else background,
color if font.FONT[idx+4] & _BIT2 else background,
color if font.FONT[idx+4] & _BIT1 else background,
color if font.FONT[idx+4] & _BIT0 else background,
color if font.FONT[idx+5] & _BIT7 else background,
color if font.FONT[idx+5] & _BIT6 else background,
color if font.FONT[idx+5] & _BIT5 else background,
color if font.FONT[idx+5] & _BIT4 else background,
color if font.FONT[idx+5] & _BIT3 else background,
color if font.FONT[idx+5] & _BIT2 else background,
color if font.FONT[idx+5] & _BIT1 else background,
color if font.FONT[idx+5] & _BIT0 else background,
color if font.FONT[idx+6] & _BIT7 else background,
color if font.FONT[idx+6] & _BIT6 else background,
color if font.FONT[idx+6] & _BIT5 else background,
color if font.FONT[idx+6] & _BIT4 else background,
color if font.FONT[idx+6] & _BIT3 else background,
color if font.FONT[idx+6] & _BIT2 else background,
color if font.FONT[idx+6] & _BIT1 else background,
color if font.FONT[idx+6] & _BIT0 else background,
color if font.FONT[idx+7] & _BIT7 else background,
color if font.FONT[idx+7] & _BIT6 else background,
color if font.FONT[idx+7] & _BIT5 else background,
color if font.FONT[idx+7] & _BIT4 else background,
color if font.FONT[idx+7] & _BIT3 else background,
color if font.FONT[idx+7] & _BIT2 else background,
color if font.FONT[idx+7] & _BIT1 else background,
color if font.FONT[idx+7] & _BIT0 else background
)
self.blit_buffer(buffer, x0, y0+8*line, 8, 8)
x0 += 8
def _text16(self, font, text, x0, y0, color=WHITE, background=BLACK):
"""
Internal method to draw characters with width of 16 and heights of 16
or 32.
Args:
font (module): font module to use
text (str): text to write
x0 (int): column to start drawing at
y0 (int): row to start drawing at
color (int): 565 encoded color to use for characters
background (int): 565 encoded color to use for background
"""
for char in text:
ch = ord(char)
if (font.FIRST <= ch < font.LAST
and x0+font.WIDTH <= self.width
and y0+font.HEIGHT <= self.height):
if font.HEIGHT == 16:
passes = 2
size = 32
each = 16
else:
passes = 4
size = 64
each = 16
for line in range(passes):
idx = (ch-font.FIRST)*size+(each*line)
buffer = struct.pack('>128H',
color if font.FONT[idx] & _BIT7 else background,
color if font.FONT[idx] & _BIT6 else background,
color if font.FONT[idx] & _BIT5 else background,
color if font.FONT[idx] & _BIT4 else background,
color if font.FONT[idx] & _BIT3 else background,
color if font.FONT[idx] & _BIT2 else background,
color if font.FONT[idx] & _BIT1 else background,
color if font.FONT[idx] & _BIT0 else background,
color if font.FONT[idx+1] & _BIT7 else background,
color if font.FONT[idx+1] & _BIT6 else background,
color if font.FONT[idx+1] & _BIT5 else background,
color if font.FONT[idx+1] & _BIT4 else background,
color if font.FONT[idx+1] & _BIT3 else background,
color if font.FONT[idx+1] & _BIT2 else background,
color if font.FONT[idx+1] & _BIT1 else background,
color if font.FONT[idx+1] & _BIT0 else background,
color if font.FONT[idx+2] & _BIT7 else background,
color if font.FONT[idx+2] & _BIT6 else background,
color if font.FONT[idx+2] & _BIT5 else background,
color if font.FONT[idx+2] & _BIT4 else background,
color if font.FONT[idx+2] & _BIT3 else background,
color if font.FONT[idx+2] & _BIT2 else background,
color if font.FONT[idx+2] & _BIT1 else background,
color if font.FONT[idx+2] & _BIT0 else background,
color if font.FONT[idx+3] & _BIT7 else background,
color if font.FONT[idx+3] & _BIT6 else background,
color if font.FONT[idx+3] & _BIT5 else background,
color if font.FONT[idx+3] & _BIT4 else background,
color if font.FONT[idx+3] & _BIT3 else background,
color if font.FONT[idx+3] & _BIT2 else background,
color if font.FONT[idx+3] & _BIT1 else background,
color if font.FONT[idx+3] & _BIT0 else background,
color if font.FONT[idx+4] & _BIT7 else background,
color if font.FONT[idx+4] & _BIT6 else background,
color if font.FONT[idx+4] & _BIT5 else background,
color if font.FONT[idx+4] & _BIT4 else background,
color if font.FONT[idx+4] & _BIT3 else background,
color if font.FONT[idx+4] & _BIT2 else background,
color if font.FONT[idx+4] & _BIT1 else background,
color if font.FONT[idx+4] & _BIT0 else background,
color if font.FONT[idx+5] & _BIT7 else background,
color if font.FONT[idx+5] & _BIT6 else background,
color if font.FONT[idx+5] & _BIT5 else background,
color if font.FONT[idx+5] & _BIT4 else background,
color if font.FONT[idx+5] & _BIT3 else background,
color if font.FONT[idx+5] & _BIT2 else background,
color if font.FONT[idx+5] & _BIT1 else background,
color if font.FONT[idx+5] & _BIT0 else background,
color if font.FONT[idx+6] & _BIT7 else background,
color if font.FONT[idx+6] & _BIT6 else background,
color if font.FONT[idx+6] & _BIT5 else background,
color if font.FONT[idx+6] & _BIT4 else background,
color if font.FONT[idx+6] & _BIT3 else background,
color if font.FONT[idx+6] & _BIT2 else background,
color if font.FONT[idx+6] & _BIT1 else background,
color if font.FONT[idx+6] & _BIT0 else background,
color if font.FONT[idx+7] & _BIT7 else background,
color if font.FONT[idx+7] & _BIT6 else background,
color if font.FONT[idx+7] & _BIT5 else background,
color if font.FONT[idx+7] & _BIT4 else background,
color if font.FONT[idx+7] & _BIT3 else background,
color if font.FONT[idx+7] & _BIT2 else background,
color if font.FONT[idx+7] & _BIT1 else background,
color if font.FONT[idx+7] & _BIT0 else background,
color if font.FONT[idx+8] & _BIT7 else background,
color if font.FONT[idx+8] & _BIT6 else background,
color if font.FONT[idx+8] & _BIT5 else background,
color if font.FONT[idx+8] & _BIT4 else background,
color if font.FONT[idx+8] & _BIT3 else background,
color if font.FONT[idx+8] & _BIT2 else background,
color if font.FONT[idx+8] & _BIT1 else background,
color if font.FONT[idx+8] & _BIT0 else background,
color if font.FONT[idx+9] & _BIT7 else background,
color if font.FONT[idx+9] & _BIT6 else background,
color if font.FONT[idx+9] & _BIT5 else background,
color if font.FONT[idx+9] & _BIT4 else background,
color if font.FONT[idx+9] & _BIT3 else background,
color if font.FONT[idx+9] & _BIT2 else background,
color if font.FONT[idx+9] & _BIT1 else background,
color if font.FONT[idx+9] & _BIT0 else background,
color if font.FONT[idx+10] & _BIT7 else background,
color if font.FONT[idx+10] & _BIT6 else background,
color if font.FONT[idx+10] & _BIT5 else background,
color if font.FONT[idx+10] & _BIT4 else background,
color if font.FONT[idx+10] & _BIT3 else background,
color if font.FONT[idx+10] & _BIT2 else background,
color if font.FONT[idx+10] & _BIT1 else background,
color if font.FONT[idx+10] & _BIT0 else background,
color if font.FONT[idx+11] & _BIT7 else background,
color if font.FONT[idx+11] & _BIT6 else background,
color if font.FONT[idx+11] & _BIT5 else background,
color if font.FONT[idx+11] & _BIT4 else background,
color if font.FONT[idx+11] & _BIT3 else background,
color if font.FONT[idx+11] & _BIT2 else background,
color if font.FONT[idx+11] & _BIT1 else background,
color if font.FONT[idx+11] & _BIT0 else background,
color if font.FONT[idx+12] & _BIT7 else background,
color if font.FONT[idx+12] & _BIT6 else background,
color if font.FONT[idx+12] & _BIT5 else background,
color if font.FONT[idx+12] & _BIT4 else background,
color if font.FONT[idx+12] & _BIT3 else background,
color if font.FONT[idx+12] & _BIT2 else background,
color if font.FONT[idx+12] & _BIT1 else background,
color if font.FONT[idx+12] & _BIT0 else background,
color if font.FONT[idx+13] & _BIT7 else background,
color if font.FONT[idx+13] & _BIT6 else background,
color if font.FONT[idx+13] & _BIT5 else background,
color if font.FONT[idx+13] & _BIT4 else background,
color if font.FONT[idx+13] & _BIT3 else background,
color if font.FONT[idx+13] & _BIT2 else background,
color if font.FONT[idx+13] & _BIT1 else background,
color if font.FONT[idx+13] & _BIT0 else background,
color if font.FONT[idx+14] & _BIT7 else background,
color if font.FONT[idx+14] & _BIT6 else background,
color if font.FONT[idx+14] & _BIT5 else background,
color if font.FONT[idx+14] & _BIT4 else background,
color if font.FONT[idx+14] & _BIT3 else background,
color if font.FONT[idx+14] & _BIT2 else background,
color if font.FONT[idx+14] & _BIT1 else background,
color if font.FONT[idx+14] & _BIT0 else background,
color if font.FONT[idx+15] & _BIT7 else background,
color if font.FONT[idx+15] & _BIT6 else background,
color if font.FONT[idx+15] & _BIT5 else background,
color if font.FONT[idx+15] & _BIT4 else background,
color if font.FONT[idx+15] & _BIT3 else background,
color if font.FONT[idx+15] & _BIT2 else background,
color if font.FONT[idx+15] & _BIT1 else background,
color if font.FONT[idx+15] & _BIT0 else background
)
self.blit_buffer(buffer, x0, y0+8*line, 16, 8)
x0 += font.WIDTH
def text(self, font, text, x0, y0, color=WHITE, background=BLACK):
"""
Draw text on display in specified font and colors. 8 and 16 bit wide
fonts are supported.
Args:
font (module): font module to use.
text (str): text to write
x0 (int): column to start drawing at
y0 (int): row to start drawing at
color (int): 565 encoded color to use for characters
background (int): 565 encoded color to use for background
"""
if font.WIDTH == 8:
self._text8(font, text, x0, y0, color, background)
else:
self._text16(font, text, x0, y0, color, background)
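# Hedged usage sketch (the `tft` instance, the font module name and the pin
# wiring are assumptions, not part of this file):
#
#   import vga1_8x16 as font
#   tft.fill(BLACK)
#   tft.text(font, "Hello ST7789", 0, 0, WHITE, BLACK)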
| 41.291391
| 79
| 0.527057
|
ab1ad29b0492945e5de819d81e7c9dc2b59780dd
| 623
|
py
|
Python
|
files/get_files.py
|
SitwalaM/aws_fitbit_monitor
|
9827c27a58eb47dfa8d5def952991171f8939796
|
[
"MIT"
] | null | null | null |
files/get_files.py
|
SitwalaM/aws_fitbit_monitor
|
9827c27a58eb47dfa8d5def952991171f8939796
|
[
"MIT"
] | null | null | null |
files/get_files.py
|
SitwalaM/aws_fitbit_monitor
|
9827c27a58eb47dfa8d5def952991171f8939796
|
[
"MIT"
] | null | null | null |
import json
#import datetime
#import requests
#import numpy as np
import boto3
# aws services clients
s3 = boto3.client("s3")
s3_resource = boto3.resource('s3')
bucket = "bucketfitbit"
def get_s3_files_list():
    my_bucket = s3_resource.Bucket(bucket)
    # Build the key list first, then filter it with a comprehension; removing
    # items from a list while iterating over that same list skips entries.
    files_in_bucket = [obj.key for obj in my_bucket.objects.all()]
    files_in_bucket = [key for key in files_in_bucket if key.endswith(".json")]
    return files_in_bucket
files = get_s3_files_list()
for file in files:
s3.download_file(bucket,file, file)
| 20.766667
| 90
| 0.70947
|
7bd2a321824acb0213bd30edfb205413559a8d80
| 2,518
|
py
|
Python
|
pyquil/experiment/tests/test_setting.py
|
kalzoo/pyquil
|
f37d55acb906a02c0f3320ee3990e9051ee64145
|
[
"Apache-2.0"
] | null | null | null |
pyquil/experiment/tests/test_setting.py
|
kalzoo/pyquil
|
f37d55acb906a02c0f3320ee3990e9051ee64145
|
[
"Apache-2.0"
] | null | null | null |
pyquil/experiment/tests/test_setting.py
|
kalzoo/pyquil
|
f37d55acb906a02c0f3320ee3990e9051ee64145
|
[
"Apache-2.0"
] | null | null | null |
import functools
from operator import mul
import numpy as np
import pytest
from pyquil.experiment._setting import (ExperimentSetting, SIC0, SIC1, SIC2, SIC3,
TensorProductState, minusX, minusY, minusZ, plusX, plusY,
plusZ, zeros_state)
from pyquil.paulis import sI, sX, sY, sZ
def _generate_random_states(n_qubits, n_terms):
oneq_states = [SIC0, SIC1, SIC2, SIC3, plusX, minusX, plusY, minusY, plusZ, minusZ]
all_s_inds = np.random.randint(len(oneq_states), size=(n_terms, n_qubits))
states = []
for s_inds in all_s_inds:
state = functools.reduce(mul, (oneq_states[pi](i) for i, pi in enumerate(s_inds)),
TensorProductState([]))
states += [state]
return states
def _generate_random_paulis(n_qubits, n_terms):
paulis = [sI, sX, sY, sZ]
all_op_inds = np.random.randint(len(paulis), size=(n_terms, n_qubits))
operators = []
for op_inds in all_op_inds:
op = functools.reduce(mul, (paulis[pi](i) for i, pi in enumerate(op_inds)), sI(0))
op *= np.random.uniform(-1, 1)
operators += [op]
return operators
def test_experiment_setting():
in_states = _generate_random_states(n_qubits=4, n_terms=7)
out_ops = _generate_random_paulis(n_qubits=4, n_terms=7)
for ist, oop in zip(in_states, out_ops):
expt = ExperimentSetting(ist, oop)
assert str(expt) == expt.serializable()
expt2 = ExperimentSetting.from_str(str(expt))
assert expt == expt2
assert expt2.in_state == ist
assert expt2.out_operator == oop
@pytest.mark.filterwarnings("ignore:ExperimentSetting")
def test_setting_no_in_back_compat():
out_ops = _generate_random_paulis(n_qubits=4, n_terms=7)
for oop in out_ops:
expt = ExperimentSetting(TensorProductState(), oop)
expt2 = ExperimentSetting.from_str(str(expt))
assert expt == expt2
assert expt2.in_operator == sI()
assert expt2.out_operator == oop
@pytest.mark.filterwarnings("ignore:ExperimentSetting")
def test_setting_no_in():
out_ops = _generate_random_paulis(n_qubits=4, n_terms=7)
for oop in out_ops:
expt = ExperimentSetting(zeros_state(oop.get_qubits()), oop)
expt2 = ExperimentSetting.from_str(str(expt))
assert expt == expt2
assert expt2.in_operator == functools.reduce(mul, [sZ(q) for q in oop.get_qubits()], sI())
assert expt2.out_operator == oop
| 37.58209
| 98
| 0.661239
|
90da83289c9fbb1ae258d70fe268a50e058698d4
| 7,144
|
py
|
Python
|
src/paths/long/scripts/filter_vcfs.py
|
bayolau/discovar
|
9e472aca13670e40ab2234b89c8afd64875c58bf
|
[
"MIT"
] | null | null | null |
src/paths/long/scripts/filter_vcfs.py
|
bayolau/discovar
|
9e472aca13670e40ab2234b89c8afd64875c58bf
|
[
"MIT"
] | null | null | null |
src/paths/long/scripts/filter_vcfs.py
|
bayolau/discovar
|
9e472aca13670e40ab2234b89c8afd64875c58bf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
## SOFTWARE COPYRIGHT NOTICE AGREEMENT ##
## This software and its documentation are copyright (2013) by the ##
## Broad Institute. All rights are reserved. This software is supplied ##
## without any warranty or guaranteed support whatsoever. The Broad ##
## Institute is not responsible for its use, misuse, or functionality. ##
###############################################################################
# filter_vcfs - filter variant lines where phred score in either REFP or
# ALTP exceeds a threshold.
#
# The following rules apply:
#
# - already filtered lines are left alone (lines not marked as "PASS" or
# ".")
#
# - lines that are "PASS" or "." with a phred score for any element in
# ALTP or REFP which is >0 AND <threshold are marked as filtered (e.g.
# "PhredFilter10" for lines filtered with a threshold of 10).
#
# - lines that are "." and pass the phred threshold are marked as
# "PASS"
#
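#
# Illustrative sketch of the rules above only (hypothetical values; the 0.90
# cutoff works out to phred 10 in the code below, and further filters such as
# RefCallOnly and TooManyCalls are also applied):
#
#   FILTER in   REFP   ALTP    FILTER out
#   .           30     0,45    PASS            (no score in (0, 10))
#   PASS        5      0,45    PhredFilter10   (REFP=5 is >0 and <10)
#   q20         30     0,45    q20             (already filtered; left alone)
#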
import sys
import argparse
import math
import os
def filter_vcf( input, output ):
debug = False
phred90 = int(-10.0*math.log10( 1.0-0.9 ))
phred995 = int(-10.0*math.log10( 1.0-0.995 ))
print "input={}, output={}, phred90={}, phred995={}".format(input, output, phred90, phred995)
# new filters must be added below as symbol=("name","description")
# this is just to enforce consistency between what's in the head and
# in the body of the vcf
filters = dict(
Filter090=( "PhredFilter{}".format(phred90), "Some allele was below prob 0.90 = phred {}".format(phred90) ),
NoAllelesPass0995=( \
"NoAllelesPassPhred{}".format(phred995), "Not one allele was above prob 0.995 = phred {}".format(phred995) ),
RefCallOnly=( "RefCallOnly", "Non-variant line; only the reference was called" ),
TooManyCalls=( "TooManyCalls", "Multi-allelic site (greater than two alleles)" )
)
with open(output, 'w') as fd:
for line in open(input, 'r'):
if line[0] == '#':
if line[:len("#CHROM")] == "#CHROM":
# dump out the ##FILTER lines before the #CHROM
for id, descr in filters.viewvalues():
fd.write("##FILTER=<ID={id},Description=\"{descr}\">\n".format( id=id, descr=descr ))
fd.write(line)
else:
debug_line = False
fields=line.split()
(chrom,pos,id,ref,alt,qual,filter,info,format)=fields[:9]
samples = fields[9:]
# don't filter unless we're already passing or not yet filtered
if filter == '.' or filter == 'PASS':
names = format.split(':')
if (not "REFP" in names) or (not "ALTP" in names):
raise Exception("missing REFP and ALTP tags in line {}".format(line.strip() ) )
new_samples=[]
new_filters=set()
for sample in samples:
vals = sample.split(':')
if len(vals) != len(names):
raise Exception("sample values {} doesn't match format {} for line {}".format(
sample, format, line.strip()))
sample_info = dict( zip( names, vals ) )
probs = [ float( sample_info['REFP'] ) ]
probs.extend( map(float, sample_info['ALTP'].split(',') ) )
if True in map( lambda x: x>0 and x<phred90, probs ):
new_samples.append( sample )
new_filters.add( filters['Filter090'][0])
continue
calls = map( lambda x: x > phred995, probs )
if sum(calls) == 0:
new_samples.append( sample )
new_filters.add( filters['NoAllelesPass0995'][0] )
continue
if sum(calls) == 1 and calls[0] == True:
new_samples.append( sample )
new_filters.add( filters['RefCallOnly'][0] )
continue
if sum(calls) > 2:
new_samples.append( sample )
new_filters.add( filters['TooManyCalls'][0] )
continue
allele1 = calls.index(True)
if sum(calls) == 1:
allele2 = allele1
else:
allele2 = calls.index(True, allele1+1)
gt_idx = names.index('GT')
old_gt = vals[gt_idx]
vals[gt_idx] = '{}/{}'.format(allele1, allele2)
if debug and old_gt != vals[gt_idx]:
debug_line = True
new_samples.append( ':'.join(vals) )
new_filters.add( 'PASS' )
# okay, now re-write the filter tag based on
# "new_filters"
# RefCallOnly doesn't matter if there are other objections
if len(new_filters) > 1: new_filters.discard( filters['RefCallOnly'][0] )
# PASS only counts if there are no other objections
# (besides RefCallOnly removed above)
if len(new_filters) > 1: new_filters.discard('PASS')
filter= ",".join( new_filters )
                else: # re-using previous filter because line wasn't . or PASS
new_samples = samples
if debug_line:
print "re-wrote genotypes:"
print "\told line:"
print "\t",line
print "\tnew genotypes:"
print "\t","\t".join( new_samples )
print "\n"
fd.write( "\t".join( [chrom, pos, id, ref, alt, qual, filter, info, format] ) )
fd.write( "\t" )
fd.write( "\t".join( new_samples ) )
fd.write( "\n" )
def main( argv=[__name__] ):
parser=argparse.ArgumentParser(description='filter Discovar-generated VCF based on REFP/ALTP')
parser.add_argument( '-o', '--output', help='VCF output file', required=True)
parser.add_argument( 'input', help='VCF input file' )
args = parser.parse_args(argv[1:])
if os.path.exists( args.output ):
print >>sys.stderr, \
"Output file {} already exists. Move it out of the way first.".format( args.output )
return 1
try:
return(filter_vcf( args.input, args.output ) )
except:
print >>sys.stderr,"removing partial output file {}".format( args.output )
os.unlink(args.output)
raise
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 41.534884
| 121
| 0.496361
|