blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 220
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 257
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
adf0cfbbc71169a4d6fecca777b0ca1f2df1a9b5
|
c17635a2b05a96ea75d3b7dc76b63e1326606b64
|
/test/pexpect/__init__.py
|
dcf360f9dd8485a50f63f6111081d921d207fa0c
|
[] |
no_license
|
HamzaAnis/SimpleShell
|
8efde65c624f4024a9944cdce4cd66600a85cb99
|
afc642c2a4e029332ea535c9b0d445027c9f3d21
|
refs/heads/master
| 2020-03-28T19:21:45.654387
| 2018-09-19T15:50:28
| 2018-09-19T15:50:28
| 148,967,947
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87,780
|
py
|
'''Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo user@example.com:.')
child.expect('Password:')
child.sendline(mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
http://pexpect.sourceforge.net/
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
try:
import os
import sys
import time
import select
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
import codecs
import stat
except ImportError: # pragma: no cover
err = sys.exc_info()[1]
raise ImportError(str(err) + '''
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.''')
# Package version metadata; __revision__ is unused but kept for
# backward compatibility with code that inspects it.
__version__ = '3.3'
__revision__ = ''
# Names exported by "from pexpect import *".
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
           'which', 'split_command_line', '__version__', '__revision__']
# True when running under Python 3; selects bytes/str handling in spawn below.
PY3 = (sys.version_info[0] >= 3)
# Exception classes used by this module.
class ExceptionPexpect(Exception):
    '''Root of the exception hierarchy raised by this module.'''

    def __init__(self, value):
        super(ExceptionPexpect, self).__init__(value)
        self.value = value

    def __str__(self):
        return str(self.value)

    def get_trace(self):
        '''Return an abbreviated stack trace containing only the frames
        that belong to the caller -- frames originating inside the Pexpect
        module itself are filtered out.'''
        frames = traceback.extract_tb(sys.exc_info()[2])
        relevant = [frame for frame in frames
                    if 'pexpect/__init__' not in frame[0]]
        return ''.join(traceback.format_list(relevant))
class EOF(ExceptionPexpect):
    '''Raised when end-of-file is read from the child, which normally
    means the child process has exited.'''
class TIMEOUT(ExceptionPexpect):
    '''Raised when a read from the child does not complete within the
    allotted timeout.'''
##class TIMEOUT_PATTERN(TIMEOUT):
## '''Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## '''
##class MAXBUFFER(ExceptionPexpect):
## '''Raised when a buffer fills before matching an expected pattern.'''
def run(command, timeout=-1, withexitstatus=False, events=None,
        extra_args=None, logfile=None, cwd=None, env=None):
    '''Run *command*, wait for it to finish, and return all of its output
    as a string. STDERR is folded into the output, and lines end with the
    CR/LF (\\r\\n) combination even on UNIX-like systems because the child
    runs on a pseudo-tty. If the full path to the command is not given
    then the PATH is searched.

    If *withexitstatus* is true the return value is the tuple
    ``(command_output, exitstatus)``; otherwise just ``command_output``.

    run() can often replace creating a spawn instance. For example, this
    spawn-based code::

        from pexpect import *
        child = spawn('scp foo user@example.com:.')
        child.expect('(?i)password')
        child.sendline(mypassword)

    can be replaced with::

        from pexpect import *
        run('scp foo user@example.com:.', events={'(?i)password': mypassword})

    **Examples**

    Start the apache daemon, check in a file with SVN, capture an exit
    status::

        from pexpect import *
        run("/usr/local/apache/bin/apachectl start")
        run("svn ci -m 'automatic commit' my_file.py")
        (command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)

    Run ssh and send the password 'secret' whenever the '(?i)password'
    pattern appears::

        run("ssh username@machine.example.com 'ls -l'",
            events={'(?i)password': 'secret\\n'})

    Rip a DVD with mencoder, printing a tick every 5 seconds::

        from pexpect import *
        def print_ticks(d):
            print(d['event_count'])
        run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
            events={TIMEOUT: print_ticks}, timeout=5)

    The *events* argument is a dictionary mapping patterns to responses.
    Whenever one of the patterns is seen in the command output, run()
    sends the associated response string (include a newline where Enter
    is required). A response may also be a callback: a function taking a
    single dictionary argument. That dictionary holds the locals() of
    run(), so the callback can access the child spawn object or any other
    variable defined in run() (``event_count``, ``child`` and
    ``extra_args`` are the most useful). A callback may return True to
    stop the current run, or return a string which will be sent to the
    child; otherwise run() continues until the next event. *extra_args*
    is not used directly by run() -- it only provides a way to pass data
    through to callbacks via the locals dictionary.
    '''
    # Collect the pass-through options once, then delegate to the shared
    # implementation with the byte-oriented spawn class.
    options = dict(timeout=timeout, withexitstatus=withexitstatus,
                   events=events, extra_args=extra_args, logfile=logfile,
                   cwd=cwd, env=env)
    return _run(command, _spawn=spawn, **options)
def runu(command, timeout=-1, withexitstatus=False, events=None,
         extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
    '''Unicode counterpart of :func:`run`. As with :class:`spawnu`, the
    ``encoding`` and ``errors`` keyword arguments may be supplied and are
    used for both input and output.
    '''
    # Same delegation as run(), but with the unicode spawn class; any
    # extra keywords (encoding, errors, ...) ride along to spawnu.
    options = dict(timeout=timeout, withexitstatus=withexitstatus,
                   events=events, extra_args=extra_args, logfile=logfile,
                   cwd=cwd, env=env)
    options.update(kwargs)
    return _run(command, _spawn=spawnu, **options)
def _run(command, timeout, withexitstatus, events, extra_args, logfile, cwd,
         env, _spawn, **kwargs):
    # Shared implementation behind run() and runu(); _spawn selects the
    # spawn class (bytes) or spawnu (unicode). Extra kwargs are forwarded
    # to the spawn constructor (e.g. encoding/errors for spawnu).
    if timeout == -1:
        # -1 means "use the spawn class's default timeout".
        child = _spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
                       **kwargs)
    else:
        child = _spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
                       cwd=cwd, env=env, **kwargs)
    if events is not None:
        patterns = list(events.keys())
        responses = list(events.values())
    else:
        # This assumes EOF or TIMEOUT will eventually cause run to terminate.
        patterns = None
        responses = None
    child_result_list = []
    event_count = 0
    # NOTE: the local variable names below are part of the de-facto API --
    # callback responses receive this function's locals() and are documented
    # to use event_count, child and extra_args -- so do not rename them.
    while True:
        try:
            index = child.expect(patterns)
            if isinstance(child.after, child.allowed_string_types):
                child_result_list.append(child.before + child.after)
            else:
                # child.after may have been a TIMEOUT or EOF,
                # which we don't want appended to the list.
                child_result_list.append(child.before)
            if isinstance(responses[index], child.allowed_string_types):
                # Plain string response: send it to the child verbatim.
                child.send(responses[index])
            elif isinstance(responses[index], types.FunctionType):
                # Callback response: may return a string to send, or a
                # truthy non-string value to stop the run.
                callback_result = responses[index](locals())
                sys.stdout.flush()
                if isinstance(callback_result, child.allowed_string_types):
                    child.send(callback_result)
                elif callback_result:
                    break
            else:
                raise TypeError('The callback must be a string or function.')
            event_count = event_count + 1
        except TIMEOUT:
            child_result_list.append(child.before)
            break
        except EOF:
            child_result_list.append(child.before)
            break
    child_result = child.string_type().join(child_result_list)
    if withexitstatus:
        child.close()
        return (child_result, child.exitstatus)
    else:
        return child_result
class spawn(object):
    '''This is the main class interface for Pexpect. Use this class to start
    and control child applications. '''

    # The child's data is handled as bytes in this class; spawnu overrides
    # these hooks for unicode.
    string_type = bytes
    if PY3:
        # Python 3: accept both bytes and str from callers, converting at
        # the I/O boundary.
        allowed_string_types = (bytes, str)

        @staticmethod
        def _chr(c):
            # int -> single byte (the Python 3 spelling of chr() for bytes).
            return bytes([c])

        linesep = os.linesep.encode('ascii')
        crlf = '\r\n'.encode('ascii')

        @staticmethod
        def write_to_stdout(b):
            try:
                return sys.stdout.buffer.write(b)
            except AttributeError:
                # If stdout has been replaced, it may not have .buffer
                return sys.stdout.write(b.decode('ascii', 'replace'))
    else:
        # Python 2: native str is already bytes.
        allowed_string_types = (basestring,)  # analysis:ignore
        _chr = staticmethod(chr)
        linesep = os.linesep
        crlf = '\r\n'
        write_to_stdout = sys.stdout.write

    # Output encoding; None means raw bytes (spawnu sets a real codec).
    encoding = None
# NOTE(review): the mutable default args=[] is safe here -- the list is
# never mutated by __init__; _spawn() takes a shallow copy before use.
def __init__(self, command, args=[], timeout=30, maxread=2000,
             searchwindowsize=None, logfile=None, cwd=None, env=None,
             ignore_sighup=True, echo=True):
    '''This is the constructor. The command parameter may be a string that
    includes a command and any arguments to the command. For example::

        child = pexpect.spawn('/usr/bin/ftp')
        child = pexpect.spawn('/usr/bin/ssh user@example.com')
        child = pexpect.spawn('ls -latr /tmp')

    You may also construct it with a list of arguments like so::

        child = pexpect.spawn('/usr/bin/ftp', [])
        child = pexpect.spawn('/usr/bin/ssh', ['user@example.com'])
        child = pexpect.spawn('ls', ['-latr', '/tmp'])

    After this the child application will be created and will be ready to
    talk to. For normal use, see expect() and send() and sendline().

    Remember that Pexpect does NOT interpret shell meta characters such as
    redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
    common mistake. If you want to run a command and pipe it through
    another command then you must also start a shell. For example::

        child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
        child.expect(pexpect.EOF)

    The second form of spawn (where you pass a list of arguments) is useful
    in situations where you wish to spawn a command and pass it its own
    argument list. This can make syntax more clear. For example, the
    following is equivalent to the previous example::

        shell_cmd = 'ls -l | grep LOG > logs.txt'
        child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
        child.expect(pexpect.EOF)

    The maxread attribute sets the read buffer size. This is maximum number
    of bytes that Pexpect will try to read from a TTY at one time. Setting
    the maxread size to 1 will turn off buffering. Setting the maxread
    value higher may help performance in cases where large amounts of
    output are read back from the child. This feature is useful in
    conjunction with searchwindowsize.

    The searchwindowsize attribute sets the how far back in the incoming
    search buffer Pexpect will search for pattern matches. Every time
    Pexpect reads some data from the child it will append the data to the
    incoming buffer. The default is to search from the beginning of the
    incoming buffer each time new data is read from the child. But this is
    very inefficient if you are running a command that generates a large
    amount of data where you want to match. The searchwindowsize does not
    affect the size of the incoming data buffer. You will still have
    access to the full buffer after expect() returns.

    The logfile member turns on or off logging. All input and output will
    be copied to the given file object. Set logfile to None to stop
    logging. This is the default. Set logfile to sys.stdout to echo
    everything to standard output. The logfile is flushed after each write.

    Example log input and output to a file::

        child = pexpect.spawn('some_command')
        fout = file('mylog.txt','w')
        child.logfile = fout

    Example log to stdout::

        child = pexpect.spawn('some_command')
        child.logfile = sys.stdout

    The logfile_read and logfile_send members can be used to separately log
    the input from the child and output sent to the child. Sometimes you
    don't want to see everything you write to the child. You only want to
    log what the child sends back. For example::

        child = pexpect.spawn('some_command')
        child.logfile_read = sys.stdout

    To separately log output sent to the child use logfile_send::

        self.logfile_send = fout

    If ``ignore_sighup`` is True, the child process will ignore SIGHUP
    signals. For now, the default is True, to preserve the behaviour of
    earlier versions of Pexpect, but you should pass this explicitly if you
    want to rely on it.

    The delaybeforesend helps overcome a weird behavior that many users
    were experiencing. The typical problem was that a user would expect() a
    "Password:" prompt and then immediately call sendline() to send the
    password. The user would then see that their password was echoed back
    to them. Passwords don't normally echo. The problem is caused by the
    fact that most applications print out the "Password" prompt and then
    turn off stdin echo, but if you send your password before the
    application turned off echo, then you get your password echoed.
    Normally this wouldn't be a problem when interacting with a human at a
    real keyboard. If you introduce a slight delay just before writing then
    this seems to clear up the problem. This was such a common problem for
    many users that I decided that the default pexpect behavior should be
    to sleep just before writing to the child application. 1/20th of a
    second (50 ms) seems to be enough to clear up the problem. You can set
    delaybeforesend to 0 to return to the old behavior. Most Linux machines
    don't like this to be below 0.03. I don't know why.

    Note that spawn is clever about finding commands on your path.
    It uses the same logic that "which" uses to find executables.

    If you wish to get the exit status of the child you must call the
    close() method. The exit or signal status of the child will be stored
    in self.exitstatus or self.signalstatus. If the child exited normally
    then exitstatus will store the exit return code and signalstatus will
    be None. If the child was terminated abnormally with a signal then
    signalstatus will store the signal value and exitstatus will be None.
    If you need more detail you can also read the self.status member which
    stores the status returned by os.waitpid. You can interpret this using
    os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG.

    The echo attribute may be set to False to disable echoing of input.
    As a pseudo-terminal, all input echoed by the "keyboard" (send()
    or sendline()) will be repeated to output. For many cases, it is
    not desirable to have echo enabled, and it may be later disabled
    using setecho(False) followed by waitnoecho(). However, for some
    platforms such as Solaris, this is not possible, and should be
    disabled immediately on spawn.
    '''
    # Standard file descriptor numbers, taken from the pty module.
    self.STDIN_FILENO = pty.STDIN_FILENO
    self.STDOUT_FILENO = pty.STDOUT_FILENO
    self.STDERR_FILENO = pty.STDERR_FILENO
    self.stdin = sys.stdin
    self.stdout = sys.stdout
    self.stderr = sys.stderr
    # State populated by expect(): the active searcher and the text/match
    # results from the most recent search.
    self.searcher = None
    self.ignorecase = False
    self.before = None
    self.after = None
    self.match = None
    self.match_index = None
    # Child process state; no child exists yet.
    self.terminated = True
    self.exitstatus = None
    self.signalstatus = None
    # status returned by os.waitpid
    self.status = None
    self.flag_eof = False
    self.pid = None
    # the child file descriptor is initially closed
    self.child_fd = -1
    self.timeout = timeout
    self.delimiter = EOF
    self.logfile = logfile
    # input from child (read_nonblocking)
    self.logfile_read = None
    # output to send (send, sendline)
    self.logfile_send = None
    # max bytes to read at one time into buffer
    self.maxread = maxread
    # This is the read buffer. See maxread.
    self.buffer = self.string_type()
    # Data before searchwindowsize point is preserved, but not searched.
    self.searchwindowsize = searchwindowsize
    # Delay used before sending data to child. Time in seconds.
    # Most Linux machines don't like this to be below 0.03 (30 ms).
    self.delaybeforesend = 0.05
    # Used by close() to give kernel time to update process status.
    # Time in seconds.
    self.delayafterclose = 0.1
    # Used by terminate() to give kernel time to update process status.
    # Time in seconds.
    self.delayafterterminate = 0.1
    self.softspace = False
    self.name = '<' + repr(self) + '>'
    self.closed = True
    self.cwd = cwd
    self.env = env
    self.echo = echo
    self.ignore_sighup = ignore_sighup
    _platform = sys.platform.lower()
    # This flags if we are running on irix
    self.__irix_hack = _platform.startswith('irix')
    # Solaris uses internal __fork_pty(). All others use pty.fork().
    self.use_native_pty_fork = not (
        _platform.startswith('solaris') or
        _platform.startswith('sunos'))
    # inherit EOF and INTR definitions from controlling process.
    try:
        from termios import VEOF, VINTR
        fd = sys.__stdin__.fileno()
        self._INTR = ord(termios.tcgetattr(fd)[6][VINTR])
        self._EOF = ord(termios.tcgetattr(fd)[6][VEOF])
    except (ImportError, OSError, IOError, termios.error):
        # unless the controlling process is also not a terminal,
        # such as cron(1). Fall-back to using CEOF and CINTR.
        try:
            from termios import CEOF, CINTR
            (self._INTR, self._EOF) = (CINTR, CEOF)
        except ImportError:
            # ^C, ^D
            (self._INTR, self._EOF) = (3, 4)
    # Support subclasses that do not use command or args.
    if command is None:
        self.command = None
        self.args = None
        self.name = '<pexpect factory incomplete>'
    else:
        self._spawn(command, args)
@staticmethod
def _coerce_expect_string(s):
if not isinstance(s, bytes):
return s.encode('ascii')
return s
@staticmethod
def _coerce_send_string(s):
if not isinstance(s, bytes):
return s.encode('utf-8')
return s
@staticmethod
def _coerce_read_string(s):
return s
def __del__(self):
    '''This makes sure that no system resources are left open. Python only
    garbage collects Python objects. OS file descriptors are not Python
    objects, so they must be handled explicitly. If the child file
    descriptor was opened outside of this class (passed to the constructor)
    then this does not close it. '''
    if not self.closed:
        # It is possible for __del__ methods to execute during the
        # teardown of the Python VM itself. Thus self.close() may
        # trigger an exception because os.close may be None.
        try:
            self.close()
        # NOTE(review): the bare except is deliberate -- during interpreter
        # shutdown almost anything can raise, and __del__ must never
        # propagate. TODO: consider catching specific exceptions.
        except:
            pass
def __str__(self):
    '''Return a human-readable, multi-line dump of this object's state,
    intended for debugging.'''
    parts = [
        repr(self),
        'version: ' + __version__,
        'command: ' + str(self.command),
        'args: %r' % (self.args,),
        'searcher: %r' % (self.searcher,),
        'buffer (last 100 chars): %r' % (self.buffer[-100:],),
        'before (last 100 chars): %r' % (self.before[-100:],),
        'after: %r' % (self.after,),
        'match: %r' % (self.match,),
        'match_index: ' + str(self.match_index),
        'exitstatus: ' + str(self.exitstatus),
        'flag_eof: ' + str(self.flag_eof),
        'pid: ' + str(self.pid),
        'child_fd: ' + str(self.child_fd),
        'closed: ' + str(self.closed),
        'timeout: ' + str(self.timeout),
        'delimiter: ' + str(self.delimiter),
        'logfile: ' + str(self.logfile),
        'logfile_read: ' + str(self.logfile_read),
        'logfile_send: ' + str(self.logfile_send),
        'maxread: ' + str(self.maxread),
        'ignorecase: ' + str(self.ignorecase),
        'searchwindowsize: ' + str(self.searchwindowsize),
        'delaybeforesend: ' + str(self.delaybeforesend),
        'delayafterclose: ' + str(self.delayafterclose),
        'delayafterterminate: ' + str(self.delayafterterminate),
    ]
    return '\n'.join(parts)
def _spawn(self, command, args=[]):
    '''This starts the given command in a child process. This does all the
    fork/exec type of stuff for a pty. This is called by __init__. If args
    is empty then command will be parsed (split on spaces) and args will be
    set to parsed arguments. '''
    # The pid and child_fd of this object get set by this method.
    # Note that it is difficult for this method to fail.
    # You cannot detect if the child process cannot start.
    # So the only way you can tell if the child process started
    # or not is to try to read from the file descriptor. If you get
    # EOF immediately then it means that the child is already dead.
    # That may not necessarily be bad because you may have spawned a child
    # that performs some task; creates no stdout output; and then dies.

    # If command is an int type then it may represent a file descriptor.
    if isinstance(command, type(0)):
        raise ExceptionPexpect('Command is an int type. ' +
                'If this is a file descriptor then maybe you want to ' +
                'use fdpexpect.fdspawn which takes an existing ' +
                'file descriptor instead of a command string.')
    if not isinstance(args, type([])):
        raise TypeError('The argument, args, must be a list.')
    if args == []:
        # No explicit argv given: split the command string ourselves.
        self.args = split_command_line(command)
        self.command = self.args[0]
    else:
        # Make a shallow copy of the args list.
        self.args = args[:]
        self.args.insert(0, command)
        self.command = command
    # Resolve the executable on $PATH, like the shell builtin "which".
    command_with_path = which(self.command)
    if command_with_path is None:
        raise ExceptionPexpect('The command was not found or was not ' +
                'executable: %s.' % self.command)
    self.command = command_with_path
    self.args[0] = self.command
    self.name = '<' + ' '.join(self.args) + '>'
    assert self.pid is None, 'The pid member must be None.'
    assert self.command is not None, 'The command member must not be None.'
    if self.use_native_pty_fork:
        try:
            self.pid, self.child_fd = pty.fork()
        except OSError:  # pragma: no cover
            err = sys.exc_info()[1]
            raise ExceptionPexpect('pty.fork() failed: ' + str(err))
    else:
        # Use internal __fork_pty (platforms where pty.fork() is broken).
        self.pid, self.child_fd = self.__fork_pty()
    # Some platforms must call setwinsize() and setecho() from the
    # child process, and others from the master process. We do both,
    # allowing IOError for either.
    if self.pid == pty.CHILD:
        # Child
        self.child_fd = self.STDIN_FILENO
        # set default window size of 24 rows by 80 columns
        try:
            self.setwinsize(24, 80)
        except IOError as err:
            if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                raise
        # disable echo if spawn argument echo was unset
        if not self.echo:
            try:
                self.setecho(self.echo)
            except (IOError, termios.error) as err:
                if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                    raise
        # Do not allow child to inherit open file descriptors from parent.
        max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        os.closerange(3, max_fd)
        if self.ignore_sighup:
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
        if self.cwd is not None:
            os.chdir(self.cwd)
        # exec*() replaces the child's image; nothing below runs in the
        # child on success.
        if self.env is None:
            os.execv(self.command, self.args)
        else:
            os.execvpe(self.command, self.args, self.env)
    # Parent
    try:
        self.setwinsize(24, 80)
    except IOError as err:
        if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
            raise
    self.terminated = False
    self.closed = False
def __fork_pty(self):
    '''This implements a substitute for the forkpty system call. This
    should be more portable than the pty.fork() function. Specifically,
    this should work on Solaris.

    Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
    resolve the issue with Python's pty.fork() not supporting Solaris,
    particularly ssh. Based on patch to posixmodule.c authored by Noah
    Spurrier::

        http://mail.python.org/pipermail/python-dev/2003-May/035281.html

    Returns a ``(pid, fd)`` pair: in the parent, ``pid`` is the child's
    pid and ``fd`` is the master end of the pty; in the child, ``pid`` is
    0 and the slave end is dup'd onto stdin/stdout/stderr.
    '''
    parent_fd, child_fd = os.openpty()
    if parent_fd < 0 or child_fd < 0:
        # Defensive: os.openpty() normally raises OSError on failure.
        raise ExceptionPexpect("Could not open with os.openpty().")
    pid = os.fork()
    if pid == pty.CHILD:
        # Child.
        os.close(parent_fd)
        self.__pty_make_controlling_tty(child_fd)
        os.dup2(child_fd, self.STDIN_FILENO)
        os.dup2(child_fd, self.STDOUT_FILENO)
        os.dup2(child_fd, self.STDERR_FILENO)
    else:
        # Parent.
        os.close(child_fd)
    return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
    '''This makes the pseudo-terminal open on *tty_fd* the controlling tty
    of this process. This should be more portable than the pty.fork()
    function. Specifically, this should work on Solaris. '''
    child_name = os.ttyname(tty_fd)
    # Disconnect from controlling tty, if any. Raises OSError of ENXIO
    # if there was no controlling tty to begin with, such as when
    # executed by a cron(1) job.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        os.close(fd)
    except OSError as err:
        if err.errno != errno.ENXIO:
            raise
    # Become session leader so the pty opened below can be acquired as
    # the controlling terminal.
    os.setsid()
    # Verify we are disconnected from controlling tty by attempting to open
    # it again. We expect that OSError of ENXIO should always be raised.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        os.close(fd)
        raise ExceptionPexpect("OSError of errno.ENXIO should be raised.")
    except OSError as err:
        if err.errno != errno.ENXIO:
            raise
    # Verify we can open child pty.
    fd = os.open(child_name, os.O_RDWR)
    os.close(fd)
    # Verify we now have a controlling tty.
    fd = os.open("/dev/tty", os.O_WRONLY)
    os.close(fd)
def fileno(self):
    '''Return the file descriptor of the pty that is connected to the
    child process.'''
    return self.child_fd
def close(self, force=True):
    '''This closes the connection with the child application. Note that
    calling close() more than once is valid. This emulates standard Python
    behavior with files. Set force to True if you want to make sure that
    the child is terminated (SIGKILL is sent if the child ignores SIGHUP
    and SIGINT). '''
    if not self.closed:
        self.flush()
        os.close(self.child_fd)
        # Give kernel time to update process status.
        time.sleep(self.delayafterclose)
        if self.isalive():
            if not self.terminate(force):
                raise ExceptionPexpect('Could not terminate the child.')
        self.child_fd = -1
        self.closed = True
        # The pid is deliberately kept so exitstatus/signalstatus can
        # still be queried after close().
        #self.pid = None
def flush(self):
    '''No-op; present only so that a spawn instance satisfies the
    file-like object interface.'''
def isatty(self):
    '''Return True when the child file descriptor is open and connected
    to a tty(-like) device, else False.

    On SVR4-style platforms implementing streams, such as SunOS and
    HP-UX, the child pty may not appear as a terminal device. This means
    methods such as setecho(), setwinsize(), getwinsize() may raise an
    IOError.'''
    return os.isatty(self.child_fd)
def waitnoecho(self, timeout=-1):
    '''This waits until the terminal ECHO flag is set False. This returns
    True if the echo mode is off. This returns False if the ECHO flag was
    not set False before the timeout. This can be used to detect when the
    child is waiting for a password. Usually a child application will turn
    off echo mode when it is waiting for the user to enter a password. For
    example, instead of expecting the "password:" prompt you can wait for
    the child to set ECHO off::

        p = pexpect.spawn('ssh user@example.com')
        p.waitnoecho()
        p.sendline(mypassword)

    If timeout==-1 then this method will use the value in self.timeout.
    If timeout==None then this method blocks until the ECHO flag is False.
    '''
    if timeout == -1:
        timeout = self.timeout
    if timeout is not None:
        end_time = time.time() + timeout
    while True:
        if not self.getecho():
            return True
        # BUG FIX: the None check must come before the comparison. The
        # previous order ("timeout < 0 and timeout is not None") raised
        # TypeError on Python 3 whenever timeout was None -- i.e. in the
        # documented blocking mode.
        if timeout is not None and timeout < 0:
            return False
        if timeout is not None:
            # Shrink the remaining budget for the next iteration.
            timeout = end_time - time.time()
        time.sleep(0.1)
def getecho(self):
    '''This returns the terminal echo mode. This returns True if echo is
    on or False if echo is off. Child applications that are expecting you
    to enter a password often set ECHO False. See waitnoecho().

    Not supported on platforms where ``isatty()`` returns False.

    Raises IOError (with the original errno) when the platform does not
    support tcgetattr on the child pty (EINVAL); other termios errors
    propagate unchanged.'''
    try:
        attr = termios.tcgetattr(self.child_fd)
    except termios.error as err:
        errmsg = 'getecho() may not be called on this platform'
        if err.args[0] == errno.EINVAL:
            raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
        raise
    # attr[3] is the lflag field; test its ECHO bit.
    self.echo = bool(attr[3] & termios.ECHO)
    return self.echo
def setecho(self, state):
    '''This sets the terminal echo mode on or off. Note that anything the
    child sent before the echo will be lost, so you should be sure that
    your input buffer is empty before you call setecho(). For example, the
    following will work as expected::

        p = pexpect.spawn('cat') # Echo is on by default.
        p.sendline('1234') # We expect see this twice from the child...
        p.expect(['1234']) # ... once from the tty echo...
        p.expect(['1234']) # ... and again from cat itself.
        p.setecho(False) # Turn off tty echo
        p.sendline('abcd') # We will set this only once (echoed by cat).
        p.sendline('wxyz') # We will set this only once (echoed by cat)
        p.expect(['abcd'])
        p.expect(['wxyz'])

    The following WILL NOT WORK because the lines sent before the setecho
    will be lost::

        p = pexpect.spawn('cat')
        p.sendline('1234')
        p.setecho(False) # Turn off tty echo
        p.sendline('abcd') # We will set this only once (echoed by cat).
        p.sendline('wxyz') # We will set this only once (echoed by cat)
        p.expect(['1234'])
        p.expect(['1234'])
        p.expect(['abcd'])
        p.expect(['wxyz'])

    Not supported on platforms where ``isatty()`` returns False.
    '''
    errmsg = 'setecho() may not be called on this platform'
    try:
        attr = termios.tcgetattr(self.child_fd)
    except termios.error as err:
        if err.args[0] == errno.EINVAL:
            raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
        raise
    # attr[3] is the lflag field; set or clear its ECHO bit.
    if state:
        attr[3] = attr[3] | termios.ECHO
    else:
        attr[3] = attr[3] & ~termios.ECHO
    try:
        # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and
        # blocked on some platforms. TCSADRAIN would probably be ideal.
        termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
    except IOError as err:
        if err.args[0] == errno.EINVAL:
            raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg))
        raise
    self.echo = state
def _log(self, s, direction):
    # Mirror ``s`` to the main logfile (if any) and then to the
    # direction-specific one (logfile_send for writes, logfile_read for
    # reads), flushing each so logs stay current even on a crash.
    if self.logfile is not None:
        self.logfile.write(s)
        self.logfile.flush()
    if direction == 'send':
        extra = self.logfile_send
    else:
        extra = self.logfile_read
    if extra is not None:
        extra.write(s)
        extra.flush()
def read_nonblocking(self, size=1, timeout=-1):
    '''This reads at most size characters from the child application. It
    includes a timeout. If the read does not complete within the timeout
    period then a TIMEOUT exception is raised. If the end of file is read
    then an EOF exception will be raised. If a log file was set using
    setlog() then all data will also be written to the log file.

    If timeout is None then the read may block indefinitely.
    If timeout is -1 then the self.timeout value is used. If timeout is 0
    then the child is polled and if there is no data immediately ready
    then this will raise a TIMEOUT exception.

    The timeout refers only to the amount of time to read at least one
    character. This is not affected by the 'size' parameter, so if you call
    read_nonblocking(size=100, timeout=30) and only one character is
    available right away then one character will be returned immediately.
    It will not wait for 30 seconds for another 99 characters to come in.

    This is a wrapper around os.read(). It uses select.select() to
    implement the timeout. '''
    if self.closed:
        raise ValueError('I/O operation on closed file.')
    if timeout == -1:
        timeout = self.timeout
    # Note that some systems such as Solaris do not give an EOF when
    # the child dies. In fact, you can still try to read
    # from the child_fd -- it will block forever or until TIMEOUT.
    # For this case, I test isalive() before doing any reading.
    # If isalive() is false, then I pretend that this is the same as EOF.
    if not self.isalive():
        # timeout of 0 means "poll": just check for already-buffered data.
        r, w, e = self.__select([self.child_fd], [], [], 0)
        if not r:
            self.flag_eof = True
            raise EOF('End Of File (EOF). Braindead platform.')
    elif self.__irix_hack:
        # Irix takes a long time before it realizes a child was terminated.
        # FIXME So does this mean Irix systems are forced to always have
        # FIXME a 2 second delay when calling read_nonblocking? That sucks.
        r, w, e = self.__select([self.child_fd], [], [], 2)
        if not r and not self.isalive():
            self.flag_eof = True
            raise EOF('End Of File (EOF). Slow platform.')
    # Main wait: block up to 'timeout' for data to become readable.
    r, w, e = self.__select([self.child_fd], [], [], timeout)
    if not r:
        if not self.isalive():
            # Some platforms, such as Irix, will claim that their
            # processes are alive; timeout on the select; and
            # then finally admit that they are not alive.
            self.flag_eof = True
            raise EOF('End of File (EOF). Very slow platform.')
        else:
            raise TIMEOUT('Timeout exceeded.')
    if self.child_fd in r:
        try:
            s = os.read(self.child_fd, size)
        except OSError as err:
            if err.args[0] == errno.EIO:
                # Linux-style EOF: reading a pty whose child has gone
                # raises EIO rather than returning an empty string.
                self.flag_eof = True
                raise EOF('End Of File (EOF). Exception style platform.')
            raise
        if s == b'':
            # BSD-style EOF: a zero-length read signals the child is gone.
            self.flag_eof = True
            raise EOF('End Of File (EOF). Empty string style platform.')
        # spawnu overrides _coerce_read_string to decode bytes to unicode.
        s = self._coerce_read_string(s)
        self._log(s, 'read')
        return s
    raise ExceptionPexpect('Reached an unexpected state.')  # pragma: no cover
def read(self, size=-1):
    '''Read at most ``size`` bytes from the child (fewer if EOF is hit
    before ``size`` bytes are available).  A negative or omitted size
    reads everything up to EOF.  Returns an empty string when EOF is
    encountered immediately.'''
    if size == 0:
        return self.string_type()
    if size < 0:
        # delimiter default is EOF
        self.expect(self.delimiter)
        return self.before
    # read() is deliberately implemented on top of expect() rather than
    # directly, so the two share one code path: slightly less efficient,
    # but bugs surface early and behavior stays consistent if either is
    # modified later.  (A size of -1 in the regex would simply never
    # match, stopping only on EOF -- that case is handled above anyway.)
    cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
    # delimiter default is EOF
    which = self.expect([cre, self.delimiter])
    if which == 0:
        ### FIXME self.before should be ''. Should I assert this?
        return self.after
    return self.before
def readline(self, size=-1):
    '''Read and return one entire line, including the trailing newline
    unless the file ends without one; return an empty string on
    immediate EOF.  Newlines arrive as CR/LF pairs (\\r\\n) even on
    UNIX, because that is what the pseudotty device produces.

    A ``size`` of 0 returns an empty string.  Any other value is
    ignored, which is not standard file-like behavior.'''
    if size == 0:
        return self.string_type()
    # delimiter default is EOF
    which = self.expect([self.crlf, self.delimiter])
    return self.before + self.crlf if which == 0 else self.before
def __iter__(self):
    '''Iterate over the child's output one line at a time, stopping at
    the first empty line (file-like iterator protocol).'''
    while True:
        line = self.readline()
        if not line:
            return
        yield line
def readlines(self, sizehint=-1):
    '''Read until EOF using readline() and return the lines as a list;
    the 'sizehint' argument is ignored.

    Because this reads until EOF, the child should have closed its
    stdout.  Running this on a child that is still alive with stdout
    open blocks until the timeout.'''
    collected = []
    line = self.readline()
    while line:
        collected.append(line)
        line = self.readline()
    return collected
def write(self, s):
    '''File-like write(): identical to send() but discards the byte
    count it returns.'''
    self.send(s)
def writelines(self, sequence):
    '''Call write() for every string produced by the iterable
    ``sequence``.  No line separators are added; nothing is returned.'''
    for chunk in sequence:
        self.write(chunk)
def send(self, s):
    '''Send string ``s`` to the child process, returning the number of
    bytes written.  A copy goes to the logfiles if configured.'''
    # Honor the configured pacing delay before every write.
    time.sleep(self.delaybeforesend)
    payload = self._coerce_send_string(s)
    self._log(payload, 'send')
    return self._send(payload)
def _send(self, s):
    # Raw byte write to the child's fd; spawnu overrides this to encode
    # unicode first.
    return os.write(self.child_fd, s)
def sendline(self, s=''):
    '''send() the string ``s`` followed by os.linesep; return the total
    number of bytes written.'''
    return self.send(s) + self.send(self.linesep)
def sendcontrol(self, char):
    '''Send a control character to the child by its mnemonic name: for
    example, to send Ctrl-G (ASCII 7, bell, '\a')::

        child.sendcontrol('g')

    Returns the number of bytes written, or 0 for an unknown mnemonic.
    See also, sendintr() and sendeof().
    '''
    char = char.lower()
    code = ord(char)
    if ord('a') <= code <= ord('z'):
        # Letters map onto ASCII control codes 1..26.
        return self.send(self._chr(code - ord('a') + 1))
    # Non-letter control characters, with their shifted aliases.
    punctuation = {'@': 0, '`': 0,
            '[': 27, '{': 27,
            '\\': 28, '|': 28,
            ']': 29, '}': 29,
            '^': 30, '~': 30,
            '_': 31,
            '?': 127}
    if char not in punctuation:
        return 0
    return self.send(self._chr(punctuation[char]))
def sendeof(self):
    '''Send the EOF character to the child.  This flushes the pending
    parent output buffer to the waiting child without waiting for
    end-of-line; if it is the first character of a line, the child's
    read() returns 0, signifying end-of-file.  So, to work as expected,
    sendeof() must be called at the beginning of a line.  No newline is
    sent; the caller is responsible for ensuring the EOF lands at the
    start of a line.'''
    self.send(self._chr(self._EOF))
def sendintr(self):
    '''Send the interrupt character (SIGINT) to the child.  Unlike
    sendeof(), it need not be the first character on a line.'''
    self.send(self._chr(self._INTR))
def eof(self):
    '''Return True if the EOF exception has ever been raised for this
    child.'''
    return self.flag_eof
def terminate(self, force=False):
    '''Force the child process to terminate.

    Tries progressively stronger signals -- SIGHUP, then SIGCONT, then
    SIGINT -- sleeping self.delayafterterminate between attempts and
    re-checking isalive() after each.  If "force" is True it finishes
    with SIGKILL.  Returns True if the child was terminated, False if
    it could not be.'''
    if not self.isalive():
        return True
    try:
        self.kill(signal.SIGHUP)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        self.kill(signal.SIGCONT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        self.kill(signal.SIGINT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        if force:
            self.kill(signal.SIGKILL)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            else:
                return False
        return False
    except OSError:
        # I think there are kernel timing issues that sometimes cause
        # this to happen. I think isalive() reports True, but the
        # process is dead to the kernel.
        # Make one last attempt to see if the kernel is up to date.
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        else:
            return False
def wait(self):
    '''Wait until the child exits and return its exit status (None if it
    was killed by a signal; self.signalstatus then holds the signal).

    This is a blocking call.  It reads no data from the child, so it
    will block forever if the child has terminated but still has unread
    output: the child stays technically alive until its output is read
    by the parent.

    Raises ExceptionPexpect if the child process is already dead.
    '''
    if self.isalive():
        pid, status = os.waitpid(self.pid, 0)
    else:
        raise ExceptionPexpect('Cannot wait for dead child process.')
    # Fix: exitstatus was previously assigned os.WEXITSTATUS(status)
    # unconditionally before these branches, which computed a
    # meaningless value for signaled children; every branch below sets
    # the attributes properly, so that pre-assignment is gone.
    if os.WIFEXITED(status):
        self.status = status
        self.exitstatus = os.WEXITSTATUS(status)
        self.signalstatus = None
        self.terminated = True
    elif os.WIFSIGNALED(status):
        self.status = status
        self.exitstatus = None
        self.signalstatus = os.WTERMSIG(status)
        self.terminated = True
    elif os.WIFSTOPPED(status):  # pragma: no cover
        # You can't call wait() on a child process in the stopped state.
        raise ExceptionPexpect('Called wait() on a stopped child ' +
                'process. This is not supported. Is some other ' +
                'process attempting job control with our child pid?')
    return self.exitstatus
def isalive(self):
    '''Return True if the child process appears to be running, False
    otherwise.  Non-blocking (unless flag_eof is set -- see below).  If
    the child has terminated, its exitstatus or signalstatus is read
    and recorded.  It can take literally SECONDS for Solaris to return
    the right status.

    Raises ExceptionPexpect if waitpid() says there is no such child or
    if the child is in the stopped state.'''
    if self.terminated:
        return False
    if self.flag_eof:
        # This is for Linux, which requires the blocking form
        # of waitpid to get the status of a defunct process.
        # This is super-lame. The flag_eof would have been set
        # in read_nonblocking(), so this should be safe.
        waitpid_options = 0
    else:
        waitpid_options = os.WNOHANG
    try:
        pid, status = os.waitpid(self.pid, waitpid_options)
    except OSError:
        err = sys.exc_info()[1]
        # No child processes
        if err.errno == errno.ECHILD:
            raise ExceptionPexpect('isalive() encountered condition ' +
                    'where "terminated" is 0, but there was no child ' +
                    'process. Did someone else call waitpid() ' +
                    'on our process?')
        else:
            raise err
    # I have to do this twice for Solaris.
    # I can't even believe that I figured this out...
    # If waitpid() returns 0 it means that no child process
    # wishes to report, and the value of status is undefined.
    if pid == 0:
        try:
            ### os.WNOHANG) # Solaris!
            pid, status = os.waitpid(self.pid, waitpid_options)
        except OSError as e:  # pragma: no cover
            # This should never happen...
            if e.errno == errno.ECHILD:
                raise ExceptionPexpect('isalive() encountered condition ' +
                        'that should never happen. There was no child ' +
                        'process. Did someone else call waitpid() ' +
                        'on our process?')
            else:
                raise
        # If pid is still 0 after two calls to waitpid() then the process
        # really is alive. This seems to work on all platforms, except for
        # Irix which seems to require a blocking call on waitpid or select,
        # so I let read_nonblocking take care of this situation
        # (unfortunately, this requires waiting through the timeout).
        # Fix: this check previously appeared twice in a row; the
        # duplicate (dead code) has been removed.
        if pid == 0:
            return True
    if os.WIFEXITED(status):
        self.status = status
        self.exitstatus = os.WEXITSTATUS(status)
        self.signalstatus = None
        self.terminated = True
    elif os.WIFSIGNALED(status):
        self.status = status
        self.exitstatus = None
        self.signalstatus = os.WTERMSIG(status)
        self.terminated = True
    elif os.WIFSTOPPED(status):
        raise ExceptionPexpect('isalive() encountered condition ' +
                'where child process is stopped. This is not ' +
                'supported. Is some other process attempting ' +
                'job control with our child pid?')
    return False
def kill(self, sig):
    '''Send signal ``sig`` to the child if it is still alive.  In
    keeping with UNIX tradition the name is misleading: the child is
    only killed if you send it a fatal signal.'''
    # Equivalent to os.kill() with the child's pid supplied for you.
    if self.isalive():
        os.kill(self.pid, sig)
def _pattern_type_err(self, pattern):
    # Raise a uniform TypeError for pattern objects expect() cannot use.
    good = ', '.join(str(ast) for ast in self.allowed_string_types)
    raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
            ' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'.format(
                badtype=type(pattern), badobj=pattern, goodtypes=good))
def compile_pattern_list(self, patterns):
    '''Compile a pattern string, or a list of pattern strings, into the
    list form consumed by expect_list().  Entries may be strings (which
    are compiled to regexes), EOF, TIMEOUT, or already-compiled
    SRE_Pattern objects.  ``patterns`` may also be None, which yields an
    empty list (useful when waiting only for an EOF or TIMEOUT
    condition without expecting any pattern).

    expect() is nothing more than::

        cpl = self.compile_pattern_list(pl)
        return self.expect_list(cpl, timeout)

    so when calling expect() in a loop it may be more efficient to
    compile once up front and call expect_list() directly::

        cpl = self.compile_pattern_list(my_pattern)
        while some_condition:
            ...
            i = self.expect_list(cpl, timeout)
            ...
    '''
    if patterns is None:
        return []
    if not isinstance(patterns, list):
        patterns = [patterns]
    flags = re.DOTALL  # allow dot to match \n
    if self.ignorecase:
        flags |= re.IGNORECASE
    compiled = []
    for p in patterns:
        if isinstance(p, self.allowed_string_types):
            compiled.append(re.compile(self._coerce_expect_string(p), flags))
        elif p is EOF:
            compiled.append(EOF)
        elif p is TIMEOUT:
            compiled.append(TIMEOUT)
        elif isinstance(p, type(re.compile(''))):
            compiled.append(p)
        else:
            self._pattern_type_err(p)
    return compiled
def expect(self, pattern, timeout=-1, searchwindowsize=-1):
    '''Seek through the stream until a pattern is matched and return the
    index into the pattern list of the entry that matched (0 if
    'pattern' was not a list).

    'pattern' is overloaded: it may be a string, EOF, TIMEOUT, a
    compiled re, or a list of any of those.  Strings are compiled to re
    types.  On end-of-file or timeout the EOF or TIMEOUT exception is
    raised -- unless EOF or TIMEOUT is itself in the pattern list, in
    which case its index is returned instead, 'after' is set to the
    exception type, and 'match' is None.  That allows code like::

        index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
        if index == 0:
            do_something()
        elif index == 1:
            do_something_else()
        elif index == 2:
            do_some_other_thing()
        elif index == 3:
            do_something_completely_different()

    instead of wrapping the call in try/except EOF/except TIMEOUT --
    the two forms are equivalent.

    If several patterns match, the one matching earliest in the stream
    wins; on a tie at the same point, the leftmost entry of the pattern
    list wins::

        # the input is 'foobar'
        index = p.expect(['bar', 'foo', 'foobar'])
        # returns 1('foo') even though 'foobar' is a "better" match

    Buffering can affect this, though, since input arrives in
    unpredictable chunks::

        # the input is 'foobar'
        index = p.expect(['foobar', 'foo'])
        # returns 0('foobar') if all input is available at once,
        # but returs 1('foo') if parts of the final 'bar' arrive late

    After a match, 'before' holds all data read before the match,
    'after' holds the matched data, and 'match' holds the
    re.MatchObject.  If an error occurred, 'before' holds everything
    read so far and 'after' and 'match' are None.

    A timeout of -1 means use the self.timeout value.

    To simply wait for all output of a child to finish, expect the
    EOF::

        p = pexpect.spawn('/bin/ls')
        p.expect(pexpect.EOF)
        print p.before

    If you are trying to optimize for speed then see expect_list().
    '''
    compiled = self.compile_pattern_list(pattern)
    return self.expect_list(compiled, timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1):
    '''Match the child's output against a list of compiled regular
    expressions (the list may also contain EOF or TIMEOUT, which are
    not compiled regexes) and return the index of the entry that
    matched.  This is what expect() calls; it behaves the same but does
    not recompile the pattern list on every call, which helps when
    optimizing for speed.  A timeout of -1 means use self.timeout; a
    searchwindowsize of -1 means use self.searchwindowsize.'''
    return self.expect_loop(searcher_re(pattern_list),
            timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1):
    '''Like expect(), but matches plain strings instead of compiled
    regular expressions.  'pattern_list' may be a string; a list or
    other sequence of strings; or TIMEOUT and EOF.

    This can be faster than expect() for two reasons: plain string
    search beats regex matching, and the search can be limited to just
    the end of the input buffer.  It is also handy when you do not want
    to escape regex metacharacters in the text being matched.'''
    if (isinstance(pattern_list, self.allowed_string_types) or
            pattern_list in (TIMEOUT, EOF)):
        pattern_list = [pattern_list]

    def prepare(p):
        # Pass EOF/TIMEOUT through unchanged; coerce strings to the
        # native string type; reject anything else.
        if p in (TIMEOUT, EOF):
            return p
        if isinstance(p, self.allowed_string_types):
            return self._coerce_expect_string(p)
        self._pattern_type_err(p)

    try:
        pattern_iter = iter(pattern_list)
    except TypeError:
        self._pattern_type_err(pattern_list)
    pattern_list = [prepare(p) for p in pattern_iter]
    return self.expect_loop(searcher_string(pattern_list),
            timeout, searchwindowsize)
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
    '''This is the common loop used inside expect. The 'searcher' should be
    an instance of searcher_re or searcher_string, which describes how and
    what to search for in the input.

    See expect() for other arguments, return value and exceptions. '''
    self.searcher = searcher
    if timeout == -1:
        timeout = self.timeout
    if timeout is not None:
        # Absolute deadline; 'timeout' below is recomputed after every
        # read as the time remaining until this deadline.
        end_time = time.time() + timeout
    if searchwindowsize == -1:
        searchwindowsize = self.searchwindowsize
    try:
        incoming = self.buffer
        freshlen = len(incoming)
        while True:
            # Keep reading until exception or return.
            index = searcher.search(incoming, freshlen, searchwindowsize)
            if index >= 0:
                # Match found: split the data around it and keep the
                # remainder buffered for the next expect call.
                self.buffer = incoming[searcher.end:]
                self.before = incoming[: searcher.start]
                self.after = incoming[searcher.start: searcher.end]
                self.match = searcher.match
                self.match_index = index
                return self.match_index
            # No match at this point
            if (timeout is not None) and (timeout < 0):
                raise TIMEOUT('Timeout exceeded in expect_any().')
            # Still have time left, so read more data
            c = self.read_nonblocking(self.maxread, timeout)
            # Only the newly-read tail needs to be (re)searched.
            freshlen = len(c)
            time.sleep(0.0001)
            incoming = incoming + c
            if timeout is not None:
                timeout = end_time - time.time()
    except EOF:
        err = sys.exc_info()[1]
        self.buffer = self.string_type()
        self.before = incoming
        self.after = EOF
        index = searcher.eof_index
        if index >= 0:
            # EOF was one of the requested patterns: report its index
            # instead of raising.
            self.match = EOF
            self.match_index = index
            return self.match_index
        else:
            self.match = None
            self.match_index = None
            raise EOF(str(err) + '\n' + str(self))
    except TIMEOUT:
        err = sys.exc_info()[1]
        self.buffer = incoming
        self.before = incoming
        self.after = TIMEOUT
        index = searcher.timeout_index
        if index >= 0:
            # TIMEOUT was one of the requested patterns: report its
            # index instead of raising.
            self.match = TIMEOUT
            self.match_index = index
            return self.match_index
        else:
            self.match = None
            self.match_index = None
            raise TIMEOUT(str(err) + '\n' + str(self))
    except:
        # Any other error: record what was read so far, then re-raise.
        self.before = incoming
        self.after = None
        self.match = None
        self.match_index = None
        raise
def getwinsize(self):
    '''Return the child tty's window size as a (rows, cols) tuple.'''
    # Fall back to the common numeric value on platforms whose termios
    # module lacks the TIOCGWINSZ constant.
    TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
    packed = struct.pack('HHHH', 0, 0, 0, 0)
    raw = fcntl.ioctl(self.child_fd, TIOCGWINSZ, packed)
    return struct.unpack('HHHH', raw)[0:2]
def setwinsize(self, rows, cols):
    '''Set the child tty's window size, which delivers a SIGWINCH to
    the child.  The physical window is untouched; only the size
    reported to TTY-aware applications (vi, curses, and other programs
    that respond to SIGWINCH) changes.'''
    # Some very old platforms have a bug that truncates the value of
    # termios.TIOCSWINSZ.  A workaround for that used to live here, but
    # it broke newer platforms and was removed; see
    # https://github.com/pexpect/pexpect/issues/39
    TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
    # ws_xpixel and ws_ypixel are assumed to be zero.
    winsize = struct.pack('HHHH', rows, cols, 0, 0)
    fcntl.ioctl(self.fileno(), TIOCSWINSZ, winsize)
def interact(self, escape_character=chr(29),
        input_filter=None, output_filter=None):
    '''This gives control of the child process to the interactive user (the
    human at the keyboard). Keystrokes are sent to the child process, and
    the stdout and stderr output of the child process is printed. This
    simply echos the child stdout and child stderr to the real stdout and
    it echos the real stdin to the child stdin. When the user types the
    escape_character this method will stop. The default for
    escape_character is ^]. This should not be confused with ASCII 27 --
    the ESC character. ASCII 29 was chosen for historical merit because
    this is the character used by 'telnet' as the escape character. The
    escape_character will not be sent to the child process.

    You may pass in optional input and output filter functions. These
    functions should take a string and return a string. The output_filter
    will be passed all the output from the child process. The input_filter
    will be passed all the keyboard input from the user. The input_filter
    is run BEFORE the check for the escape_character.

    Note that if you change the window size of the parent the SIGWINCH
    signal will not be passed through to the child. If you want the child
    window size to change when the parent's window size changes then do
    something like the following example::

        import pexpect, struct, fcntl, termios, signal, sys
        def sigwinch_passthrough (sig, data):
            s = struct.pack("HHHH", 0, 0, 0, 0)
            a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
                termios.TIOCGWINSZ , s))
            global p
            p.setwinsize(a[0],a[1])
        # Note this 'p' global and used in sigwinch_passthrough.
        p = pexpect.spawn('/bin/bash')
        signal.signal(signal.SIGWINCH, sigwinch_passthrough)
        p.interact()
    '''
    # Flush the buffer: show the user anything already read from the
    # child but not yet consumed, then start with an empty buffer.
    self.write_to_stdout(self.buffer)
    self.stdout.flush()
    self.buffer = self.string_type()
    # Put the real stdin into raw mode so keystrokes pass straight
    # through; the saved mode is restored below no matter what happens.
    mode = tty.tcgetattr(self.STDIN_FILENO)
    tty.setraw(self.STDIN_FILENO)
    if PY3:
        # The copy loop works on bytes; encode the escape so it can be
        # compared against the raw input.
        escape_character = escape_character.encode('latin-1')
    try:
        self.__interact_copy(escape_character, input_filter, output_filter)
    finally:
        tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
    # Write all of 'data' to fd, looping over partial writes, for as
    # long as the child stays alive.  Used by the interact() method.
    while data != b'' and self.isalive():
        written = os.write(fd, data)
        data = data[written:]
def __interact_read(self, fd):
    # Read up to 1000 bytes from fd.  Used by the interact() method.
    return os.read(fd, 1000)
def __interact_copy(self, escape_character=None,
        input_filter=None, output_filter=None):
    '''This is used by the interact() method.

    Event loop: shuttles bytes between the child's pty and the real
    stdin/stdout until the child dies, EOF is seen, or the user types
    the escape character.'''
    while self.isalive():
        # Block until the child or the user's keyboard has data.
        r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
        if self.child_fd in r:
            try:
                data = self.__interact_read(self.child_fd)
            except OSError as err:
                if err.args[0] == errno.EIO:
                    # Linux-style EOF
                    break
                raise
            if data == b'':
                # BSD-style EOF
                break
            if output_filter:
                data = output_filter(data)
            if self.logfile is not None:
                self.logfile.write(data)
                self.logfile.flush()
            os.write(self.STDOUT_FILENO, data)
        if self.STDIN_FILENO in r:
            data = self.__interact_read(self.STDIN_FILENO)
            if input_filter:
                data = input_filter(data)
            # The filter runs BEFORE the escape check, per interact()'s
            # documented contract.
            i = data.rfind(escape_character)
            if i != -1:
                # Forward only what precedes the escape, then stop; the
                # escape character itself is never sent to the child.
                data = data[:i]
                self.__interact_writen(self.child_fd, data)
                break
            self.__interact_writen(self.child_fd, data)
def __select(self, iwtd, owtd, ewtd, timeout=None):
    '''This is a wrapper around select.select() that ignores signals. If
    select.select raises a select.error exception and errno is an EINTR
    error then it is ignored. Mainly this is used to ignore sigwinch
    (terminal resize). '''
    # if select() is interrupted by a signal (errno==EINTR) then
    # we loop back and enter the select() again.
    if timeout is not None:
        # Absolute deadline used to recompute the remaining timeout
        # after each interruption.
        end_time = time.time() + timeout
    while True:
        try:
            return select.select(iwtd, owtd, ewtd, timeout)
        except select.error:
            err = sys.exc_info()[1]
            if err.args[0] == errno.EINTR:
                # if we loop back we have to subtract the
                # amount of time we already waited.
                if timeout is not None:
                    timeout = end_time - time.time()
                    if timeout < 0:
                        # Deadline already passed: report "nothing ready".
                        return([], [], [])
            else:
                # something else caused the select.error, so
                # this actually is an exception.
                raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread(self, maxread):  # pragma: no cover
    '''Removed API.  Setters without a good reason are unwelcome here;
    assign directly to the maxread attribute instead.'''
    raise ExceptionPexpect('This method is no longer supported ' +
            'or allowed. Just assign a value to the ' +
            'maxread member variable.')
def setlog(self, fileobject):  # pragma: no cover
    '''Removed API.  Assign directly to the logfile attribute instead.'''
    raise ExceptionPexpect('This method is no longer supported ' +
            'or allowed. Just assign a value to the logfile ' +
            'member variable.')
##############################################################################
# End of spawn class
##############################################################################
class spawnu(spawn):
    """Works like spawn, but accepts and returns unicode strings.

    Extra parameters:

    :param encoding: The encoding to use for communications (default: 'utf-8')
    :param errors: How to handle encoding/decoding errors; one of 'strict'
                   (the default), 'ignore', or 'replace', as described
                   for :meth:`~bytes.decode` and :meth:`~str.encode`.
    """
    # Override spawn's byte-oriented class attributes with their
    # text-string counterparts for the running interpreter.
    if PY3:
        string_type = str
        allowed_string_types = (str, )
        _chr = staticmethod(chr)
        linesep = os.linesep
        crlf = '\r\n'
    else:
        string_type = unicode
        allowed_string_types = (unicode, )
        _chr = staticmethod(unichr)
        linesep = os.linesep.decode('ascii')
        crlf = '\r\n'.decode('ascii')
    # This can handle unicode in both Python 2 and 3
    write_to_stdout = sys.stdout.write

    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf-8')
        self.errors = kwargs.pop('errors', 'strict')
        # An incremental decoder is needed because a read may split a
        # multi-byte character across chunk boundaries.
        self._decoder = codecs.getincrementaldecoder(self.encoding)(errors=self.errors)
        super(spawnu, self).__init__(*args, **kwargs)

    @staticmethod
    def _coerce_expect_string(s):
        # Patterns are already unicode; no conversion needed.
        return s

    @staticmethod
    def _coerce_send_string(s):
        # Outgoing strings are encoded later, in _send(); pass through.
        return s

    def _coerce_read_string(self, s):
        # Decode bytes read from the child, retaining any incomplete
        # trailing sequence for the next call (final=False).
        return self._decoder.decode(s, final=False)

    def _send(self, s):
        # Encode unicode to bytes before the raw os.write().
        return os.write(self.child_fd, s.encode(self.encoding, self.errors))
class searcher_string(object):
    '''Plain-string search helper for the spawn.expect_any() method,
    built for speed.  For more powerful regex patterns see the helper
    class, searcher_re.

    Attributes:

        eof_index     - index of EOF, or -1
        timeout_index - index of TIMEOUT, or -1

    After a successful match by the search() method the following
    attributes are also available:

        start - index into the buffer, first byte of match
        end   - index into the buffer, first byte after match
        match - the matching string itself
    '''

    def __init__(self, strings):
        '''Create a searcher_string instance.  'strings' may be a list
        or other sequence of strings, possibly containing the EOF or
        TIMEOUT types.'''
        self.eof_index = -1
        self.timeout_index = -1
        self._strings = []
        for position, item in enumerate(strings):
            if item is EOF:
                self.eof_index = position
            elif item is TIMEOUT:
                self.timeout_index = position
            else:
                self._strings.append((position, item))

    def __str__(self):
        '''Render a human-readable description of the searcher state.'''
        entries = [(pos, ' %d: "%s"' % (pos, text))
                for pos, text in self._strings]
        entries.append((-1, 'searcher_string:'))
        if self.eof_index >= 0:
            entries.append((self.eof_index, ' %d: EOF' % self.eof_index))
        if self.timeout_index >= 0:
            entries.append((self.timeout_index,
                ' %d: TIMEOUT' % self.timeout_index))
        entries.sort()
        return '\n'.join(text for _, text in entries)

    def search(self, buffer, freshlen, searchwindowsize=None):
        '''Search 'buffer' for the first occurrence of any of the search
        strings.  'freshlen' must indicate the number of bytes at the
        end of 'buffer' which have not been searched before; it avoids
        rescanning the same, possibly big, buffer over and over.  See
        class spawn for the 'searchwindowsize' argument.

        On a match this returns the index of the matching string and
        sets 'start', 'end' and 'match'; otherwise it returns -1.'''
        # 'freshlen' helps a lot here.  Further optimizations could
        # include Boyer-Moore, or a multi-string scanner that walks the
        # input once, or noticing that after searching for ['bar',
        # 'baz'] in '...foo' no rescan is needed until three more bytes
        # arrive.  /grahn
        first_match = None
        best_index = best_match = None
        for position, needle in self._strings:
            if searchwindowsize is None:
                # The match, if any, can only be in the fresh data or at
                # the very end of the old data.
                offset = -(freshlen + len(needle))
            else:
                # Better obey searchwindowsize.
                offset = -searchwindowsize
            found = buffer.find(needle, offset)
            if found >= 0 and (first_match is None or found < first_match):
                first_match, best_index, best_match = found, position, needle
        if first_match is None:
            return -1
        self.match = best_match
        self.start = first_match
        self.end = first_match + len(best_match)
        return best_index
class searcher_re(object):
    '''This is regular expression string search helper for the
    spawn.expect_any() method. This helper class is for powerful
    pattern matching. For speed, see the helper class, searcher_string.
    Attributes:
        eof_index - index of EOF, or -1
        timeout_index - index of TIMEOUT, or -1
    After a successful match by the search() method the following attributes
    are available:
        start - index into the buffer, first byte of match
        end - index into the buffer, first byte after match
        match - the re.match object returned by a successful re.search
    '''
    def __init__(self, patterns):
        '''This creates an instance that searches for 'patterns' Where
        'patterns' may be a list or other sequence of compiled regular
        expressions, or the EOF or TIMEOUT types.'''
        self.eof_index = -1
        self.timeout_index = -1
        # (original_index, compiled_pattern) pairs; EOF/TIMEOUT are not
        # regexes, so only their positions are remembered above.
        self._searches = []
        for n, s in zip(list(range(len(patterns))), patterns):
            if s is EOF:
                self.eof_index = n
                continue
            if s is TIMEOUT:
                self.timeout_index = n
                continue
            self._searches.append((n, s))
    def __str__(self):
        '''This returns a human-readable string that represents the state of
        the object.'''
        #ss = [(n, ' %d: re.compile("%s")' %
        #    (n, repr(s.pattern))) for n, s in self._searches]
        ss = list()
        for n, s in self._searches:
            try:
                ss.append((n, '    %d: re.compile("%s")' % (n, s.pattern)))
            except UnicodeEncodeError:
                # for test cases that display __str__ of searches, dont throw
                # another exception just because stdout is ascii-only, using
                # repr()
                ss.append((n, '    %d: re.compile(%r)' % (n, s.pattern)))
        ss.append((-1, 'searcher_re:'))
        if self.eof_index >= 0:
            ss.append((self.eof_index, '    %d: EOF' % self.eof_index))
        if self.timeout_index >= 0:
            ss.append((self.timeout_index, '    %d: TIMEOUT' %
                self.timeout_index))
        # Sorting on the original pattern index keeps the display in the
        # order the caller supplied the patterns.
        ss.sort()
        ss = list(zip(*ss))[1]
        return '\n'.join(ss)
    def search(self, buffer, freshlen, searchwindowsize=None):
        '''This searches 'buffer' for the first occurrence of one of the regular
        expressions. 'freshlen' must indicate the number of bytes at the end of
        'buffer' which have not been searched before.
        See class spawn for the 'searchwindowsize' argument.
        If there is a match this returns the index of that string, and sets
        'start', 'end' and 'match'. Otherwise, returns -1.'''
        first_match = None
        # 'freshlen' doesn't help here -- we cannot predict the
        # length of a match, and the re module provides no help.
        if searchwindowsize is None:
            searchstart = 0
        else:
            searchstart = max(0, len(buffer) - searchwindowsize)
        for index, s in self._searches:
            match = s.search(buffer, searchstart)
            if match is None:
                continue
            n = match.start()
            # Prefer the match that starts earliest in the buffer, not the
            # pattern listed first.
            if first_match is None or n < first_match:
                first_match = n
                the_match = match
                best_index = index
        if first_match is None:
            return -1
        self.start = first_match
        self.match = the_match
        self.end = self.match.end()
        return best_index
def is_executable_file(path):
    """Checks that path is an executable regular file (or a symlink to a file).
    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``, but
    on some platforms :func:`os.access` gives us the wrong answer, so this
    checks permission bits directly.

    :param path: filesystem path to test; symlinks are followed.
    :return: True if the resolved path is a regular file the current process
        may read and execute, False otherwise.
    """
    # follow symlinks,
    fpath = os.path.realpath(path)
    # return False for non-files (directories, fifo, etc.)
    if not os.path.isfile(fpath):
        return False
    # On Solaris, etc., "If the process has appropriate privileges, an
    # implementation may indicate success for X_OK even if none of the
    # execute file permission bits are set."
    #
    # For this reason, it is necessary to explicitly check st_mode.
    # Stat once and reuse the result (the original stat'ed three times).
    st = os.stat(fpath)
    mode = st.st_mode
    # check if `other', that is anybody, may read and execute.
    if mode & stat.S_IROTH and mode & stat.S_IXOTH:
        return True
    # get current user's group ids, and check if `group',
    # when matching ours, may read and execute.
    user_gids = os.getgroups() + [os.getgid()]
    if (st.st_gid in user_gids and
            mode & stat.S_IRGRP and mode & stat.S_IXGRP):
        return True
    # finally, if file owner matches our effective userid,
    # check if `user' may read and execute.
    if (st.st_uid == os.geteuid() and
            mode & stat.S_IRUSR and mode & stat.S_IXUSR):
        return True
    return False
def which(filename):
    '''Locate *filename* on the environment PATH and return its full path if
    it exists and is executable; otherwise return None.  A name that already
    carries a directory component is only checked directly, never searched.'''
    # Special case: an explicit path short-circuits the PATH walk.
    if os.path.dirname(filename) and is_executable_file(filename):
        return filename
    # Fall back to os.defpath when PATH is unset or empty.
    search_path = os.environ.get('PATH') or os.defpath
    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if is_executable_file(candidate):
            return candidate
    return None
def split_command_line(command_line):
    '''This splits a command line into a list of arguments. It splits arguments
    on spaces, but handles embedded quotes, doublequotes, and escaped
    characters. It's impossible to do this with a regular expression, so I
    wrote a little state machine to parse the command line.

    Note: a leading run of whitespace produces a leading empty-string
    argument; this quirk is preserved for backward compatibility.
    '''
    arg_list = []
    arg = ''
    # Constants to name the states we can be in.
    state_basic = 0
    state_esc = 1
    state_singlequote = 2
    state_doublequote = 3
    # The state when consuming whitespace between commands.
    state_whitespace = 4
    state = state_basic
    for c in command_line:
        if state == state_basic or state == state_whitespace:
            if c == '\\':
                # Escape the next character
                state = state_esc
            elif c == "'":
                # Handle single quote
                state = state_singlequote
            elif c == '"':
                # Handle double quote
                state = state_doublequote
            elif c.isspace():
                # Emit the pending argument only at the start of a
                # whitespace run (the original used a bare `None` no-op
                # branch here).
                if state != state_whitespace:
                    arg_list.append(arg)
                    arg = ''
                state = state_whitespace
            else:
                arg = arg + c
                state = state_basic
        elif state == state_esc:
            # The escaped character is taken literally.
            arg = arg + c
            state = state_basic
        elif state == state_singlequote:
            if c == "'":
                state = state_basic
            else:
                arg = arg + c
        elif state == state_doublequote:
            if c == '"':
                state = state_basic
            else:
                arg = arg + c
    # Flush the final argument, if any.
    if arg != '':
        arg_list.append(arg)
    return arg_list
# vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent :
|
[
"theaccountname@yahoo.com"
] |
theaccountname@yahoo.com
|
c299077986bf62f2c38ac444a12dff977c5aaf2c
|
3d46889bd80a69d665de0d61d1035b04359154d3
|
/PolymorphismSubmissionAssignment.py
|
492f5d5e74c16290541e16ac7ccac083053c587e
|
[] |
no_license
|
Kelinz74/Python-Projects
|
182e30636775a2ada5cb5f4735142df569c28fac
|
87cb6a97f394031c2ce6eeda22e1e7f5fe8d9b3e
|
refs/heads/main
| 2023-08-24T08:42:27.177603
| 2021-09-30T03:02:12
| 2021-09-30T03:02:12
| 402,674,304
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
## Parent class shared by every account type below.
class Avenger:
    # Default (class-level) account attributes.
    company = "Avengers"
    name = ""
    email = ""
    password = ""
    department = ""

    def foundation(self):
        """Return the mission statement printed after a successful login."""
        return "Protecting the future: {}\n".format(self.company)
## Child class used for a user (like a customer)
class User(Avenger):
    # Hard-coded demo credentials for the customer account.
    name = "Captain America"
    email = "cap@gmail.com"
    password = "IronManSucks@5914"

    def getLoginInfo(self):
        """Prompt for credentials and greet the user on a successful login."""
        entry_name = input("Enter your name: ")
        entry_email = input("Enter your email: ")
        entry_password = input("Enter your password: ")
        # Guard clause: reject bad credentials first.
        if entry_email != self.email or entry_password != self.password:
            print("The password or email is incorrect.")
            return
        print("\nWelcome back, {}".format(entry_name))
        print(User().foundation())
# NOTE(review): these run at import time; the same customer login is
# requested again under the __main__ guard below, so it happens twice.
customer = User()
customer.getLoginInfo()
## child class used for an employee log in.
class Employee(Avenger):
    # Hard-coded demo credentials for the employee account.
    name = "Stephen Strange"
    email = "drstrange@gmail.com"
    title = "Sorcerer Supreme"
    department = "Time"
    pin_number = "1130"

    def getLoginInfo(self):
        """Prompt for email/pin and greet the employee on a successful login."""
        entry_name = input("Enter your name: ")
        entry_email = input("Enter your email: ")
        entry_pin = input("Enter your pin: ")
        # Guard clause: reject a bad email/pin pair first.
        if entry_email != self.email or entry_pin != self.pin_number:
            print("The pin or email is incorrect.")
            return
        print("\nWelcome back, {}".format(entry_name))
        # Mirrors the original, which builds a User here (not an Employee);
        # foundation() output is identical either way.
        print(User().foundation())
# NOTE(review): runs at import time; duplicated under the __main__ guard.
manager = Employee()
manager.getLoginInfo()
## child class used for a cleaning person login (Janitorial)
class Janitorial(Avenger):
    # Hard-coded demo credentials for the janitorial account.
    name = "Thor"
    email = "heavyhammer@gmail.com"
    title = "Janitor"
    tools = "Mop"
    pin_number = "7941"

    def getLoginInfo(self):
        """Prompt for email/pin and greet the janitor on a successful login."""
        entry_name = input("Enter your name: ")
        entry_email = input("Enter your email: ")
        entry_pin = input("Enter your pin: ")
        # Guard clause: reject a bad email/pin pair first.
        if entry_email != self.email or entry_pin != self.pin_number:
            print("The pin or email is incorrect.")
            return
        print("\nWelcome back, {}".format(entry_name))
        # Mirrors the original, which builds a User here (not a Janitorial);
        # foundation() output is identical either way.
        print(User().foundation())
# NOTE(review): runs at import time; duplicated under the __main__ guard.
janitor = Janitorial()
janitor.getLoginInfo()
# calls to each class for login input and display message if successful or unsuccessful login.
if __name__ == "__main__":
    # Each call blocks on interactive input(); note the same logins already
    # ran once at import time above, so running this file prompts twice per
    # account type.
    customer = User()
    customer.getLoginInfo()
    manager = Employee()
    manager.getLoginInfo()
    janitor = Janitorial()
    janitor.getLoginInfo()
|
[
"noreply@github.com"
] |
Kelinz74.noreply@github.com
|
5043e33106daca70e5c1091684a4d3b45a1fbf1b
|
2d15135e3559c65374b38abd47e9289c41c042b2
|
/server.py
|
e4d42f02581eeac5a6e90e1c13c8b659bfeec675
|
[] |
no_license
|
diana-xie/btc_prediction
|
33e418bc35f33835fd5439ddef3eb3c8fd92a4d0
|
bbecc4c45b9ab15a925e704357f689736de93db2
|
refs/heads/master
| 2022-12-12T10:55:44.348461
| 2020-09-10T00:57:57
| 2020-09-10T00:57:57
| 294,164,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
""" Runs the endpoints for BTC predict, train, unit tests """
import tensorflow as tf
from flask import Flask, jsonify, request
import os
import logging
import pkg_resources
import pandas as pd
from tests.test_conf import test_conf
from tests.test_preprocessing_train import test_preprocessing_train
from tests.test_model_drift import test_model_drift
from train import train_model
from utils import fix_path, process_request
# remove tf warning messages
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Flask application; serving port defaults to 5000, overridable via PORT.
app = Flask(__name__)
port = int(os.environ.get("PORT", 5000))
@app.route('/', methods=['GET'])
def server_is_up():
    """Health-check endpoint: returns a fixed string while the API runs."""
    # print("success")
    return 'API is up.'
@app.route('/train', methods=['POST'])  # POST
def train_api():
    """Train and persist the model from the JSON request body.

    The payload schema is whatever ``train_model`` expects — presumably the
    training observations; confirm against train.py.
    """
    observation = request.json
    mae = train_model(observation)
    return 'Model has been trained and saved. MAE is {}'.format(mae)
@app.route('/predict', methods=['POST'])  # POST
def predict_api():
    """Load the pickled model and return a bitcoin prediction for the request.

    Expects a JSON body; on success returns {"bitcoin prediction": "<value>"}.
    On failure (typically a missing model) returns an explicit 500 response
    instead of ``None``, which Flask would reject with a TypeError.
    """
    try:
        model = pd.read_pickle(os.path.join(fix_path(), "models/model.pkl"))
        # Fix: lazy %-style logging args need a placeholder; the original
        # passed the version as an arg with no %s, which breaks formatting
        # when the record is emitted.
        logging.info("RFregressor version: %s",
                     pkg_resources.get_distribution("scikit-learn"))
        observation = request.json
        observation = process_request(observation=observation)
        pred = model.get_prediction(observation)
        return jsonify({"bitcoin prediction": str(pred)})
    except Exception:
        # logging.exception records the traceback as well as the message.
        logging.exception("No model was found, so run /train")
        return "No model was found, so run /train", 500
""" unit tests"""
@app.route('/test_conf', methods=['GET'])
def unit_tests_conf():
test_conf()
return 'Successfully ran conf test.'
@app.route('/test_preprocess_train', methods=['GET'])
def unit_tests_preprocess():
    """Run the preprocessing/training unit tests; an exception yields a 500."""
    test_preprocessing_train()
    return 'Successfully ran preprocessing and train tests.'
@app.route('/test_drift', methods=['GET'])
def unit_tests_drift():
    """Run the model-drift check and return whatever message it produces."""
    msg = test_model_drift()
    return msg
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=port)
|
[
"noreply@github.com"
] |
diana-xie.noreply@github.com
|
10a68c431ed91e2b2ac04ec4a3f1b21f88858fba
|
4a4e59ee97112c69412f61ccee0c885bc8230834
|
/ssfunction.py
|
79f93e516ac96e000cf91f25b49f9de144c0189e
|
[] |
no_license
|
harshantil/Firstpython
|
f11c846aa7b80ac5deababa8bf77efc49e732006
|
7f93cbd3013126c48def448d511c652c619c87d9
|
refs/heads/main
| 2023-07-06T00:16:46.784303
| 2021-08-06T11:42:22
| 2021-08-06T11:42:22
| 377,393,270
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import time
from selenium import webdriver
from pynput.keyboard import *
def browser(driver):
    """Open Chrome, load the Google sign-in page, and type the demo account
    name and password.

    NOTE(review): the ``driver`` parameter is immediately overwritten by a
    newly created Chrome driver, so the caller's driver is ignored — confirm
    whether the argument should be used instead.
    NOTE(review): credentials are hard-coded in source; avoid committing
    real secrets.
    """
    driver = webdriver.Chrome(r"C:\Users\harsh\Downloads\chromedriver_win32\chromedriver.exe")
    url = "https://accounts.google.com/signin/v2/identifie"
    driver.get(url) # Going to Url
    driver.maximize_window()
    signin_user = driver.find_element_by_name("identifier")
    signin_user.clear()
    signin_user.send_keys("harshantil")
    # Press Enter via pynput to advance past the email form.
    kb = Controller()
    kb.press(Key.enter)
    kb.release(Key.enter)
    signin_pass = driver.find_element_by_name("password")
    signin_pass.clear()
    signin_pass.send_keys("12345678")
def screenshot(d):
    """Save a screenshot of driver *d*, naming the file after the current
    time (colons replaced, since they are invalid in Windows filenames)."""
    folder = r"C:\\Users\\harsh\\Desktop\\testing\\Screenshot\\"
    stamp = time.asctime().replace(":", ".")
    d.get_screenshot_as_file(folder + stamp + ".png")
|
[
"harshantil@gmail.com"
] |
harshantil@gmail.com
|
5c61c5283583e4f8005ab3374fa0c5dfff7297da
|
7c6096fda1c62882aecde1b585418eee7a5e76da
|
/forums/migrations/0006_questions_tags.py
|
a9ee72ccd5a8068984a1f879b3ff58b0e65b5582
|
[] |
no_license
|
Lokesh-Balla/StackCopy
|
dec2596ce6c68cea6deb498a60e331b280ea4be7
|
05604b8719b301144f295dccad893ab6e170bee9
|
refs/heads/master
| 2023-02-08T04:29:20.070834
| 2020-07-14T17:07:30
| 2020-07-14T17:07:30
| 193,842,871
| 0
| 0
| null | 2023-02-04T05:18:40
| 2019-06-26T06:27:46
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
# Generated by Django 2.2.2 on 2019-06-30 05:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a many-to-many ``tags`` relation
    (to ``forums.Tags``) on the ``questions`` model."""
    dependencies = [
        ('forums', '0005_answers_user'),
    ]
    operations = [
        migrations.AddField(
            model_name='questions',
            name='tags',
            field=models.ManyToManyField(to='forums.Tags'),
        ),
    ]
|
[
"Lokesh-Balla@users.noreply.github.com"
] |
Lokesh-Balla@users.noreply.github.com
|
ca4d478fbd596a33844375f314113fc89a94ff1e
|
d674009298cb8ecfeb97ed0dcac5820a1a34c469
|
/catalog/migrations/0002_auto_20201113_1659.py
|
f657f39b1fcef96fe2f0ec45a09a1258f1c3781e
|
[] |
no_license
|
pavelpyn/salon
|
e21eee434a901d7168aa20493690701ba6959611
|
7b88f77caf8d948e81f81b51277f2102d6897d09
|
refs/heads/main
| 2023-01-12T22:28:25.236529
| 2020-11-18T09:28:38
| 2020-11-18T09:28:38
| 313,882,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,041
|
py
|
# Generated by Django 3.1.2 on 2020-11-13 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares all price fields on ``service``
    as DecimalField(blank=True, decimal_places=3, max_digits=10) with
    (Russian) verbose names.

    NOTE(review): price_1..price_3 share the identical verbose_name
    'Цены(от 25 до 40см)' — looks like a copy-paste; verify against the
    model definition.
    """
    dependencies = [
        ('catalog', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='service',
            name='price_1',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 25 до 40см)'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_2',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 25 до 40см)'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_3',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 25 до 40см)'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_4',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены(от 40 и выше)'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_man_all',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Мужская стрижка, стоимость работы'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_man_material',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Мужская стрижка, расходные материалы'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_nm_1',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов1'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_nm_2',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов2'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_nm_3',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов3'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_nm_4',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Цены без расходных материалов4'),
        ),
        migrations.AlterField(
            model_name='service',
            name='price_work',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, verbose_name='Мужская стрижка, стоимость услуги'),
        ),
    ]
|
[
"pavelpyn@gmail.com"
] |
pavelpyn@gmail.com
|
f0b5d8049387f82fdc10423ed90621cbe0c3bdef
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/dgu/src/dataset.py
|
b0e7e7d67e9e558b44bf62623dcbbab8f34c71a8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 21,879
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
dataset used in Bert finetune and evaluation.
"""
import json
import os
from typing import List

import numpy as np
# Input sequences begin with '[CLS]' and use '[SEP]' to split conversation
# content (previous part, current part, following part, etc.).  When one of
# those parts itself contains several turns, INNER_SEP further splits them.
# '[unused0]' is presumably a reserved/unused slot in the tokenizer's
# vocabulary — confirm against the vocab file.
INNER_SEP = '[unused0]'
class Dataset():
    """ Dataset base class """

    def __init__(self):
        pass

    def __getitem__(self, idx):
        # Subclasses must provide indexed access to their examples.
        msg = "'{}' not implement in class {}".format(
            '__getitem__', self.__class__.__name__)
        raise NotImplementedError(msg)

    def __len__(self):
        # Subclasses must report how many examples they hold.
        msg = "'{}' not implement in class {}".format(
            '__len__', self.__class__.__name__)
        raise NotImplementedError(msg)
def get_label_map(label_list):
    """ Create label maps """
    # Map each label to its position; a duplicate label keeps its last index,
    # matching the original assignment loop.
    return {label: idx for idx, label in enumerate(label_list)}
class UDCv1(Dataset):
    """
    The UDCv1 dataset is using in task Dialogue Response Selection.
    The source dataset is UDCv1(Ubuntu Dialogue Corpus v1.0). See detail at
    http://dataset.cs.mcgill.ca/ubuntu-corpus-1.0/

    Each stored example is [label, context_turns, response].
    """
    # Responses longer than this many word pieces are truncated.
    MAX_LEN_OF_RESPONSE = 60
    LABEL_MAP = get_label_map(['0', '1'])

    def __init__(self, data_dir, mode='train', label_map_config=None):
        super(UDCv1, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()
        # Fix: the original referenced ``json`` without importing it, so any
        # non-None label_map_config raised NameError.  The redundant
        # pre-assignment of self.label_map before the if/else is also gone.
        if label_map_config:
            with open(label_map_config) as f:
                self.label_map = json.load(f)
        else:
            self.label_map = None

    #read data from file
    def read_data(self):
        """read data from file"""
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            # NOTE(review): the other datasets read 'dev.txt'; confirm the
            # '-small' suffix is intentional here.
            data_path = os.path.join(self._data_dir, 'dev.txt-small')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) < 3:
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row contains at least three parts: label\tconversation1\t.....\tresponse.'
                    )
                    continue
                # First column is the label, last is the response, everything
                # in between is the conversation context.
                label = arr[0]
                text_a = arr[1:-1]
                text_b = arr[-1]
                self.data.append([label, text_a, text_b])

    @classmethod
    def get_label(cls, label):
        """Map a raw label string ('0'/'1') to its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        def _truncate_and_concat(text_a: List[str], text_b: str, tokenizer, max_seq_length):
            # Cap the response at MAX_LEN_OF_RESPONSE word pieces.
            tokens_b = tokenizer.tokenize(text_b)
            tokens_b = tokens_b[:min(cls.MAX_LEN_OF_RESPONSE, len(tokens_b))]
            # Join the context turns with INNER_SEP between them.
            tokens_a = []
            for text in text_a:
                tokens_a.extend(tokenizer.tokenize(text))
                tokens_a.append(INNER_SEP)
            tokens_a = tokens_a[:-1]
            # Keep the *end* of the context; 3 slots reserved for
            # [CLS] and the two [SEP] markers.
            if len(tokens_a) > max_seq_length - len(tokens_b) - 3:
                tokens_a = tokens_a[len(tokens_a) - max_seq_length + len(tokens_b) + 3:]
            tokens, segment_ids = [], []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in tokens_a:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            if tokens_b:
                for token in tokens_b:
                    tokens.append(token)
                    segment_ids.append(1)
                tokens.append("[SEP]")
                segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            # Zero-pad all three sequences to max_seq_length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            return input_ids, input_mask, segment_ids
        label, text_a, text_b = example
        label = np.array([cls.get_label(label)], dtype='int64')
        input_ids, input_mask, segment_ids = _truncate_and_concat(text_a, text_b, tokenizer, max_seq_length)
        return input_ids, input_mask, segment_ids, label

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class DSTC2(Dataset):
    """
    The dataset DSTC2 is using in task Dialogue State Tracking.
    The source dataset is DSTC2(Dialog State Tracking Challenges 2). See detail at
    https://github.com/matthen/dstc
    """
    LABEL_MAP = get_label_map([str(i) for i in range(217)])
    def __init__(self, data_dir, mode='train'):
        super(DSTC2, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()
    def read_data(self):
        """read data from file"""
        def _concat_dialogues(examples):
            """concat multi turns dialogues"""
            # For each turn, join up to the 20 preceding turns (using the
            # control character \1 as glue) so every example carries its
            # dialogue history.
            new_examples = []
            max_turns = 20
            example_len = len(examples)
            for i in range(example_len):
                multi_turns = examples[max(i - max_turns, 0):i + 1]
                new_qa = '\1'.join([example[0] for example in multi_turns])
                new_examples.append((new_qa.split('\1'), examples[i][1]))
            return new_examples
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            data_path = os.path.join(self._data_dir, 'dev.txt')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            pre_idx = -1
            examples = []
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) != 3:
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row should contains three parts: id\tquestion\1answer\tlabel1 label2 ...'
                    )
                    continue
                idx = arr[0]
                qa = arr[1]
                label_list = arr[2].split()
                # A new dialogue id flushes the accumulated turns.
                if idx != pre_idx:
                    # NOTE(review): idx is a str, so `idx != 0` is always
                    # true; the branch is harmless on the first dialogue only
                    # because `examples` is still empty — confirm intent.
                    if idx != 0:
                        examples = _concat_dialogues(examples)
                        self.data.extend(examples)
                    examples = []
                    pre_idx = idx
                examples.append((qa, label_list))
            if examples:
                examples = _concat_dialogues(examples)
                self.data.extend(examples)
    @classmethod
    def get_label(cls, label):
        # Map a raw label string to its integer id.
        return cls.LABEL_MAP[label]
    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)
    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        def _truncate_and_concat(texts: List[str], tokenizer, max_seq_length):
            # Join all turns with INNER_SEP, keep the tail if too long, and
            # wrap with [CLS]/[SEP]; segment ids are all zero.
            tokens = []
            for text in texts:
                tokens.extend(tokenizer.tokenize(text))
                tokens.append(INNER_SEP)
            tokens = tokens[:-1]
            if len(tokens) > max_seq_length - 2:
                tokens = tokens[len(tokens) - max_seq_length + 2:]
            tokens_, segment_ids = [], []
            tokens_.append("[CLS]")
            segment_ids.append(0)
            for token in tokens:
                tokens_.append(token)
                segment_ids.append(0)
            tokens_.append("[SEP]")
            segment_ids.append(0)
            tokens = tokens_
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            return input_ids, segment_ids
        texts, labels = example
        input_ids, segment_ids = _truncate_and_concat(texts, tokenizer,
                                                      max_seq_length)
        # Multi-label target: a multi-hot vector over all classes.
        labels = [cls.get_label(l) for l in labels]
        label = np.zeros(cls.num_classes(), dtype='int64')
        for l in labels:
            label[l] = 1
        input_mask = [1] * len(input_ids)
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        return input_ids, input_mask, segment_ids, label
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
class ATIS_DSF(Dataset):
    """
    The dataset ATIS_DSF is using in task Dialogue Slot Filling.
    The source dataset is ATIS(Airline Travel Information System). See detail at
    https://www.kaggle.com/siddhadev/ms-cntk-atis

    Each stored example is [text, per-word label list].
    """
    LABEL_MAP = get_label_map([str(i) for i in range(130)])

    def __init__(self, data_dir, mode='train'):
        super(ATIS_DSF, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()

    def read_data(self):
        """read data from file"""
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            data_path = os.path.join(self._data_dir, 'dev.txt')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) != 2:
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row should contains two parts: conversation_content\tlabel1 label2 label3.'
                    )
                    continue
                text = arr[0]
                label_list = arr[1].split()
                self.data.append([text, label_list])

    @classmethod
    def get_label(cls, label):
        """Map a raw label string to its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        text, labels = example
        tokens, label_list = [], []
        words = text.split()
        # One label per whitespace-separated word; every word piece of a
        # word repeats that word's label.
        assert len(words) == len(labels)
        for word, label in zip(words, labels):
            piece_words = tokenizer.tokenize(word)
            tokens.extend(piece_words)
            label = cls.get_label(label)
            label_list.extend([label] * len(piece_words))
        if len(tokens) > max_seq_length - 2:
            # Fix: compute the cut point *before* truncating `tokens`.  The
            # original re-used len(tokens) after the first slice, so the
            # label_list slice was a no-op and labels stayed longer than
            # the token sequence.
            cut = len(tokens) - max_seq_length + 2
            tokens = tokens[cut:]
            label_list = label_list[cut:]
        tokens_, segment_ids = [], []
        tokens_.append("[CLS]")
        for token in tokens:
            tokens_.append(token)
        tokens_.append("[SEP]")
        tokens = tokens_
        # [CLS]/[SEP] positions receive label id 0.
        label_list = [0] + label_list + [0]
        segment_ids = [0] * len(tokens)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        label = np.array(label_list, dtype='int64')
        input_mask = [1] * len(input_ids)
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        return input_ids, input_mask, segment_ids, label

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
class ATIS_DID(Dataset):
    """
    The dataset ATIS_ID is using in task Dialogue Intent Detection.
    The source dataset is ATIS(Airline Travel Information System). See detail at
    https://www.kaggle.com/siddhadev/ms-cntk-atis
    """
    LABEL_MAP = get_label_map([str(i) for i in range(26)])
    def __init__(self, data_dir, mode='train'):
        super(ATIS_DID, self).__init__()
        self._data_dir = data_dir
        self._mode = mode
        self.read_data()
    def read_data(self):
        """read data from file"""
        if self._mode == 'train':
            data_path = os.path.join(self._data_dir, 'train.txt')
        elif self._mode == 'dev':
            data_path = os.path.join(self._data_dir, 'dev.txt')
        elif self._mode == 'test':
            data_path = os.path.join(self._data_dir, 'test.txt')
        self.data = []
        with open(data_path, 'r', encoding='utf8') as fin:
            for line in fin:
                if not line:
                    continue
                arr = line.rstrip('\n').split('\t')
                if len(arr) != 2:
                    print('Data format error: %s' % '\t'.join(arr))
                    print(
                        'Data row should contains two parts: label\tconversation_content.'
                    )
                    continue
                label = arr[0]
                text = arr[1]
                self.data.append([label, text])
    @classmethod
    def get_label(cls, label):
        # Map a raw label string to its integer id.
        return cls.LABEL_MAP[label]
    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)
    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        label, text = example
        tokens = tokenizer.tokenize(text)
        # Keep the tail of an over-long conversation; 2 slots reserved for
        # [CLS] and [SEP].
        if len(tokens) > max_seq_length - 2:
            tokens = tokens[len(tokens) - max_seq_length + 2:]
        tokens_, segment_ids = [], []
        tokens_.append("[CLS]")
        for token in tokens:
            tokens_.append(token)
        tokens_.append("[SEP]")
        tokens = tokens_
        # Single segment: all zeros (the initial segment_ids above is
        # immediately replaced here).
        segment_ids = [0] * len(tokens)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        label = np.array([cls.get_label(label)], dtype='int64')
        input_mask = [1] * len(input_ids)
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        return input_ids, input_mask, segment_ids, label
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
def read_da_data(data_dir, mode):
    """read data from file

    Reads `<id>\t<label>\t<caller>\t<text>` rows, groups them by dialogue
    id, and returns samples of the form
    [label, previous_turns, current_turn, following_turns].
    """
    def _concat_dialogues(examples):
        """concat multi turns dialogues"""
        # Context window: up to 5 previous and 2 following turns, each
        # rendered as "caller : text".
        new_examples = []
        example_len = len(examples)
        for i in range(example_len):
            label, caller, text = examples[i]
            cur_txt = "%s : %s" % (caller, text)
            pre_txt = [
                "%s : %s" % (item[1], item[2])
                for item in examples[max(0, i - 5):i]
            ]
            suf_txt = [
                "%s : %s" % (item[1], item[2])
                for item in examples[i + 1:min(len(examples), i + 3)]
            ]
            sample = [label, pre_txt, cur_txt, suf_txt]
            new_examples.append(sample)
        return new_examples
    if mode == 'train':
        data_path = os.path.join(data_dir, 'train.txt')
    elif mode == 'dev':
        data_path = os.path.join(data_dir, 'dev.txt')
    elif mode == 'test':
        data_path = os.path.join(data_dir, 'test.txt')
    data = []
    with open(data_path, 'r', encoding='utf8') as fin:
        pre_idx = -1
        examples = []
        for line in fin:
            if not line:
                continue
            arr = line.rstrip('\n').split('\t')
            if len(arr) != 4:
                print('Data format error: %s' % '\t'.join(arr))
                print(
                    'Data row should contains four parts: id\tlabel\tcaller\tconversation_content.'
                )
                continue
            idx, label, caller, text = arr
            # A new dialogue id flushes the accumulated turns.
            if idx != pre_idx:
                # NOTE(review): idx is a str, so `idx != 0` is always true;
                # it is harmless on the first dialogue only because
                # `examples` is still empty — confirm intent.
                if idx != 0:
                    examples = _concat_dialogues(examples)
                    data.extend(examples)
                examples = []
                pre_idx = idx
            examples.append((label, caller, text))
        if examples:
            examples = _concat_dialogues(examples)
            data.extend(examples)
    return data
def truncate_and_concat(pre_txt: List[str],
                        cur_txt: str,
                        suf_txt: List[str],
                        tokenizer,
                        max_seq_length,
                        max_len_of_cur_text):
    """concat data

    Builds BERT-style inputs from the current utterance plus surrounding
    context: the current utterance is capped at `max_len_of_cur_text`
    pieces; previous/following turns are joined with INNER_SEP and truncated
    so the total fits in `max_seq_length` (4 slots reserved for [CLS] and
    up to three [SEP] markers).  Returns zero-padded
    (input_ids, input_mask, segment_ids).
    """
    cur_tokens = tokenizer.tokenize(cur_txt)
    cur_tokens = cur_tokens[:min(max_len_of_cur_text, len(cur_tokens))]
    pre_tokens = []
    for text in pre_txt:
        pre_tokens.extend(tokenizer.tokenize(text))
        pre_tokens.append(INNER_SEP)
    pre_tokens = pre_tokens[:-1]
    suf_tokens = []
    for text in suf_txt:
        suf_tokens.extend(tokenizer.tokenize(text))
        suf_tokens.append(INNER_SEP)
    suf_tokens = suf_tokens[:-1]
    if len(cur_tokens) + len(pre_tokens) + len(suf_tokens) > max_seq_length - 4:
        # Split the remaining budget between the two context sides, giving
        # the remainder to the longer side; the prefix keeps its tail and
        # the suffix keeps its head.
        left_num = max_seq_length - 4 - len(cur_tokens)
        if len(pre_tokens) > len(suf_tokens):
            suf_num = int(left_num / 2)
            suf_tokens = suf_tokens[:suf_num]
            pre_num = left_num - len(suf_tokens)
            pre_tokens = pre_tokens[max(0, len(pre_tokens) - pre_num):]
        else:
            pre_num = int(left_num / 2)
            pre_tokens = pre_tokens[max(0, len(pre_tokens) - pre_num):]
            suf_num = left_num - len(pre_tokens)
            suf_tokens = suf_tokens[:suf_num]
    tokens, segment_ids = [], []
    tokens.append("[CLS]")
    for token in pre_tokens:
        tokens.append(token)
    tokens.append("[SEP]")
    segment_ids.extend([0] * len(tokens))
    # The current utterance gets segment id 1.
    for token in cur_tokens:
        tokens.append(token)
    tokens.append("[SEP]")
    segment_ids.extend([1] * (len(cur_tokens) + 1))
    if suf_tokens:
        for token in suf_tokens:
            tokens.append(token)
        tokens.append("[SEP]")
        # NOTE: the suffix shares segment id 0 with the prefix.
        segment_ids.extend([0] * (len(suf_tokens) + 1))
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    return input_ids, input_mask, segment_ids
class MRDA(Dataset):
    """
    The dataset MRDA is using in task Dialogue Act.
    The source dataset is MRDA(Meeting Recorder Dialogue Act). See detail at
    https://www.aclweb.org/anthology/W04-2319.pdf
    """
    # Maximum number of word pieces kept from the current utterance.
    MAX_LEN_OF_CUR_TEXT = 50
    LABEL_MAP = get_label_map([str(i) for i in range(5)])
    def __init__(self, data_dir, mode='train'):
        super(MRDA, self).__init__()
        self.data = read_da_data(data_dir, mode)
    @classmethod
    def get_label(cls, label):
        # Map a raw label string to its integer id.
        return cls.LABEL_MAP[label]
    @classmethod
    def num_classes(cls):
        return len(cls.LABEL_MAP)
    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """ Convert a glue example into necessary features. """
        label, pre_txt, cur_txt, suf_txt = example
        label = np.array([cls.get_label(label)], dtype='int64')
        input_ids, input_mask, segment_ids = truncate_and_concat(pre_txt, cur_txt, suf_txt, \
            tokenizer, max_seq_length, cls.MAX_LEN_OF_CUR_TEXT)
        return input_ids, input_mask, segment_ids, label
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
class SwDA(Dataset):
    """Dialogue-act dataset backed by SwDA (Switchboard Dialog Act).

    Source corpus described at http://compprag.christopherpotts.net/swda.html
    """

    # Maximum number of tokens kept from the current utterance.
    MAX_LEN_OF_CUR_TEXT = 50
    # Forty-two dialogue-act tags, encoded as the strings '0'..'41'.
    LABEL_MAP = get_label_map([str(i) for i in range(42)])

    def __init__(self, data_dir, mode='train'):
        super(SwDA, self).__init__()
        self.data = read_da_data(data_dir, mode)

    @classmethod
    def get_label(cls, label):
        """Map a raw tag string to its integer id."""
        return cls.LABEL_MAP[label]

    @classmethod
    def num_classes(cls):
        """Number of distinct dialogue-act labels."""
        return len(cls.LABEL_MAP)

    @classmethod
    def convert_example(cls, example, tokenizer, max_seq_length=512):
        """Convert one raw (label, pre, cur, suf) example into features."""
        label, pre_txt, cur_txt, suf_txt = example
        label_id = np.array([cls.get_label(label)], dtype='int64')
        input_ids, input_mask, segment_ids = truncate_and_concat(
            pre_txt, cur_txt, suf_txt,
            tokenizer, max_seq_length, cls.MAX_LEN_OF_CUR_TEXT)
        return input_ids, input_mask, segment_ids, label_id

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
ae9d6a61eca7fe11f99e20f1e31752dd023a83a1
|
1ec9f86c460a7ca5fadb2ccf9f6cdf9c2c4b3287
|
/backend/users/views.py
|
dc704a594394f7747d430090e38531dd1d68991a
|
[] |
no_license
|
sushant2308/Meet-the-doctor
|
0b53fa7f9200debc8392b79b92bf826e77d8da60
|
1ed16b30ea26434a1ccda298294f1c1550d0857d
|
refs/heads/master
| 2023-08-24T10:21:32.677065
| 2021-10-14T07:43:03
| 2021-10-14T07:43:03
| 384,092,646
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .serializers import UserSerializer,SigInSerializer
from .models import User
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK,
)
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
class CreateUserView(generics.CreateAPIView):
    """API endpoint that registers a new user account via UserSerializer."""
    serializer_class = UserSerializer
@api_view(['GET', ])
def speciality_doctors(request, slug):
    """Return every user flagged as a doctor whose speciality equals *slug*."""
    queryset = User.objects.filter(is_doctor=True, speciality=slug)
    payload = UserSerializer(queryset, many=True).data
    return Response(payload, status=HTTP_200_OK)
@api_view(["POST"])
def signin(request):
    """Authenticate a user by email/password and return a DRF token.

    On success the user's ``status`` flag is set to 1 and the serialized
    user plus the token key are returned.
    """
    credentials = SigInSerializer(data=request.data)
    if not credentials.is_valid():
        return Response(credentials.errors, status=HTTP_400_BAD_REQUEST)

    user = authenticate(
        request=request,
        username=request.data['email'],
        password=request.data['password'],
    )
    if user is None:
        return Response({'detail': 'Invalid Credentials or activate account'}, status=HTTP_404_NOT_FOUND)

    # Mark the account as online before issuing the token.
    user.status = 1
    user.save()
    token, _ = Token.objects.get_or_create(user=user)
    return Response({
        'user': UserSerializer(user).data,
        'token': token.key
    }, status=HTTP_200_OK)
@api_view(['GET', ])
def logout(request):
    """Mark the authenticated user as offline (status = 0).

    Note: this only flips the ``status`` flag; the auth token is not
    revoked here.
    """
    user = request.user
    # Fix: removed leftover debug `print(user.status)` that leaked user
    # state to stdout on every logout.
    user.status = 0
    user.save()
    return Response({"message": "Successfully logged out"}, status=HTTP_200_OK)
|
[
"raisushantkumar726@gmail.com"
] |
raisushantkumar726@gmail.com
|
ae7fc20183651cc33c0675dd8b8869440efb4d14
|
2e4bd74698ce47c5f81699076bd367407a1e3a72
|
/lists/tests.py
|
e40f2691d63b115fbc66d5969035aef9ed67542b
|
[] |
no_license
|
Onwughara-CK/obey_the_testing_goat
|
2e0e1d2f1b828b69e4eb638e4a8f18323e6a3abb
|
eaedc4203acb9b9ea461c9970e79a10a53e622ce
|
refs/heads/master
| 2022-11-30T02:55:30.345380
| 2020-08-17T21:02:41
| 2020-08-17T21:02:41
| 287,971,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from .views import home_page
class HomePageTest(TestCase):
    """Unit tests for the home page view."""

    def test_root_url_resolves_to_home_page_view(self):
        # The project root URL must be routed to the home_page view.
        self.assertEqual(resolve("/").func, home_page)

    def test_home_page_returns_correct_html(self):
        request = HttpRequest()
        response = home_page(request)
        html = response.content.decode('utf8')
        self.assertTrue(html.startswith('<html>'))
        self.assertIn('<title>To-Do lists</title>', html)
        # Bug fix: a document that starts with '<html>' ends with the
        # CLOSING tag; the original endswith('<html>') could never pass.
        self.assertTrue(html.endswith('</html>'))
|
[
"kelechicollins.93@gmail.com"
] |
kelechicollins.93@gmail.com
|
fe617ba47c9efdffab6c275fdc564daa8bb65ee9
|
80301f1cffc5afce13256e2ecab6323c5df00194
|
/cn.3rd/py/A0024.py
|
35dc33ee31bc4810216c072c4f632d116a8f110f
|
[] |
no_license
|
ZhenjianYang/SoraVoiceScripts
|
c1ddf7c1bbcb933243754f9669bd6b75777c87b9
|
94a948090aba0f63b10b2c69dc845dc99c822fc4
|
refs/heads/master
| 2023-04-18T04:54:44.306652
| 2023-04-06T11:15:17
| 2023-04-06T11:15:17
| 103,167,541
| 43
| 11
| null | 2021-03-06T08:52:54
| 2017-09-11T17:36:55
|
Python
|
UTF-8
|
Python
| false
| false
| 27,855
|
py
|
from ED63RDScenarioHelper import *
def main():
    """Build the A0024 debug-map scenario: file header, string table,
    entry point, character chip/pattern archives, NPC declarations and
    the script-function table.

    NOTE(review): this is decompiled ED6 (Trails in the Sky) scenario
    data; the Unknown_* fields are raw, unidentified engine parameters —
    values are exact and must not be altered.
    """
    SetCodePage("gbk")

    # Debug map
    CreateScenaFile(
        FileName = 'A0024 ._SN',
        MapName = 'map1',
        Location = 'T0030.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60010",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )

    # String table: animation-state labels for the four debug characters
    # (strings are game data — kept verbatim, including the Chinese text).
    BuildStringList(
        '@FileName',  # 8
        '04580尤莉亚待机',  # 9
        '04581尤莉亚移动',  # 10
        '04582尤莉亚攻击',  # 11
        '04583尤莉亚被弹开',  # 12
        '04584尤莉亚倒下',  # 13
        '04585尤莉亚魔法咏唱',  # 14
        '04586尤莉亚魔法发动',  # 15
        '04570穆拉待机',  # 16
        '04571穆拉移动',  # 17
        '04572穆拉攻击',  # 18
        '04573穆拉被弹开',  # 19
        '04574穆拉倒下',  # 20
        '04575穆拉魔法咏唱',  # 21
        '04576穆拉魔法发动',  # 22
        '04590希德待机',  # 23
        '04591希德移动',  # 24
        '04592希德攻击',  # 25
        '04593希德被弹开',  # 26
        '04594希德倒下',  # 27
        '04595希德魔法咏唱',  # 28
        '04596希德魔法发动',  # 29
        '04120凯诺娜待机',  # 30
        '04121凯诺娜移动',  # 31
        '04122凯诺娜攻击',  # 32
        '04123凯诺娜被弹开',  # 33
        '04124凯诺娜倒下',  # 34
        '04125凯诺娜魔法咏唱',  # 35
        '04126凯诺娜魔法发动',  # 36
    )

    # Player entry point for the map; fields are raw engine values.
    DeclEntryPoint(
        Unknown_00 = 0,
        Unknown_04 = 0,
        Unknown_08 = 0,
        Unknown_0C = 4,
        Unknown_0E = 5,
        Unknown_10 = 0,
        Unknown_14 = 9500,
        Unknown_18 = -10000,
        Unknown_1C = 0,
        Unknown_20 = 0,
        Unknown_24 = 0,
        Unknown_28 = 2800,
        Unknown_2C = 262,
        Unknown_30 = 315,
        Unknown_32 = 0,
        Unknown_34 = 360,
        Unknown_36 = 0,
        Unknown_38 = 0,
        Unknown_3A = 0,
        InitScenaIndex = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex = 0,
        EntryFunctionIndex = 1,
    )

    # Sprite chip archives (one entry per animation state, 10 per character).
    AddCharChip(
        'ED6_DT27/CH04580 ._CH',  # 00
        'ED6_DT27/CH04581 ._CH',  # 01
        'ED6_DT27/CH04582 ._CH',  # 02
        'ED6_DT27/CH04583 ._CH',  # 03
        'ED6_DT27/CH04584 ._CH',  # 04
        'ED6_DT27/CH04585 ._CH',  # 05
        'ED6_DT27/CH04586 ._CH',  # 06
        'ED6_DT27/CH04583 ._CH',  # 07
        'ED6_DT27/CH04583 ._CH',  # 08
        'ED6_DT27/CH04583 ._CH',  # 09
        'ED6_DT27/CH04570 ._CH',  # 0A
        'ED6_DT27/CH04571 ._CH',  # 0B
        'ED6_DT27/CH04572 ._CH',  # 0C
        'ED6_DT27/CH04573 ._CH',  # 0D
        'ED6_DT27/CH04574 ._CH',  # 0E
        'ED6_DT27/CH04575 ._CH',  # 0F
        'ED6_DT27/CH04576 ._CH',  # 10
        'ED6_DT27/CH04573 ._CH',  # 11
        'ED6_DT27/CH04573 ._CH',  # 12
        'ED6_DT27/CH04573 ._CH',  # 13
        'ED6_DT27/CH04590 ._CH',  # 14
        'ED6_DT27/CH04591 ._CH',  # 15
        'ED6_DT27/CH04592 ._CH',  # 16
        'ED6_DT27/CH04593 ._CH',  # 17
        'ED6_DT27/CH04594 ._CH',  # 18
        'ED6_DT27/CH04595 ._CH',  # 19
        'ED6_DT27/CH04596 ._CH',  # 1A
        'ED6_DT27/CH04593 ._CH',  # 1B
        'ED6_DT27/CH04593 ._CH',  # 1C
        'ED6_DT27/CH04593 ._CH',  # 1D
        'ED6_DT27/CH04120 ._CH',  # 1E
        'ED6_DT27/CH04121 ._CH',  # 1F
        'ED6_DT27/CH04122 ._CH',  # 20
        'ED6_DT27/CH04123 ._CH',  # 21
        'ED6_DT27/CH04124 ._CH',  # 22
        'ED6_DT27/CH04125 ._CH',  # 23
        'ED6_DT27/CH04126 ._CH',  # 24
        'ED6_DT27/CH04123 ._CH',  # 25
        'ED6_DT27/CH04123 ._CH',  # 26
        'ED6_DT27/CH04123 ._CH',  # 27
    )

    # Matching pattern archives for each chip above (same index order).
    AddCharChipPat(
        'ED6_DT27/CH04580P._CP',  # 00
        'ED6_DT27/CH04581P._CP',  # 01
        'ED6_DT27/CH04582P._CP',  # 02
        'ED6_DT27/CH04583P._CP',  # 03
        'ED6_DT27/CH04584P._CP',  # 04
        'ED6_DT27/CH04585P._CP',  # 05
        'ED6_DT27/CH04586P._CP',  # 06
        'ED6_DT27/CH04583P._CP',  # 07
        'ED6_DT27/CH04583P._CP',  # 08
        'ED6_DT27/CH04583P._CP',  # 09
        'ED6_DT27/CH04570P._CP',  # 0A
        'ED6_DT27/CH04571P._CP',  # 0B
        'ED6_DT27/CH04572P._CP',  # 0C
        'ED6_DT27/CH04573P._CP',  # 0D
        'ED6_DT27/CH04574P._CP',  # 0E
        'ED6_DT27/CH04575P._CP',  # 0F
        'ED6_DT27/CH04576P._CP',  # 10
        'ED6_DT27/CH04573P._CP',  # 11
        'ED6_DT27/CH04573P._CP',  # 12
        'ED6_DT27/CH04573P._CP',  # 13
        'ED6_DT27/CH04590P._CP',  # 14
        'ED6_DT27/CH04591P._CP',  # 15
        'ED6_DT27/CH04592P._CP',  # 16
        'ED6_DT27/CH04593P._CP',  # 17
        'ED6_DT27/CH04594P._CP',  # 18
        'ED6_DT27/CH04595P._CP',  # 19
        'ED6_DT27/CH04596P._CP',  # 1A
        'ED6_DT27/CH04593P._CP',  # 1B
        'ED6_DT27/CH04593P._CP',  # 1C
        'ED6_DT27/CH04593P._CP',  # 1D
        'ED6_DT27/CH04120P._CP',  # 1E
        'ED6_DT27/CH04121P._CP',  # 1F
        'ED6_DT27/CH04122P._CP',  # 20
        'ED6_DT27/CH04123P._CP',  # 21
        'ED6_DT27/CH04124P._CP',  # 22
        'ED6_DT27/CH04125P._CP',  # 23
        'ED6_DT27/CH04126P._CP',  # 24
        'ED6_DT27/CH04123P._CP',  # 25
        'ED6_DT27/CH04123P._CP',  # 26
        'ED6_DT27/CH04123P._CP',  # 27
    )

    # 28 debug NPCs on a grid: X (4000/8000/12000/16000) appears to select
    # the character, Y (4000..28000) the animation variant; each points at
    # an init loop (scena 2-11) and the shared talk handler (scena 12).
    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 4000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 8000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x1,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 12000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 2,
        ChipIndex = 0x2,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 16000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 3,
        ChipIndex = 0x3,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 4,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 20000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 4,
        ChipIndex = 0x4,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 5,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 24000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 5,
        ChipIndex = 0x5,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 6,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 4000,
        Z = 0,
        Y = 28000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 6,
        ChipIndex = 0x6,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 7,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 4000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 10,
        ChipIndex = 0xA,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 8000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 11,
        ChipIndex = 0xB,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 12000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 12,
        ChipIndex = 0xC,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 16000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 13,
        ChipIndex = 0xD,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 4,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 20000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 14,
        ChipIndex = 0xE,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 5,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 24000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 15,
        ChipIndex = 0xF,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 6,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 8000,
        Z = 0,
        Y = 28000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 16,
        ChipIndex = 0x10,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 8,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 4000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 20,
        ChipIndex = 0x14,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 8000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 21,
        ChipIndex = 0x15,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 12000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 22,
        ChipIndex = 0x16,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 16000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 23,
        ChipIndex = 0x17,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 4,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 20000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 24,
        ChipIndex = 0x18,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 5,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 24000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 25,
        ChipIndex = 0x19,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 6,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 12000,
        Z = 0,
        Y = 28000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 26,
        ChipIndex = 0x1A,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 9,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 4000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 30,
        ChipIndex = 0x1E,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 8000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 31,
        ChipIndex = 0x1F,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 12000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 32,
        ChipIndex = 0x20,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 2,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 16000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 33,
        ChipIndex = 0x21,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 4,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 20000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 34,
        ChipIndex = 0x22,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 5,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 24000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 35,
        ChipIndex = 0x23,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 6,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    DeclNpc(
        X = 16000,
        Z = 0,
        Y = 28000,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 36,
        ChipIndex = 0x24,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 11,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 12,
    )

    # Script-function table: maps scena indices 0-12 to the labels below.
    ScpFunction(
        "Function_0_56A",  # 00, 0
        "Function_1_56B",  # 01, 1
        "Function_2_56C",  # 02, 2
        "Function_3_582",  # 03, 3
        "Function_4_598",  # 04, 4
        "Function_5_5B3",  # 05, 5
        "Function_6_5CE",  # 06, 6
        "Function_7_61B",  # 07, 7
        "Function_8_6D7",  # 08, 8
        "Function_9_793",  # 09, 9
        "Function_10_84F",  # 0A, 10
        "Function_11_865",  # 0B, 11
        "Function_12_921",  # 0C, 12
    )
# Scena 0 (map init): empty script — returns immediately.
def Function_0_56A(): pass

label("Function_0_56A")
Return()
# Function_0_56A end
# Scena 1 (map entry): empty script — returns immediately.
def Function_1_56B(): pass

label("Function_1_56B")
Return()
# Function_1_56B end
# Scena 2: infinite loop repeating OP_99(0xFE, 0x0, 0x7, 0x640) —
# presumably cycling an animation over frames 0-7; TODO confirm OP_99 semantics.
def Function_2_56C(): pass

label("Function_2_56C")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_581")
OP_99(0xFE, 0x0, 0x7, 0x640)
Jump("Function_2_56C")
label("loc_581")
Return()
# Function_2_56C end
# Scena 3: same animation loop as Function_2 but with period 0x7D0.
def Function_3_582(): pass

label("Function_3_582")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_597")
OP_99(0xFE, 0x0, 0x7, 0x7D0)
Jump("Function_3_582")
label("loc_597")
Return()
# Function_3_582 end
# Scena 4: looped single-frame animation (frames 0-0) with a 500 ms pause.
def Function_4_598(): pass

label("Function_4_598")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_5B2")
OP_99(0xFE, 0x0, 0x0, 0x5DC)
Sleep(500)
Jump("Function_4_598")
label("loc_5B2")
Return()
# Function_4_598 end
# Scena 5: looped animation over frames 0-3 with a 500 ms pause per cycle.
def Function_5_5B3(): pass

label("Function_5_5B3")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_5CD")
OP_99(0xFE, 0x0, 0x3, 0x3E8)
Sleep(500)
Jump("Function_5_5B3")
label("loc_5CD")
Return()
# Function_5_5B3 end
# Scena 6: manually steps value 0..3 via OP_51 every 100 ms, forever —
# presumably a hand-rolled 4-frame animation; TODO confirm OP_51 semantics.
def Function_6_5CE(): pass

label("Function_6_5CE")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_61A")
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
Jump("Function_6_5CE")
label("loc_61A")
Return()
# Function_6_5CE end
# Scena 7: two-phase loop — chip 5 stepped 0..3 twice (100 ms/frame), then
# chip 6 frames 0-1 with a longer hold; presumably the casting→release
# animation pair; TODO confirm.
def Function_7_61B(): pass

label("Function_7_61B")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_6D6")
SetChrChipByIndex(0xFE, 5)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 6)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_7_61B")
label("loc_6D6")
Return()
# Function_7_61B end
# Scena 8: same two-phase loop as Function_7 but with chips 15/16
# (second character's animation set).
def Function_8_6D7(): pass

label("Function_8_6D7")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_792")
SetChrChipByIndex(0xFE, 15)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 16)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_8_6D7")
label("loc_792")
Return()
# Function_8_6D7 end
# Scena 9: same two-phase loop as Function_7 but with chips 25/26
# (third character's animation set).
def Function_9_793(): pass

label("Function_9_793")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_84E")
SetChrChipByIndex(0xFE, 25)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 26)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_9_793")
label("loc_84E")
Return()
# Function_9_793 end
# Scena 10: same animation loop as Function_2 (frames 0-7, period 0x640);
# declared in ScpFunction but not referenced by any DeclNpc above.
def Function_10_84F(): pass

label("Function_10_84F")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_864")
OP_99(0xFE, 0x0, 0x7, 0x640)
Jump("Function_10_84F")
label("loc_864")
Return()
# Function_10_84F end
# Scena 11: same two-phase loop as Function_7 but with chips 35/36
# (fourth character's animation set).
def Function_11_865(): pass

label("Function_11_865")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_920")
SetChrChipByIndex(0xFE, 35)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
SetChrChipByIndex(0xFE, 36)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(100)
OP_51(0xFE, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Sleep(240)
Sleep(1000)
Jump("Function_11_865")
label("loc_920")
Return()
# Function_11_865 end
# Scena 12: shared talk handler for every NPC — shows the greeting line
# (game text kept verbatim) and closes the message window.
def Function_12_921(): pass

label("Function_12_921")
TalkBegin(0xFE)
ChrTalk(    #0
    0xFE,
    "你好。\x02",
)
Jump("loc_93A")
label("loc_93A")
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_12_921 end
# Emit the compiled scenario file, then run main() under the helper's guard.
SaveToFile()

Try(main)
|
[
"zhenjian.c.yang@gmail.com"
] |
zhenjian.c.yang@gmail.com
|
be62c7f3c5cef47b942b7cd5168fccf4f58c10c0
|
6650b65399aed93cfbc1abc55f2160e3d911b069
|
/noun_generator.py
|
b1507100ae448c1b4cc5296d777a9c6c38ef43d7
|
[] |
no_license
|
Simon198/german_noun_generator_bot
|
832c3e1d80ae04e0bfa1a2d4e171184204ab48c1
|
1eb8368514fdd8c52a17def2f944de22dcdbe950
|
refs/heads/main
| 2023-02-05T06:21:57.560060
| 2020-12-24T13:19:21
| 2020-12-24T13:19:21
| 324,149,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
from telegram import Update, Bot
import os
import random
# Resolve data files relative to this script, not the working directory.
dir_path = os.path.abspath(os.path.dirname(__file__))

# Load the noun list; read as bytes and decode explicitly to force UTF-8.
with open(dir_path + '/nouns.txt', 'rb') as file:
    nouns = file.read()
nouns = nouns.decode('utf-8').split('\n')

# Telegram bot token is kept out of the source in a sibling file.
with open(dir_path + '/TOKEN.txt', 'r') as file:
    token = file.read()
def welcome_message(update, context):
    """Greet the user and explain the /generate command (German text)."""
    message = update.message
    message.reply_text('Guten Tag Freund')
    message.reply_text('Über den Befehl /generate kannst du fünf zufällig deutsche Nomen generieren.')
def generate_random_noun(update, context):
    """Reply with randomly chosen German nouns (five by default).

    An optional numeric argument ("/generate 7") overrides the count; a
    non-numeric argument triggers a German error reply.
    """
    count = 5
    if context.args:
        try:
            count = int(context.args[0])
        except:
            update.message.reply_text('Du musst eine Zahl hinter /generate eingeben')
            return
    # Sample distinct indices so the same noun never appears twice.
    chosen = random.sample(range(len(nouns)), count)
    for rank, idx in enumerate(chosen, start=1):
        update.message.reply_text(str(rank) + ' - ' + nouns[idx])
def main():
    """Wire up the Telegram command handlers and poll until interrupted."""
    bot_updater = Updater(token)
    dispatcher = bot_updater.dispatcher
    dispatcher.add_handler(CommandHandler('start', welcome_message))
    dispatcher.add_handler(CommandHandler('generate', generate_random_noun))
    bot_updater.start_polling()
    bot_updater.idle()
# Start the bot only when run as a script, not on import.
if __name__ == '__main__':
    main()
|
[
"simon.heinrich@iesy.net"
] |
simon.heinrich@iesy.net
|
9aac217d250bec6154a2df018e9272e61fac82ab
|
1cee80627744f448efea6fac3c91c471e6b1cba9
|
/resott/asgi.py
|
b78850c84f3b91e7246d72e948d8cb3ffaf41c63
|
[] |
no_license
|
AJ10-1/resott
|
47cf314b47e8352ab9184785f36986a1915101e7
|
9d1839d7459943eec2cf365490836b4ce78129e6
|
refs/heads/master
| 2023-09-01T07:05:57.440647
| 2021-11-03T08:47:12
| 2021-11-03T08:47:12
| 424,101,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
ASGI config for resott project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'resott.settings')
application = get_asgi_application()
|
[
"ayushjaiss@gmail.com"
] |
ayushjaiss@gmail.com
|
6d6cd4acc897db1f094012fabc3bba85a8afe094
|
5a212d29890119f91d61b0d6c8f701277f25b875
|
/piixxie/errors.py
|
166fa7e9bc0ec01fb61375341af416b64945410d
|
[] |
no_license
|
Hooksie/piixxie
|
c922f78971b9cdea31979a6134180b6bea86704c
|
d1f126de0a3e63fc01548c23789f510c89a0f756
|
refs/heads/master
| 2021-01-20T17:58:17.477498
| 2016-06-24T05:04:18
| 2016-06-24T05:04:18
| 61,847,793
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
class PiixxieError(Exception):
    """Base class for every Piixxie-specific exception."""
    pass
class VerificationError(PiixxieError):
    """Raised when an input image fails the preconditions for processing."""
    pass
class DimensionError(VerificationError):
    """Raised when the image dimensions are not a multiple of the pixel size."""
    pass
|
[
"me@matthooks.com"
] |
me@matthooks.com
|
337f7594697dfc64854074ccb19bdcce8234e917
|
7c5fa53b0bf3e45aabc0513f31ee17ad1233bb36
|
/traffic_generator/DragonflyLoadSingleGlobalLinkTrafficGenerator.py
|
e351dfd901bb188d2cd52e0e7dd685b6c744c00a
|
[
"MIT"
] |
permissive
|
minyee/TAGO
|
cd20587a170153871c62636ed75bbe6cbaf36655
|
9fea77cc39aa035796ab3ca52e95ebb66ffe0e7f
|
refs/heads/master
| 2022-09-18T07:00:30.525054
| 2020-06-01T00:47:57
| 2020-06-01T00:47:57
| 268,355,125
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
import TrafficGenerator, sys, os
# Make the parent directory importable so sibling topology modules resolve.
sys.path.append('../')
import UniformGroupDragonfly
import numpy as np
class DragonflyLoadSingleGlobalLinkTrafficGenerator(TrafficGenerator.TrafficGenerator):
    """Traffic generator that spreads load uniformly over the global links.

    Every inter-block switch pair (i, j) with a direct connection receives
    traffic proportional to its number of global links, normalized so the
    whole matrix sums to 1.
    """

    def __init__(self, topology):
        TrafficGenerator.TrafficGenerator.__init__(self, topology)
        return

    def generate_traffic(self):
        """Return an (S x S) numpy matrix of per-switch-pair traffic shares.

        Fix: removed a Python-2-only debug ``print traffic_matrix`` (a
        SyntaxError on Python 3 that also spammed stdout) and two unused
        topology lookups (num_blocks, block-to-switches map).
        """
        num_switches = self.topology.get_total_num_switches()
        traffic_matrix = np.zeros((num_switches, num_switches))
        switch_to_block_id_map = self.topology.get_switch_id_to_block_id_map()
        adj_matrix = self.topology.get_adjacency_matrix()
        # First pass: count every global (inter-block) link.
        number_of_global_links = 0
        for i in range(num_switches):
            i_block = switch_to_block_id_map[i]
            for j in range(num_switches):
                j_block = switch_to_block_id_map[j]
                if i_block != j_block and adj_matrix[i][j] > 0:
                    number_of_global_links += adj_matrix[i][j]
        # Second pass: weight each inter-block entry by its link count.
        entry_probability = 1. / number_of_global_links
        for i in range(num_switches):
            i_block = switch_to_block_id_map[i]
            for j in range(num_switches):
                j_block = switch_to_block_id_map[j]
                if i_block != j_block and adj_matrix[i][j] > 0:
                    traffic_matrix[i][j] = adj_matrix[i][j] * entry_probability
        return traffic_matrix

    def to_string(self):
        """Short identifier used in output/file naming."""
        return "dfly_strain_single_link"
|
[
"mt3126@columbia.edu"
] |
mt3126@columbia.edu
|
fd79b74367b169eecee4829c8730e2662173b58b
|
3efa3a2bcdd38c27beeb967a9e99c6afc17e6e6f
|
/pipelines/pipeline_dianping.py
|
89e7dab6d480d935465d72ac2124b52a26663b5e
|
[] |
no_license
|
chocoai/integrated_crawler
|
6f266ef54d096096c71ec5bd28463393164126d1
|
5d75d2781d2adfcd6524e8a2edfeb2fb2267571b
|
refs/heads/master
| 2020-04-26T03:07:07.995759
| 2019-02-25T10:42:59
| 2019-02-25T10:42:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,512
|
py
|
# -*- coding: utf-8 -*-
import os, re
import time, datetime
import csv
import sqlite3 as sql
import ssl
import pandas as pd

from utils.general_request import *

# NOTE(review): `logging`, `requests`, `request_url`, `PROXY` and
# `url_to_del_whitelist` are expected to come from the wildcard import
# above — confirm utils.general_request exports them.
logging.basicConfig(filename='logs/utils_pipeline_dianping.log', level=logging.WARNING,
                    format="%(asctime)s - %(levelname)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S %p")

# Politeness delays (seconds) between successive page / city requests.
TIME_INTERVAL_TO_NEXT_PAGE = 2.0
TIME_INTERVAL_TO_NEXT_CITY = 2.0
def get_city_id(csvfilename):
    """Scrape dianping.com's city list and write (name, url, id) rows to CSV.

    NOTE(review): the returned ``city_ids`` dict is never populated — all
    results go to *csvfilename* only.  Confirm whether any caller relies
    on the return value.
    """
    city_ids = dict()
    url = 'http://www.dianping.com/citylist'
    h = request_url(url, 'GET')
    groups = h.find_all('li', class_='letter-item')
    with open(csvfilename, 'w+', encoding='UTF-8', newline='') as csvfile:
        csvfile.write('city_name,city_url,city_id\n')
        for group in groups:
            print('Now finding cities whose first-letter = ' + group.find('div', class_='oneletter').text)
            city_links = group.find_all('a')
            for city_link in city_links:
                city = city_link.text
                city_url = 'http:' + city_link.attrs['href'] + '/'
                h = request_url(city_url, 'GET')
                # Extract the numeric city id embedded in the page's inline JS
                # between the 'cityId' key and its trailing comment.
                start_point = str(h).find("'cityId'")
                end_point = str(h).find(", // 城市id")
                city_id = str(h)[start_point + 11:end_point - 1]
                csvfile.write(city + ',' + city_url + ',' + city_id + '\n')
                time.sleep(TIME_INTERVAL_TO_NEXT_CITY)
    return city_ids
def search_restaurant_in_city(keywords, city_id):
    """Search dianping for *keywords* in one city; append hits to a raw CSV.

    Walks every result page, writing one line per shop to
    ``data/dianping_results/raw/restaurant_details_<keywords>.csv`` in the
    format ``city_id,keyword,shop_id,title,score,comment_url,status``.

    Returns:
        Total number of shops found across all pages.
    """
    url = 'https://www.dianping.com/search/keyword/{}/10_{}'.format(str(city_id), keywords)
    h = request_url(url)
    detail_csvfile = 'data/dianping_results/raw/' + 'restaurant_details_' + keywords + '.csv'
    total_number = 0
    # A missing pager div means everything fits on a single page.
    if h.find('div', class_='page') is None:
        total_pages = 1
    else:
        total_pages = int(h.find('div', class_='page').find_all('a')[-2].attrs['data-ga-page'])
    cur_page = 1
    while True:
        not_found_div = h.find('div', class_='not-found')
        if not_found_div is None:
            shoplist = h.find('div', {'id': 'shop-all-list'})
            if shoplist is not None:
                lis = shoplist.find_all('li')
                total_number += len(lis)
                with open(detail_csvfile, 'a+', encoding='UTF-8', newline='') as f:
                    for li in lis:
                        store_title = li.find('div', class_='tit').find('a').attrs['title']
                        store_id = li.find('div', class_='tit').find('a').attrs['data-shopid']
                        store_score = li.find('div', class_='comment').find('span').attrs['title']
                        store_comment_url = li.find('div', class_='comment').find('a').attrs['href']
                        store_status = li.find('span', class_='istopTrade')
                        # Bug fix: the old branching wrote the literal
                        # '歇业/关闭' (closed) whenever ANY status span was
                        # present, even if the shop was not closed.  Record
                        # the actual status text (empty when absent).
                        status_text = '' if store_status is None else store_status.text
                        line = str(city_id) + ',' + keywords + ',' + store_id + ',' + store_title + \
                               ',' + store_score + ',' + store_comment_url + ',' + status_text + '\n'
                        f.write(line)
        else:
            print('Found {} restaurant in city_id: {}.'.format(str(0), str(city_id)))
            return total_number
        cur_page += 1
        if cur_page <= total_pages:
            time.sleep(TIME_INTERVAL_TO_NEXT_PAGE)
            # Page 2 appends '/p2'; later pages swap '/p<n-1>' for '/p<n>'.
            if cur_page == 2:
                url = url + '/p' + str(cur_page)
            else:
                url = url.replace('/p' + str(cur_page - 1), '/p' + str(cur_page))
            h = request_url(url)
        else:
            print('Found {} restaurant in city_id: {}.'.format(str(total_number), str(city_id)))
            return total_number
def start_crawler(keyword, city_id_list, start_city_id):
    """Crawl *keyword* for every city id >= *start_city_id* (resume support).

    NOTE(review): ``requests``, ``url_to_del_whitelist`` and ``PROXY`` must
    come from the ``utils.general_request`` wildcard import — confirm.
    """
    for city_id in city_id_list:
        # start_city_id lets an interrupted crawl resume mid-list.
        if city_id >= start_city_id:
            total_number_in_city = search_restaurant_in_city(keyword, city_id)
            print('Total results in city: {} == {}.'.format(str(city_id), str(total_number_in_city)))
            time.sleep(2.0)
    # Presumably removes this machine's proxy IP from a whitelist when
    # finished — TODO confirm the endpoint's semantics.
    print(requests.get(url_to_del_whitelist + PROXY.split(':')[0]).text)
def search_keyword_in_dianping(keyword, start_city_id=1):
    """Load the Nielsen city list and crawl Dianping for *keyword* city by city."""
    # If using baidu map source:
    # bdmap_result_csvfile = 'data/baidumap_results/{}_20190220.csv'.format(keyword)
    nielsen_df = pd.read_csv('data/dianping_results/nierson_city_list.csv', encoding='gbk')
    city_ids = sorted(nielsen_df.meituan_city_id)
    start_crawler(keyword, city_ids, start_city_id)
    print('Finished crawling info of: ', keyword)
def clean_csv_results(csvfilename):
    """Deduplicate one raw crawl CSV and write it to the 'cleaned' location.

    The raw file has no header row; reading is tried as UTF-8 first, then GBK
    (some crawl files were written with a Chinese legacy encoding). On any
    other read error the file is skipped: the error is logged and no output
    is written.
    """
    columns = ['city_id', 'keyword', 'dianping_shop_id', 'shop_title', 'stars', 'shop_url', 'state']
    try:
        df = pd.read_csv(csvfilename, names=columns, encoding='UTF-8')
    except UnicodeDecodeError:
        df = pd.read_csv(csvfilename, names=columns, encoding='gbk')
    except Exception as e:
        # BUG FIX: the original did the dedup/write work in a `finally` block,
        # so after a read failure `df` was never bound and the `finally`
        # crashed with NameError instead of returning. Skip the file cleanly.
        print('Exception found when cleaning: ', csvfilename)
        print(e)
        return
    df = df.drop_duplicates(keep='first')
    # Mirror the raw path into the cleaned directory by name substitution.
    new_name = csvfilename.replace('raw', 'cleaned')
    df.to_csv(new_name, encoding='utf-8')
    print('Finished cleaning file: ' + csvfilename)
def clean_data(path='data/dianping_results/raw/'):
    """Run clean_csv_results on every crawl file under *path*, skipping the city lists."""
    skip = ('dianping_city_list.csv', 'nierson_city_list.csv')
    for _root, _dirs, filenames in os.walk(path, topdown=False):
        for filename in filenames:
            if filename in skip:
                continue  # lookup tables, not crawl output
            clean_csv_results(path + filename)
    print('Finished cleaning data.')
def merge_cleaned_data(folder_path='dianping_results/cleaned/'):
    """Concatenate every cleaned CSV under *folder_path* into dianping_cleaned_in_one.csv.

    NOTE: files are resolved as folder_path + name, so folder_path must end
    with a path separator.
    """
    frames = []
    for _root, _dirs, filenames in os.walk(folder_path, topdown=False):
        frames.extend(pd.read_csv(folder_path + fn, encoding='gbk') for fn in filenames)
    merged = pd.concat(frames)
    merged.to_csv('dianping_cleaned_in_one.csv', encoding='gbk')
|
[
"kevin_jfzhu@163.com"
] |
kevin_jfzhu@163.com
|
be6a016ce6c16fe2faa6e74c48ad6571cc088641
|
b33ddc7b89d05e19fdeb69593872fd174fab9f4f
|
/URI-py/2875.py
|
49dc31d7091f31bea192a97075a7c40e9e9f21a3
|
[] |
no_license
|
ThiagoCComelli/URI-Online-Judge
|
8b8d609d880342b39ba0d396c0610ecb7e01a5af
|
5348f736b2d683f4b857232c22cccb7c1d8b8d65
|
refs/heads/master
| 2020-07-23T15:14:05.353948
| 2020-03-10T19:42:12
| 2020-03-10T19:42:12
| 207,606,956
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
# -*- coding: utf-8 -*-
# NOTE(review): this solution looks unfinished — the inner `while True` never
# breaks, and `a =a` reads a name that is never defined, so the first test
# case raises NameError (NameError is not caught by the EOFError handler
# below). The grid is read but never processed; TODO finish or rewrite.
while True:
    try:
        # First line of each test case: grid dimensions n x m.
        n,m = map(int, input().split())
        lista = []
        # lista1 is never used — presumably intended for the output grid.
        lista1= []
        for i in range(n):
            lista.append(input().split())
        # Broken/dead processing loop (see note at top of file).
        while True:
            for i in range(n):
                for j in range(m):
                    a =a
    except EOFError:
        break
|
[
"thiago.comelli@outlook.com"
] |
thiago.comelli@outlook.com
|
2d883d197753ff27c2d2713d689c61047b3dd2eb
|
517693716ff4d3f642dda194767cbc03bb37cd1b
|
/src/data_functions.py
|
5d48879422f5b92f52c3c1c5dbc7b4a6d8dd580a
|
[] |
no_license
|
bradley-p/Solar_Energy_Forecasting
|
3cb1951507a1336ee0cf65133cfd0b861ee7454c
|
22317b2fdf51e3d973b32ceef42bc6e68754f6cc
|
refs/heads/main
| 2023-04-17T22:52:28.000990
| 2021-05-04T17:03:18
| 2021-05-04T17:03:18
| 348,751,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
import numpy as np
import astral
from astral import sun
import pytz
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
###
# File contains methods useful for curating data
# helps to clean-up the data curating notebook
# provides method that computes elevation, azimuth, and zenith using astral package
##
def plotRegression(truth, pred):
    """Scatter predictions against ground truth with a red y=x reference line,
    then show the figure (blocks until the window is closed)."""
    plt.figure(figsize=(10, 10))
    plt.scatter(truth, pred)
    lo, hi = min(truth), max(truth)
    # Red diagonal: points on this line are perfect predictions.
    plt.plot([lo, hi], [lo, hi], 'r')
    plt.grid()
    plt.xlabel("Truth")
    plt.ylabel("Predicted")
    plt.title("Truth Plotted against actual value")
    plt.show()
def computeAverageError(pred, y):
    """Mean absolute relative error of *pred* vs *y*.

    The 1e-6 added to each truth value guards against division by zero.
    Indexed by len(pred), matching the original (raises IndexError if y is
    shorter; raises ZeroDivisionError on empty input).
    """
    errors = [abs((y[i] - pred[i]) / (y[i] + 1e-6)) for i in range(len(pred))]
    return sum(errors) / len(errors)
class LoganAstral:
    """Sun-position helper pinned to Logan, UT (lat 41.7452, lon -111.8097, US/Mountain)."""
    def __init__(self):
        # Going to use these variables a lot.
        self.MST = pytz.timezone('US/Mountain')
        self.logan = astral.LocationInfo(name='Logan, UT', region='US/Mountain', timezone=self.MST, latitude=41.7452, longitude=-111.8097)
        self.observer = self.logan.observer
    # Astral expects UTC time. We are assuming input is in MST.
    def timeToUTC(self, mstDT):
        """Convert a naive local (US/Mountain) datetime to an aware UTC datetime."""
        # localize() attaches the MST zone (raises if mstDT is already aware);
        # normalize() resolves any DST boundary adjustment.
        return self.MST.normalize(self.MST.localize(mstDT)).astimezone(pytz.utc)
    # Computes the three sun angles for a local datetime.
    def computeElAzZe(self, dt):
        """Return (elevation, azimuth, zenith) for naive local datetime *dt*."""
        utcDT = self.timeToUTC(dt)
        elevation = sun.elevation(self.observer, utcDT)
        azimuth = sun.azimuth(self.observer, utcDT)
        zenith = sun.zenith(self.observer, utcDT)
        return (elevation, azimuth, zenith)
if __name__=='__main__':
    # Ad-hoc check: compute sun angles for Logan, UT at a fixed local time.
    year = 2021
    month = 3
    day = 26
    hour = 7
    minutes = 19
    seconds = 0
    dt = datetime(year, month, day, hour, minutes, seconds)
    lat = 41.7452
    lon = -111.8097
    MST = pytz.timezone('US/Mountain')
    logan = astral.LocationInfo(name='Logan, UT', timezone=MST, latitude=lat, longitude=lon)
    # this is how to convert from local time to UTC, which astral expects
    utcdt = MST.normalize(MST.localize(dt)).astimezone(pytz.utc)
    print(sun.zenith_and_azimuth(logan.observer, utcdt))
    print(sun.elevation(logan.observer, utcdt))
|
[
"70186602+bradley-p@users.noreply.github.com"
] |
70186602+bradley-p@users.noreply.github.com
|
0a1abc1df723114b5f626549217071f99ce3f6d6
|
1dce03e6f3f5b23d1e5c599678624638943b9422
|
/docker/create_docker_images2.py
|
c963255960a9c9025948e08941e44f9ffe9c6e2f
|
[] |
no_license
|
volat1977/byte_of_python
|
76ec958bdc51c7538bb24e5d152b456feab603ca
|
60b58ca3927ef5e2801c93dd676d5f8b4c03d9fc
|
refs/heads/master
| 2020-12-26T07:23:10.562537
| 2020-03-24T05:31:03
| 2020-03-24T05:31:03
| 237,431,769
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
from io import BytesIO
import docker
# Minimal Dockerfile passed to the daemon in-memory via a file-like object.
# (The '# Shared Volume' line below is part of the Dockerfile text itself.)
dockerfile = '''
# Shared Volume
FROM busybox:buildroot-2014.02
VOLUME /data
CMD ["/bin/sh"]
'''
f = BytesIO(dockerfile.encode('utf-8'))
cli = docker.from_env()
# Build via the low-level API; rm/decode semantics per docker-py's
# APIClient.build — the generator must be consumed for the build to run.
response = cli.api.build(fileobj=f, rm=True, tag='test3', decode=True)
# NOTE(review): the commented-out loops below use Python 2 semantics
# (dict.keys()[0] is not subscriptable on Python 3); rewrite before reviving.
#for line in response:
#    if line.keys()[0] in ('stream', 'error'):
#        value = line.values()[0].strip()
#        if value:
#            print(value)
# for line in response:
#     if line.keys in ('stream', 'error'):
#         value = line.values()[0].strip()
#         if value:
#             print(value)
|
[
"alex@pop-os.localdomain"
] |
alex@pop-os.localdomain
|
19d98f14f17b5614f275bb4b833370621df30e75
|
863e3aaca85d79dd9891cc1dc42dcb6541e253c4
|
/src/shortener/migrations/0001_initial.py
|
33d90b3acf25978d34d5ef51632f90056a9c9d7e
|
[] |
no_license
|
Swain0114/trydjango_100
|
47cf65feb44bf93de680bfbcf33e16ea85294ac6
|
5fbe60a5034bfcb0caa62f3f8529e7495cbfc8e6
|
refs/heads/master
| 2021-01-12T09:21:57.298717
| 2016-12-24T02:03:53
| 2016-12-24T02:03:53
| 76,149,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-12 23:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the `shortener` table (auto-generated; avoid hand edits)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='shortener',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.CharField(max_length=220)),
                # Short lookup code; unique so each code maps to exactly one URL.
                ('shortcode', models.CharField(max_length=15, unique=True)),
                # `update` is refreshed on every save; `timestamp` is set once on create.
                ('update', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
[
"tony820114@gmial.com"
] |
tony820114@gmial.com
|
4aa9aa10086ca521fc6643a0560e8adf06af8ee0
|
ceb282df59afb5714dda768c9ee26ae8c3cd14ef
|
/api/src/apps/pages/models.py
|
c612e3e6d43114951e4100adf6d14aa6688753ef
|
[] |
no_license
|
ukiyodigital/float
|
5aaee3080a7028008edee259e14ba5b5dfe323c8
|
1f3be29cba8273ab1b0e837de4eb53f2d49fc24c
|
refs/heads/develop
| 2023-03-14T03:16:02.859606
| 2022-03-21T15:34:03
| 2022-03-21T15:34:03
| 163,778,265
| 2
| 0
| null | 2023-02-28T06:20:45
| 2019-01-02T00:57:46
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.core.serializers.json import DjangoJSONEncoder
from apps.sites.models import Site
from apps.column_headers.models import ColumnHeader
from apps.users.models import User
from apps.column_headers.utils import ColumnManager
class Page(models.Model):
    """A page belonging to a Site; owns a set of PageColumnHeader columns."""
    # page_name
    name = models.CharField(max_length=15, blank=False)
    slug = models.SlugField(max_length=15)
    # Foreign Keys
    # PROTECT: a site cannot be deleted while it still has pages.
    site = models.ForeignKey(Site, on_delete=models.PROTECT, related_name='pages')
    users = models.ManyToManyField(User)
    class Meta:
        # A slug is unique within its site, not globally.
        unique_together = ('slug', 'site',)
    def update_columns(self, columns):
        """Persist *columns* for this page through the shared ColumnManager helper."""
        manager = ColumnManager(
            model=PageColumnHeader,
            column_fields=['name', 'slug', 'order', 'field', 'data'],
        )
        manager.save_columns(columns, self.id)
class PageColumnHeader(ColumnHeader):
    """Column header attached to a Page; `data` carries an arbitrary JSON payload."""
    page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='columns', null=True, blank=True)
    data = JSONField(null=True, blank=True, encoder=DjangoJSONEncoder)
    class Meta:
        # columns cannot have the same parent
        # (`parent` presumably comes from the ColumnHeader base — verify there)
        unique_together = (
            ('page', 'slug',),
            ('parent', 'slug',),
        )
|
[
"kevin.a.cunanan@gmail.com"
] |
kevin.a.cunanan@gmail.com
|
974761893925c0cb51e9a1d433306bab6ff66024
|
c083f88701e27bbbda10b8b5e90763ad20297b42
|
/dch_002/settings.py
|
02d9223eb0f82dc23588839fbd3b9aacb51e6a4f
|
[] |
no_license
|
Shakeel-Nawaz/dch_002
|
70e9e713f6b7b23b30c180c2509a8484e1b682b5
|
24eda80b9a66f255fd3b79569caf2d20181e6ecd
|
refs/heads/main
| 2023-08-30T05:11:06.316241
| 2021-10-14T08:02:23
| 2021-10-14T08:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
"""
Django settings for dch_002 project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — load it from an
# environment variable before deploying.
SECRET_KEY = 'django-insecure-8!+(_8^io@ue!diyhu+sw=%=sio7xoix#k)ksly03il#0#k5y('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'channels',  # Django Channels — required by the ASGI/CHANNEL_LAYERS setup below
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app1'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dch_002.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# The project serves through ASGI (Channels) instead of WSGI.
# WSGI_APPLICATION = 'dch_002.wsgi.application'
ASGI_APPLICATION = 'dch_002.asgi.application'
# Channel layer backed by a local Redis instance on 127.0.0.1:6379.
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [("127.0.0.1", 6379)],
        },
    },
}
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"Shakeelnawaz1@gmail.com"
] |
Shakeelnawaz1@gmail.com
|
274f35141adb643bb2d94588530768a198f2b6b9
|
657aa6770a486ed812af26c6ec824a5e8bac1eab
|
/venv/Scripts/pip3.8-script.py
|
15e1a9ac428e1c0fb220fb3ed62f5b3bacc114b5
|
[] |
no_license
|
hemangibavasiya/ImageToArray
|
35f93a194de552832584af3d4d468ee2b2826425
|
3b61d575ec8c5fe652c3e16aeff5c263c1cd2e32
|
refs/heads/master
| 2022-12-17T06:37:54.800910
| 2020-09-21T05:53:03
| 2020-09-21T05:53:03
| 297,242,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
#!C:\Users\Hemangi.Bavasiya\PycharmProjects\ImageToArray\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.8'
# NOTE: auto-generated setuptools console-script shim — regenerate via
# pip/virtualenv rather than editing by hand.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees its canonical argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.8')()
    )
|
[
"hemangibavasiya08@gmail.com"
] |
hemangibavasiya08@gmail.com
|
d753d0c4da9bb638deab2a12cfdd73f9e4680cb5
|
bac7a7507933ac5bb38b41bbe2a587764da3cf94
|
/snappy_wrappers/wrappers/link_in_bam/wrapper.py
|
09790324734c2213f0b8a7b3f82af6b18a1c8997
|
[
"MIT"
] |
permissive
|
Pregelnuss/snappy-pipeline
|
923b0f36117a2f55ee52f9a8564ed3bb82a8be16
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
refs/heads/master
| 2023-06-19T07:24:04.736033
| 2021-05-27T07:24:05
| 2021-05-27T07:24:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
# -*- coding: utf-8 -*-
"""CUBI+Snakemake wrapper code for external: Snakemake wrapper.py
"""
from snakemake import shell
__author__ = "Oliver Stolpe <oliver.stolpe@bihealth.de>"
shell.executable("/bin/bash")
# Path of this wrapper file; a copy is stored next to the log for reproducibility.
this_file = __file__
# NOTE(review): `input` shadows the builtin of the same name; kept as-is
# because the shell template below substitutes {input}.
input = snakemake.params.args["input"]
if not input:
    raise Exception("No bam found")
shell(
r"""
set -x
# Write out information about conda installation.
conda list >{snakemake.log.conda_list}
conda info >{snakemake.log.conda_info}
# Also pipe stderr to log file
if [[ -n "{snakemake.log.log}" ]]; then
if [[ "$(set +e; tty; set -e)" != "" ]]; then
rm -f "{snakemake.log.log}" && mkdir -p $(dirname {snakemake.log.log})
exec 2> >(tee -a "{snakemake.log.log}" >&2)
else
rm -f "{snakemake.log.log}" && mkdir -p $(dirname {snakemake.log.log})
echo "No tty, logging disabled" >"{snakemake.log.log}"
fi
fi
# Setup auto-cleaned TMPDIR
export TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
mkdir -p $TMPDIR/tmp.d
# Link in bam files with the proper file name scheme
ln -sr {input} {snakemake.output.bam}
# Link in resultin BAM file or create index
if [[ -e {input}.bai ]]; then
ln -sr {input}.bai {snakemake.output.bam_bai}
else
samtools index {snakemake.output.bam}
fi
# Build MD5 files
pushd $(dirname {snakemake.output.bam})
md5sum $(basename {snakemake.output.bam}) > $(basename {snakemake.output.bam}).md5
md5sum $(basename {snakemake.output.bam_bai}) > $(basename {snakemake.output.bam_bai}).md5
popd
# QC Report ---------------------------------------------------------------------------------------
# gather statistics from BAM file
# TODO: use pipes for only reading once from disk?
samtools stats {snakemake.output.bam} > {snakemake.output.report_bamstats_txt}
samtools flagstat {snakemake.output.bam} > {snakemake.output.report_flagstats_txt}
samtools idxstats {snakemake.output.bam} > {snakemake.output.report_idxstats_txt}
# call plot-bamstats
mkdir $TMPDIR/bamstats.d
plot-bamstats \
-p $TMPDIR/bamstats.d/ \
{snakemake.output.report_bamstats_txt} \
|| true # ignore failure
# Convert HTML report into one file.
inline-html \
--in-file $TMPDIR/bamstats.d/index.html \
--out-file {snakemake.output.report_bamstats_html} \
|| touch {snakemake.output.report_bamstats_html}
# Build MD5 files for the reports
md5sum {snakemake.output.report_bamstats_html} > {snakemake.output.report_bamstats_html_md5}
md5sum {snakemake.output.report_bamstats_txt} > {snakemake.output.report_bamstats_txt_md5}
md5sum {snakemake.output.report_flagstats_txt} >{snakemake.output.report_flagstats_txt_md5}
md5sum {snakemake.output.report_idxstats_txt} > {snakemake.output.report_idxstats_txt_md5}
# Additional logging for transparency & reproducibility
# Logging: Save a copy this wrapper (with the pickle details in the header)
cp {this_file} $(dirname {snakemake.log.log})/wrapper.py
# Logging: Save a permanent copy of the environment file used
cp $(dirname {this_file})/environment.yaml $(dirname {snakemake.log.log})/environment_wrapper.yaml
"""
)
|
[
"manuel.holtgrewe@bihealth.de"
] |
manuel.holtgrewe@bihealth.de
|
744364012adc66c65453484e42e764b92591af0a
|
2fdcbdc3a179a861cf0b59ccbafa6f8153e53566
|
/artifacts/admin.py
|
79873b45d4fea619373aff83133d6f33b7063d85
|
[] |
no_license
|
Rasquin/auction
|
c6342ed4737d024c81667f03550d8dc093bb0458
|
f2fc9dc72ab7a34172329045d4e948780dc2c4e2
|
refs/heads/master
| 2022-07-12T07:04:53.202963
| 2020-02-04T15:52:20
| 2020-02-04T15:52:20
| 211,497,820
| 1
| 1
| null | 2022-06-21T23:50:09
| 2019-09-28T12:34:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 121
|
py
|
from django.contrib import admin
from .models import Artifact
# Register your models here.
# Expose Artifact in the Django admin with the default ModelAdmin options.
admin.site.register(Artifact)
|
[
"ubuntu@ip-172-31-42-208.ec2.internal"
] |
ubuntu@ip-172-31-42-208.ec2.internal
|
583338bd6695ced08d352a82aa0bb9b38a8f8527
|
2ef7e7785b1f4bba60bf384cc878ed6948eb7fbe
|
/4 Kyu/stripComments.py
|
78da9397af08d94eb0602c6e9304375dc13b2ec6
|
[] |
no_license
|
Muneer320/CodeWars-Solved-Katas
|
16efcc9eca9ab635fdcb9c17ac9c177cc49a3ae9
|
4162ae7f9b48bbc08e1fa2743ee11b0fc4fd2318
|
refs/heads/main
| 2023-04-05T16:34:02.606131
| 2021-04-22T14:50:07
| 2021-04-22T14:50:07
| 360,554,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
def solution(string, markers):
    """Strip everything from each comment marker to end-of-line, trimming
    trailing whitespace after every cut (a no-op when markers is empty)."""

    def strip_line(line):
        # Apply each marker in turn, exactly mirroring the original's
        # one-pass-per-marker behaviour.
        for marker in markers:
            line = line.split(marker)[0].rstrip()
        return line

    return '\n'.join(strip_line(line) for line in string.split('\n'))


print(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]))
|
[
"noreply@github.com"
] |
Muneer320.noreply@github.com
|
1ab744d642d54a043628e569b66babf9d7646fbc
|
1228750f9b95c5c2fb9a1d5cb339275db979356b
|
/anytime_models/examples/resnet-ann.py
|
64befd9e662b1cc774411440061df920bb1e721c
|
[
"MIT"
] |
permissive
|
microsoft/petridishnn
|
be0236b9385c7523ca71cfd171f95beaca5d851a
|
0e0431a56db893ef8ee14501f12bf7046d4d6024
|
refs/heads/master
| 2023-06-29T20:58:01.980267
| 2023-06-12T18:22:32
| 2023-06-12T18:22:32
| 180,651,701
| 123
| 24
|
MIT
| 2023-06-12T18:22:33
| 2019-04-10T19:39:36
|
Python
|
UTF-8
|
Python
| false
| false
| 657
|
py
|
import argparse
import anytime_models.models.anytime_network as anytime_network
from anytime_models.models.anytime_network import AnytimeResNet, AnytimeResNeXt
import ann_app_utils
"""
"""
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # App-level arguments first, then resnet-specific flags on the same parser.
    parser = ann_app_utils.parser_add_app_arguments(parser)
    anytime_network.parser_add_resnet_arguments(parser)
    args = parser.parse_args()
    # NOTE(review): any resnet_version other than 'resnet'/'resnext' leaves
    # model_cls unbound and crashes with NameError below.
    if args.resnet_version == 'resnet':
        model_cls = AnytimeResNet
    elif args.resnet_version == 'resnext':
        model_cls = AnytimeResNeXt
        # ResNeXt runs are forced to bottleneck blocks here.
        args.b_type = 'bottleneck'
    ann_app_utils.cifar_svhn_train_or_test(args, model_cls)
|
[
"hanzhang@cs.cmu.edu"
] |
hanzhang@cs.cmu.edu
|
93b36a9baec19346f743510cee81a567f11fbd3a
|
d5adda4f7abb3c066b7c3c24e0871cfba0e6ca2d
|
/IPnetwork/get_udp.py
|
1fb90139ca4cda6b073ed7afa071ffddeaf210d4
|
[] |
no_license
|
qwertpas/practice
|
8bb21caa629956787890d631c3026473742ac401
|
29b75ab01a2ce06b8b347aa5ded06451c598d78e
|
refs/heads/master
| 2020-04-09T06:06:45.659528
| 2018-12-02T21:33:20
| 2018-12-02T21:33:20
| 160,098,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import socket
import sys
port = 8081
# BUG FIX: this receiver calls recvfrom() on an unconnected, un-listened
# socket, which only works for datagram sockets; the original created
# SOCK_STREAM (TCP), so recvfrom() failed at runtime. Use UDP, matching the
# file name (get_udp.py) and the recvfrom()-based receive loop.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", port))  # all interfaces, fixed port
print("socket: ", sock)
running = True  # flip to False (e.g. from a signal handler) to stop the loop
while running:
    # Blocks until a datagram arrives; at most 1024 bytes of payload per read.
    the_data, the_addr = sock.recvfrom(1024)
    print("R: ", the_data, '\t\t A: ', the_addr)
|
[
"christopher.y.x@gmail.com"
] |
christopher.y.x@gmail.com
|
6915ead1ba750b7569a4d25b34f4be68242230f5
|
a4133ac0cfce656b47fe2ea6161a9f1656afa0e8
|
/video.py
|
db4c55c6c781a82aeb60d2f364d3fcecfe4c2487
|
[] |
no_license
|
xHascox/Simple-HDR-Video
|
531d4b5baba2fd5ed2eac473484f65a54e318b86
|
aac2e6a1acfb6c69de214ac29bf6ba6892723886
|
refs/heads/main
| 2023-03-21T13:54:20.355709
| 2021-03-13T01:59:58
| 2021-03-13T01:59:58
| 346,901,991
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
import cv2
import tkinter
from tkinter.filedialog import askopenfilename
def play_videoFile(filePath, mirror=False):
    """Play the video at *filePath* in the 'VideoHDR' window.

    Esc quits, Space toggles pause. mirror=True flips each frame horizontally.
    """
    cap = cv2.VideoCapture(filePath)
    # modify:
    width = 1920   # NOTE(review): currently unused placeholders for a target size
    height = 1080
    # cv2.namedWindow('VideoHDR', cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('VideoHDR', cv2.WINDOW_NORMAL)
    while True:
        ret_val, frame = cap.read()
        # BUG FIX: at end of stream (or if the file failed to open) read()
        # returns (False, None) and cv2.imshow crashed on the None frame;
        # stop playback instead.
        if not ret_val:
            break
        if mirror:
            frame = cv2.flip(frame, 1)
        cv2.imshow('VideoHDR', frame)
        k = cv2.waitKey(1)
        if k == 27:
            break  # esc to quit
        if k == 32:
            # space to pause; resume on the next space press
            while cv2.waitKey(1) != 32:
                pass
    cap.release()  # free the capture device/file handle
    cv2.destroyAllWindows()
def main():
    # Ask the user for a video file (.mkv by default) and play it unmirrored.
    filename = askopenfilename(initialdir = "/",title = "Select file",filetypes = (("matroska files","*.mkv"),("all files","*.*")))
    play_videoFile(filename,mirror=False)
if __name__ == '__main__':
    main()
|
[
"mg.marco@hotmail.ch"
] |
mg.marco@hotmail.ch
|
e61d9c8b65dd2e6ddb62065629685896f512ffb7
|
0fe37e11df976c55fe5bbe492879b7cd8a95b7c5
|
/1_2_python变量_输出和输入_数字_字符串/04_str_test.py
|
3444adc19895857e5d4fee8cb2347e41708b2bfb
|
[] |
no_license
|
1286211699/mmc_code
|
9bb7761107604b445dea4fe5acf9d503fbc28dfa
|
ee97879632dfd7d24c604f7db52c82fa29109daa
|
refs/heads/master
| 2022-12-08T23:19:06.382825
| 2020-05-08T13:59:46
| 2020-05-08T13:59:46
| 177,100,815
| 2
| 0
| null | 2022-12-08T01:42:47
| 2019-03-22T08:25:37
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
# name = 'for'
#
# name = "for's name is for"
# print(name)
# print('abcd\tefg')
# print('My name is %s'%('for'))
# print('I am %d years old'%(18))
# print('his height is %f m'%(1.78))
# print('his height is %.2f m'%(1.78))
# name = 'while'
#
# print(name[1:3])
# str_test = 'hello world world'
#
# print(str_test.partition('o'))
# print(str_test.rpartition('o'))
# my_str = 'hello:world:python '
# print(my_str)
# print(my_str.replace('l','w'))
# # print(my_str.splitlines())
# # print(my_str.split(':'))
# print(str_test.count('l'))
#
# print(str_test.find('w'))
#
# print(str_test.rfind('w'))
#
# print(str_test.index('o'))
# print(str_test.rindex('o'))
# print(str_test[::-1])
# print(str_test[::-2])
#
# print(str_test[1:9:-1])
# print(str_test[9:1:-1])
# print(str_test[0:7])
#
# print(str_test[:7])
#
# print(str_test[2:])
#
# print(str_test[:])
# print(str_test[::2])
# print(str_test[0:7:2])
# str_test = ' for '
# print(str_test.strip())#在以后的数据清洗中战友很大的比重
# print(str_test.rstrip())
# print(str_test.lstrip())
# print(str_test.center(10,'x'))
# print(str_test.ljust(10,'x'))
# print(str_test.rjust(10,'x'))
# print(str_test.zfill(10))
#
# python = '{} is {}'
#
# print(python.format('for','cool'))
#
# print('hello'.upper())
# print('HELLO'.lower())
#
# print('12345a'.isalnum())
# print('abcdef'.isalpha())
# print('12345'.isdigit())
# print('HELLO'.isupper())
# print('hello'.islower())
# print(' '.isspace())
#
# print('for is cool'[3:].startswith(' '))
# print('for is cool'[3:].endswith('cool'))
# print(ord('a'))
# print(chr(97))
# Demonstrate round-tripping a Chinese string through two encodings.
u = '学神'
# BUG FIX: str1 was produced with the default UTF-8 encoding but decoded
# below as GBK, which yielded mojibake instead of the original text; encode
# it as GBK so the round trip is lossless.
str1 = u.encode('gbk')
print(str1)
str2 = u.encode()  # default encoding is UTF-8
print(str2)
u1 = str1.decode('gbk')
print(u1)
u2 = str2.decode('utf-8')
print(u2)
|
[
"1286211699@qq.com"
] |
1286211699@qq.com
|
0f7094b034b985bf56a41a69f249db48b9d49c8b
|
2d8ad2abcf35fa4cbaad865b651cdb6f0dcff88a
|
/ibitcy_tests/pages/payment_page.py
|
933d908a5025396ff39c0ae7bca05536466528d4
|
[] |
no_license
|
Raioln/ibitcy_tests
|
a5c5902c9690297649594ab22d84c08f47ce2b41
|
6972f7561a1c517949087b05da420880b7ed676e
|
refs/heads/master
| 2020-08-04T17:33:32.140471
| 2019-10-01T23:59:54
| 2019-10-01T23:59:54
| 212,221,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
from utils.locator import Locator
class PaymentPage(BasePage):
    """Page object for the payment page; exposes locators for the status widgets."""
    # Drop-down that switches the displayed account status.
    status_selector = Locator(By.CLASS_NAME, 'status-selector', 'Селектор статусов')
    # Element representing the Gold status option.
    gold_item = Locator(By.CLASS_NAME, 'gold', 'Статус Gold')
|
[
"d.evlashkin@cian.ru"
] |
d.evlashkin@cian.ru
|
404ccc4de81309e69083b0b19bb3d53830a09a20
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/list_instances_datastore_result.py
|
34f5b1f20917eabd5ea29c17543d8217b496429f
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
# coding: utf-8
import re
import six
class ListInstancesDatastoreResult:
    # NOTE: auto-generated SDK model — prefer regenerating from the API
    # definition over structural hand edits.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes listed here are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'type': 'str',
        'version': 'str'
    }
    attribute_map = {
        'type': 'type',
        'version': 'version'
    }
    def __init__(self, type=None, version=None):
        """ListInstancesDatastoreResult - a model defined in huaweicloud sdk"""
        self._type = None
        self._version = None
        self.discriminator = None
        self.type = type
        self.version = version
    @property
    def type(self):
        """Gets the type of this ListInstancesDatastoreResult.
        Database engine.
        :return: The type of this ListInstancesDatastoreResult.
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this ListInstancesDatastoreResult.
        Database engine.
        :param type: The type of this ListInstancesDatastoreResult.
        :type: str
        """
        self._type = type
    @property
    def version(self):
        """Gets the version of this ListInstancesDatastoreResult.
        Database version number.
        :return: The version of this ListInstancesDatastoreResult.
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """Sets the version of this ListInstancesDatastoreResult.
        Database version number.
        :param version: The version of this ListInstancesDatastoreResult.
        :type: str
        """
        self._version = version
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive; pass others through.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        import simplejson as json
        return json.dumps(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListInstancesDatastoreResult):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
c276920814b35fe507549c51ba57f9cb4f8203e7
|
068c4665dc7b803df0fc02524cfdb01fff1674da
|
/Fraction.py
|
0be0f332c82ec1f3e67dc3b912217171f70c3dc5
|
[] |
no_license
|
TingYang227/Python
|
86512e01adf676cee943fa9ab78ce018f19dcc91
|
6bc48c6f688f9a3088e34a2117c861b97ddcdc75
|
refs/heads/master
| 2020-04-20T20:26:53.693987
| 2019-11-07T17:50:53
| 2019-11-07T17:50:53
| 169,076,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
def gcd(m, n):
    """Greatest common divisor via Euclid's algorithm (n must be non-zero)."""
    while m % n != 0:
        m, n = n, m % n  # tuple swap replaces the oldm/oldn temporaries
    return n
# print(gcd(20, 10))
class Fraction:
    """A rational number kept in lowest terms, supporting +, -, *, / and comparisons.

    Results of arithmetic are reduced by the gcd of numerator and denominator.
    The denominator is assumed non-zero.
    """

    def __init__(self, top, bottom):
        self.num = top      # numerator
        self.den = bottom   # denominator

    @staticmethod
    def _gcd(m, n):
        # Euclid's algorithm — identical to the module-level gcd(), duplicated
        # here so the class is self-contained.
        while m % n != 0:
            m, n = n, m % n
        return n

    def __str__(self):
        return str(self.num) + "/" + str(self.den)

    def show(self):
        print(self.num, "/", self.den)

    def __add__(self, otherfraction):
        newnum = self.num*otherfraction.den + self.den*otherfraction.num
        newden = self.den * otherfraction.den
        common = self._gcd(newnum, newden)
        return Fraction(newnum//common, newden//common)

    def __mul__(self, other):
        newnum = self.num * other.num
        newden = self.den * other.den
        common = self._gcd(newnum, newden)
        return Fraction(newnum//common, newden//common)

    def __sub__(self, other):
        newnum = self.num * other.den - other.num * self.den
        # BUG FIX: the original used `self.den * self.num` here, producing a
        # wrong denominator (e.g. 1/2 - 2/3 gave -1/2 instead of -1/6).
        newden = self.den * other.den
        common = self._gcd(newnum, newden)
        return Fraction(newnum//common, newden//common)

    def __truediv__(self, other):
        # a/b ÷ c/d == (a*d) / (b*c)
        newnum = self.num * other.den
        newden = self.den * other.num
        common = self._gcd(newnum, newden)
        return Fraction(newnum//common, newden//common)

    def __eq__(self, other):
        # Compare by cross-multiplication so 1/2 == 2/4.
        firstnum = self.num * other.den
        secondnum = other.num * self.den
        return firstnum == secondnum

    def __lt__(self, other):
        firstnum = self.num * other.den
        secondnum = other.num * self.den
        return firstnum < secondnum

    def __gt__(self, other):
        firstnum = self.num * other.den
        secondnum = other.num * self.den
        return firstnum > secondnum

    def getNum(self):
        return self.num

    def getDen(self):
        return self.den
# Quick manual check of the Fraction operators.
x = Fraction(1, 2)
y = Fraction(2, 3)
print(x + y)
print(x == y)
print(x * y)
print(y - x)
print(x - y)
print(x / y)
print(x > y)
print(x < y)
[
"39301486+TingYang227@users.noreply.github.com"
] |
39301486+TingYang227@users.noreply.github.com
|
cfa47b057d7d920775909b59ce508a0a03f128f1
|
385a8d743feb238fb0d939c58b564232aa5f5291
|
/tekton-master/backend/appengine/routes/relatorios/rest.py
|
6b7c0b8c7d68982f9ea5b9c07bc20b1b75b9e237
|
[
"MIT"
] |
permissive
|
lucasgcampos/app-engine-learning
|
7189439e9e431f738f05e0463b6dce8bf6601d8f
|
0c582d6150be152e55464b6bdfb5c6ab1d5c26fb
|
refs/heads/master
| 2016-08-02T22:02:31.816654
| 2014-11-14T03:36:01
| 2014-11-14T03:36:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from relatorio_app import facade
def index():
    """List every relatorio in its short-form JSON representation."""
    relatorios = facade.list_relatorios_cmd()()
    short_form = facade.relatorio_short_form()
    return JsonResponse([short_form.fill_with_model(m) for m in relatorios])
def save(**relatorio_properties):
    """Create a relatorio from keyword properties; errors are reported as JSON."""
    return _save_or_update_json_response(facade.save_relatorio_cmd(**relatorio_properties))
def update(relatorio_id, **relatorio_properties):
    """Update the relatorio identified by *relatorio_id* with the given properties."""
    return _save_or_update_json_response(facade.update_relatorio_cmd(relatorio_id, **relatorio_properties))
def delete(relatorio_id):
    """Delete the relatorio identified by *relatorio_id* (no response body)."""
    cmd = facade.delete_relatorio_cmd(relatorio_id)
    cmd()
def _save_or_update_json_response(cmd):
    """Run *cmd*; respond with its short form on success or its errors on failure."""
    try:
        relatorio = cmd()
    except CommandExecutionException:
        return JsonResponse({'errors': cmd.errors})
    return JsonResponse(facade.relatorio_short_form().fill_with_model(relatorio))
|
[
"lucasgcampos.contato@gmail.com"
] |
lucasgcampos.contato@gmail.com
|
772770f9242c44fcce1f2f8a76f0f56cd8a222fb
|
a29c96b6fc4942b519edcd7157d42f34add78feb
|
/horovod/spark/keras/estimator.py
|
9be8b9bd942d3460316ce5d4764fdfb3ce636617
|
[
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
xielm12/horovod
|
5662456cd3626ba3f9fed426bbee1901f1a27014
|
32e5fdbf33fb2dac9d725028a886a093984c3618
|
refs/heads/master
| 2022-12-25T20:10:39.771600
| 2020-09-22T22:01:47
| 2020-09-22T22:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,429
|
py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import horovod.spark.common._namedtuple_fix
import numbers
import time
from distutils.version import LooseVersion
import numpy as np
import tensorflow as tf
from pyspark import keyword_only
from pyspark.ml.util import MLWritable, MLReadable
from pyspark.ml.param.shared import Param, Params
from horovod.runner.common.util import codec
from horovod.spark.common import util
from horovod.spark.common.estimator import HorovodEstimator, HorovodModel
from horovod.spark.common.params import EstimatorParams
from horovod.spark.common.serialization import HorovodParamsWriter, HorovodParamsReader
from horovod.spark.keras import remote
from horovod.spark.keras.util import \
BARE_KERAS, TF_KERAS, \
BareKerasUtil, TFKerasUtil, \
is_instance_of_bare_keras_model, is_instance_of_bare_keras_optimizer
class KerasEstimatorParamsWriter(HorovodParamsWriter):
    """Spark ML writer that persists estimator params with Keras-aware serialization."""

    def saveImpl(self, path):
        # Pick the serializer matching the keras flavour (tf.keras vs bare keras)
        # the estimator was configured with.
        keras_utils = self.instance._get_keras_utils()
        # Write the parameters
        HorovodParamsWriter.saveMetadata(self.instance, path, self.sc,
                                         param_serializer_fn=keras_utils.serialize_param_value)
class KerasEstimatorParamsWritable(MLWritable):
    """Mixin giving estimators/models a Keras-aware Spark ML writer."""

    def write(self):
        # MLWritable contract: return a writer bound to this instance.
        return KerasEstimatorParamsWriter(self)
class KerasEstimatorParamsReader(HorovodParamsReader):
    """Spark ML reader that restores params serialized by KerasEstimatorParamsWriter."""

    def _deserialize_dict(self, dict):
        # NOTE(review): the parameter shadows the `dict` builtin; kept as-is to
        # preserve the interface.  Values are base64-encoded; the model param can
        # only be decoded once the keras package type and custom_objects are known,
        # so those two are decoded first below.
        def _param_deserializer_fn(name, param_val, keras_utils, custom_objects):
            if param_val is None:
                return param_val

            if name == EstimatorParams.model.name:
                # Models are loaded inside a custom-object scope so that any
                # user-defined layers/losses resolve during load_model.
                def load_model_fn(x):
                    with keras_utils.keras().utils.custom_object_scope(custom_objects):
                        return keras_utils.keras().models.load_model(x, compile=True)

                return keras_utils.deserialize_model(param_val,
                                                     load_model_fn=load_model_fn)
            elif name == KerasEstimator.optimizer.name:
                opt_base64_encoded = codec.loads_base64(param_val)
                return keras_utils.deserialize_optimizer(opt_base64_encoded)
            else:
                return codec.loads_base64(param_val)

        # In order to deserialize the model, we need to deserialize the custom_objects param
        # first.
        keras_utils = None
        if KerasEstimator._keras_pkg_type.name in dict:
            keras_pkg_type = _param_deserializer_fn(KerasEstimator._keras_pkg_type.name,
                                                    dict[KerasEstimator._keras_pkg_type.name],
                                                    None, None)
            if keras_pkg_type == BARE_KERAS:
                keras_utils = BareKerasUtil
            elif keras_pkg_type == TF_KERAS:
                keras_utils = TFKerasUtil
        custom_objects = {}
        if KerasEstimator.custom_objects.name in dict:
            custom_objects = _param_deserializer_fn(KerasEstimator.custom_objects.name,
                                                    dict[KerasEstimator.custom_objects.name],
                                                    None, None)
        for key, val in dict.items():
            dict[key] = _param_deserializer_fn(key, val, keras_utils, custom_objects)
        return dict
class KerasEstimatorParamsReadable(MLReadable):
    """Mixin giving estimators/models a Keras-aware Spark ML reader."""

    @classmethod
    def read(cls):
        """Returns a KerasEstimatorParamsReader instance for this class."""
        return KerasEstimatorParamsReader(cls)
class KerasEstimator(HorovodEstimator, KerasEstimatorParamsReadable,
                     KerasEstimatorParamsWritable):
    """Spark Estimator for fitting Keras models to a DataFrame.

    Supports standalone `keras` and `tf.keras`, and TensorFlow 1.X and 2.X.

    Args:
        num_proc: Number of Horovod processes. Defaults to `spark.default.parallelism`.
        model: Keras model to train.
        backend: Optional Backend object for running distributed training function. Defaults to SparkBackend with
                 `num_proc` worker processes. Cannot be specified if `num_proc` is also provided.
        store: Store object that abstracts reading and writing of intermediate data and run results.
        custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered
                        during serialization/deserialization.
        optimizer: Keras optimizer to be converted into a `hvd.DistributedOptimizer` for training.
        loss: Keras loss or list of losses.
        loss_weights: Optional list of float weight values to assign each loss.
        sample_weight_col: Optional column indicating the weight of each sample.
        gradient_compression: Gradient compression used by `hvd.DistributedOptimizer`.
        metrics: Optional metrics to record.
        feature_cols: Column names used as feature inputs to the model. Must be a list with each feature
                      mapping to a sequential argument in the model's forward() function.
        label_cols: Column names used as labels. Must be a list with one label for each output of the model.
        validation: Optional validation column name (string) where every row in the column is either 1/True or 0/False,
                    or validation split (float) giving percent of data to be randomly selected for validation.
        callbacks: Keras callbacks.
        batch_size: Number of rows from the DataFrame per batch.
        epochs: Number of epochs to train.
        verbose: Verbosity level [0, 2] (default: 1).
        shuffle_buffer_size: Optional size of in-memory shuffle buffer in rows. Allocating a larger buffer size
                             increases randomness of shuffling at the cost of more host memory. Defaults to estimating
                             with an assumption of 4GB of memory per host.
        partitions_per_process: Number of Parquet partitions to assign per worker process from `num_proc` (default: 10).
        run_id: Optional unique ID for this run for organization in the Store. Will be automatically assigned if not
                provided.
        train_steps_per_epoch: Number of steps to train each epoch. Useful for testing that model trains successfully.
                               Defaults to training the entire dataset each epoch.
        validation_steps_per_epoch: Number of validation steps to perform each epoch.
        transformation_fn: Optional function that takes a row as its parameter
                           and returns a modified row that is then fed into the
                           train or validation step. This transformation is
                           applied after batching. See Petastorm [TransformSpec](https://github.com/uber/petastorm/blob/master/petastorm/transform.py)
                           for more details. Note that this function constructs
                           another function which should perform the
                           transformation.
        train_reader_num_workers: This parameter specifies the number of parallel processes that
                                  read the training data from data store and apply data
                                  transformations to it. Increasing this number
                                  will generally increase the reading rate but will also
                                  increase the memory footprint. More processes are
                                  particularly useful if the bandwidth to the data store is not
                                  high enough, or users need to apply transformation such as
                                  decompression or data augmentation on raw data.
        val_reader_num_workers: Similar to the train_reader_num_workers.
    """

    custom_objects = Param(Params._dummy(), 'custom_objects', 'custom objects')

    # Records whether the estimator was built with bare keras or tf.keras so the
    # reader can pick the matching deserializer.
    _keras_pkg_type = Param(Params._dummy(), '_keras_pkg_type', 'keras package type')

    checkpoint_callback = Param(Params._dummy(), 'checkpoint_callback',
                                'model checkpointing callback')

    @keyword_only
    def __init__(self,
                 num_proc=None,
                 model=None,
                 backend=None,
                 store=None,
                 custom_objects=None,
                 optimizer=None,
                 loss=None,
                 loss_weights=None,
                 sample_weight_col=None,
                 gradient_compression=None,
                 metrics=None,
                 feature_cols=None,
                 label_cols=None,
                 validation=None,
                 callbacks=None,
                 batch_size=None,
                 epochs=None,
                 verbose=None,
                 shuffle_buffer_size=None,
                 partitions_per_process=None,
                 run_id=None,
                 train_steps_per_epoch=None,
                 validation_steps_per_epoch=None,
                 transformation_fn=None,
                 train_reader_num_workers=None,
                 val_reader_num_workers=None,
                 label_shapes=None,
                 checkpoint_callback=None):
        super(KerasEstimator, self).__init__()

        self._setDefault(optimizer=None,
                         custom_objects={},
                         _keras_pkg_type=None,
                         checkpoint_callback=None)

        # @keyword_only captured the ctor kwargs; forward them to the Params machinery.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _get_keras_utils(self):
        # This function determines the keras package type of the Estimator based on the passed
        # optimizer and model and updates _keras_pkg_type parameter.
        model_type = None
        model = self.getModel()
        if model:
            if isinstance(model, tf.keras.Model):
                model_type = TF_KERAS
            elif is_instance_of_bare_keras_model(model):
                model_type = BARE_KERAS
            else:
                raise ValueError(
                    "model has to be an instance of tensorflow.keras.Model or keras.Model")

        optimizer_type = None
        optimizer = self.getOptimizer()
        if optimizer:
            if isinstance(optimizer, str):
                # String optimizer names carry no package information.
                optimizer_type = None
            elif isinstance(optimizer, tf.keras.optimizers.Optimizer):
                optimizer_type = TF_KERAS
            elif is_instance_of_bare_keras_optimizer(optimizer):
                optimizer_type = BARE_KERAS
            else:
                raise ValueError("invalid optimizer type")

        types = set([model_type, optimizer_type])
        types.discard(None)

        # NOTE(review): if neither model nor optimizer determines a package type,
        # this falls through and implicitly returns None — callers appear to
        # assume a model is always set; confirm before relying on it.
        if len(types) > 1:
            raise ValueError('mixed keras and tf.keras values for optimizers and model')
        elif len(types) == 1:
            pkg_type = types.pop()
            super(KerasEstimator, self)._set(_keras_pkg_type=pkg_type)

            if pkg_type == TF_KERAS:
                return TFKerasUtil
            elif pkg_type == BARE_KERAS:
                return BareKerasUtil
            else:
                raise ValueError("invalid keras type")

    def setCustomObjects(self, value):
        return self._set(custom_objects=value)

    def getCustomObjects(self):
        return self.getOrDefault(self.custom_objects)

    def setCheckpointCallback(self, value):
        return self._set(checkpoint_callback=value)

    def getCheckpointCallback(self):
        return self.getOrDefault(self.checkpoint_callback)

    def _check_metadata_compatibility(self, metadata):
        # Validate that DataFrame column shapes line up with the model's
        # input/output shapes before launching distributed training.
        input_shapes, output_shapes = self.get_model_shapes()
        util.check_shape_compatibility(metadata,
                                       self.getFeatureCols(),
                                       self.getLabelCols(),
                                       input_shapes=input_shapes,
                                       output_shapes=output_shapes,
                                       label_shapes=self.getLabelShapes())

    def get_model_shapes(self):
        """Return (input_shapes, output_shapes) with unknown dims encoded as -1."""
        model = self.getModel()
        input_shapes = [[dim if dim else -1 for dim in input.shape.as_list()]
                        for input in model.inputs]
        output_shapes = [[dim if dim else -1 for dim in output.shape.as_list()]
                         for output in model.outputs]
        return input_shapes, output_shapes

    def _fit_on_prepared_data(self, backend, train_rows, val_rows, metadata, avg_row_size, dataset_idx=None):
        """Run distributed training on already-prepared data and return a KerasModel."""
        self._check_params(metadata)
        keras_utils = self._get_keras_utils()

        run_id = self.getRunId()
        if run_id is None:
            run_id = 'keras_' + str(int(time.time()))

        # Resume from a previous checkpoint of this run if one exists.
        if self._has_checkpoint(run_id):
            serialized_model = self._load_model_from_checkpoint(run_id)
        else:
            serialized_model = self._compile_model(keras_utils)

        # Workaround:
        # https://stackoverflow.com/questions/50583056/is-there-any-way-to-set-java-opts-for-tensorflow-process/50615570
        env = {'LIBHDFS_OPTS': '-Xms2048m -Xmx2048m'}

        trainer = remote.RemoteTrainer(self, metadata, keras_utils, run_id, dataset_idx)
        handle = backend.run(trainer,
                             args=(serialized_model, train_rows, val_rows, avg_row_size),
                             env=env)
        return self._create_model(handle, run_id, metadata)

    def _load_model_from_checkpoint(self, run_id):
        # Returns the serialized model stored at the run's last checkpoint.
        store = self.getStore()
        last_ckpt_path = store.get_checkpoint_path(run_id)

        if self.getVerbose():
            print('Resuming training from last checkpoint: {}'.format(last_ckpt_path))

        return store.read_serialized_keras_model(last_ckpt_path, self.getModel())

    def _compile_model(self, keras_utils):
        # Compile the model with all the parameters
        model = self.getModel()
        loss = self.getLoss()
        loss_weights = self.getLossWeights()

        if not loss:
            raise ValueError('Loss parameter is required for the model to compile')

        optimizer = self.getOptimizer()
        if not optimizer:
            # Fall back to the optimizer baked into an already-compiled model.
            optimizer = model.optimizer

        if not optimizer:
            raise ValueError('Optimizer must be provided either as a parameter or as part of a '
                             'compiled model')

        metrics = self.getMetrics()
        gradient_compression = self.getGradientCompression()
        # Preserve optimizer state (e.g. momentum slots) across the re-compile below.
        optimizer_weight_values = optimizer.get_weights()

        dist_optimizer_args = dict(optimizer=optimizer)
        if gradient_compression:
            dist_optimizer_args['compression'] = gradient_compression

        # Horovod: wrap optimizer with DistributedOptimizer.
        dist_optimizer = keras_utils.get_horovod().DistributedOptimizer(**dist_optimizer_args)
        model.compile(optimizer=dist_optimizer,
                      loss=loss,
                      loss_weights=loss_weights,
                      metrics=metrics)

        if optimizer_weight_values:
            model.optimizer.set_weights(optimizer_weight_values)

        return keras_utils.serialize_model(model)

    def _create_model(self, run_results, run_id, metadata):
        """Build the fitted KerasModel transformer from the first worker's results."""
        keras_utils = self._get_keras_utils()
        keras_module = keras_utils.keras()
        floatx = keras_module.backend.floatx()
        custom_objects = self.getCustomObjects()
        history, serialized_model, hvd_size = run_results[0]

        def load_model_fn(x):
            with keras_module.utils.custom_object_scope(custom_objects):
                return keras_module.models.load_model(x)

        model = keras_utils.deserialize_model(serialized_model, load_model_fn=load_model_fn)

        # Here, learning rate is scaled down with the number of horovod workers.
        # This is important for retraining of the model. User may retrain the model with
        # different number of workers and we need the raw learning rate to adjust with the
        # new number of workers.
        scaled_lr = keras_module.backend.get_value(model.optimizer.lr)
        keras_module.backend.set_value(model.optimizer.lr, scaled_lr / hvd_size)

        return self.get_model_class()(**self._get_model_kwargs(
            model, history, run_id, metadata, floatx))

    def get_model_class(self):
        # Hook for subclasses to substitute their own model wrapper class.
        return KerasModel

    def _get_model_kwargs(self, model, history, run_id, metadata, floatx):
        return dict(history=history,
                    model=model,
                    feature_columns=self.getFeatureCols(),
                    label_columns=self.getLabelCols(),
                    custom_objects=self.getCustomObjects(),
                    run_id=run_id,
                    _metadata=metadata,
                    _floatx=floatx)
class KerasModel(HorovodModel, KerasEstimatorParamsReadable,
                 KerasEstimatorParamsWritable):
    """Spark Transformer wrapping a Keras model, used for making predictions on a DataFrame.

    Retrieve the underlying Keras model by calling `keras_model.getModel()`.

    Args:
        history: List of metrics, one entry per epoch during training.
        model: Trained Keras model.
        feature_columns: List of feature column names.
        label_columns: List of label column names.
        custom_objects: Keras custom objects.
        run_id: ID of the run used to train the model.
    """

    custom_objects = Param(Params._dummy(), 'custom_objects', 'custom objects')

    # Setting _keras_pkg_type parameter helps us determine the type of keras package during
    # deserializing the transformer
    _keras_pkg_type = Param(Params._dummy(), '_keras_pkg_type', 'keras package type')

    _floatx = Param(Params._dummy(), '_floatx', 'keras default float type')

    @keyword_only
    def __init__(self,
                 history=None,
                 model=None,
                 feature_columns=None,
                 label_columns=None,
                 custom_objects=None,
                 run_id=None,
                 _metadata=None,
                 _floatx=None):
        super(KerasModel, self).__init__()

        # Each label column gets a corresponding prediction output column.
        if label_columns:
            self.setOutputCols([col + '__output' for col in label_columns])

        self._setDefault(custom_objects={})

        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def setCustomObjects(self, value):
        return self._set(custom_objects=value)

    def getCustomObjects(self):
        return self.getOrDefault(self.custom_objects)

    def _get_keras_utils(self, model=None):
        # Infer the keras package (tf.keras vs bare keras) from the wrapped model.
        # NOTE: the `model` argument is kept for interface compatibility but is
        # ignored — the wrapped model from getModel() is always used.
        model = self.getModel()
        if model:
            if isinstance(model, tf.keras.Model):
                pkg_type = TF_KERAS
            elif is_instance_of_bare_keras_model(model):
                pkg_type = BARE_KERAS
            else:
                raise ValueError(
                    "model has to be an instance of tensorflow.keras.Model or keras.Model")

            super(KerasModel, self)._set(_keras_pkg_type=pkg_type)

            if pkg_type == TF_KERAS:
                return TFKerasUtil
            elif pkg_type == BARE_KERAS:
                return BareKerasUtil
            else:
                raise ValueError("invalid keras type")

        raise ValueError("model is not set")

    def _get_floatx(self):
        return self.getOrDefault(self._floatx)

    # To run locally on OS X, need export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
    def _transform(self, df):
        """Append one prediction column per label column to `df`."""
        keras_utils = self._get_keras_utils()
        floatx = self._get_floatx()
        serialized_model = keras_utils.serialize_model(self.getModel())

        label_cols = self.getLabelColumns()
        output_cols = self.getOutputCols()
        feature_cols = self.getFeatureColumns()
        custom_objects = self.getCustomObjects()
        metadata = self._get_metadata()
        pin_cpu = remote._pin_cpu_fn()

        def predict(rows):
            import tensorflow as tf
            from pyspark import Row
            from pyspark.ml.linalg import DenseVector, SparseVector

            k = keras_utils.keras()
            k.backend.set_floatx(floatx)

            # Do not use GPUs for prediction, use single CPU core per task.
            pin_cpu(tf, k)

            def load_model_fn(x):
                with k.utils.custom_object_scope(custom_objects):
                    return k.models.load_model(x)

            model = keras_utils.deserialize_model(serialized_model,
                                                  load_model_fn=load_model_fn)

            input_shapes = [[dim if dim else -1 for dim in input.shape.as_list()]
                            for input in model.inputs]

            def to_array(item):
                # Bug fix: was `type(item) in [DenseVector or SparseVector]`,
                # which evaluates to `[DenseVector]`, so SparseVector features
                # fell through to np.array() and produced a 0-d object array.
                if isinstance(item, (DenseVector, SparseVector)):
                    return item.toArray()
                else:
                    return np.array(item)

            def to_numpy(item):
                # Some versions of TensorFlow will return an EagerTensor
                return item.numpy() if hasattr(item, 'numpy') else item

            # Perform predictions.
            for row in rows:
                fields = row.asDict().copy()
                preds = model.predict_on_batch(
                    [to_array(row[feature_cols[i]]).reshape(input_shapes[i])
                     for i in range(len(feature_cols))])
                preds = [to_numpy(item) for item in preds]

                for label_col, output_col, pred in zip(label_cols, output_cols, preds):
                    meta = metadata[label_col]
                    col_type = meta['spark_data_type']
                    # dtype for DenseVector and SparseVector is always np.float64
                    if col_type == DenseVector:
                        shape = np.prod(pred.shape)
                        flattened_pred = pred.reshape(shape, )
                        field = DenseVector(flattened_pred)
                    elif col_type == SparseVector:
                        shape = meta['shape']
                        flattened_pred = pred.reshape(shape, )
                        nonzero_indices = flattened_pred.nonzero()[0]
                        field = SparseVector(shape, nonzero_indices,
                                             flattened_pred[nonzero_indices])
                    else:
                        # If the column is scalar type, int, float, etc.
                        value = pred[0]
                        python_type = util.spark_scalar_to_python_type(col_type)
                        if issubclass(python_type, numbers.Integral):
                            value = round(value)
                        field = python_type(value)

                    fields[output_col] = field

                yield Row(**fields)

        return df.rdd.mapPartitions(predict).toDF()
|
[
"noreply@github.com"
] |
xielm12.noreply@github.com
|
5c3a52dd83cd5f04121594050743968d48bc5958
|
7d21205946b306ca29aace9b4a798b8d9fa5bad2
|
/bot.py
|
5c3636b09b508ad15c20cfeff277833272b75e45
|
[] |
no_license
|
simorautiainen/aimboosterbot
|
9e66108da2780df2a0d0e0428ab1e55e8d2f5533
|
f2512289255126fdfb8d39f90b01c6cf043fa82c
|
refs/heads/master
| 2020-04-14T05:50:09.237499
| 2018-12-31T13:39:34
| 2018-12-31T13:39:34
| 163,670,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import cv2
import numpy as np
import pyautogui
# Template image searched for on screen; its dimensions are used later to
# click the centre of the matched region.
image = "dot7.png"
img = cv2.imread(image)
height, width, channels = img.shape
def imagesearch(image):
    """Locate `image` on the current screen via template matching.

    Takes a screenshot, converts it to grayscale and runs normalized
    cross-correlation template matching against the template file.

    Returns:
        The (x, y) top-left corner of the best match, or [-1, -1] when the
        best match confidence is below 0.8.
    """
    im = pyautogui.screenshot()
    img_rgb = np.array(im)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(image, 0)
    # (removed dead statement `template.shape[::-1]` — its result was discarded)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < 0.8:
        return [-1, -1]
    return max_loc
# Busy-poll the screen until the template appears, then move the mouse to the
# centre of the match and click. Runs forever; kill the process to stop.
while True:
    pos = imagesearch(image)
    while pos[0] == -1:
        pos = imagesearch(image)
    pyautogui.moveTo(pos[0] + (width / 2), pos[1] + (height / 2))
    pyautogui.click()
|
[
"noreply@github.com"
] |
simorautiainen.noreply@github.com
|
ae63166e12243568d153ba12655979e284186b4d
|
4529f9b7a19536b01873bc23440f2192a98d3c50
|
/Easy/746_Min Cost Climbing Stairs.py
|
a1249d945a74fceb13cd93e8891a09e44754e11b
|
[] |
no_license
|
j611062000/leetcode
|
c6bf315ce682dc362ac5dcd856c30c2af1aad90c
|
cbaa63d4f094f58d48037119b60aed73edb166e5
|
refs/heads/master
| 2020-03-31T01:50:12.088992
| 2018-11-17T03:48:35
| 2018-11-17T03:48:35
| 151,796,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
"""
To construc the answer for n data (i.e. P(n)), two secenarios are introduced to simplified
the calculation.
First (one step to the end): The minimal cost of this scenario is S(n-1) + X(n).
Second (two step to the end): The minimal cost of this scenario is S(n-2) + X(n-1).
data = [X(1), X(2), ..., X(n-2), X(n-1), X(n)]
"""
class Solution(object):
    def minCostClimbingStairs(self, cost):
        """
        :type cost: List[int]
        :rtype: int
        """
        # Rolling two-variable DP: `prev` holds the minimal cost of a path
        # ending on the most recent step, `prev2` on the step before it.
        # The top can be reached from either of the last two steps.
        prev2, prev = cost[0], cost[1]
        for step_cost in cost[2:]:
            prev2, prev = prev, min(prev, prev2) + step_cost
        return min(prev, prev2)
if __name__ == "__main__":
    # Sample from the problem statement; the expected minimum cost is 6.
    sample = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
    print(Solution().minCostClimbingStairs(sample))
|
[
"j611062000@gmail.com"
] |
j611062000@gmail.com
|
1c18ab29aa811efe6d238546d9645e0ba2238440
|
118b53acb66b52e1a2c87129c680074a4b3a24a1
|
/utils/gen_config.py
|
137e297393c3aeba869cc170266e736288626e87
|
[] |
no_license
|
LomiJA/TTS-Eval
|
7f1be8ed27f1feb0fe656b14107f53963ce566b8
|
07c6e20499162b74a9190771f401aa4c528b56a5
|
refs/heads/master
| 2020-12-31T00:39:57.240621
| 2017-03-27T15:05:07
| 2017-03-27T15:05:07
| 86,559,081
| 1
| 0
| null | 2017-03-29T08:47:45
| 2017-03-29T08:47:45
| null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
import os
if __name__ == "__main__":
json_str = "var config = "
json_data = {"baseurl":"data", "exps":[]}
for exp in os.listdir("./data"):
exp_dic = {"path":exp}
exp_dic["styles"] = []
exp_dic["info"] = ""
exp_path = os.path.join("./data", exp)
for stl in os.listdir(exp_path):
exp_dic["styles"].append(stl)
if(exp[:3] == "ABX" or exp[:3] == "MOS"):
exp_dic["type"] = exp[:3]
exp_dic["files"] = []
style = exp_dic["styles"][0]
file_path = os.path.join(exp_path,style)
for fnm in os.listdir(file_path):
exp_dic["files"].append(fnm)
elif(exp[:2] == "CM"):
exp_dic["type"] = "CM"
exp_dic["files"] = []
for stl in exp_dic["styles"]:
file_path = os.path.join(exp_path,stl)
for fnm in os.listdir(file_path):
exp_dic["files"].append(stl + "/" + fnm)
else:
pass
json_data["exps"].append(exp_dic)
json_str += str(json_data) + ";"
handle = open("./scripts/config.js","w")
handle.write(json_str)
handle.close()
|
[
"nanqiao15@126.com"
] |
nanqiao15@126.com
|
7f4785759eb9b5506425258ad834ea689dbb737f
|
3452e3335bce9dc6405175ea3b7d1a4bf75988dd
|
/core/creature/__init__.py
|
4ce294630de5da88e39eac68fba9f57f3ac62f54
|
[] |
no_license
|
mwerezak/arena
|
7480723b98f51aee259812b2890bdb1c08f201b9
|
31e27a9bdb83c9e9d28a1419d1dabdddf2906d82
|
refs/heads/master
| 2023-04-10T00:33:07.199527
| 2021-04-15T12:27:28
| 2021-04-15T12:27:28
| 358,059,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
from core.creature.creature import Creature
from core.constants import Stance
|
[
"mwerezak@gmail.com"
] |
mwerezak@gmail.com
|
f49836386eb4a843e803fa9e83c186e024a5b259
|
6ef0bbc5be7ba14286725cd37b01522bda1bd405
|
/judgements/indicator.py
|
0be983132959fc9802d2e58873722dc9a75e1fcb
|
[] |
no_license
|
Shanney/StockCenter
|
b27646ed91899221e37af6685d533a46d1bb10a9
|
e757430d733405b2219fae15951c9c460783171b
|
refs/heads/master
| 2021-06-29T23:59:40.503090
| 2021-01-03T12:59:06
| 2021-01-03T12:59:06
| 203,547,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
import numpy as np
def consecutive_five_year_roe(indicators, threshold=15):
    """Check that every yearly ROE in `indicators` meets `threshold` percent.

    Only companies with consistently high ROE (> 15% by default) are of
    interest, per the original intent.

    Args:
        indicators: iterable of yearly indicator frames; each exposes that
            year's ROE (in percent) at `indicator.loc[0].roe`.
        threshold: minimum acceptable ROE in percent (default 15); added as a
            backward-compatible parameter replacing the hard-coded constant.

    Returns:
        dict with:
            'roe_positive_flag': True iff no year fell below `threshold`.
            'consecutive_detail': space-separated ROE values in input order
                (with a trailing space, matching the original format).
    """
    result = {}
    roe_positive_flag = True
    consecutive_detail = ''
    for indicator in indicators:
        roe = indicator.loc[0].roe
        if roe < threshold:
            roe_positive_flag = False
        consecutive_detail += str(roe) + ' '
    result['roe_positive_flag'] = roe_positive_flag
    result['consecutive_detail'] = consecutive_detail
    return result
def ent_mode(income, cash_flow, balance_two, indicator):
    """Decompose ROE into three business-model indicators (DuPont-style).

    ROE is viewed as the product of three factors:
      1. net profit margin     (net profit / sales receipts)
      2. total asset turnover  (sales receipts / average total assets)
      3. leverage ratio        (average total assets / average net assets)

    This reveals the business model: high-margin ("Moutai" style),
    high-turnover ("Walmart" style) or high-leverage ("bank" style).
    Average net assets are not directly available, so they are backed out
    as net_profit / ROE.

    :param indicator: financial indicator table (provides roe)
    :param balance_two: balance sheets of two consecutive years, so that
        opening and closing balances can be averaged
    :param cash_flow: cash flow statement
    :param income: income statement
    :return: dict with the three factors stringified:
        'ind_one' (margin), 'ind_two' (turnover), 'ind_three' (leverage)
    """
    ind_one = np.nan_to_num(income.net_profit) / np.nan_to_num(cash_flow.goods_sale_and_service_render_cash)
    # average total assets = (opening balance + closing balance) / 2
    ave_asset = (np.nan_to_num(balance_two[0].loc[0].total_sheet_owner_equities) + np.nan_to_num(
        balance_two[1].loc[0].total_sheet_owner_equities)) / 2
    ind_two = np.nan_to_num(cash_flow.goods_sale_and_service_render_cash) / np.nan_to_num(ave_asset)
    # average net assets backed out from ROE: net_profit / roe
    ave_net_asset = np.nan_to_num(income.net_profit) / np.nan_to_num(indicator.roe)
    ind_three = np.nan_to_num(ave_asset) / np.nan_to_num(ave_net_asset)
    return {'ind_one': str(ind_one), 'ind_two': str(ind_two), 'ind_three': str(ind_three)}
|
[
"49220598@qq.com"
] |
49220598@qq.com
|
dafe3dc13683000b2708c82793a3b1ef4ea3dff2
|
ae1e3dc35b67479ee2d15475c29ccf849c9b02a7
|
/ext.py
|
497548aef6f152e29cde53bc33fd30414c5c0986
|
[] |
no_license
|
matrixback/network_printer
|
22d862d7741231b19f352c8369a3cdaad670691d
|
bd0c0cb653033d6b9d42aca0231e5f6af8e728d9
|
refs/heads/master
| 2021-01-24T06:49:06.725377
| 2017-06-06T15:48:07
| 2017-06-06T15:48:07
| 93,324,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
# coding: utf-8
from flask_sqlalchemy import SQLAlchemy

# Shared SQLAlchemy instance; presumably bound to the Flask app elsewhere via
# db.init_app(app) so models can import `db` without circular imports — confirm
# in the app factory.
db = SQLAlchemy()
|
[
"18302835641@163.com"
] |
18302835641@163.com
|
7030689c1007a648531f281ccdefe78c8ca50ba3
|
6abccf219d813a7d328c8fc351cba992e77fa18a
|
/utilities/teststatus.py
|
1c0996f1b51278c8a180533d21d7ffbb1aad6f08
|
[] |
no_license
|
thotha3/pythonProject
|
65bee0d9533590b44a9d884007d03dfe70e2509b
|
902f551430a43e6d3012145603acb728c67537b5
|
refs/heads/master
| 2023-08-08T01:40:32.882290
| 2021-09-16T20:26:29
| 2021-09-16T20:26:29
| 407,305,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
"""
@package utilities
Checkpoint class implementation
It provides functionality to assert the result
Example: self.check_point.markFinal("Test Name", result, "Message")
"""
import logging
from base.selenium_driver import SeleniumDriver
from utilities import custom_logger as cl
class TestStatus(SeleniumDriver):
    """Accumulates verification results across a test case.

    Call mark() for intermediate verification points and markFinal() once at
    the end; markFinal() asserts the aggregate outcome and resets the list.
    """

    log = cl.customLogger(logging.INFO)

    def __init__(self, driver):
        """
        Inits TestStatus class.
        :param driver: WebDriver instance, forwarded to SeleniumDriver.
        """
        super(TestStatus, self).__init__(driver)
        self.resultList = []

    def setResult(self, result, resultMessage):
        """Record one verification outcome; take a screenshot on failure.

        A result of None is treated as a failure, same as False.
        """
        try:
            if result:
                self.resultList.append("PASS")
                self.log.info('### VERIFICATION SUCCESSFUL :: ' + resultMessage)
            else:
                # Covers result=False and result=None; logged at error level
                # consistently (the None branch previously logged at info).
                self.resultList.append("FAIL")
                self.log.error('### VERIFICATION FAILED :: ' + resultMessage)
                self.screenShot(resultMessage)
        except Exception:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            self.resultList.append("FAIL")
            self.log.error('### EXCEPTION OCCURRED !!!')
            self.screenShot(resultMessage)

    def mark(self, result, resultMessage):
        """
        Mark the result of the verification point in a test case
        :param result:
        :param resultMessage:
        :return:
        """
        self.setResult(result, resultMessage)

    def markFinal(self, testName, result, resultMessage):
        """
        Mark the final result of the verification point in a test case
        This needs to be called at least once in a test case
        This should be final test status of the test case
        :param testName:
        :param result:
        :param resultMessage:
        :return:
        """
        self.setResult(result, resultMessage)

        if 'FAIL' in self.resultList:
            self.log.error(testName + ' ### FAILED')
            self.resultList.clear()
            # Replaces `assert True == False`; carries the test name in the
            # assertion message.
            assert False, testName + ' ### FAILED'
        else:
            # Bug fix: PASSED was logged via log.error; use info level.
            self.log.info(testName + ' ### PASSED')
            self.resultList.clear()
|
[
"thotha3@hotmail.com"
] |
thotha3@hotmail.com
|
3c061683d05e01d2e49fdf44a9642b8ba3230d38
|
7942342d457276bb266228d0236af647b3d55477
|
/django/contrib/auth/__init__.pyi
|
24b49bc00c2f2782b020918d77e8d81ac3a388da
|
[
"MIT"
] |
permissive
|
AsymmetricVentures/mypy-django
|
847c4e521ce4dec9a10a1574f9c32b234dafd00b
|
f6e489f5cf5672ecede323132665ccc6306f50b8
|
refs/heads/master
| 2020-06-30T01:53:44.434394
| 2016-12-22T22:45:50
| 2016-12-22T22:45:50
| 74,397,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
pyi
|
# Stubs for django.contrib.auth (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# NOTE(review): parameters and return types are deliberately left unannotated
# by stubgen; flesh them out against the Django auth API before relying on
# this stub for strict type checking.

from typing import Any, Optional

from django.apps import apps as django_apps
from .signals import user_logged_in as user_logged_in, user_logged_out as user_logged_out, user_login_failed as user_login_failed

SESSION_KEY = ...  # type: str
BACKEND_SESSION_KEY = ...  # type: str
HASH_SESSION_KEY = ...  # type: str
REDIRECT_FIELD_NAME = ...  # type: str

def load_backend(path): ...
def get_backends(): ...
def authenticate(**credentials): ...
def login(request, user, backend: Optional[Any] = ...): ...
def logout(request): ...
def get_user_model(): ...
def get_user(request): ...
def get_permission_codename(action, opts): ...
def update_session_auth_hash(request, user): ...

default_app_config = ...  # type: str
|
[
"reames@asymmetricventures.com"
] |
reames@asymmetricventures.com
|
48f499336b8be9120c3c86fe72d451b976c35f50
|
6a893f1219c1fc94b60f19c95596fabb1a18b241
|
/Assignment2/main.py
|
c6bb8b7a6adbe8b47c13ec8fcea92ea4b467ca11
|
[] |
no_license
|
WangZesen/DD2424-Assignment
|
3f4f30442578b7d11871da5c9d69b3fc797b6942
|
e1b284b5b0e7174dbcdf665402efb12cb696c36a
|
refs/heads/master
| 2020-03-11T21:37:18.077471
| 2018-04-19T20:52:43
| 2018-04-19T20:52:43
| 130,270,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,639
|
py
|
import random, math
import numpy as np
import copy as cp
import scipy.io as sio
import matplotlib.pyplot as plt
def activateRelu(input_data):
    """Element-wise ReLU over a d_in * N activation matrix.

    Returns a new array; the input is left untouched. Replace the 0.00
    factor with 0.01 to obtain a leaky ReLU.
    """
    activated = input_data.copy()
    non_positive = activated <= 0
    activated[non_positive] *= 0.00  # 0.01 for leaky ReLU
    return activated
def fullyConnect(input_data, W, b):
    """Affine layer: W @ input_data + b.

    input_data: d_in * N, W: d_out * d_in, b: d_out * 1 (broadcast over N).
    """
    assert input_data.shape[0] == W.shape[1]
    assert W.shape[0] == b.shape[0]
    return np.dot(W, input_data) + b
def softmax(input_data):
    """Column-wise softmax of a K * N logit matrix.

    Improvements over the original: the per-column Python loop is replaced
    by vectorized numpy ops, and the column max is subtracted first so that
    large logits no longer overflow np.exp (the classic stability trick;
    softmax is shift-invariant, so probabilities are unchanged).
    """
    shifted = input_data - np.max(input_data, axis=0, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0, keepdims=True)
def crossEntropyLoss(output_data, label):
    """Mean cross-entropy over a batch.

    output_data: K * N predicted probabilities; label: K * N one-hot targets.
    """
    assert output_data.shape == label.shape
    picked = np.multiply(-np.log(output_data), label)
    return np.sum(picked) / output_data.shape[1]
def regularisationLoss(W, lambda_):
    """L2 penalty: lambda_ times the sum of squared entries over all weight matrices."""
    return sum(np.sum(np.square(w)) for w in W) * lambda_
def evaluateClassifierVerbose(X, W, b):
    """Forward pass through the 2-layer network, returning intermediates.

    Returns (fc, act, p): per-layer pre-activations, post-ReLU activations,
    and the softmax class probabilities (K * N). Needed by backprop.
    The dead local `last = X` from the original was removed.
    """
    fc = [fullyConnect(X, W[0], b[0])]
    act = [activateRelu(fc[0])]
    fc.append(fullyConnect(act[0], W[1], b[1]))
    p = softmax(fc[1])
    return fc, act, p
def evaluateClassifier(X, W, b):
    """Forward pass returning only the softmax probabilities (K * N).

    Simplified from the original: the dead local `last = X` and the unused
    fc/act list bookkeeping (only needed by the Verbose variant) are gone.
    """
    hidden = activateRelu(fullyConnect(X, W[0], b[0]))
    return softmax(fullyConnect(hidden, W[1], b[1]))
def computeLoss(X, Y, W, b, lambda_):
    """Total cost: mean cross-entropy of the predictions plus the L2 penalty."""
    probs = evaluateClassifier(X, W, b)
    return crossEntropyLoss(probs, Y) + regularisationLoss(W, lambda_)
def regularisationLossGradient(W, lambda_):
    """Gradient of the L2 penalty w.r.t. each weight matrix: 2 * lambda_ * W."""
    return [2 * lambda_ * w for w in W]
def softmaxCrossEntropyLossGradient(p, Y):
    """Combined softmax + cross-entropy gradient w.r.t. the logits: p - Y."""
    return np.subtract(p, Y)
def activationReluGradient(lastGrad, fc):
    """Backprop through ReLU: zero the upstream gradient where the
    pre-activation `fc` was non-positive. Returns a new array."""
    masked = lastGrad.copy()
    masked[fc <= 0] *= 0.00  # use 0.01 here for leaky ReLU
    return masked
def fullyConnectGradient(lastGrad, W):
    """Backprop through an affine layer: propagate the gradient to its input (W^T g)."""
    return W.T.dot(lastGrad)
def computeGradient(X, Y, W, b, lambda_):
    """Analytic backprop gradients of the regularised loss, averaged over the batch.

    Generalized: gradient buffers are shaped from W and b themselves instead
    of a hard-coded hidden width of 50, so the function now works for any
    layer sizes (the original silently assumed m == 50).

    Returns (grad_W, grad_b), lists matching the shapes of W and b.
    """
    grad_W = [np.zeros(w.shape) for w in W]
    grad_b = [np.zeros(v.shape) for v in b]
    n = X.shape[1]
    # One sample at a time: forward to collect intermediates, then backprop.
    for i in range(n):
        fc, act, p = evaluateClassifierVerbose(X[:, i : i+1], W, b)
        grad = softmaxCrossEntropyLossGradient(p, Y[:, i : i+1])
        grad_W[1] = grad_W[1] + np.dot(grad, act[0].T)
        grad_b[1] = grad_b[1] + grad
        grad = fullyConnectGradient(grad, W[1])
        grad = activationReluGradient(grad, fc[0])
        grad_W[0] = grad_W[0] + np.dot(grad, X[:, i : i+1].T)
        grad_b[0] = grad_b[0] + grad
    grad_W = [g / n for g in grad_W]
    grad_b = [g / n for g in grad_b]
    # Add the regularisation term's contribution.
    grad_RW = regularisationLossGradient(W, lambda_)
    grad_W[0] = grad_W[0] + grad_RW[0]
    grad_W[1] = grad_W[1] + grad_RW[1]
    return grad_W, grad_b
def computeGradsNumSlow(X, Y, W, b, lambda_, h):
    """Centred finite-difference gradients of computeLoss, for gradient checking.

    Each entry of W and b is perturbed by ±h in place (and restored) to
    estimate d(loss)/d(entry). Returns (grad_W, grad_b) matching the shapes
    of W and b. O(#params) loss evaluations — slow by design.
    """
    grad_W = [np.zeros(w.shape) for w in W]
    grad_b = [np.zeros(v.shape) for v in b]
    for layer in range(len(W)):
        for idx in np.ndindex(W[layer].shape):
            W[layer][idx] -= h
            low = computeLoss(X, Y, W, b, lambda_)
            W[layer][idx] += h + h
            high = computeLoss(X, Y, W, b, lambda_)
            W[layer][idx] -= h  # restore original value
            grad_W[layer][idx] = (high - low) / (2 * h)
        for idx in np.ndindex(b[layer].shape):
            b[layer][idx] -= h
            low = computeLoss(X, Y, W, b, lambda_)
            b[layer][idx] += h + h
            high = computeLoss(X, Y, W, b, lambda_)
            b[layer][idx] -= h  # restore original value
            grad_b[layer][idx] = (high - low) / (2 * h)
    return grad_W, grad_b
def computeAccuracy(X, y, W, b):
    """Fraction of samples whose arg-max prediction matches the label y[i]."""
    p = evaluateClassifier(X, W, b)
    hits = sum(1 for i in range(X.shape[1]) if np.argmax(p[:, i]) == y[i])
    return hits / X.shape[1]
def miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = False, early_stop = False):
    """Train the 2-layer network with mini-batch gradient descent + momentum.

    Args:
        train_* / val_*: data matrices, one-hot labels and raw labels
        W, b:      initial parameters (not modified; deep-copied)
        lambda_:   L2 regularisation strength
        params:    dict with n_batch, n_epochs, eta, momentum, decay, decay_gap
        verbose:   when True, track losses and best-on-validation snapshots
        early_stop: when True (verbose only), stop 10 epochs after the best
    Returns:
        (Wstar, bstar) — or, when verbose,
        (Wstar, bstar, train_loss, val_loss, Wbest, bbest).
    """
    N = train_X.shape[1]
    last_grad_W = [np.zeros(W[i].shape) for i in range(len(W))]
    last_grad_b = [np.zeros(b[i].shape) for i in range(len(b))]
    Wstar = cp.deepcopy(W)
    bstar = cp.deepcopy(b)
    # Best-on-validation snapshots. BUG FIX: the original assigned 'bbset'
    # (typo), so 'bbest' stayed unbound until the first accuracy improvement,
    # risking a NameError on the verbose return paths.
    Wbest = cp.deepcopy(W)
    bbest = cp.deepcopy(b)
    best_acc = 0
    best_epoch = 0
    eta = params['eta']
    train_loss = []
    val_loss = []
    for i in range(params['n_epochs']):
        for j in range(N // params['n_batch']):
            batch_X = train_X[:, j * params['n_batch'] : (j + 1) * params['n_batch']]
            batch_Y = train_Y[:, j * params['n_batch'] : (j + 1) * params['n_batch']]
            grad_W, grad_b = computeGradient(batch_X, batch_Y, Wstar, bstar, lambda_)
            for k in range(len(W)):
                # Momentum update: blend this step with the previous one.
                grad_W[k] = eta * grad_W[k] + params['momentum'] * last_grad_W[k]
                grad_b[k] = eta * grad_b[k] + params['momentum'] * last_grad_b[k]
                Wstar[k] = Wstar[k] - grad_W[k]
                bstar[k] = bstar[k] - grad_b[k]
            last_grad_W = cp.deepcopy(grad_W)
            last_grad_b = cp.deepcopy(grad_b)
        # Decay the learning rate every 'decay_gap' epochs.
        if (i + 1) % params['decay_gap'] == 0:
            eta = eta * params['decay']
        if verbose:
            train_loss.append(computeLoss(train_X, train_Y, Wstar, bstar, lambda_))
            val_loss.append(computeLoss(val_X, val_Y, Wstar, bstar, lambda_))
            val_acc = computeAccuracy(val_X, val_y, Wstar, bstar)
            if val_acc > best_acc:
                Wbest = cp.deepcopy(Wstar)
                bbest = cp.deepcopy(bstar)
                best_epoch = i
                best_acc = val_acc
                print ("Current Best Validation Accuracy at Epoch {}: {}".format(i + 1, best_acc))
            elif (i - best_epoch > 10) and early_stop:
                print ("Early stopping at epoch {}".format(i + 1))
                return Wstar, bstar, train_loss, val_loss, Wbest, bbest
            print ("Epoch {} Finished, Train Loss: {}, Validation Loss: {}".format(i + 1, train_loss[-1], val_loss[-1]))
    if verbose:
        return Wstar, bstar, train_loss, val_loss, Wbest, bbest
    else:
        return Wstar, bstar
def computeRelativeError(p1, p2):
    """Sum, over paired arrays, of the mean element-wise relative difference.

    Each pair contributes mean(|a - b| / max(eps, |a| + |b|)); eps avoids
    division by zero when both entries are zero.
    """
    eps = 1e-12
    error = 0
    for i, a in enumerate(p1):
        diff = np.abs(a - p2[i])
        scale = np.maximum(eps, np.abs(a) + np.abs(p2[i]))
        error += np.sum(diff / scale) / a.size
    return error
def loadBatch(filename):
    """Load one CIFAR-10 batch from a .mat file.

    Args:
        filename: file name inside Datasets/cifar-10-batches-mat/
    Returns:
        X:    d x N image matrix, pixel values scaled to [0, 1]
        Y:    10 x N one-hot label matrix
        y:    length-N label vector
        mean: per-feature mean of X (callers normalise with it later)
    """
    # Load mat file
    content = sio.loadmat("Datasets/cifar-10-batches-mat/{}".format(filename))
    X = content['data'].T / 255
    mean = np.mean(X, axis = 1)
    # X = (X.T - mean).T
    y = content['labels']
    y = np.reshape(y, (y.shape[0],))
    Y = []
    # One-hot encode the labels (10 CIFAR-10 classes).
    for i in range(X.shape[1]):
        Y.append([0 for col in range(10)])
        Y[i][y[i]] = 1
    Y = np.array(Y).T
    return X, Y, y, mean
def normalize(X, mean):
    """Subtract the per-feature mean from every column of X (d x N)."""
    return X - np.reshape(mean, (-1, 1))
def initial(K, d, t):
    """Initialise the parameters of a 2-layer (d -> 50 -> K) network.

    Args:
        K: number of output classes
        d: input dimensionality
        t: initialisation scheme: "Gaussian", "Xavier" or "He"
    Returns:
        (W, b): lists of two weight matrices and two bias column vectors.
    Raises:
        ValueError: for an unknown scheme name. (The original only printed
        a message and then crashed with NameError on the return statement.)
    """
    # Initialize paramters
    m = 50  # hidden layer width
    if t == "Gaussian":
        W = [np.random.normal(0, 0.001, (m, d)), np.random.normal(0, 0.001, (K, m))]
        b = [np.random.normal(0, 0.001, (m, 1)), np.random.normal(0, 0.001, (K, 1))]
    elif t == "Xavier":
        W = [np.random.normal(0, (2 / (m + d)) ** 0.5, (m, d)), np.random.normal(0, (2 / (K + m)) ** 0.5, (K, m))]
        b = [np.random.normal(0.001, (2 / (m + d)) ** 0.5, (m, 1)), np.random.normal(0.001, (2 / (K + m)) ** 0.5, (K, 1))]
        # b = [np.ones((m, 1)) * 0.01, np.ones((K, 1)) * 0.01]
    elif t == "He":
        W = [np.random.normal(0, (2 / d) ** 0.5, (m, d)), np.random.normal(0, (2 / m) ** 0.5, (K, m))]
        b = [np.random.normal(0.001, (2 / d) ** 0.5, (m, 1)), np.random.normal(0.001, (2 / m) ** 0.5, (K, 1))]
    else:
        raise ValueError("Initialization Type Error: {!r}".format(t))
    return W, b
if __name__ == "__main__":
    # Assignment driver: load CIFAR-10, then run the task chosen at the prompt.
    np.random.seed(1)
    train_X, train_Y, train_y, mean = loadBatch("data_batch_1.mat")
    val_X, val_Y, val_y, mean_ = loadBatch("data_batch_2.mat")
    test_X, test_Y, test_y, mean_ = loadBatch("test_batch.mat")
    # All splits are centred with the TRAINING mean.
    train_X = normalize(train_X, mean)
    val_X = normalize(val_X, mean)
    test_X = normalize(test_X, mean)
    tasks = ["Task 1: Compute Relative Error",
             "Task 2: Check Overfit",
             "Task 3: Find the Best Momentum",
             "Task 4: Find Reasonable Range for Eta",
             "Task 5: Find the Best Eta and Lambda",
             "Task 6: Train the Network",
             "Task 7 (Optional): Optimize the performance"]
    task_label = input("\n".join(tasks) + "\nTask #: ")
    # Task 1: sanity-check analytic gradients against central differences
    # on a reduced input (features 1..399, 10 samples) to keep it tractable.
    if task_label == "1":
        train_X = train_X[1:400, :]
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "Gaussian")
        lambda_ = 0.1
        grad_W, grad_b = computeGradient(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_)
        grad_W1, grad_b1 = computeGradsNumSlow(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_, 1e-6)
        print ("Relative Error for W (lambda = 0.1): ", computeRelativeError([grad_W[1]], [grad_W1[1]]))
        print ("Relative Error for b (lambda = 0.1): ", computeRelativeError(grad_b, grad_b1))
        lambda_ = 0
        grad_W, grad_b = computeGradient(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_)
        grad_W1, grad_b1 = computeGradsNumSlow(train_X[:, 0:10], train_Y[:, 0:10], W, b, lambda_, 1e-6)
        print ("Relative Error for W (lambda = 0): ", computeRelativeError([grad_W[1]], [grad_W1[1]]))
        print ("Relative Error for b (lambda = 0): ", computeRelativeError(grad_b, grad_b1))
    # Task 2: train on only 100 samples — the network should overfit,
    # which validates the training loop end to end.
    if task_label == "2":
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "Gaussian")
        lambda_ = 0
        train_X = train_X[:, 0:100]
        train_Y = train_Y[:, 0:100]
        train_y = train_y[0:100]
        params = {
            'n_batch': 100,
            'n_epochs': 200,
            'eta': 5e-2,
            'momentum': 0,
            'decay': 1,
            'decay_gap': 1
        }
        x = [i + 1 for i in range(params['n_epochs'])]
        Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
        plt.plot(x, train_loss, label = "train")
        plt.plot(x, val_loss, label = "validation")
        plt.legend()
        plt.show()
    # Task 3: sweep momentum values and compare training curves.
    if task_label == "3":
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "Gaussian")
        lambda_ = 1e-6
        params = {
            'n_batch': 100,
            'n_epochs': 10,
            'eta': 1e-2,
            'momentum': 0.9,
            'decay': 0.95,
            'decay_gap': 1
        }
        x = [i + 1 for i in range(params['n_epochs'])]
        for m in [0, 0.5, 0.9, 0.95, 0.99]:
            params['momentum'] = m
            Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
            plt.plot(x, train_loss, label = 'rho = {} (train)'.format(m))
            print ("Momentum = {}".format(m))
            print ("Accuracy on Test Set: {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
        plt.legend()
        plt.show()
    # Task 4: coarse sweep of learning rates to bracket a usable range.
    if task_label == "4":
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "Gaussian")
        lambda_ = 1e-6
        params = {
            'n_batch': 100,
            'n_epochs': 5,
            'eta': 1e-2,
            'momentum': 0.95,
            'decay': 0.95,
            'decay_gap': 1
        }
        x = [i + 1 for i in range(params['n_epochs'])]
        for m in range(5):
            params['eta'] = 5e-3 + 2e-2 * m
            Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
            plt.plot(x, train_loss, label = 'eta = {} (train)'.format(params['eta']))
            print ("Learning Rate = {}".format(params['eta']))
            print ("Accuracy on Test Set: {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
        plt.legend()
        plt.show()
        pass
    # Task 5: random search over (lambda, eta) sampled log-uniformly;
    # writes all results sorted by validation accuracy to a text file.
    if task_label == "5":
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "Gaussian")
        lambda_e_min = -8
        lambda_e_max = -2
        eta_e_min = math.log(0.001) / math.log(10)
        eta_e_max = math.log(0.040) / math.log(10)
        params = {
            'n_batch': 100,
            'n_epochs': 10,
            'eta': 0,
            'momentum': 0.95,
            'decay': 0.95,
            'decay_gap': 1
        }
        lambdas = []
        etas = []
        results = []
        exp_time = 160
        f = open("lambda_eta_select.txt", "w")
        for i in range(exp_time):
            lambda_ = 10 ** (lambda_e_min + random.uniform(0, 1) * (lambda_e_max - lambda_e_min))
            params['eta'] = 10 ** (eta_e_min + random.uniform(0, 1) * (eta_e_max - eta_e_min))
            Wstar, bstar = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params)
            results.append(computeAccuracy(val_X, val_y, Wstar, bstar))
            lambdas.append(lambda_)
            etas.append(params['eta'])
            print ("Lambda = {}, Eta = {}, Accuracy = {}".format(lambda_, params['eta'], results[-1]))
        results = list(zip(results, lambdas, etas))
        results.sort(key = lambda x: -x[0])
        for i in range(min(exp_time, 500)):
            f.write("Accuracy: {}, lambda: {}, eta: {}\n".format(results[i][0], results[i][1], results[i][2]))
        f.close()
    # Task 6: final training on all five batches (1000 samples held out
    # for validation), using the best hyper-parameters found in task 5.
    if task_label == "6":
        train_X, train_Y, train_y, mean_ = loadBatch("data_batch_1.mat")
        test_X, test_Y, test_y, mean_ = loadBatch("test_batch.mat")
        for i in range(1, 5):
            tem_X, tem_Y, tem_y, mean_ = loadBatch("data_batch_{}.mat".format(i + 1))
            train_X = np.concatenate((train_X, tem_X), axis = 1)
            train_Y = np.concatenate((train_Y, tem_Y), axis = 1)
            train_y = np.concatenate((train_y, tem_y))
        val_X = train_X[:, 0:1000]
        val_Y = train_Y[:, 0:1000]
        val_y = train_y[0:1000]
        print (val_X.shape, val_Y.shape, val_y.shape)
        train_X = train_X[:, 1000:]
        train_Y = train_Y[:, 1000:]
        train_y = train_y[1000:]
        mean = np.mean(train_X, axis = 1)
        train_X = normalize(train_X, mean)
        val_X = normalize(val_X, mean)
        test_X = normalize(test_X, mean)
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "Gaussian")
        params = {
            'n_batch': 100,
            'n_epochs': 30,
            'eta': 0.017453577972249945, # 0.010800662290914505,
            'momentum': 0.95,
            'decay': 0.95,
            'decay_gap': 1
        }
        lambda_ = 0.0023292248102687557 # 0.002963774526491722
        Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b, lambda_, params, verbose = True)
        x = [i + 1 for i in range(params['n_epochs'])]
        plt.plot(x, train_loss, label = 'train')
        plt.plot(x, val_loss, label = 'val')
        print ("Accuracy on test set (final): {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
        print ("Accuracy on test set (best): {}".format(computeAccuracy(test_X, test_y, Wbest, bbest)))
        plt.legend()
        plt.show()
    # Task 7: like task 6 but with He initialisation, a stronger step decay
    # schedule and early stopping enabled.
    if task_label == "7":
        train_X, train_Y, train_y, mean_ = loadBatch("data_batch_1.mat")
        test_X, test_Y, test_y, mean_ = loadBatch("test_batch.mat")
        for i in range(1, 5):
            tem_X, tem_Y, tem_y, mean_ = loadBatch("data_batch_{}.mat".format(i + 1))
            train_X = np.concatenate((train_X, tem_X), axis = 1)
            train_Y = np.concatenate((train_Y, tem_Y), axis = 1)
            train_y = np.concatenate((train_y, tem_y))
        val_X = train_X[:, 0:1000]
        val_Y = train_Y[:, 0:1000]
        val_y = train_y[0:1000]
        print (val_X.shape, val_Y.shape, val_y.shape)
        train_X = train_X[:, 1000:]
        train_Y = train_Y[:, 1000:]
        train_y = train_y[1000:]
        mean = np.mean(train_X, axis = 1)
        train_X = normalize(train_X, mean)
        val_X = normalize(val_X, mean)
        test_X = normalize(test_X, mean)
        d = train_X.shape[0]
        K = train_Y.shape[0]
        W, b = initial(K, d, "He")
        params = {
            'n_batch': 100,
            'n_epochs': 50,
            'eta': 0.017453577972249945, # 0.010800662290914505,
            'momentum': 0.95,
            'decay': 0.1,
            'decay_gap': 8,
        }
        lambda_ = 0.0023292248102687557 # 0.002963774526491722
        Wstar, bstar, train_loss, val_loss, Wbest, bbest = miniBatchGD(train_X, train_Y, train_y, val_X, val_Y, val_y, W, b,
                                                                       lambda_, params, verbose = True, early_stop = True)
        x = [i + 1 for i in range(len(train_loss))]
        plt.plot(x, train_loss, label = 'train')
        plt.plot(x, val_loss, label = 'val')
        print ("Accuracy on test set (final): {}".format(computeAccuracy(test_X, test_y, Wstar, bstar)))
        print ("Accuracy on test set (best): {}".format(computeAccuracy(test_X, test_y, Wbest, bbest)))
        plt.legend()
        plt.show()
|
[
"noreply@github.com"
] |
WangZesen.noreply@github.com
|
a78236e4cafcb2ac69887a145feeb786c907399e
|
6dda2ac01f624757069a9f9a7328b5a574a480c0
|
/week-04/day-04/11.py
|
0fac651072311c1af79f160402bc616d7a50041d
|
[] |
no_license
|
greenfox-zerda-lasers/brigittaforrai
|
976b8e0dacbf791a76e5e59c3f034cadd106b8e6
|
a2213ba268f2e777b1190a79d9ff0360f593cad5
|
refs/heads/master
| 2021-01-12T18:18:49.042219
| 2017-02-19T15:36:49
| 2017-02-19T15:36:49
| 71,362,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from tkinter import *
root = Tk()
size = 600
# Yellow square canvas that the fractal is drawn on.
canvas = Canvas(root,width=size, height=size, bg="yellow")
canvas.pack()
def draw(x,y,size):
    # Draw a square at (x, y), then recurse into four one-third-size squares
    # centred on each edge (a Vicsek-like fractal); stop below 5 px.
    canvas.create_rectangle(x,y,x+size,y+size)
    if size > 5:
        draw(x,y+size/3,size/3)
        draw(x+(size*(2/3)),y+size/3,size/3)
        draw(x+size/3,y,size/3)
        draw(x+size/3,y+(size*(2/3)),size/3)
draw(0,0,600)
root.mainloop()
|
[
"forraibrigi@gmail.com"
] |
forraibrigi@gmail.com
|
b5e8c503a72c662e758f0301bb837a77098edce3
|
4e980eca143b2e3fd9523014d4a9e22a79089328
|
/pontuacoes/apps.py
|
80570d53af2bd7de854f3cd52a6728421acdcefa
|
[] |
no_license
|
silasgon/gamep-admin
|
5d1f9149c0a10260a93f4020108806df3b8c15de
|
9f3c9970b92dfb7254c4ccf081446303a25df8b9
|
refs/heads/master
| 2020-04-08T08:33:53.411726
| 2018-11-13T13:29:57
| 2018-11-13T13:29:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
from django.apps import AppConfig
class PontuacoesConfig(AppConfig):
    """Django application configuration for the 'pontuacoes' (scores) app."""
    name = 'pontuacoes'
|
[
"kavalerskialexandre@gmail.com"
] |
kavalerskialexandre@gmail.com
|
0405898d24af93f463de789847b0398a0e8e0b97
|
092d82f8a64f8e33a739ae023667253a75bfb9ae
|
/jury/forms.py
|
ac08bc91b6d6b266345bc9fb2f865acbf50bba23
|
[
"MIT"
] |
permissive
|
COdingaorg/The_Jury
|
8c103eec028891b1ee98ede786fb54638bd16ba6
|
a4432269a023edf49a010644ca4f06324a934d7f
|
refs/heads/main
| 2023-06-18T10:43:14.888503
| 2021-07-20T16:05:59
| 2021-07-20T16:05:59
| 386,658,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from jury.models import UserProfile, UserProject
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class registerUser(UserCreationForm):
    """Sign-up form based on Django's UserCreationForm, extended with
    first/last name and email fields."""
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'password1', 'password2']
class UploadProjectForm(forms.ModelForm):
    """ModelForm for submitting a UserProject (title, image, description, link)."""
    class Meta:
        model = UserProject
        fields = ['project_title', 'project_image', 'project_description', 'project_link']
class AddorEditProfile(forms.ModelForm):
    """ModelForm for creating or editing a UserProfile (photo, bio, socials)."""
    class Meta:
        model = UserProfile
        fields = ['photo_path', 'user_bio', 'facebook_account', 'twitter_account', 'instagram_account']
|
[
"calemasanga@gmail.com"
] |
calemasanga@gmail.com
|
16ac6d820543f041aa2c474fcb8afa4d895ce380
|
e3c9665e6c3b2a9a632ae00a3e896feb32cbb745
|
/foodgram/recipes/migrations/0020_auto_20210409_1023.py
|
52b73d6c650a816a113a40cd2b31ece2ea474ec9
|
[] |
no_license
|
girik108/foodgram-project
|
dc1addde0f99cf0ce74888119610c024ab5984c4
|
6f5b44da90563c25b9c7d66591244b85c7d63560
|
refs/heads/master
| 2023-04-10T00:35:44.916391
| 2021-04-19T06:26:31
| 2021-04-19T06:26:31
| 338,977,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 3.1.6 on 2021-04-09 06:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen Ingredient.title to a 100-char field
    with a Russian verbose name."""
    dependencies = [
        ('recipes', '0019_auto_20210408_0937'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ingredient',
            name='title',
            field=models.CharField(max_length=100, verbose_name='Наименование'),
        ),
    ]
|
[
"gimatov@list.ru"
] |
gimatov@list.ru
|
d757c2b9d5123a880f8485775e37908e83cfa73b
|
81e5105ba9519dfaae3985e99f36d62ff3283276
|
/rgw/v2/tests/s3_swift/user_op_using_rest.py
|
99e37aa21dfc3b2e691d9b42c6b42da522bb5d96
|
[] |
no_license
|
sunilangadi2/ceph-qe-scripts
|
7fea0786a1a006d9877200cb308d65c21b34937d
|
1edad8710e283f464d42aeee4099b2128e178a95
|
refs/heads/master
| 2022-11-22T16:38:25.911294
| 2020-07-22T07:14:57
| 2020-07-22T07:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,173
|
py
|
"""
user_op_using_rest - Test user operation using REST API
Usage: user_op_using_rest.py -c <input_yaml>
<input_yaml>
test_user_with_REST.yaml
Operation:
Create Admin user
Using admin user, create new user using REST request
Using admin user, Modify existing user using REST request
Using admin user, Delete user using REST request
"""
# test REST api operation
import os, sys
import random
import string
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
from v2.lib.resource_op import Config
import v2.utils.log as log
import v2.utils.utils as utils
import traceback
import argparse
import yaml
import json
#import v2.lib.resource_op as swiftlib
from v2.lib.exceptions import TestExecError, RGWBaseException
from v2.utils.test_desc import AddTestInfo
from v2.lib.s3.write_io_info import IOInfoInitialize, BasicIOInfoStructure
from v2.lib.swift.auth import Auth
#import v2.lib.manage_data as manage_data
from v2.lib.admin import UserMgmt
from rgwadmin import RGWAdmin
#from v2.lib.frontend_configure import Frontend
TEST_DATA_PATH = None
def randomString(stringLength=3):
    """Return a random string of lowercase ASCII letters of the given length."""
    return ''.join(random.choices(string.ascii_lowercase, k=stringLength))
def s3_list(l):
    """Flatten the comparable fields of an RGW user-info dict into a list.

    Order: user_id, display_name, email, max_buckets, then the first key's
    access_key and secret_key.
    """
    primary_key = l['keys'][0]
    return [
        l['user_id'],
        l['display_name'],
        l['email'],
        l['max_buckets'],
        primary_key['access_key'],
        primary_key['secret_key'],
    ]
def verify_user(api_user,regular_user):
    """True when the REST response and the radosgw-admin output describe
    the same user (compared field-by-field via s3_list)."""
    return s3_list(api_user) == s3_list(regular_user)
def test_exec(config):
    """Exercise RGW user CRUD through the admin REST API.

    For each iteration: create a user via REST, verify it against
    'radosgw-admin user info', modify it, verify again, then delete it and
    confirm it is gone from 'radosgw-admin user list'. Exits the process
    with status 1 on any verification failure.
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    umgmt = UserMgmt()
    host, ip = utils.get_hostname_ip()
    port = utils.get_radosgw_port_no()
    hostname=str(ip)+":"+str(port)
    log.info(hostname)
    # preparing data: an admin user whose credentials drive the REST client
    admin_api_user = "admin_user_"+randomString()
    log.info(admin_api_user)
    user_info = umgmt.create_rest_admin_user(user_id=admin_api_user,
                                             displayname=admin_api_user)
    rgw = RGWAdmin(
        access_key=user_info['access_key'],
        secret_key=user_info['secret_key'],
        server=hostname, secure=False, verify=False)
    api_user = "api_user_"+randomString()
    log.info(api_user)
    for uc in range(config.user_count):
        #Create User
        data=rgw.create_user(
            uid=api_user,
            display_name=api_user,
            email=api_user+'@abc.xyz')
        log.info("User created successfully")
        log.info(data)
        log.info('verification starts')
        # Cross-check the REST response against the CLI's view of the user.
        op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" % api_user)
        json_doc = json.loads(op)
        log.info(json_doc)
        v=verify_user(data, json_doc)
        if v is False:
            test_info.failed_status('test failed')
            sys.exit(1)
        log.info("Verification for create operation completed")
        #Update User
        data = rgw.modify_user(
            uid=api_user,
            display_name=api_user+"_11",
            email=api_user+'_11@umd.edu')
        log.info("User Updated successfully")
        log.info(data)
        log.info('verification starts')
        op = utils.exec_shell_cmd("radosgw-admin user info --uid %s" % api_user)
        json_doc = json.loads(op)
        log.info(json_doc)
        v = verify_user(data, json_doc)
        if v is False:
            test_info.failed_status('test failed')
            sys.exit(1)
        log.info("Verification for Update operation completed")
        #delete User
        data = rgw.remove_user(uid=api_user, purge_data=False)
        log.info(data)
        log.info("User removed")
        # The deleted uid must no longer appear in the user list.
        op = utils.exec_shell_cmd("radosgw-admin user list")
        json_doc = json.loads(op)
        if api_user in json_doc:
            test_info.failed_status('test failed')
            sys.exit(1)
        log.info("Verification for Delete operation completed")
if __name__ == '__main__':
    # Entry point: prepare the test-data directory, parse the yaml config
    # path from -c, run the test, and report pass/fail via the exit status.
    test_info = AddTestInfo('test REST api operation')
    try:
        project_dir = os.path.abspath(os.path.join(__file__, "../../.."))
        test_data_dir = 'test_data'
        TEST_DATA_PATH = (os.path.join(project_dir, test_data_dir))
        log.info('TEST_DATA_PATH: %s' % TEST_DATA_PATH)
        if not os.path.exists(TEST_DATA_PATH):
            log.info('test data dir not exists, creating.. ')
            os.makedirs(TEST_DATA_PATH)
        parser = argparse.ArgumentParser(description='RGW S3 Automation')
        parser.add_argument('-c', dest="config",
                            help='RGW Test yaml configuration')
        args = parser.parse_args()
        yaml_file = args.config
        config = Config(yaml_file)
        config.read()
        test_exec(config)
        test_info.success_status('test passed')
        sys.exit(0)
    except (RGWBaseException, Exception) as e:
        # Any failure (framework or unexpected) is logged and maps to exit 1.
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
|
[
"ukurundw@redhat.com"
] |
ukurundw@redhat.com
|
5404e3ad8934d8abdd386447c64ee0c0a8c716f7
|
93f5ee5cc7b863029c54a766e9f5fa0b0e52191f
|
/BayesianOptimization/20180403_two_hparas.py
|
f2c660d6aa1078720adfdb30d305f189ed7051c7
|
[] |
no_license
|
ShihPingLai/Jacob-deep_learning
|
29ad17839da7a34e01db1a626942862e250e8619
|
dfbaa178ac537a189a062a23904072a7d8e550a9
|
refs/heads/master
| 2020-03-13T11:51:51.276939
| 2018-04-26T04:19:15
| 2018-04-26T04:19:15
| 131,108,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,156
|
py
|
#!/usr/bin/python3
'''
Abstract:
This is a program to exercise how to optimize deep learning with Bayesian Optimization.
Copy from "BayesianOptimization/examples/exploitation vs exploration.ipynb"
Usage:
20180403_two_hparas.py
Source:
BayesianOptimization/examples/exploitation vs exploration.ipynb
##################################
# Python3 #
# This code is made in python3 #
##################################
20170403
####################################
update log
20180403 version alpha 1:
1. I don't know
'''
# modules for Bayesian
from bayes_opt import BayesianOptimization
import pymc as pm
# modules for deep learning
import tensorflow as tf
# common modules
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython.core.pylabtools import figsize
# Utility function for plotting
def plot_bo(f, bo, figname):
xs = [x["x"] for x in bo.res["all"]["params"]]
ys = bo.res["all"]["values"]
mean, sigma = bo.gp.predict(np.arange(len(f)).reshape(-1, 1), return_std=True)
plt.figure(figsize=(16, 9))
plt.plot(f)
plt.plot(np.arange(len(f)), mean)
plt.fill_between(np.arange(len(f)), mean+sigma, mean-sigma, alpha=0.1)
plt.scatter(bo.X.flatten(), bo.Y, c="red", s=50, zorder=10)
plt.xlim(0, len(f))
plt.ylim(f.min()-0.1*(f.max()-f.min()), f.max()+0.1*(f.max()-f.min()))
plt.savefig(figname)
return
#--------------------------------------------
# main code
if __name__ == "__main__":
    # Compare exploitation vs exploration for three acquisition functions
    # (UCB, EI, POI) on a fixed 1-D target; each run saves a plot.
    VERBOSE = 0
    # measure times
    start_time = time.time()
    #-----------------------------------
    # load hyperparas
    # use sklearn's default parameters for theta and random_start
    gp_params = {"alpha": 1e-5, "n_restarts_optimizer": 2}
    # Target function: two Gaussian bumps plus a rational term on [-2, 10].
    np.random.seed(42)
    xs = np.linspace(-2, 10, 10000)
    f = np.exp(-(xs - 2)**2) + np.exp(-(xs - 6)**2/10) + 1/ (xs**2 + 1)
    if VERBOSE>0:
        plt.plot(f)
        plt.show()
    #-----------------------------------
    # Acquisition function 1: Upper Confidence Bound
    # Prefer exploitation (kappa=1.0)
    bo = BayesianOptimization(f=lambda x: f[int(x)],
                              pbounds={"x": (0, len(f)-1)},
                              verbose=0)
    bo.maximize(init_points=2, n_iter=25, acq="ucb", kappa=1, **gp_params)
    plot_bo(f, bo, "ucb_exploitation.png")
    # Prefer exploration (kappa=10)
    bo = BayesianOptimization(f=lambda x: f[int(x)],
                              pbounds={"x": (0, len(f)-1)},
                              verbose=0)
    bo.maximize(init_points=2, n_iter=25, acq="ucb", kappa=10, **gp_params)
    plot_bo(f, bo, "ucb_exploration.png")
    #-----------------------------------
    # Acquisition function 2: Expected Improvement
    # Prefer exploitation (xi=0.0)
    bo = BayesianOptimization(f=lambda x: f[int(x)],
                              pbounds={"x": (0, len(f)-1)},
                              verbose=0)
    bo.maximize(init_points=2, n_iter=25, acq="ei", xi=1e-4, **gp_params)
    plot_bo(f, bo, "ei_exploitation.png")
    # Prefer exploration (xi=0.1)
    bo = BayesianOptimization(f=lambda x: f[int(x)],
                              pbounds={"x": (0, len(f)-1)},
                              verbose=0)
    bo.maximize(init_points=2, n_iter=25, acq="ei", xi=0.1, **gp_params)
    plot_bo(f, bo, "ei_exploration.png")
    #-----------------------------------
    # Acquisition function 3: Probability of Improvement
    # Prefer exploitation (xi=0.0)
    bo = BayesianOptimization(f=lambda x: f[int(x)], pbounds={"x": (0, len(f)-1)}, verbose=0)
    bo.maximize(init_points=2, n_iter=25, acq="poi", xi=1e-4, **gp_params)
    plot_bo(f, bo, "poi_exploitation.png")
    # Prefer exploration (xi=0.1)
    bo = BayesianOptimization(f=lambda x: f[int(x)], pbounds={"x": (0, len(f)-1)}, verbose=0)
    bo.maximize(init_points=2, n_iter=25, acq="poi", xi=0.1, **gp_params)
    plot_bo(f, bo, "poi_exploration.png")
    #-----------------------------------
    # measuring time
    elapsed_time = time.time() - start_time
    print ("Exiting Main Program, spending ", elapsed_time, "seconds.")
|
[
"z123a123s123@gmail.com"
] |
z123a123s123@gmail.com
|
3be5a911c554072c02c06f1a186d5799347d1876
|
8394c2b1bd17f04e5cb219c98e300d91530ba831
|
/project/utils/models/model_handling.py
|
ba756f593b55b1bc91b8ac5cecc3c61af35624f4
|
[] |
no_license
|
justinwhatley/interpretability_experiment
|
f26356ce16282a715ba951560c56a94823f733b6
|
fcfdd2441f47dab7f1b711f7fe18b49efbe6b791
|
refs/heads/master
| 2022-11-05T14:42:19.367835
| 2020-06-26T20:54:27
| 2020-06-26T20:54:27
| 259,716,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
import joblib
def load_or_store_model(func):
    """
    Wrapper/decorator to check whether the model is already saved to return saved model instead of new training
    Function must have a 'save_to' filepath and 'recompute' bool must be defined
    """
    def loading_wrapper(*args, **kwargs):
        recompute = kwargs['recompute']
        save_to = kwargs['save_to']
        if not recompute:
            # Best-effort load of a previously saved model.
            # BUG FIX: the original used a bare 'except:', which also swallows
            # KeyboardInterrupt/SystemExit; Exception keeps the fall-through-
            # to-training behaviour without trapping interpreter exits.
            try:
                print('Loading previously trained model: ' + str(save_to))
                return joblib.load(save_to)
            except Exception:
                print('Model not found: ' + str(save_to))
        # Cache miss (or recompute requested): train and persist the model.
        print('Training: ' + func.__module__)
        model = func(*args, **kwargs)
        return save_model(model, save_to)
    def save_model(model, save_to):
        # Persist the trained model and hand it back to the caller.
        print('Saving model to: ' + str(save_to))
        joblib.dump(model, save_to)
        return model
    return loading_wrapper
|
[
"justinwhatley5@gmail.com"
] |
justinwhatley5@gmail.com
|
835b080ae5e52498164715e7341be0d16a872109
|
a2f8b748a3427b8ffa622c96dc6a4f4339495672
|
/migrations/versions/12ae296935d5_.py
|
923e41d481cb910ac14eeab7c4f6ee0b1d665f64
|
[] |
no_license
|
quinnwu/pvapp
|
96242f6b6f1b1410fd4777579856d4ac8959dd47
|
db3c507b9d35fe468f5d358a41336fbfa26117e2
|
refs/heads/master
| 2021-03-27T14:54:11.295834
| 2018-04-28T18:09:35
| 2018-04-28T18:09:35
| 119,205,359
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
"""empty message
Revision ID: 12ae296935d5
Revises: 2af6e619b2f1
Create Date: 2016-01-03 19:20:57.386338
"""
# revision identifiers, used by Alembic.
revision = '12ae296935d5'
down_revision = '2af6e619b2f1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable integer column 'competitioncycle' to 'project'."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('project', sa.Column('competitioncycle', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: drop 'project.competitioncycle'."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('project', 'competitioncycle')
    ### end Alembic commands ###
|
[
"wu.quinn@gmail.com"
] |
wu.quinn@gmail.com
|
8ebe3c061d8acbaf5cbbcdb7219aa906364cb940
|
3760f688b5f03b3334853500a960b3daf2666dd6
|
/todos/urls.py
|
9d26ca819a09a7a00ed16e791f66d6bc3b8f291f
|
[] |
no_license
|
Cody1009/django_todo_api
|
a1ece2cdf6f1ddd1299fb3d095859419329cbfd4
|
4057ccddb3211abb25e1f8ae3e572b2a6c72257c
|
refs/heads/master
| 2023-07-31T02:32:50.473349
| 2020-05-03T01:05:09
| 2020-05-03T01:05:09
| 260,803,895
| 0
| 0
| null | 2021-09-22T19:02:43
| 2020-05-03T01:02:10
|
Python
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
from django.urls import path
from . import views
# URL routes for the todos app: list view at the root, detail view by pk.
urlpatterns = [
    path('', views.ListTodo.as_view()),
    path('<int:pk>/', views.DetailTodo.as_view())
]
|
[
"nansiki02@gmail.com"
] |
nansiki02@gmail.com
|
c3b224c2fb8cd240476e5ebc7795c22ed913304e
|
6a7e6a9a27b2141c7312b04b6cba3852af016c69
|
/Lauhdutin/@Resources/Frontend/GenericFilePathDialog.py
|
9411750cd205238ebe70047d24473a5aba624706
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
Tene21/Lauhdutin
|
35ed025f286503a3e861fc4c92415e84b1509ee2
|
998bfac4c02fc404614fb96c215bbe45bc8aca01
|
refs/heads/master
| 2021-01-20T22:02:30.201304
| 2017-05-29T08:09:15
| 2017-05-29T08:09:15
| 101,792,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
import sys, os, subprocess

# Rainmeter helper: show a native file-picker and post the chosen PATH back
# to the skin's SettingsScript measure via a !CommandMeasure bang.
try:
    from tkinter import *
    from tkinter import filedialog
    # Each argument carries a trailing character from Rainmeter, hence [:-1].
    RainmeterPath = os.path.join(sys.argv[1][:-1], "Rainmeter.exe")
    FunctionName = sys.argv[2][:-1]
    InitialDir = sys.argv[3][:-1]
    Config = sys.argv[4][:-1]
    root = Tk()
    root.withdraw()  # hide the empty Tk root window; only the dialog shows
    # BUG FIX: askopenfile() returns an open file OBJECT, so its repr (not
    # the selected path) was interpolated into the bang below, and the handle
    # was never closed. askopenfilename() returns the path string expected
    # by the Lua callback.
    path = filedialog.askopenfilename(initialdir=InitialDir)
    subprocess.call(
        [
            RainmeterPath, "!CommandMeasure", "SettingsScript",
            "%s('%s')" % (FunctionName, path), Config
        ],
        shell=False)
except ImportError:
    # tkinter missing: dump the traceback and keep the console open.
    import traceback
    traceback.print_exc()
    input()
|
[
"noreply@github.com"
] |
Tene21.noreply@github.com
|
c88a1af397f5418a03100cac9cde8e9e4629f207
|
34d1d64a049dd3a25293955f6312072f2fcb3905
|
/set-1/challenge2.py
|
f54288641f2df4a0648832da78827542e6a9bb54
|
[] |
no_license
|
alex-bellon/cryptopals
|
c82ec87377911e6cae365cb48b2058789b93b9a1
|
5bc6242a5b972866ba7eebe2f6efa80c7ebff71c
|
refs/heads/master
| 2020-05-03T18:40:02.320249
| 2019-08-16T21:15:27
| 2019-08-16T21:15:27
| 178,761,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
# Cryptopals set 1, challenge 2: XOR two equal-length hex buffers.
a = '1c0111001f010100061a024b53535009181c'
b = '686974207468652062756c6c277320657965'
# Binary string forms of the two inputs (kept for parity with the exercise).
aBin = format(int(a, 16), 'b')
bBin = format(int(b, 16), 'b')
c = int(aBin, 2) ^ int(bBin, 2)
print(hex(c))
|
[
"alexrbellon@gmail.com"
] |
alexrbellon@gmail.com
|
9f4e62cb49368115d24ed01964de31c04727d60e
|
4ce1cecacda0da4f662f188c89e793a60c8c0439
|
/Door.py
|
be43c539033cd6d0c4f732b7321357ef4af02a9e
|
[] |
no_license
|
EuanOR/FYP
|
5419b1c8c18a0f24a1628e54c068aadf121ebe9e
|
91fb5803cad09d6eb7b2c1ed74b7fe45120248ea
|
refs/heads/master
| 2020-04-24T11:54:47.632710
| 2019-03-25T19:58:55
| 2019-03-25T19:58:55
| 171,941,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
class Door(object):
    """A door with a thickness (strictly between 1 and 3 inches) and an
    open/closed state (created closed)."""

    def __init__(self, thickness):
        """Create a closed door.

        Raises:
            ValueError: if thickness is not strictly between 1 and 3 inches.
            (The original only printed a warning and left self._thickness
            unset, causing an AttributeError on later access.)
        """
        self._validate_and_set(thickness)
        self._open = False

    def _validate_and_set(self, thickness):
        # Shared range check for the constructor and the setter.
        if 1.0 < thickness < 3.0:
            self._thickness = thickness
        else:
            raise ValueError("Door must be between 1 and 3 inches thick")

    def get_thickness(self):
        """Return the door's thickness in inches."""
        return self._thickness

    def set_thickness(self, thickness):
        """Set a new thickness; raises ValueError when out of range."""
        self._validate_and_set(thickness)

    def open_door(self):
        """Mark the door as open."""
        self._open = True

    def close_door(self):
        """Mark the door as closed."""
        self._open = False

    def is_open(self):
        """Return True while the door is open."""
        return self._open
|
[
"115312821@umail.ucc.ie"
] |
115312821@umail.ucc.ie
|
1b36b1e22e63bb7817827b4a02f3f2d9c90b4691
|
49c0056ccde2d893e56e2f15c24b19659312c073
|
/blog/migrations/0005_auto_20210112_2004.py
|
ccb3308cd5c2343ac991a08fc6f24b7a56ea450f
|
[] |
no_license
|
ferdousdjango/blogdupl
|
5f5c1ed140fac0060584c7344e6b7e6403b23a06
|
3171566cddfb6e231079f03da5f2c308891e982e
|
refs/heads/main
| 2023-02-27T20:06:55.151176
| 2021-02-03T15:20:17
| 2021-02-03T15:20:17
| 333,327,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
# Generated by Django 3.1.4 on 2021-01-12 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add optional home-page fields (homeimage,
    hometitle) and a main image field to the blog Post model."""
    dependencies = [
        ('blog', '0004_postright'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='homeimage',
            field=models.ImageField(blank=True, max_length=300, upload_to='media'),
        ),
        migrations.AddField(
            model_name='post',
            name='hometitle',
            field=models.CharField(blank=True, max_length=155),
        ),
        migrations.AddField(
            model_name='post',
            name='image',
            field=models.ImageField(blank=True, max_length=300, upload_to='media'),
        ),
    ]
|
[
"helloferdous@gmail.com"
] |
helloferdous@gmail.com
|
943f9a56f01dbd5d3da769e1bca8d7b26ee4f82a
|
cec2ba69ce9cb84f05097a135a64497852016c45
|
/Battleship.py
|
d4d2a82a071cfca0d63d5249e42ee1d6f3457a4d
|
[] |
no_license
|
EthanTaft/PythonLearning
|
22d11f7b37c7f6069e90f5edcf174cdc86b15664
|
8947b576f5045bcaa705d9d270fcc9a5c7f20640
|
refs/heads/master
| 2021-08-20T09:32:51.899628
| 2017-11-28T20:33:07
| 2017-11-28T20:33:07
| 112,286,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:19:41 2017
@author: Ethan
"""
from random import randint
# Build the 5x5 ocean grid; 'O' marks an unguessed cell.
board = []
for i in range(5):
    board.append(['O', 'O', 'O', 'O', 'O'])
print(board)
def print_board(board_in):
    """Print the board: cells joined by spaces, one row per line."""
    print("\n".join(" ".join(row) for row in board_in))
print_board(board)
def random_row(board_in):
    """Pick a random row index within the board (0 .. rows-1)."""
    return randint(0, len(board_in) - 1)
def random_col(board_in):
return(randint(0, len(board_in) - 1))
ship_row = random_row(board)
ship_col = random_col(board)
for turn in range(4):
print("Turn", turn + 1)
guess_row = int(input("Guess Row: "))
guess_col = int(input("Guess Col: "))
if guess_row == ship_row and guess_col == ship_col:
print("Congratulations! you sank my battleship!")
break
else:
if guess_row not in range(5) or guess_col not in range(5):
print("Oops, that's not even in the ocean.")
elif board[guess_row][guess_col] == "X":
print("You guessed that one already.")
else:
print("You missed my battleship!")
board[guess_row][guess_col] = "X"
print_board(board)
if turn == 3:
print("Game Over")
|
[
"ethan.taft@healthcatalyst.com"
] |
ethan.taft@healthcatalyst.com
|
6d61f171ddbc7385d9fec8b40e92e0a29e3dd8dd
|
916586620128e8c357b634192512b253bb4fc944
|
/00_mysite/mysite/settings.py
|
f9f391ba5dc44ba1d02b040704163d93f59a11dc
|
[] |
no_license
|
Kevinqian0501/Django_start
|
f11fdc9a2a548b7623ee29de32c8303d746bde30
|
315abaabb28fd4137b9e4f9bd32b44e6db410adc
|
refs/heads/master
| 2021-05-16T14:44:25.983886
| 2018-01-24T18:30:07
| 2018-01-24T18:30:07
| 118,492,770
| 0
| 0
| null | 2018-01-24T18:30:08
| 2018-01-22T17:46:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,150
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.dev20180121070910.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!u*lko(6$ux(ksrs&)!g6qr8fkx(%b9v1io09f%^1z4ywd!zly'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['209.126.122.45']
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
|
[
"kevin@kavout.co"
] |
kevin@kavout.co
|
cc42ed3292ae011c58c3f52d8268253828b8b0f6
|
97e764ca8ee0ef7c1943b97b736f3b7190170787
|
/Regression_Problem/PearsonCorrelation.py
|
3ff6d34eda4c477d65751fd523b6513098b32695
|
[
"MIT"
] |
permissive
|
xinpengliu/Machine-Learning-Practice
|
2aa7b82216e5a4506a2cd191cc57d3d4c55f0d86
|
dae55f52bb31f428526d6d60229bd1827c4e0af0
|
refs/heads/master
| 2020-03-14T00:35:33.942020
| 2017-07-20T05:54:21
| 2017-07-20T05:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
'''
Created on Apr 24, 2017
@author: Leo Zhong
'''
import numpy as np
from astropy.units import Ybarn
import math
def computeCorrelation(X, Y):
    """Return the Pearson correlation coefficient between sequences X and Y.

    Computes sum((x - mean_x)(y - mean_y)) / sqrt(var_x * var_y) in a
    single pass over the paired values.
    """
    mean_x = np.mean(X)
    mean_y = np.mean(Y)
    cross_sum = 0
    sq_sum_x = 0
    sq_sum_y = 0
    for x_val, y_val in zip(X, Y):
        dx = x_val - mean_x
        dy = y_val - mean_y
        cross_sum += dx * dy
        sq_sum_x += dx ** 2
        sq_sum_y += dy ** 2
    return cross_sum / math.sqrt(sq_sum_x * sq_sum_y)
def polyfit(x, y, degree):
    """Least-squares polynomial fit of y on x.

    Returns a dict with:
      'polynomial'    -- fitted coefficients, highest degree first
      'determination' -- the R^2 (coefficient of determination)
    """
    coeffs = np.polyfit(x, y, degree)
    fitted = np.poly1d(coeffs)(x)
    y_mean = np.sum(y) / len(y)
    ss_reg = np.sum((fitted - y_mean) ** 2)
    ss_tot = np.sum((y - y_mean) ** 2)
    return {
        'polynomial': coeffs.tolist(),
        'determination': ss_reg / ss_tot,
    }
# Smoke test with the lesson's sample data: prints the Pearson correlation
# and the degree-1 fit (coefficients + R^2) for the same points.
testX = [1, 3, 8, 7, 9]
testY = [10, 12, 24, 21, 34]
print (computeCorrelation(testX, testY))
print (polyfit(testX, testY, 1))
|
[
"zhong5930@gmail.com"
] |
zhong5930@gmail.com
|
9bbe6ad656b19e2b6235563076647a80dba49d14
|
f6100704f93c448f357c4753aec50799c396d991
|
/操作db离线脚本.py
|
edc1a663af5ab4013a3c6b4b0fe174629bdb2c24
|
[] |
no_license
|
wssf812/Flask-basic-options
|
9c28aa12367b247c026a3f7643000354ea271613
|
340194a9e28adab92f135b410d17bb5e210bbfc1
|
refs/heads/master
| 2023-03-01T14:06:54.022222
| 2021-02-09T06:58:34
| 2021-02-09T06:58:34
| 337,299,254
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# -*- coding: utf-8 -*-
# *Time*    : 2021/2/3 10:06
# *Author*  : wssf
# *File*    : 操作db离线脚本.py
# *Software*: PyCharm
"Offline script: creates database tables / inserts seed data without starting the Flask app."
from Flask_example import db
from Flask_example import create_app
from werkzeug.security import generate_password_hash  # password-hashing helper
from Flask_example import models
app = create_app()
# Database access requires an application context outside a request.
with app.app_context():
    # db.create_all()  # create all tables from the model classes
    user = models.Users(
        username="liu",
        password=generate_password_hash("123456")
    )
    # Stage the new user row
    db.session.add(user)
    # Commit the transaction
    db.session.commit()
|
[
"1228589545@qq.com"
] |
1228589545@qq.com
|
26534e055871d229971a287afd01f30afec488e8
|
03d07de94fc22d1583c45ca84c711a06df8a40ff
|
/lc/dynamic_programming/lc_91_decode-ways.py
|
47e6fb60ea6793ea85275e7e4575d8b528ab5713
|
[] |
no_license
|
gaopenghigh/algorithm
|
94e04293c69a2ad6903495e1cf6e1b75556535bb
|
f5d78c98c7201c56f9d4c3a9c0c76e9447a17985
|
refs/heads/master
| 2022-03-11T18:46:38.712923
| 2022-02-20T14:20:54
| 2022-02-20T14:20:54
| 54,484,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
# 91. 解码方法
# 难度 中等
# 一条包含字母 A-Z 的消息通过以下映射进行了 编码 :
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
# 要 解码 已编码的消息,所有数字必须基于上述映射的方法,反向映射回字母(可能有多种方法)。例如,"11106" 可以映射为:
# "AAJF" ,将消息分组为 (1 1 10 6)
# "KJF" ,将消息分组为 (11 10 6)
# 注意,消息不能分组为 (1 11 06) ,因为 "06" 不能映射为 "F" ,这是由于 "6" 和 "06" 在映射中并不等价。
# 给你一个只含数字的 非空 字符串 s ,请计算并返回 解码 方法的 总数 。
# 题目数据保证答案肯定是一个 32 位 的整数。
#
# 示例 1:
# 输入:s = "12"
# 输出:2
# 解释:它可以解码为 "AB"(1 2)或者 "L"(12)。
#
# 示例 2:
# 输入:s = "226"
# 输出:3
# 解释:它可以解码为 "BZ" (2 26), "VF" (22 6), 或者 "BBF" (2 2 6) 。
#
# 示例 3:
# 输入:s = "0"
# 输出:0
# 解释:没有字符映射到以 0 开头的数字。
# 含有 0 的有效映射是 'J' -> "10" 和 'T'-> "20" 。
# 由于没有字符,因此没有有效的方法对此进行解码,因为所有数字都需要映射。
#
# 提示:
# 1 <= s.length <= 100
# s 只包含数字,并且可能包含前导零。
# 动态规划第一步要明确两点,「状态」和「选择」。
# 状态,就是对一个局面的描述。通过一个状态,可以定义一个子问题,而动态规划的核心就是分解为子问题。
# 选择,就是某个动作,通过一个动作,问题可以拆解为子问题
# 动态规划的框架如下:
# for 状态1 in 状态1的所有取值:
# for 状态2 in 状态2的所有取值:
# for ...
# dp[状态1][状态2][...] = 择优(选择1,选择2...)
#
# 本题中,“状态”就是带解码的字符串,
# 至于选择,对于每个字符串的最后一个字符,可以选择自成一体,或者选择与它前面的字符合体。
# 使用 dp[i] = x 表示 s[:i] 最多有 x 中解码方式。
# 对于 s[:i] 的最后一个字符 s[i-1],有如下几种情况
# 1. s[i-1] 自称一体,前提是 1 <= int(s[i-1]) <= 9,则 dp[i] = dp[i-1]
# 2. s[i-1] 和 s[i-2] 合体,前提是 s[i-2] != '0' 并且 1 <= int(s[i-2]) * 10 + int(s[i-1]) <= 26,则 dp[i] = dp[i-2]
# 两者之和就是最终 dp[i] 的值
# base case: dp[0] = 1, 表示空字符串也算是一种解码方法
# 另外由于 dp[i] 只依赖于 dp[i-1] 和 dp[i-2],所以可以压缩 dp 数组,只用 3 个变量即可
class Solution:
def numDecodings(self, s: str) -> int:
dp = [0 for _ in range(len(s)+1)]
dp[0] = 1
for i in range(1, len(s)+1):
x = 0
if 1 <= int(s[i-1]) <= 9:
x = dp[i-1]
if s[i-2] != '0' and 1 <= int(s[i-2])*10 + int(s[i-1]) <= 26:
x += dp[i-2]
dp[i] = x
return dp[len(s)]
if __name__ == '__main__':
    # Quick manual check: "12" decodes as "AB" or "L" -> expect 2.
    sample = '12'
    print(Solution().numDecodings(sample))
|
[
"jh.gao@ucloud.cn"
] |
jh.gao@ucloud.cn
|
b57deb3a8dace434bd99d855347a2ca3f1cf04e0
|
f714430490229ce0e8d5e160fdb3bfbc041173e3
|
/migrations/versions/51f1ee7915bf_migrate.py
|
9b239b5d4641e189342b12852593130894f562c4
|
[] |
no_license
|
HEW2meiG/HEW2
|
717fa1fae135b20617c53727005c6940b401b0f8
|
f8626b8edd2d4b0f8fc915acd45062a02399ef40
|
refs/heads/master
| 2023-03-14T13:54:22.187884
| 2021-03-12T16:50:39
| 2021-03-12T16:50:39
| 285,750,649
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,528
|
py
|
"""migrate
Revision ID: 51f1ee7915bf
Revises:
Create Date: 2021-02-04 00:17:37.826629
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '51f1ee7915bf'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the application schema (marketplace: users, listings, purchases,
    messaging, ratings) and drop the legacy 'sessions' table.

    Auto-generated by Alembic; table order matters because of the foreign-key
    references between tables.  Every table carries create_at/update_at with a
    CHECK that update_at never precedes create_at.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Snapshot of the card used for a purchase (kept separate from Credit so
    # later edits to a user's card do not rewrite purchase history).
    op.create_table('BuyCredit',
    sa.Column('BuyCredit_id', sa.Integer(), nullable=False),
    sa.Column('credit_name', sa.String(length=255), nullable=False),
    sa.Column('credit_num', sa.Integer(), nullable=False),
    sa.Column('expire', sa.Date(), nullable=False),
    sa.Column('security_code_hash', sa.String(length=255), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.PrimaryKeyConstraint('BuyCredit_id')
    )
    # Snapshot of the shipping address used for a purchase.
    op.create_table('BuyShippingAddress',
    sa.Column('BuyShippingAddress_id', sa.Integer(), nullable=False),
    sa.Column('last_name', sa.String(length=255), nullable=False),
    sa.Column('first_name', sa.String(length=255), nullable=False),
    sa.Column('last_name_kana', sa.String(length=255), nullable=False),
    sa.Column('first_name_kana', sa.String(length=255), nullable=False),
    sa.Column('zip_code', sa.Integer(), nullable=False),
    sa.Column('prefecture', sa.String(length=64), nullable=False),
    sa.Column('address1', sa.String(length=255), nullable=False),
    sa.Column('address2', sa.String(length=255), nullable=False),
    sa.Column('address3', sa.String(length=255), nullable=True),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.PrimaryKeyConstraint('BuyShippingAddress_id')
    )
    op.create_table('Credit',
    sa.Column('Credit_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('credit_name', sa.String(length=255), nullable=False),
    sa.Column('credit_num', sa.Integer(), nullable=False),
    sa.Column('expire', sa.Date(), nullable=False),
    sa.Column('security_code_hash', sa.String(length=255), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('Credit_id')
    )
    op.create_table('ShippingAddress',
    sa.Column('ShippingAddress_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('last_name', sa.String(length=255), nullable=False),
    sa.Column('first_name', sa.String(length=255), nullable=False),
    sa.Column('last_name_kana', sa.String(length=255), nullable=False),
    sa.Column('first_name_kana', sa.String(length=255), nullable=False),
    sa.Column('zip_code', sa.Integer(), nullable=False),
    sa.Column('prefecture', sa.String(length=64), nullable=False),
    sa.Column('address1', sa.String(length=255), nullable=False),
    sa.Column('address2', sa.String(length=255), nullable=False),
    sa.Column('address3', sa.String(length=255), nullable=True),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('ShippingAddress_id')
    )
    # NOTE: User references Credit/ShippingAddress (defaults) while those
    # tables also reference User — a circular FK pair the backend must allow.
    op.create_table('User',
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('user_code', sa.String(length=64), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=False),
    sa.Column('password_hash', sa.String(length=128), nullable=False),
    sa.Column('picture_path', sa.Text(), nullable=False),
    sa.Column('prof_comment', sa.Text(), nullable=True),
    sa.Column('default_ShippingAddress_id', sa.Integer(), nullable=True),
    sa.Column('default_pay_way', sa.Integer(), nullable=False),
    sa.Column('default_Credit_id', sa.Integer(), nullable=True),
    sa.Column('is_active', sa.Boolean(), nullable=True),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['default_Credit_id'], ['Credit.Credit_id'], ),
    sa.ForeignKeyConstraint(['default_ShippingAddress_id'], ['ShippingAddress.ShippingAddress_id'], ),
    sa.PrimaryKeyConstraint('User_id')
    )
    op.create_index(op.f('ix_User_email'), 'User', ['email'], unique=True)
    op.create_index(op.f('ix_User_user_code'), 'User', ['user_code'], unique=True)
    op.create_index(op.f('ix_User_username'), 'User', ['username'], unique=False)
    # Pre-registration token (email confirmation before a User row exists).
    op.create_table('UserTempToken',
    sa.Column('UserTempTokenToken_id', sa.Integer(), nullable=False),
    sa.Column('token', sa.String(length=64), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=False),
    sa.Column('expire_at', sa.DateTime(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.PrimaryKeyConstraint('UserTempTokenToken_id'),
    sa.UniqueConstraint('email')
    )
    op.create_index(op.f('ix_UserTempToken_token'), 'UserTempToken', ['token'], unique=True)
    op.create_table('Address',
    sa.Column('Address_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('zip_code', sa.Integer(), nullable=False),
    sa.Column('prefecture', sa.String(length=64), nullable=False),
    sa.Column('address1', sa.String(length=255), nullable=False),
    sa.Column('address2', sa.String(length=255), nullable=False),
    sa.Column('address3', sa.String(length=255), nullable=True),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('Address_id')
    )
    op.create_table('MailResetToken',
    sa.Column('MailResetToken_id', sa.Integer(), nullable=False),
    sa.Column('token', sa.String(length=64), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=False),
    sa.Column('expire_at', sa.DateTime(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('MailResetToken_id'),
    sa.UniqueConstraint('email')
    )
    op.create_index(op.f('ix_MailResetToken_token'), 'MailResetToken', ['token'], unique=True)
    op.create_table('PasswordResetToken',
    sa.Column('PasswordResetToken_id', sa.Integer(), nullable=False),
    sa.Column('token', sa.String(length=64), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('expire_at', sa.DateTime(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('PasswordResetToken_id')
    )
    op.create_index(op.f('ix_PasswordResetToken_token'), 'PasswordResetToken', ['token'], unique=True)
    # A listing (item for sale) plus its deal-progress flags.
    op.create_table('Sell',
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('sell_title', sa.String(length=255), nullable=False),
    sa.Column('key1', sa.String(length=255), nullable=False),
    sa.Column('key2', sa.String(length=255), nullable=False),
    sa.Column('key3', sa.String(length=255), nullable=False),
    sa.Column('sell_comment', sa.Text(), nullable=False),
    sa.Column('price', sa.Integer(), nullable=False),
    sa.Column('item_picture_path', sa.Text(), nullable=False),
    sa.Column('genre', sa.Integer(), nullable=False),
    sa.Column('item_state', sa.Integer(), nullable=False),
    sa.Column('postage', sa.Integer(), nullable=False),
    sa.Column('send_way', sa.Integer(), nullable=False),
    sa.Column('consignor', sa.String(length=64), nullable=False),
    sa.Column('schedule', sa.Integer(), nullable=False),
    sa.Column('remarks', sa.Text(), nullable=True),
    sa.Column('deal_status', sa.Integer(), nullable=False),
    sa.Column('sell_flg', sa.Boolean(), nullable=False),
    sa.Column('is_active', sa.Boolean(), nullable=False),
    sa.Column('has_sent', sa.Boolean(), nullable=False),
    sa.Column('has_got', sa.Boolean(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('Sell_id')
    )
    # Follow relation between users (from_user follows to_user).
    op.create_table('UserConnect',
    sa.Column('UserConnect_id', sa.Integer(), nullable=False),
    sa.Column('to_user_id', sa.Integer(), nullable=False),
    sa.Column('from_user_id', sa.Integer(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
    sa.ForeignKeyConstraint(['to_user_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('UserConnect_id')
    )
    op.create_table('UserInfo',
    sa.Column('UserInfo_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('last_name', sa.String(length=255), nullable=False),
    sa.Column('first_name', sa.String(length=255), nullable=False),
    sa.Column('last_name_kana', sa.String(length=255), nullable=False),
    sa.Column('first_name_kana', sa.String(length=255), nullable=False),
    sa.Column('birth', sa.Date(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('UserInfo_id')
    )
    op.create_table('BrowsingHistory',
    sa.Column('BrowsingHistory_id', sa.Integer(), nullable=False),
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('BrowsingHistory_id')
    )
    # A purchase; points at the snapshot tables (BuyCredit/BuyShippingAddress).
    op.create_table('Buy',
    sa.Column('Buy_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('pay_way', sa.Integer(), nullable=False),
    sa.Column('Credit_id', sa.Integer(), nullable=False),
    sa.Column('ShippingAddress_id', sa.Integer(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['Credit_id'], ['BuyCredit.BuyCredit_id'], ),
    sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
    sa.ForeignKeyConstraint(['ShippingAddress_id'], ['BuyShippingAddress.BuyShippingAddress_id'], ),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('Buy_id')
    )
    # Buyer/seller chat attached to a listing after purchase.
    op.create_table('DealMessage',
    sa.Column('DealMessage_id', sa.Integer(), nullable=False),
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('to_user_id', sa.Integer(), nullable=False),
    sa.Column('from_user_id', sa.Integer(), nullable=False),
    sa.Column('message', sa.Text(), nullable=False),
    sa.Column('is_read', sa.Boolean(), nullable=False),
    sa.Column('is_checked', sa.Boolean(), nullable=False),
    sa.Column('is_active', sa.Boolean(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
    sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
    sa.ForeignKeyConstraint(['to_user_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('DealMessage_id')
    )
    # Likes use a composite PK (one like per user per listing).
    op.create_table('Likes',
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('User_id', sa.Integer(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
    sa.ForeignKeyConstraint(['User_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('Sell_id', 'User_id')
    )
    op.create_table('PostMessage',
    sa.Column('PostMessage_id', sa.Integer(), nullable=False),
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('from_user_id', sa.Integer(), nullable=False),
    sa.Column('message', sa.Text(), nullable=False),
    sa.Column('is_read', sa.Boolean(), nullable=False),
    sa.Column('is_active', sa.Boolean(), nullable=False),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
    sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('PostMessage_id')
    )
    op.create_table('Rating',
    sa.Column('Rating_id', sa.Integer(), nullable=False),
    sa.Column('Sell_id', sa.Integer(), nullable=False),
    sa.Column('to_user_id', sa.Integer(), nullable=False),
    sa.Column('from_user_id', sa.Integer(), nullable=False),
    sa.Column('rating', sa.Integer(), nullable=False),
    sa.Column('rating_message', sa.Text(), nullable=True),
    sa.Column('create_at', sa.DateTime(), nullable=False),
    sa.Column('update_at', sa.DateTime(), nullable=False),
    sa.CheckConstraint('update_at >= create_at'),
    sa.ForeignKeyConstraint(['Sell_id'], ['Sell.Sell_id'], ),
    sa.ForeignKeyConstraint(['from_user_id'], ['User.User_id'], ),
    sa.ForeignKeyConstraint(['to_user_id'], ['User.User_id'], ),
    sa.PrimaryKeyConstraint('Rating_id')
    )
    # Legacy table replaced by this schema.
    op.drop_table('sessions')
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): recreate the legacy 'sessions' table, then drop
    indexes and tables in reverse dependency order so no FK is violated.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('sessions',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('session_id', sa.VARCHAR(length=255), nullable=True),
    sa.Column('data', sa.TEXT(), nullable=True),
    sa.Column('expiry', sa.DATETIME(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('session_id')
    )
    op.drop_table('Rating')
    op.drop_table('PostMessage')
    op.drop_table('Likes')
    op.drop_table('DealMessage')
    op.drop_table('Buy')
    op.drop_table('BrowsingHistory')
    op.drop_table('UserInfo')
    op.drop_table('UserConnect')
    op.drop_table('Sell')
    op.drop_index(op.f('ix_PasswordResetToken_token'), table_name='PasswordResetToken')
    op.drop_table('PasswordResetToken')
    op.drop_index(op.f('ix_MailResetToken_token'), table_name='MailResetToken')
    op.drop_table('MailResetToken')
    op.drop_table('Address')
    op.drop_index(op.f('ix_UserTempToken_token'), table_name='UserTempToken')
    op.drop_table('UserTempToken')
    op.drop_index(op.f('ix_User_username'), table_name='User')
    op.drop_index(op.f('ix_User_user_code'), table_name='User')
    op.drop_index(op.f('ix_User_email'), table_name='User')
    op.drop_table('User')
    op.drop_table('ShippingAddress')
    op.drop_table('Credit')
    op.drop_table('BuyShippingAddress')
    op.drop_table('BuyCredit')
    # ### end Alembic commands ###
|
[
"mei.shimomura@icloud.com"
] |
mei.shimomura@icloud.com
|
7d121df9ea5860e1d137894783587cac87de54f9
|
0f4e610ca8a0be43674abe2c88c53af4eb5bd834
|
/codility/easy/1_MaxProductOfThree/dosun.py
|
d24e8b3bd3ae87e64c8b835f58e01391f70ffc5a
|
[] |
no_license
|
Jungeol/algorithm
|
6dde6f736159905dc3d7d88005f2b515dcd1b52d
|
459caa33681fe67801f0fac01f7de82456529ab1
|
refs/heads/master
| 2020-09-21T01:17:16.589098
| 2020-05-22T09:27:59
| 2020-05-22T09:27:59
| 224,638,291
| 2
| 0
| null | 2020-05-22T09:28:00
| 2019-11-28T11:27:35
|
Python
|
UTF-8
|
Python
| false
| false
| 365
|
py
|
"""https://app.codility.com/programmers/lessons/6-sorting/max_product_of_three/
Task Score :100%
Correctness : 100%
Performance : 100%
result: https://app.codility.com/demo/results/trainingBNAHGU-WCZ/
"""
def solution(A):
    """Maximum product of any three elements of A (Codility MaxProductOfThree).

    After an in-place sort the answer is either the product of the three
    largest values, or the two smallest (possibly both negative) times the
    largest.  Note: sorts A in place, like the original.
    """
    A.sort()
    with_two_smallest = A[0] * A[1] * A[-1]
    with_three_largest = A[-3] * A[-2] * A[-1]
    return max(with_two_smallest, with_three_largest)
|
[
"noreply@github.com"
] |
Jungeol.noreply@github.com
|
c4a1df2d9ae8ee97feb1e460d630361ef6d293ba
|
6c3dd7bbac078d9a83554333f9a3f880006f6caa
|
/src/ec2/ec2.py
|
44208cc321beda870456ff497fbdb167c7e27775
|
[] |
no_license
|
syck40/boto
|
2ceefb61d2ab2cc3ab42de6783828359cc30f550
|
dca6543400a02633f849ffc545ef0c2cc3c71a51
|
refs/heads/master
| 2020-05-03T12:36:00.456702
| 2019-03-31T06:59:57
| 2019-03-31T06:59:57
| 178,630,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
class EC2:
    """Minimal convenience wrapper around an EC2 API client."""

    def __init__(self, client):
        # The wrapped client performs all actual AWS calls.
        self._client = client
        """:type:pyboto3.ec2"""

    def create_key_pair(self, key_name):
        """Create an EC2 key pair named *key_name*; return the raw API response."""
        print('Creating key pair with name ' + key_name)
        return self._client.create_key_pair(KeyName=key_name)
|
[
"syck40@gmail.com"
] |
syck40@gmail.com
|
b11b2e7f23d825eb1fda17d1546294cfbf352e88
|
515870d521b3b3f8f8f4b2aebee593670b02e708
|
/src/Gon/realtime_starter_redis_queue.py
|
584c2277a120b800a36d7b503279b6c1219ba035
|
[
"MIT"
] |
permissive
|
jsyzc2019/Listed-company-news-crawl-and-text-analysis
|
2d806e8b3dfb2df97cd70908a365efc3e6b9ca1e
|
a5fb02dbfe2869b4016da06a3a15dd16171b6031
|
refs/heads/master
| 2023-07-07T19:12:46.259018
| 2023-01-13T16:03:48
| 2023-01-13T16:03:48
| 260,937,347
| 0
| 0
|
MIT
| 2020-05-03T14:09:11
| 2020-05-03T14:09:11
| null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
import __init__  # project bootstrap module (sets up sys.path / package init)
import redis
from Kite import config
from Killua.buildstocknewsdb import GenStockNewsDB
# Register this program as "opened" in the shared Redis bookkeeping list so
# other components can see which scripts are running.
redis_client = redis.StrictRedis(config.REDIS_IP,
                                 port=config.REDIS_PORT,
                                 db=config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_DB_ID)
redis_client.lpush(config.CACHE_RECORED_OPENED_PYTHON_PROGRAM_VAR, "realtime_starter_redis_queue.py")
# Block forever, consuming work items from the Redis queue.
gen_stock_news_db = GenStockNewsDB()
gen_stock_news_db.listen_redis_queue()
|
[
"bingzhenli@hotmail.com"
] |
bingzhenli@hotmail.com
|
6ffabdb437b2f0229262f2a7b57b5eb2b66df757
|
beb12cce69e21804a9ec4d64062bf6bb062261aa
|
/bin/EAFP.py
|
74646c34e932b3821298f5c393f4bebacf076c1c
|
[] |
no_license
|
voyeg3r/dotfaster
|
f7a0cad32ea3420417cd728be24a58533cb907fa
|
90c4f1ec4471668fec1f4db755158058fb533be2
|
refs/heads/master
| 2021-01-02T22:49:47.246952
| 2018-06-02T20:56:58
| 2018-06-02T20:56:58
| 99,405,357
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
#!/usr/bin/env python3
# # -*- coding: UTF-8 -*-"
# ------------------------------------------------
# Creation Date: 23-03-2017
# Last Change: ter 29 nov 2016 09:21:52 BRT
# File: EAFP.py
# author: sergio luiz araujo silva
# site: http://vivaotux.blogspot.com
# twitter: @voyeg3r
# ------------------------------------------------
'''
This script attempts to show the concept of:
It is easyer to ask forgiveness than permission
'''
person = {'name': 'Jess', 'age': 23, 'job': 'Programmer'}
try:
    # EAFP: just attempt the dict-driven format instead of pre-checking keys.
    message = "I'm {name}. I'm {age} years old and I'm {job}".format(**person)
except KeyError as missing:
    print(f"Missing {missing} key")
else:
    print(message)
|
[
"voyeg3r@gmail.com"
] |
voyeg3r@gmail.com
|
3fcfb778b0855ff4cb8210f9e3e4818cf4cd7f03
|
c5b5a2375f83fa61a734aa4a87732d092108b1b8
|
/GaulToMosaic.py
|
a434e4ba5b59ff5fdceffe5573615da14d771271
|
[] |
no_license
|
Obywatelecki/ArcPy_scripts
|
3a0225834ee6df9f3b2746a86f6fe68277933cc8
|
81d6432f8cfcd866c078e7f0e0541efb13bb04d6
|
refs/heads/master
| 2021-01-24T20:48:02.941389
| 2018-07-24T19:51:19
| 2018-07-24T19:51:19
| 123,260,446
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
import time
print "Importing Arcpy...." + str(time.ctime())
import arcpy
print " Arcpy imported! " + str(time.ctime())
print "Setting local variables" + str(time.ctime())
arcpy.env.workspace = "D:/GD/IHPAN/Gaul/_Mapy/_metaarkusze/data.gdb"
# mxd = arcpy.mapping.MapDocument("D:/GD/WGiSR/_Konferencje/Plener 2018/heatMap/HeatMap.mxd")
# df = arcpy.mapping.ListDataFrames(mxd)[0]
print " Local variables set!" + str(time.ctime())
print "Clipping..." + str(time.ctime())
arcpy.Clip_management(
r"GAUL_RASTER\Babimost_A2_B2_meta.tif",
"265690.022579334 444111.323305845 333117.820225502 527358.613670745",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Babimost_clip",
r"GAUL_MASKS\POWIAT_Babimost",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Poznan_A1-B2_meta.tif",
"299400.899102051 470779.676501803 382321.502278291 540453.896805332",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Poznan_clip",
r"GAUL_MASKS\POWIAT_Poznań",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Srem_A2-B2_meta.tif",
"335720.040082338 441921.717819948 400351.860474886 515204.67834739",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Srem_clip",
r"GAUL_MASKS\POWIAT_Śrem",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Miedzyrzecz_A2-B2_meta.tif",
"231042.34059775 485283.89837235 332281.278737942 559072.743229139",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Miedzyrzecz_clip",
r"GAUL_MASKS\POWIAT_Międzyrzecz",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Wschowa_A2-B2_meta.tif",
"277331.797332692 411648.690308725 359810.429110255 482980.143615188",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Wschowa_clip",
r"GAUL_MASKS\POWIAT_Wschowa",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Krobia_A1_meta.tif",
"325559.668889663 387037.86742851 395016.309742185 470321.802898691",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Krobia_clip",
r"GAUL_MASKS\POWIAT_Krobia",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Oborniki_A1-B2_meta.tif",
"289538.110717687 498943.938028237 379936.142480935 573069.735483128",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Oborniki_clip",
r"GAUL_MASKS\POWIAT_Oborniki",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
arcpy.Clip_management(
r"GAUL_RASTER\Koscian_A2-B2_meta.tif",
"302944.357398094 432303.434413203 369814.26984427 507153.17713879",
"D:\GD\IHPAN\Gaul\_Mapy\_metaarkusze\data.gdb\Koscian_clip",
r"GAUL_MASKS\POWIAT_Kościan",
256,
"ClippingGeometry",
"MAINTAIN_EXTENT")
print " Clipped!" + str(time.ctime())
print "Mosaicking rasters...." + str(time.ctime())
arcpy.MosaicToNewRaster_management(
"Babimost_clip; Koscian_clip; Oborniki_clip; Krobia_Clip; Wschowa_clip; Miedzyrzecz_clip; Srem_clip; Poznan_clip",
r"D:/GD/IHPAN/Gaul/_Mapy/_metaarkusze/data.gdb",
"GAUL_mosaicked",
"",
"8_BIT_UNSIGNED",
"",
3,
"FIRST",
"FIRST"
)
print " Rasters mosaicked!" + str(time.ctime())
|
[
"tpanecki@gmail.com"
] |
tpanecki@gmail.com
|
9a518550ecc9610bfeed5e94cc14082c1480cbad
|
526176649fc3d37c87c06626a2e8fcb1cc840bf0
|
/sqlite_db/db6.py
|
8717163a9439512d44881c22e9bb759d7bff7640
|
[] |
no_license
|
rames4498/Bootcamps_and_workshops
|
cd193bb302f4b2ed9037750b07e35f6875415476
|
402ef143be7a52ae71e08cdf8b7f0ff35d502455
|
refs/heads/master
| 2022-09-22T04:49:10.657585
| 2022-09-13T07:06:36
| 2022-09-13T07:06:36
| 239,116,561
| 9
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import sqlite3

# Create (or open) the database file and make sure the SCHOOL table exists.
conn = sqlite3.connect('my_data.sqlite')
cursor = conn.cursor()
print("Opened database successfully")
# IF NOT EXISTS makes the script safe to run more than once
# (the original raised OperationalError on the second run).
cursor.execute('''CREATE TABLE IF NOT EXISTS SCHOOL
(ID INT PRIMARY KEY NOT NULL,
NAME TEXT NOT NULL,
AGE INT NOT NULL,
ADDRESS CHAR(50),
MARKS INT);''')
cursor.close()
conn.commit()  # persist any pending transaction (harmless for pure DDL)
conn.close()   # fix: the original leaked the connection / file handle
|
[
"noreply@github.com"
] |
rames4498.noreply@github.com
|
0876651216fe8d66b6ac1486bdb463a7eb6bcf0b
|
b37b62a73a14ed3904ffed1db99dafe01bc9eca3
|
/app/list/models.py
|
3c3e2f812571158f337b54618fddebb78ef4c17e
|
[] |
no_license
|
gambler1541/django-pagination
|
d340d7ce3186f801ce1cf4aadb59ee77bd52e9d6
|
44c32be793c0bd2332f29ba5422205ccf0c2d2b8
|
refs/heads/master
| 2020-04-16T22:56:16.565405
| 2019-01-16T06:59:51
| 2019-01-16T06:59:51
| 165,990,830
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.db import models
from django.views.generic import ListView
class Constacts(models.Model):
    """Minimal text-only record (used to demo list views / pagination).

    NOTE(review): the name looks like a typo for "Contacts"; renaming would
    require a schema migration, so it is only flagged here.
    """
    text = models.TextField(default='')
|
[
"gambler1541@gmail.com"
] |
gambler1541@gmail.com
|
1f18c643dafb612801fe04bca072bfe0dace75d7
|
4a7705fb9b16d03377600f49770ae31b2c7358a5
|
/day9/gpzdsy股票最大收益2.py
|
a0c58c7282884d90b4b718cebb850ea29e7e0aee
|
[] |
no_license
|
dsgdtc/everything_arithmetic
|
600e5c4f8e95331689b73b27ee01432f196457ae
|
4b2d490c03467b7fa6cba36f9e27cf60bfce396c
|
refs/heads/master
| 2020-03-08T13:43:16.537525
| 2018-04-05T14:17:48
| 2018-04-05T14:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,692
|
py
|
# -*- coding: utf-8 -*-
"""
给定数组A,其中A[i]表示某股票第i天的价格。
如果允许最多进行K次交易(K是已经给定的定值),
请计算何时买卖达到最大收益,返回最大收益值。
规定 不能嵌套买卖 Z只能是买卖-买卖-买卖......
eg
[7,1,5,3,6,4]最大收益值为5-1=4,6-3=3,4+3 = 7
算法:
dp[k][i] 表示最多k次交易在第i天的最大收益
在第i天,有两种选择,要么卖出股票,要么不卖出股票,从而得到最大收益
dp[k][i] = max { dp[k][i-1] 不卖出 }
{ dp[k-1][j] + prices[i] - prices[j] , j属于[0,i-1] }
"""
__author__ = 'guyu'
def max_profit(A, size, K):
    """Return the maximum profit from at most K non-overlapping buy/sell trades.

    A    -- list of daily prices (NOT modified; the original mutated it in place)
    size -- number of days, len(A)
    K    -- maximum number of transactions allowed

    dp[k][i] = best profit using at most k trades over the first i days:
        dp[k][i] = max(dp[k][i-1],                              # no sale on day i
                       max_{1<=j<=i} dp[k-1][j] + price[i] - price[j])
    Runs in O(K * size^2) time.
    """
    if size <= 0 or K <= 0:
        return 0
    # price[1..size] holds the prices so "day i" maps to price[i]; building a
    # new list avoids the original's A.insert(0, None) side effect on the caller.
    price = [None] + list(A)
    dp = [[0] * (size + 1) for _ in range(K + 1)]
    for k in range(1, K + 1):
        for i in range(1, size + 1):
            dp[k][i] = dp[k][i - 1]  # option 1: do not sell on day i
            for j in range(1, i + 1):
                # option 2: buy on day j, sell on day i, plus the best of
                # at most k-1 earlier trades ending by day j
                dp[k][i] = max(dp[k][i], dp[k - 1][j] + (price[i] - price[j]))
    # Bug fix: the answer spans all `size` days, i.e. dp[K][size]
    # (the original returned dp[K][size-1], silently ignoring the last day).
    return dp[K][size]
if __name__ == "__main__":
    # Demo run on the example from the module docstring.
    prices = [7, 1, 5, 3, 6, 4]
    print(max_profit(prices, len(prices), 3))
|
[
"dsgdtc@163.com"
] |
dsgdtc@163.com
|
fb20a737b4b3bc2e0a86a1ea9b5a7945456c6851
|
dacdebab897f9287f37a2e85c5705a926ddd36aa
|
/tests/test_issue930/Snakefile
|
06cbf60fd181788b35dd44ff28d8bc6855f13952
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
snakemake/snakemake
|
5d4528193d87786d7b372ca7653ece302ff46965
|
27b224ed12448df8aebc7d1ff8f25e3bf7622232
|
refs/heads/main
| 2023-09-02T08:37:04.323976
| 2023-08-11T10:02:34
| 2023-08-11T10:02:34
| 212,840,200
| 1,941
| 536
|
MIT
| 2023-09-11T09:51:44
| 2019-10-04T14:58:11
|
HTML
|
UTF-8
|
Python
| false
| false
| 646
|
samples = ["0","1"]
# Default target: request the final aggregated file.
rule all:
    input:
        "test.out"
# Shared reference index; not part of any group, so it runs as its own job.
rule build_index:
    output:
        "large_reference_index"
    shell:
        "touch {output}"
# Rules a and b share the "sample_group" job group, so for each sample
# they are scheduled together (the scenario exercised by issue #930).
rule a:
    output:
        "a/{sample}.out"
    group:
        "sample_group"
    shell:
        "touch {output}"
rule b:
    input:
        rules.a.output,
        rules.build_index.output
    output:
        "b/{sample}.out"
    group:
        "sample_group"
    shell:
        "touch {output}"
# Aggregates every per-sample output of a and b into the final target.
rule c:
    input:
        expand("a/{sample}.out", sample=samples),
        expand("b/{sample}.out", sample=samples)
    output:
        "test.out"
    shell:
        "touch {output}"
|
[
"johannes.koester@tu-dortmund.de"
] |
johannes.koester@tu-dortmund.de
|
|
25ed4fc80f15bd27a6243626cc74db6d6f20abe2
|
8bb3bcf914860c20fb4a7163a8e0691cd802dd65
|
/ve/unit/test_list_object.py
|
df090cc057e76b5308629ac65f3383056bb0ac50
|
[
"Apache-2.0"
] |
permissive
|
nitinm694/pyvsc
|
8586cc2497f336289fecbfeb9e6dd788f4070b60
|
612de9e6244c685a3df1972e4860abfe35b614e1
|
refs/heads/master
| 2023-07-28T01:49:10.917496
| 2021-09-12T19:06:00
| 2021-09-12T19:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,362
|
py
|
'''
Created on Jun 20, 2020
@author: ballance
'''
import vsc
from vsc_test_case import VscTestCase
from vsc.visitors.model_pretty_printer import ModelPrettyPrinter
class TestListObject(VscTestCase):
    """Unit tests for vsc.rand_list_t holding randomizable class objects."""
    def test_smoke(self):
        # Basic construction + randomize of a 10-element object list.
        @vsc.randobj
        class item_c(object):
            def __init__(self):
                self.a = vsc.rand_uint8_t()
                self.b = vsc.rand_uint8_t()
        @vsc.randobj
        class container_c(object):
            def __init__(self):
                self.l = vsc.rand_list_t(item_c())
                for i in range(10):
                    self.l.append(item_c())
        c = container_c()
        c.randomize()
        for i,it in enumerate(c.l):
            print("Item[" + str(i) + "] a=" + str(it.a) + " b=" + str(it.b))
    def test_constraints(self):
        # A foreach constraint over the list must hold for every element.
        @vsc.randobj
        class item_c(object):
            def __init__(self):
                self.a = vsc.rand_uint8_t()
                self.b = vsc.rand_uint8_t()
        @vsc.randobj
        class container_c(object):
            def __init__(self):
                self.l = vsc.rand_list_t(item_c())
                for i in range(10):
                    self.l.append(item_c())
            @vsc.constraint
            def all_eq_c(self):
                with vsc.foreach(self.l) as it:
                    it.a == it.b
        c = container_c()
        for i in range(100):
            c.randomize()
            for it in c.l:
                self.assertEqual(it.a, it.b)
    def test_init_array_block(self):
        # foreach with both index and item: constraint alternates by index parity.
        @vsc.randobj
        class item_c(object):
            def __init__(self):
                self.a = vsc.rand_uint8_t()
                self.b = vsc.rand_uint8_t()
        @vsc.randobj
        class container_c(object):
            def __init__(self):
                self.l = vsc.rand_list_t(item_c())
                for i in range(10):
                    self.l.append(item_c())
            @vsc.constraint
            def all_eq_c(self):
                with vsc.foreach(self.l, it=True,idx=True) as (idx,it):
                    with vsc.if_then((idx&1) == 0):
                        it.a < it.b
                    with vsc.else_then:
                        it.a > it.b
        c = container_c()
        for i in range(100):
            c.randomize()
            self.assertEqual(10, len(c.l))
            for i,it in enumerate(c.l):
                if (i%2) == 0:
                    self.assertLess(it.a, it.b)
                else:
                    self.assertGreater(it.a, it.b)
    def test_diff_classes(self):
        # A list may mix subclasses; each element obeys its own class constraints.
        @vsc.randobj
        class item_c(object):
            def __init__(self):
                self.a = vsc.rand_uint8_t()
                self.b = vsc.rand_uint8_t()
        @vsc.randobj
        class item_c_1(item_c):
            def __init__(self):
                super().__init__()
            @vsc.constraint
            def a_lt_b_c(self):
                self.a < self.b
        @vsc.randobj
        class item_c_2(item_c):
            def __init__(self):
                super().__init__()
            @vsc.constraint
            def a_gt_b_c(self):
                self.a > self.b
        @vsc.randobj
        class container_c(object):
            def __init__(self):
                self.l = vsc.rand_list_t(item_c())
                for i in range(10):
                    if i%2 == 0:
                        self.l.append(item_c_1())
                    else:
                        self.l.append(item_c_2())
        c = container_c()
        print("Model: " + ModelPrettyPrinter.print(c.get_model()))
        for i in range(100):
            c.randomize()
            self.assertEqual(10, len(c.l))
            for i,it in enumerate(c.l):
                if i%2 == 0:
                    self.assertLess(it.a, it.b)
                else:
                    self.assertGreater(it.a, it.b)
|
[
"matt.ballance@gmail.com"
] |
matt.ballance@gmail.com
|
1d14e48594666b2b66dcfaef63ff96ea4d743632
|
e9266d5632d8d0da25d95dc0fd912379335328e0
|
/src/plants/schemas.py
|
d54a2c19e2c5665ffbaff4e9e9c05879b3e997be
|
[] |
no_license
|
capmayer/plantas-indicadoras
|
5921df6d634afc3df2b4e94db2b95418d3787fdc
|
174a8288ff2fdd6a259a669a7ec776d3721f239b
|
refs/heads/main
| 2023-06-22T09:51:10.877814
| 2021-07-29T00:05:39
| 2021-07-29T00:05:39
| 389,454,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
from pydantic import BaseModel
class PlantBase(BaseModel):
    """Fields shared by every plant schema variant."""
    scientific_name: str
    popular_names: str
    description: str
    indicates: str
class PlantList(PlantBase):
    """List-view schema: adds the URL slug derived from the scientific name."""
    scientific_name_slug: str
class Plant(PlantList):
    """Full schema, including the database id; built from ORM objects."""
    id: int
    class Config:
        orm_mode = True  # pydantic v1: allow reading attributes from ORM models
|
[
"henriqmayer@gmail.com"
] |
henriqmayer@gmail.com
|
8083d6ab3311a0ec517636a91fd33a22445421bd
|
7fa15c4dbca224aed616e76074bf017699af00df
|
/examples/sum_client.py
|
0011bc63474cfec50e1d633ae091f99a0ddb1f0e
|
[
"Apache-2.0"
] |
permissive
|
studio-ousia/mprpc
|
cc272e650b46a21997c680cf00e5ccbc015dc709
|
6076f68a16f78e0010307344afa253e0956f2a9d
|
refs/heads/master
| 2023-01-14T02:33:22.171728
| 2022-12-27T07:13:23
| 2022-12-27T07:13:23
| 13,551,567
| 170
| 60
|
NOASSERTION
| 2023-02-18T15:15:10
| 2013-10-14T03:15:41
|
Cython
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
# -*- coding: utf-8 -*-
import gsocketpool.pool
import gevent.pool
from mprpc import RPCClient, RPCPoolClient
def call():
    # One-off RPC: open a direct connection and invoke sum(1, 2) on the server.
    client = RPCClient('127.0.0.1', 6000)
    print client.call('sum', 1, 2)
def call_using_pool():
    # Same RPC issued 10 times concurrently: connections come from a
    # gsocketpool of RPCPoolClient, driven by a gevent greenlet pool.
    options = dict(host='127.0.0.1', port=6000)
    client_pool = gsocketpool.pool.Pool(RPCPoolClient, options)
    def _call(n):
        with client_pool.connection() as client:
            return client.call('sum', 1, 2)
    glet_pool = gevent.pool.Pool(10)
    print [result for result in glet_pool.imap_unordered(_call, xrange(10))]
# Example entry point: run both variants (no __main__ guard in the original).
call()
call_using_pool()
|
[
"ikuya@ikuya.net"
] |
ikuya@ikuya.net
|
234e009d0b1fedd51b2692aa9e1401871a904c8e
|
5b05b2a15e5ad633f4f87124a5eff0d662af6e3c
|
/CONTEST-DIV2/Round 714/B/B.py
|
146cdb225283534f6605ea8c5721ab243cda0f83
|
[] |
no_license
|
CristianLazoQuispe/CODEFORCES-Contest
|
505eaf7d4dd3473a07ba828ab614f4c504fbc853
|
27f5c490e59d90437833369c32d5a8dd042b262f
|
refs/heads/main
| 2023-04-27T13:26:17.608905
| 2021-05-07T20:27:12
| 2021-05-07T20:27:12
| 355,346,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
import functools,operator
T = int(input())  # number of test cases
def solve(lista):
    """Count indices i where AND(lista[0..i]) equals AND(lista[i..n-1]).

    Same prefix/suffix bitwise-AND computation as before, but without
    functools.reduce boilerplate and — crucially — without the leftover
    debug print(idx, ida_i) that corrupted the judged output.
    """
    n = len(lista)
    # prefix[i] = AND of lista[0..i]
    prefix = [0] * n
    acc = -1  # -1 has all bits set, so it is the identity for &
    for i, v in enumerate(lista):
        acc &= v
        prefix[i] = acc
    # suffix[i] = AND of lista[i..n-1]
    suffix = [0] * n
    acc = -1
    for i in range(n - 1, -1, -1):
        acc &= lista[i]
        suffix[i] = acc
    return sum(1 for i in range(n) if prefix[i] == suffix[i])
# For each test case: read n (only consumed, length comes from the split),
# read the array, and print the answer.
for i in range(T):
    n = int(input())
    lista = list(map(int,input().split()))
    ans = solve(lista)
    print(ans)
|
[
"mecatronico.lazo@gmail.com"
] |
mecatronico.lazo@gmail.com
|
8c6f5b33b6a30d003e781cf744afdf6b61f3b51e
|
68ddfb2d1dad5399cb224a1c2a5f7cd5aa87ebf7
|
/HW04/HW04.py
|
de3167147296186ffd9e025bad32324202ad8683
|
[] |
no_license
|
VanSubstance/data_Analysis
|
d78d4724d0521cb954bcee624d646c61d37dc9a1
|
0dd6e2689c9b576dd39af1660ef70f8e13dfb2f3
|
refs/heads/master
| 2023-01-09T23:16:41.576145
| 2020-11-17T13:58:43
| 2020-11-17T13:58:43
| 294,380,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,728
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 21:21:00 2020
@author: sungh
"""
#%% Initiating
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from dateutil.parser import parse
from scipy import stats, polyval
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from sklearn.model_selection import cross_val_score as cvs
# Load the train/store tables from remote CSVs; StateHoliday is forced to str
# because it mixes "0" and letter codes.
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
                  header = 0, dtype={'StateHoliday':'str'})
store=pd.read_csv('https://drive.google.com/uc?export=download&id=1_o04Vnqzo3v-MTk20MF3OMw2QFz0Fbo0')
tgt = 'Sales'
train.columns
vals = ['Store', 'DayOfWeek', 'Date', 'Customers', 'Open', 'Promo', 'StateHoliday', 'SchoolHoliday']
#%% Conclusion
# Feature selection decided by the EDA cells below: keep Date/Customers/Open/
# DayOfWeek, recode DayOfWeek to a binary "not Sunday" flag, and split
# train/test on the 2015-01-01 date boundary.
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
#%% Cross validation -> Failed
# Grid over regularization strengths with stratified folds; the author marked
# this cell as failed and it is kept for the record.
C_s = np.logspace(-10, 0, 10)
logistic = LogisticRegression()
skf = StratifiedKFold(n_splits = 5, shuffle = True, random_state = 100)
kf = KFold(n_splits = 3, shuffle = True, random_state = 100)
Xtest[0:236380]
ytest[0:236380]
score = cvs(logistic, Xtrain, ytrain, cv = kf)
accs = []
for c in C_s:
    logistic.C = c
    temp = []
    print("C!\t")
    for Ptrain, Ptest in skf.split(Xtest, ytest):
        print("Fit!\t")
        # NOTE(review): fitting features indexed by Ptrain against labels
        # indexed by Ptest looks like a bug (mismatched fold indices) and is
        # likely part of why this cell failed — left unchanged.
        logistic.fit(Xtest[Ptrain], ytest[Ptest])
        temp.append(logistic.score(Xtest[Ptrain], ytest[Ptest]))
        print("Append!\n")
    accs.append(temp)
accs = np.array(accs)
avg = np.mean(accs, axis = 1)
C_s[np.argmax(avg)]
#%% Learning Method: Linear Regression
# Reload the raw data, repeat the preprocessing, fit an OLS model and plot
# predictions plus a fitted regression line over the Customers column.
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
                  header = 0, dtype={'StateHoliday':'str'})
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
lin1 = LinearRegression()
lin1.fit(Xtrain, ytrain)
lin1.score(Xtrain, ytrain)
y_pred = lin1.predict(Xtest)
(ytrain == lin1.predict(Xtrain))
(ytest == lin1.predict(Xtest))
# Manual R^2 from sums of squares (matches 1 - SSE/SST).
y_true = ytest
sse = sum((y_true - y_pred) ** 2)
sst = sum((y_true - np.mean(y_true)) ** 2)
ssr = sst - sse
adj_r2_02 = 1 - (sse / sst)
plt.figure(figsize = (36, 4))
plt.scatter(range(len(ytest)), ytest, marker = 'x')
plt.scatter(range(len(ytest)), y_pred, marker = 'x')
plt.figure(figsize = (12, 8))
plt.scatter(Xtest[:, 2], y_pred, marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(Xtest[:, 2], y_pred)
ry = polyval([slope, intercept], Xtest[:, 2])
plt.plot(Xtest[:, 2], ry, 'r')
#%% Logistic Regression -> Failed -> MemoryError
# Kept for the record: the author reports this cell ran out of memory.
import gc
gc.collect()
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
                  header = 0, dtype={'StateHoliday':'str'})
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
lin2 = LogisticRegression()
lin2.fit(Xtrain, ytrain)
lin2.score(Xtrain, ytrain)
# NOTE(review): predictions here come from lin1 (the linear model from the
# previous cell), not lin2 — almost certainly a copy-paste slip; unchanged.
y_pred = lin1.predict(Xtest)
(ytrain == lin2.predict(Xtrain))
(ytest == lin2.predict(Xtest))
plt.figure(figsize = (36, 4))
plt.scatter(range(len(ytest)), ytest, marker = 'x')
plt.scatter(range(len(ytest)), y_pred, marker = 'x')
plt.figure(figsize = (12, 8))
plt.scatter(Xtest[:, 0], y_pred, marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(Xtest[:, 0], y_pred)
ry = polyval([slope, intercept], Xtest[:, 0])
plt.plot(Xtest[:, 0], ry, 'r')
#%% KNeighborsRegressor
# Distance-weighted 3-NN regression on the same preprocessed features.
train=pd.read_csv('https://drive.google.com/uc?export=download&id=1KA7mKUmQv4PrF-qMFrH35LN6q_i56Bf1',
                  header = 0, dtype={'StateHoliday':'str'})
discards = ['SchoolHoliday', 'StateHoliday', 'Promo', 'Store']
selects = ['Date', 'Customers', 'Open', 'DayOfWeek']
train = train.drop(discards, axis = 1)
newDay = train['DayOfWeek'] != 7
newDay = newDay.astype(int)
train = train.drop(['DayOfWeek'], axis = 1)
train = pd.concat((train, newDay), axis = 1)
condTrain = (train['Date'] < '2015-01-01')
Xtrain = train[condTrain][selects].drop(['Date'], axis = 1).values
ytrain = train[condTrain]['Sales'].values
Xtest = train[condTrain != True][selects].drop(['Date'], axis = 1).values
ytest = train[condTrain != True]['Sales'].values
lin2 = KNeighborsRegressor(n_neighbors = 3, weights = "distance")
lin2.fit(Xtrain, ytrain)
lin2.score(Xtrain, ytrain)
y_pred = lin2.predict(Xtest)
(ytrain == lin2.predict(Xtrain))
(ytest == lin2.predict(Xtest))
plt.figure(figsize = (36, 4))
plt.scatter(range(len(ytest)), ytest, marker = 'x')
plt.scatter(range(len(ytest)), y_pred, marker = 'x')
plt.figure(figsize = (12, 8))
plt.scatter(Xtest[:, 2], y_pred, marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(Xtest[:, 2], y_pred)
ry = polyval([slope, intercept], Xtest[:, 2])
plt.plot(Xtest[:, 2], ry, 'b')
#%% Time series Analysis -> VAR
# Vector autoregression over the feature matrix; forecast 10 steps ahead.
import statsmodels.api as sm
var1 = sm.tsa.VAR(Xtrain)
result1 = var1.fit()
result1.summary()
result1.forecast(result1.model.endog[-1:], 10)
#%% Time series Analysis -> AR
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error
#%% Only the univariate case is implemented
#%% 'Date' and 'Sales'
model = AR(Xtrain)
model_fit = model.fit()
#%% Open -> Select
# Box plot of Sales grouped by the Open flag.
# NOTE(review): Xtrain was converted to a numpy array above (.values), so
# .groupby would only work if this cell ran while Xtrain was still a
# DataFrame — confirm the intended execution order.
a = []
for date, week in Xtrain.groupby('Open'):
    a.append(week['Sales'])
plt.figure()
plt.boxplot(a)
#%% Promo -> Discard
# Promo shows little usable relation to Sales, hence "Discard".
train['Promo'].unique
train.groupby('Promo')['Sales'].var()
means = train.groupby('Promo')['Sales'].mean()
std = train.groupby('Promo')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
             capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
train[['Promo', 'Sales']].corr()
plt.figure(figsize = (12, 8))
plt.scatter(train['Promo'], train['Sales'], marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(train['Promo'], train['Sales'])
ry = polyval([slope, intercept], train['Promo'])
plt.plot(train['Promo'], ry, 'r')
a = []
for date, week in Xtrain.groupby('Promo'):
    a.append(week['Sales'])
plt.figure()
plt.boxplot(a)
#%% Customers -> Select
# Customers correlates with Sales; scatter + fitted line (on DayOfWeek axis).
train[['Customers', 'Sales']].corr()
plt.figure(figsize = (12, 8))
plt.scatter(train['DayOfWeek'], train['Sales'], marker = '+')
slope, intercept, r_value, p_value, stderr = stats.linregress(train['DayOfWeek'], train['Sales'])
ry = polyval([slope, intercept], train['DayOfWeek'])
plt.plot(train['DayOfWeek'], ry, 'y')
#%% DayOfWeek -> Select
# Per-weekday Sales distribution: box plots plus mean/std bars.
test = ['DayOfWeek']
train.groupby('DayOfWeek')['Sales'].describe()
a = []
means = [0]
for date, week in Xtrain.groupby('DayOfWeek'):
    a.append(week['Sales'])
    means.append(week['Sales'].mean())
plt.figure()
plt.boxplot(a)
plt.plot(means)
plt.show()
means = train.groupby('DayOfWeek')['Sales'].mean()
std = train.groupby('DayOfWeek')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
             capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
#%% State Holiday -> Discard
means = train.groupby('StateHoliday')['Sales'].mean()
std = train.groupby('StateHoliday')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
             capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
## Execute (recode StateHoliday to a binary "not a holiday" flag)
train['StateHoliday'].unique
holiday = (train['StateHoliday'] == "0") | (train['StateHoliday'] == 0)
holiday = holiday.astype(int)
train = train.drop(['StateHoliday'], axis = 1)
train = pd.concat((train, holiday), axis = 1)
#### up to here
#%% Correlation Graph
# Heatmap of the pairwise correlation matrix of the remaining columns.
corr = train.corr()
fig=plt.figure(figsize=(12,8))
cax=plt.imshow(corr, vmin=-1, vmax=1, cmap=plt.cm.RdBu)
ax=plt.gca()
ax.set_xticks(range(len(corr)))
ax.set_yticks(range(len(corr)))
ax.set_xticklabels(corr,fontsize=10,rotation='vertical')
ax.set_yticklabels(corr,fontsize=10)
plt.colorbar(cax)
train[['StateHoliday', 'Sales']].corr()
train[train['Open'] == 1]['Sales'].describe()
train[(train['Open'] == 1) & (train['Sales'] > 8360)].count()
means = train.groupby('Open')['Sales'].mean()
std = train.groupby('Open')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
             capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
train[train['Open'] == 1]
plt.figure()
plt.boxplot(train[train['Open'] == 1]['Sales'])
#%% School Holiday -> Discard
means = train.groupby('SchoolHoliday')['Sales'].mean()
std = train.groupby('SchoolHoliday')['Sales'].std()
plt.bar(range(len(means)), means)
plt.errorbar(range(len(means)), means, yerr = std, fmt = 'o', c = 'r', ecolor = 'r',
             capthick = 2, capsize = 10)
plt.xticks(range(len(means)), means.index)
"""
plt.plot_date(train['Date'], train['Sales'])
plt.figure(figsize = (20, 1))
plt.plot(train['Date'], train['Sales'], linewidth = 1)
"""
|
[
"50601968+VanSubstance@users.noreply.github.com"
] |
50601968+VanSubstance@users.noreply.github.com
|
530d9a1a9c81e48861a573078a5fcca53d28e741
|
e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67
|
/azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/network/v2017_06_01/models/network_interface_association.py
|
56f1d3b0eda3f4acd5b0007f57df14bfd8f42f49
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-cellars
|
59051e496ed0e68d14e0d5d91367a2c92c95e1fb
|
49a477d42f081e52f4c5bdd39535156a2df52d09
|
refs/heads/master
| 2022-12-25T19:28:29.992466
| 2017-10-10T13:00:08
| 2017-10-10T13:00:08
| 96,081,471
| 3
| 1
| null | 2022-12-17T02:26:21
| 2017-07-03T07:17:34
| null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceAssociation(Model):
    """Network interface and its custom security rules.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Network interface ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules: list of :class:`SecurityRule
     <azure.mgmt.network.v2017_06_01.models.SecurityRule>`
    """

    # 'id' is read-only: msrest will not serialize it on outgoing requests.
    _validation = {
        'id': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(self, security_rules=None):
        self.id = None  # populated by the service on responses
        self.security_rules = security_rules
|
[
"Raliclo@gmail.com"
] |
Raliclo@gmail.com
|
e990c045732a8d86cc9581ce152319b250823e60
|
a6efd75e038b6d2c28fc74a34ad9454c4f70da0c
|
/resnet_3d/train_TReNDs.py
|
f554f9b8ef6ba49f0963ed509a2f7df82146a7b8
|
[] |
no_license
|
qkqkfldis1/TRENDS_kaggle
|
a5886fde100364acef50763d621a7067893326d7
|
3d0c60a42afb654dbfcbdfe69b113a636d8bb00d
|
refs/heads/main
| 2023-04-18T10:47:21.418371
| 2021-05-02T20:02:12
| 2021-05-02T20:02:12
| 363,738,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,820
|
py
|
'''
Written by SeuTao
'''
import os
import time
import numpy as np
import torch
from setting import parse_opts
from torch.utils.data import DataLoader
from datasets.TReNDs import TReNDsDataset
from model import generate_model
from tqdm import tqdm
import random
#from apex import amp, optimizers
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="4"  # pin training to physical GPU 4
#device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Seed every RNG source (python, hash, numpy, torch CPU/GPU) for reproducibility.
seed = 42
print(f'setting everything to seed {seed}')
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def metric(y_true, y_pred):
    """Normalized absolute error: mean over targets of sum|err| / sum(truth)."""
    column_abs_err = np.sum(np.abs(y_true - y_pred), axis=0)
    column_truth_sum = np.sum(y_true, axis=0)
    return np.mean(column_abs_err / column_truth_sum)
def weighted_nae(inp, targ):
    """Weighted normalized absolute error loss (age 0.3, other targets 0.175 each)."""
    weights = torch.FloatTensor([0.3, 0.175, 0.175, 0.175, 0.175])
    target_means = torch.mean(targ, axis=0)
    scaled_err = torch.matmul(torch.abs(inp - targ), weights.to(device) / target_means)
    return torch.mean(scaled_err)
def valid(data_loader, model, sets):
    """Evaluate *model* on *data_loader*; print per-target metrics, return mean loss."""
    # settings
    print("validation")
    model.eval()
    y_pred = []
    y_true = []
    loss_ave = []
    with torch.no_grad():
        for batch_data in tqdm(data_loader):
            # getting data batch
            volumes, feats, fncs, degs, label = batch_data
            if not sets.no_cuda:
                volumes = volumes.to(device)
                feats = feats.to(device)
                fncs = fncs.to(device)
                degs = degs.to(device)
                label = label.to(device)
            logits = model(volumes, feats, fncs, degs)
            # calculating loss
            loss_value = weighted_nae(logits, label)
            y_pred.append(logits.data.cpu().numpy())
            y_true.append(label.data.cpu().numpy())
            loss_ave.append(loss_value.data.cpu().numpy())
    print('valid loss', np.mean(loss_ave))
    y_pred = np.concatenate(y_pred,axis=0)
    y_true = np.concatenate(y_true,axis=0)
    # Per-target normalized absolute error combined with the competition weights.
    domain = ['age', 'domain1_var1', 'domain1_var2', 'domain2_var1', 'domain2_var2']
    w = [0.3, 0.175, 0.175, 0.175, 0.175]
    m_all = 0
    for i in range(5):
        m = metric(y_true[:,i], y_pred[:,i])
        print(domain[i],'metric:', m)
        m_all += m*w[i]
    print('all_metric:', m_all)
    model.train()  # restore training mode for the caller
    return np.mean(loss_ave)
def test(data_loader, model, sets, save_path):
    """Run inference over *data_loader*; save predictions and ids to *save_path* (.npz)."""
    print("validation")
    model.eval()
    y_pred = []
    ids_all = []
    with torch.no_grad():
        for batch_data in tqdm(data_loader):
            # getting data batch
            ids, volumes, feats, fncs, degs = batch_data
            if not sets.no_cuda:
                volumes = volumes.to(device)
                feats = feats.to(device)
                # Bug fix: the original did `fncs = feats.to(device)`,
                # overwriting the FNC tensor with the feature tensor
                # (compare valid()/train(), which move fncs correctly).
                fncs = fncs.to(device)
                degs = degs.to(device)
            logits = model(volumes, feats, fncs, degs)
            y_pred.append(logits.data.cpu().numpy())
            ids_all += ids
    y_pred = np.concatenate(y_pred, axis=0)
    np.savez_compressed(save_path,
                        y_pred = y_pred,
                        ids = ids_all)
    print(y_pred.shape)
def train(train_loader,valid_loader, model, optimizer, total_epochs, save_interval, save_folder, sets):
    """Train for total_epochs, validate each epoch, checkpoint on improved valid loss.

    Note: save_interval is accepted but unused — checkpoints are written only
    when the validation loss improves on the best seen so far.
    """
    f = open(os.path.join(save_folder,'log.txt'),'w')
    # settings
    batches_per_epoch = len(train_loader)
    print("Current setting is:")
    print(sets)
    print("\n\n")
    model.train()
    train_time_sp = time.time()
    valid_loss = 99999
    min_loss = 99999
    for epoch in range(total_epochs):
        # Exponentially decay the learning rate at the start of each epoch.
        rate = adjust_learning_rate(optimizer, epoch)
        # Training
        # log.info('lr = {}'.format(scheduler.get_lr()))
        tk0 = tqdm(train_loader, total=int(len(train_loader)))
        for batch_id, batch_data in enumerate(tk0):
            # getting data batch
            batch_id_sp = epoch * batches_per_epoch
            volumes, feats, fncs, degs, label = batch_data
            if not sets.no_cuda:
                volumes = volumes.to(device)
                feats = feats.to(device)
                fncs = fncs.to(device)
                degs = degs.to(device)
                label = label.to(device)
            optimizer.zero_grad()
            logits = model(volumes, feats, fncs, degs)
            # calculating loss
            loss = weighted_nae(logits, label)
            #with amp.scale_loss(loss, optimizer) as scaled_loss:
            #    scaled_loss.backward()
            loss.backward()
            optimizer.step()
            avg_batch_time = (time.time() - train_time_sp) / (1 + batch_id_sp)
            log_ = '{} Batch: {}-{} ({}), ' \
                   'lr = {:.5f}, ' \
                   'train loss = {:.3f}, ' \
                   'valid loss = {:.3f}, ' \
                   'avg_batch_time = {:.3f} '.format(sets.model_name, epoch, batch_id, batch_id_sp, rate, loss.item(), valid_loss, avg_batch_time)
            #print(log_)
            f.write(log_ + '\n')
            f.flush()
        # valid
        valid_loss = valid(valid_loader,model,sets)
        if valid_loss < min_loss:
            # New best validation loss: write a checkpoint named after it.
            min_loss = valid_loss
            model_save_path = '{}/epoch_{}_batch_{}_loss_{}.pth.tar'.format(save_folder, epoch, batch_id, valid_loss)
            model_save_dir = os.path.dirname(model_save_path)
            if not os.path.exists(model_save_dir):
                os.makedirs(model_save_dir)
            log_ = 'Save checkpoints: epoch = {}, batch_id = {}'.format(epoch, batch_id)
            print(log_)
            f.write(log_ + '\n')
            torch.save({'epoch': epoch,
                        'batch_id': batch_id,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict()},
                       model_save_path)
    print('Finished training')
    f.close()
import torch
import torch.nn as nn
import torch.nn.functional as F
class MishFunction(torch.autograd.Function):
    """Custom autograd implementation of Mish: x * tanh(softplus(x))."""

    @staticmethod
    def forward(ctx, x):
        # Keep the raw input; backward recomputes the activation pieces from it.
        ctx.save_for_backward(x)
        return x * torch.tanh(F.softplus(x))

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_variables
        sig = torch.sigmoid(x)
        tanh_sp = torch.tanh(F.softplus(x))
        # d/dx [x * tanh(softplus(x))] = tanh_sp + x * sigmoid(x) * (1 - tanh_sp^2)
        return grad_output * (tanh_sp + x * sig * (1 - tanh_sp * tanh_sp))
class Mish(nn.Module):
    """nn.Module wrapper around MishFunction so it can replace nn.ReLU in-place."""
    def forward(self, x):
        return MishFunction.apply(x)
def to_Mish(model):
    """Recursively replace every nn.ReLU submodule of *model* with Mish (in place)."""
    for name, module in model.named_children():
        if isinstance(module, nn.ReLU):
            setattr(model, name, Mish())
        else:
            to_Mish(module)
def adjust_learning_rate(optimizer, epoch):
    """Exponential decay: set every param group's lr to 3e-4 * 0.9**epoch.

    Returns the learning rate that was applied.  (The original docstring
    claimed "decayed by 10 every 30 epochs", which did not match the code.)
    """
    new_lr = 3e-4 * (0.9 ** epoch)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
if __name__ == '__main__':
    # Build config, model, optimizer and data loaders, then launch training.
    sets = parse_opts()
    sets.no_cuda = False
    sets.resume_path = None
    sets.pretrain_path = None
    # NOTE(review): 'prue_3dconv' looks like a typo for 'pure_3dconv'; kept
    # as-is because it names existing save folders.
    sets.model_name = r'prue_3dconv'
    sets.save_folder = r'./TReNDs/{}/' \
                       r'models_{}_{}_{}_fold_{}'.format(sets.model_name, 'resnet',sets.model_depth,sets.resnet_shortcut,sets.fold_index)
    if not os.path.exists(sets.save_folder):
        os.makedirs(sets.save_folder)
    # getting model
    torch.manual_seed(sets.manual_seed)
    model, parameters = generate_model(sets)
    model = model.to(device)
    to_Mish(model)  # swap every ReLU in the generated model for Mish
    print(model)
    print(device)
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                                 lr=3e-4,
                                 betas=(0.9, 0.999),
                                 eps=1e-08)
    #model, optimizer = amp.initialize(model, optimizer,
    #                                  opt_level='O1',
    #                                  verbosity=0
    #                                  )
    model = torch.nn.DataParallel(model).to(device)
    # train from resume
    if sets.resume_path:
        if os.path.isfile(sets.resume_path):
            print("=> loading checkpoint '{}'".format(sets.resume_path))
            checkpoint = torch.load(sets.resume_path)
            model.load_state_dict(checkpoint['state_dict'])
    # getting data
    sets.phase = 'train'
    if sets.no_cuda:
        sets.pin_memory = False
    else:
        sets.pin_memory = True
    train_dataset = TReNDsDataset(mode='train', fold_index=sets.fold_index)
    train_loader = DataLoader(train_dataset, batch_size=sets.batch_size,
                              shuffle=True, num_workers=sets.num_workers,drop_last=True)
    valid_dataset = TReNDsDataset(mode='valid', fold_index=sets.fold_index)
    valid_loader = DataLoader(valid_dataset, batch_size=sets.batch_size,
                              shuffle=False, num_workers=sets.num_workers, drop_last=False)
    # # training
    train(train_loader, valid_loader,model, optimizer,
          total_epochs=sets.n_epochs,
          save_interval=sets.save_intervals,
          save_folder=sets.save_folder, sets=sets)
    # # validate
    #valid(valid_loader, model, sets)
    # test_dataset = TReNDsDataset(mode='test', fold_index=sets.fold_index)
    # test_loader = DataLoader(test_dataset, batch_size=sets.batch_size,
    #                          shuffle=False, num_workers=sets.num_workers,
    #                          pin_memory=sets.pin_memory, drop_last=False)
    # test(test_loader, model, sets, sets.resume_path.replace('.pth.tar','.npz'))
|
[
"bshz15@gmail.com"
] |
bshz15@gmail.com
|
40704cee49a3949e9dcf543e0695bacb829c017f
|
e885c02621101ea646c9dcc3e934dd7ceaaf4f04
|
/djangocms_disqus/migrations/0001_initial.py
|
7be273f44c0b09ed5f6447a8d57db12cadbb0691
|
[
"BSD-3-Clause"
] |
permissive
|
mishbahr/djangocms-disqus
|
40421d6662ef911542287fc0c2e8b81a63e49667
|
49e75a024e2ca1c932a8b9134500c2f24137a153
|
refs/heads/master
| 2023-01-05T00:46:39.514178
| 2017-05-23T22:15:12
| 2017-05-23T22:15:12
| 42,411,019
| 21
| 5
|
BSD-3-Clause
| 2022-12-26T19:52:38
| 2015-09-13T20:07:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,804
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from connected_accounts.fields import AccountField
from ..conf import settings
class Migration(migrations.Migration):
dependencies = [
('connected_accounts', '__latest__'),
('cms', '__latest__'),
]
operations = [
migrations.CreateModel(
name='Disqus',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('shortname', models.CharField(help_text='Select a website Or register a new one on the Disqus website. https://disqus.com/admin/signup/', max_length=150, verbose_name='Shortname')),
('enable_sso', models.BooleanField(default=False, help_text='Allows users to log in to Disqus via your site.', verbose_name='Enable Single Sign-On')),
('load_event', models.CharField(default=settings.DJANGOCMS_DISQUS_LOADING_CHOICES[0][0], max_length=100, verbose_name='Load Disqus', choices=settings.DJANGOCMS_DISQUS_LOADING_CHOICES)),
('site_name', models.CharField(help_text='Used for the SSO login button.', max_length=100, verbose_name='Site Name', blank=True)),
('button_text', models.CharField(help_text='By default it will be "Load Comments..."', max_length=100, verbose_name='Button Text', blank=True)),
('account', AccountField(verbose_name='Connected Account', to='connected_accounts.Account', provider='disqus', help_text='Select a connected Disqus account or connect to a new account.')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
[
"mishbah@jp74.com"
] |
mishbah@jp74.com
|
4ceb508de96190a7e0a24c04b217aef38ed63e63
|
fb9722f0bf9556f5c04ba5c2795a7c23e7bff7ca
|
/lista.py
|
e6605f71cc764640f8d592c6ae6c6a4b54c215bb
|
[] |
no_license
|
anastasiacebotari15/List
|
d59aad164bf082537bed6f86fb3bba087e1a5e22
|
432dcd0fd6b3b0369b843da71586cd073476d770
|
refs/heads/main
| 2023-02-21T08:54:17.280665
| 2021-01-25T20:04:14
| 2021-01-25T20:04:14
| 332,862,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
x=[-1,0,-5,-7,-6,5,6,7,9,2,-3]
lista1=x
print('lista1=', lista1)
lista2=sorted(x)
print('lista2=', lista2)
x.sort(reverse=True)
lista3=x
print('lista3=', lista3)
print(len(x))
print('nr maxim=', max(x))
print('nr minim=', min(x))
x.extend([111])
print('lista4=', x)
x.insert(1,222)
x.remove(111)
print('lista5=', x)
|
[
"noreply@github.com"
] |
anastasiacebotari15.noreply@github.com
|
84bc89794412d5e88416f0917f873ba361cbb1cd
|
41f28fc3b3c7f34b879bacb2e25157b551c054bb
|
/label_studio/data_manager/functions.py
|
655b82b5cef12f9c8eed602a30de76d3a8b7085e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dolanor-galaxy/label-studio
|
cd478cb54e4948cbb5226c02e088465cdaa12a6b
|
722358a6cdfbe5a35e7b16f586675df4b598f74f
|
refs/heads/master
| 2023-08-11T08:52:52.433731
| 2021-09-30T09:52:05
| 2021-09-30T09:52:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,278
|
py
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
from collections import OrderedDict
from django.conf import settings
from rest_framework.generics import get_object_or_404
from core.utils.common import int_from_request
from data_manager.prepare_params import PrepareParams
from data_manager.models import View
from tasks.models import Task
TASKS = 'tasks:'
logger = logging.getLogger(__name__)
class DataManagerException(Exception):
pass
def get_all_columns(project, *_):
""" Make columns info for the frontend data manager
"""
result = {'columns': []}
# frontend uses MST data model, so we need two directional referencing parent <-> child
task_data_children = []
i = 0
data_types = OrderedDict()
# add data types from config again
project_data_types = project.data_types
data_types.update(project_data_types.items())
# all data types from import data
all_data_columns = project.summary.all_data_columns
if all_data_columns:
data_types.update({key: 'Unknown' for key in all_data_columns if key not in data_types})
# remove $undefined$ if there is one type at least in labeling config, because it will be resolved automatically
if len(project_data_types) > 0:
data_types.pop(settings.DATA_UNDEFINED_NAME, None)
for key, data_type in list(data_types.items()): # make data types from labeling config first
column = {
'id': key,
'title': key if key != settings.DATA_UNDEFINED_NAME else 'data',
'type': data_type if data_type in ['Image', 'Audio', 'AudioPlus', 'Unknown'] else 'String',
'target': 'tasks',
'parent': 'data',
'visibility_defaults': {
'explore': True,
'labeling': key in project_data_types or key == settings.DATA_UNDEFINED_NAME
}
}
result['columns'].append(column)
task_data_children.append(column['id'])
i += 1
# --- Data root ---
data_root = {
'id': 'data',
'title': "data",
'type': "List",
'target': 'tasks',
'children': task_data_children
}
result['columns'] += [
# --- Tasks ---
{
'id': 'id',
'title': "ID",
'type': 'Number',
'help': 'Task ID',
'target': 'tasks',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'completed_at',
'title': 'Completed',
'type': 'Datetime',
'target': 'tasks',
'help': 'Last annotation date',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'total_annotations',
'title': 'Annotations',
'type': "Number",
'target': 'tasks',
'help': 'Total annotations per task',
'visibility_defaults': {
'explore': True,
'labeling': True
}
},
{
'id': 'cancelled_annotations',
'title': "Cancelled",
'type': "Number",
'target': 'tasks',
'help': 'Total cancelled (skipped) annotations',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'total_predictions',
'title': "Predictions",
'type': "Number",
'target': 'tasks',
'help': 'Total predictions per task',
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'annotators',
'title': 'Annotated by',
'type': 'List',
'target': 'tasks',
'help': 'All users who completed the task',
'schema': {'items': project.organization.members.values_list('user__id', flat=True)},
'visibility_defaults': {
'explore': True,
'labeling': False
}
},
{
'id': 'annotations_results',
'title': "Annotation results",
'type': "String",
'target': 'tasks',
'help': 'Annotation results stacked over all annotations',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'annotations_ids',
'title': "Annotation IDs",
'type': "String",
'target': 'tasks',
'help': 'Annotation IDs stacked over all annotations',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'predictions_score',
'title': "Prediction score",
'type': "Number",
'target': 'tasks',
'help': 'Average prediction score over all task predictions',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'predictions_results',
'title': "Prediction results",
'type': "String",
'target': 'tasks',
'help': 'Prediction results stacked over all predictions',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'file_upload',
'title': "Source filename",
'type': "String",
'target': 'tasks',
'help': 'Source filename from import step',
'visibility_defaults': {
'explore': False,
'labeling': False
}
},
{
'id': 'created_at',
'title': 'Created at',
'type': 'Datetime',
'target': 'tasks',
'help': 'Task creation time',
'visibility_defaults': {
'explore': False,
'labeling': False
}
}
]
result['columns'].append(data_root)
return result
def get_prepare_params(request, project):
# use filters and selected items from view
view_id = int_from_request(request.GET, 'view_id', 0)
if view_id > 0:
view = get_object_or_404(request, View, pk=view_id)
if view.project.pk != project.pk:
raise DataManagerException('Project and View mismatch')
prepare_params = view.get_prepare_tasks_params(add_selected_items=True)
# use filters and selected items from request if it's specified
else:
selected = request.data.get('selectedItems', {"all": True, "excluded": []})
if not isinstance(selected, dict):
raise DataManagerException('selectedItems must be dict: {"all": [true|false], '
'"excluded | included": [...task_ids...]}')
filters = request.data.get('filters', None)
ordering = request.data.get('ordering', [])
prepare_params = PrepareParams(project=project.id, selectedItems=selected, data=request.data,
filters=filters, ordering=ordering)
return prepare_params
def get_prepared_queryset(request, project):
prepare_params = get_prepare_params(request, project)
queryset = Task.prepared.only_filtered(prepare_params=prepare_params)
return queryset
def evaluate_predictions(tasks):
""" Call ML backend for prediction evaluation of the task queryset
"""
if not tasks:
return
project = tasks[0].project
for ml_backend in project.ml_backends.all():
# tasks = tasks.filter(~Q(predictions__model_version=ml_backend.model_version))
ml_backend.predict_many_tasks(tasks)
def filters_ordering_selected_items_exist(data):
return data.get('filters') or data.get('ordering') or data.get('selectedItems')
|
[
"noreply@github.com"
] |
dolanor-galaxy.noreply@github.com
|
c7571fd6f80cb52e31b43fa0fa9746d3faafb0c1
|
de817cc84baa1ca5cef3ceaff56dc235b00073d9
|
/dokdo.py
|
dfbc8aad66a041c9a7e72135de16c2b1fb75b035
|
[
"MIT"
] |
permissive
|
song9446/Dokdo-HTML-template-compiler-python3
|
32023dd38f57b091a6d4a8288e07ddb8663c892e
|
2d26aa7d84c0c7606ae5140126691d6f1a6e930e
|
refs/heads/master
| 2020-04-09T01:31:23.923621
| 2018-12-01T06:36:39
| 2018-12-01T06:36:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,558
|
py
|
#!/usr/bin/python3
from string import Template
import lxml.html
from lxml import etree
import copy
import re
import os
import sass
VERSION = "0.1"
variable_pattern = re.compile("\{\{\{([^}]+)\}\}\}")
def dom2str(element):
return lxml.html.tostring(element, encoding=str)
def dom2innerstr(element):
text = lxml.html.tostring(element, encoding=str)
return text[text.find(">")+1:text.rfind("<")]
def replace(text, rule, replacer):
matches = [(match.start(), match.end(), match.groups()[0].strip()) for match in re.finditer(rule, text)]
matches.reverse()
characters = list(text)
for start, end, variable in matches:
characters[start:end] = replacer(variable)
return "".join(characters)
def compile(path, variables={}, innerhtmls=[], isroot=True, statics={}):
# 1. build tree
with open(path) as f:
text = f.read()
# 1.1. replace variable
replace(text, variable_pattern, lambda x: variables[x])
if text.strip().startswith("<!DOCTYPE") or text.strip().startswith("<html"):
roots = (lxml.html.fromstring(text),)
else:
roots = lxml.html.fragments_fromstring(text)
# 2. substract styles & statics
styles = [root for root in roots if root.tag == "style"] + \
[style.drop_tree() or style for root in roots for style in root.xpath(".//style")]
for style in styles:
if style.get("type") is "text/scss": style.text = sass.compile(string=style.text)
poststatics = [root for root in roots if root.tag == "static" and "post" in root.attrib] + \
[static.drop_tree() or static for root in roots for static in root.xpath(".//static") if "post" in static.attrib]
prestatics = [root for root in roots if root.tag == "static" and "pre" in root.attrib] + \
[static.drop_tree() or static for root in roots for static in root.xpath(".//static") if "pre" in static.attrib]
roots = list(filter(lambda x: x.tag not in ("style", "static"), roots))
if path not in statics: statics[path] = (styles, poststatics, prestatics)
# 3. replace imports
for imp in (imp for root in roots for imp in root.xpath("//import")):
ipath = os.path.join(os.path.dirname(path), imp.get("path"))
importing_roots = compile(ipath, variables=imp.attrib, innerhtmls=imp, isroot=False, statics=statics)
if len(importing_roots) == 1:
importing_roots[0].attrib.update(imp.attrib)
if imp in roots:
imp_index = roots.index(imp)
roots = list(filter(lambda x: x!=imp, roots))
for i, root in enumerate(importing_roots):
roots.insert(imp_index + i, root)
else:
imp_parent = imp.getparent()
imp_index = imp_parent.index(imp)
imp.drop_tree()
for i, root in enumerate(importing_roots):
imp_parent.insert(imp_index + i, root)
# 4. replace innerhtmls
innerhtml_map = {innerhtml.get("id", i):innerhtml for i, innerhtml in enumerate(innerhtmls)}
target_innerhtmls = [innerhtml for root in roots for innerhtml in root.xpath(".//innerhtml")]
for i, target_innerhtml in enumerate(target_innerhtmls):
id_ = target_innerhtml.get("id", i)
if id_ in innerhtml_map:
innerhtml_map[id_].attrib.update(target_innerhtml.attrib)
target_innerhtml.getparent().replace(target_innerhtml, innerhtml_map[id_])
else: target_innerhtml.drop_tree()
# 5. if this is a root: put statics and return string
if isroot:
head = roots[0].xpath("//head")[0]
body = roots[0].xpath("//body")[0]
etree.SubElement(head, "style").text = "".join((sass.compile(string=dom2innerstr(style)) if style.get("type", "text/css") == "text/scss" else dom2innerstr(style)) \
for i in statics for style in statics[i][0])
for i in statics:
for poststatic in statics[i][1]: body.append(poststatic)
for prestatic in statics[i][2]: head.append(prestatic)
return "".join(dom2str(root) for root in roots)
else: return roots
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog {}".format(VERSION))
parser.add_option("-c", "--src", dest="source",
help="source html path", metavar="SRC")
parser.add_option("-o", "--out",
action="store_false", dest="out", default="a.html",
help="destination of output", metavar="OUT")
parser.add_option("-C", "--srcdir", dest="sourcedir",
help="source dir path(it filters html files automatically)", default="src", metavar="SRCDIR")
parser.add_option("-O", "--outdir", dest="outdir", default="build",
help="out dir path", metavar="OUTDIR")
(option, tags) = parser.parse_args()
if tags:
print(compile(tags[0]))
else:
if option.source:
with open(option.out, "w") as f:
f.write(compile(tags[0]))
elif option.sourcedir:
compilables = [os.path.join(d, f) for (d, _, fs) in os.walk(option.sourcedir) for f in fs if f.endswith(".html")]
if not os.path.exists(option.outdir):
os.makedirs(option.outdir)
for source in compilables:
with open(os.path.join(option.outdir, os.path.basename(source)), "w") as f:
f.write(compile(source))
|
[
"song9446@unist.ac.kr"
] |
song9446@unist.ac.kr
|
cf5801421a18d07d16150302598a4db9e9f4d683
|
a4ed795e6aa22791a3c8f6ce931cd5ce0d8ed705
|
/testproj/settings.py
|
fa9ffeeec840bbfa1314cef360baa6cc481ffd02
|
[] |
no_license
|
ali88z/dj2020
|
28fae534079e9a38f3fc143449fff059b1642374
|
e674961c1671be450a8fc28f78396ecf0fa217fc
|
refs/heads/master
| 2022-11-29T16:47:50.080800
| 2020-07-29T14:52:42
| 2020-07-29T14:52:42
| 274,812,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,430
|
py
|
"""
Django settings for testproj project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vn0by4#ck#3fj-qlm46f!kfpr61t#3wtt(b$5o=zqn9^dicb4_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#ALLOWED_HOSTS = ['192.168.20.128','192.168.74.130','192.168.1.88']
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testModel',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testproj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR+'/templates', ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries': {'mytags': 'testproj.templatetag.mytags'},
},
},
]
WSGI_APPLICATION = 'testproj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'runoob',
'HOST': '127.0.0.1',
'PORT': 3306,
'USER': 'django',
'PASSWORD': '123456',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "statics"),
]
|
[
"zjw@mails.com"
] |
zjw@mails.com
|
7ced0a5bfb9b3e5397190462506fd668a94e38af
|
a4185782266d2e596ff264af76776b82f9a3adf8
|
/2015/17_1.py
|
9c1440f546923ffc04173e18138eb3d52c77bae3
|
[] |
no_license
|
PavithraP/advent
|
04f2cfc268e3b8c84ac26dbb9bf300036a7502e3
|
9d9247c3add95263f4db1982d1f96d9f8e8e35ca
|
refs/heads/master
| 2021-01-10T16:02:47.754326
| 2016-12-14T13:50:27
| 2016-12-14T13:50:27
| 47,602,508
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
import math
cont = [11,30,47,31,32,36,3,1,5,3,32,36,15,11,46,26,28,1,19,3]
no = 0
for i in range(int(math.pow(2,20))):
num = i
count = 0
val = 0
while(num > 0):
if num%2 == 1:
val += cont[count]
num = num / 2
count += 1
if val == 150:
no+= 1
print no
|
[
"pavithra.p@sanctumnetworks.com"
] |
pavithra.p@sanctumnetworks.com
|
ee235f82c46f75248d18f091913758a6b068b1f9
|
87b2725ccb7509cda0d4f719647192c34bbf7471
|
/HistogramPlot.py
|
e5c1ce1adf7878de50ebd4567ee1dabb94e7efd0
|
[] |
no_license
|
sumeyyeakay/CoronaVirusDataAnalysis
|
f88a5c9698cd6867059a91b5750f4bd14f414d62
|
45f4b386b95ed2143d96940e74bdc41854cba466
|
refs/heads/master
| 2022-09-09T02:19:35.034587
| 2020-06-01T15:17:18
| 2020-06-01T15:17:18
| 268,553,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 17:44:03 2020
@author: sumeyyeakay
Histogram grafikleri
"""
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv("covid_19_data.csv")
turkiye = df[df["Country/Region"] == "Turkey"]
italya = df[df["Country/Region"] == "Italy"]
ispanya = df[df["Country/Region"] == "Spain"]
plt.hist(italya.Deaths,bins=10)
plt.xlabel("Olum Sayisi")
plt.ylabel(" Kurtulan Hasta Sayisi")
plt.title("Italya Coronovirus Analizi")
plt.show()
|
[
"sumeyyeakayy@gmail.com"
] |
sumeyyeakayy@gmail.com
|
3a79fc6c3eb34308f2013497b29f90ad59a89e7b
|
fc85a54686e13e598541df14c472e8aa744e6713
|
/petisco/extra/sqlalchemy/sql/mysql/mysql_connection.py
|
ccf69974f1b0fbfe9c880d72c61912564fc1f72c
|
[
"MIT"
] |
permissive
|
alice-biometrics/petisco
|
63721751cd43e70825b161a5ece535c80d95b6fa
|
771ebe5c69dc735b8f373c2e7303d3b4eb655044
|
refs/heads/main
| 2023-09-01T03:53:23.642042
| 2023-08-25T05:38:42
| 2023-08-25T05:38:42
| 217,555,512
| 42
| 2
|
MIT
| 2023-09-12T11:06:43
| 2019-10-25T14:48:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
import os
MYSQL_DATABASE_DEFAULT = "mysql_test"
class MySqlConnection:
def __init__(
self,
server_name: str,
driver: str,
user: str,
password: str,
host: str,
port: str,
database_name: str,
url: str,
):
self.server_name = server_name
self.driver = driver
self.user = user
self.password = password
self.host = host
self.port = port
self.database_name = database_name
self.url = url
@staticmethod
def create(
server_name: str = "mysql",
driver: str = "pymysql",
user: str = "root",
password: str = "root",
host: str = "mysql",
port: str = "3306",
database_name: str = MYSQL_DATABASE_DEFAULT,
) -> "MySqlConnection":
url = (
f"{server_name}+{driver}://{user}:{password}@{host}:{port}/{database_name}"
)
return MySqlConnection(
server_name, driver, user, password, host, port, database_name, url
)
@staticmethod
def create_local(database_name: str = MYSQL_DATABASE_DEFAULT) -> "MySqlConnection":
return MySqlConnection.create(
host="localhost", port="3307", database_name=database_name
)
@staticmethod
def from_environ() -> "MySqlConnection":
return MySqlConnection.create(
"mysql",
"pymysql",
os.getenv("MYSQL_USER", "root"),
os.getenv("MYSQL_PASSWORD", "root"),
os.getenv("MYSQL_HOST", "mysql"),
os.getenv("MYSQL_PORT", "3306"),
os.getenv("MYSQL_DATABASE", MYSQL_DATABASE_DEFAULT),
)
|
[
"noreply@github.com"
] |
alice-biometrics.noreply@github.com
|
a3111a79779a9ea0cab3118b5d7b33943dbded16
|
98fe6781483ec7ff2a8016916edb2611d5c2e64c
|
/other/text_analysis_report.py
|
9a852707872523ccce57b5824953e76709b213d4
|
[] |
no_license
|
catris25/review_rating_prediction
|
124262d3baed594d812cb1459c3b95cb6a718312
|
fc296a58e39943d2021263e456dbfdd8b972308a
|
refs/heads/master
| 2021-01-16T17:49:47.367954
| 2018-08-14T05:35:44
| 2018-08-14T05:35:44
| 100,015,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
import numpy as np
import pandas as pd
import re, math
from collections import Counter
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize, word_tokenize
# from nltk.tokenize import RegexpTokenizer, PunktSentenceTokenizer, TweetTokenizer
# REMOVE ALL PUNCTUATIONS AND THEN TOKENIZE THE TEXT
def tokenize_df(df):
df_token = []
for review in df['reviewText']:
temp = review
sent_length = len(sent_tokenize(temp))
temp = re.sub("[^a-zA-Z']", " ", str(review))
temp = temp.replace("'", "")
temp = temp.lower()
word_length = len(word_tokenize(temp))
df_token.append({'reviewText': temp, 'word':word_length, 'sentence':sent_length})
df_token = pd.DataFrame(df_token)
return df_token
input_file='/home/lia/Documents/the_project/dataset/to_use/current/top_30.csv'
# input_file = '/home/lia/Documents/the_project/dataset/to_use/helpfulness/samples/30percent/6.csv'
df = pd.read_csv(input_file)
new_df = tokenize_df(df)
print(new_df.describe())
print(new_df.head(10))
# data = new_df['word']
#
# plt.hist(data, bins=200)
# plt.show()
# def outliers_z_score(ys):
# threshold = 3
#
# mean_y = np.mean(ys)
# stdev_y = np.std(ys)
# z_scores = [(y - mean_y) / stdev_y for y in ys]
# return np.where(np.abs(z_scores) > threshold)
#
# oz = outliers_z_score(data)
# print(oz)
# print('Number of words {}'.format (Counter(new_df['word'])))
# print('Number of sentences {}'.format (Counter(new_df['sentence'])))
# labels, values = zip(*Counter(data).items())
#
# indexes = np.arange(len(labels))
# width = 1
#
# plt.bar(indexes, values, width)
# plt.xticks(indexes + width * 0.5, labels,rotation = "vertical")
# plt.show()
# for w in new_df['word']:
# if w<=10:
# print(w)
too_long = df.loc[new_df['word'] >= 1000, 'reviewText']
too_short = df.loc[new_df['word'] <= 10, 'reviewText']
print('too long:', len(too_long))
print('too short:', len(too_short))
df['word'] = new_df['word']
del_id = too_long.index.append(too_short.index)
temp_df = df.drop(df.index[[del_id]])
print(temp_df.head(10))
#
# temp_df.to_csv('/home/lia/Documents/the_project/dataset/top_10_movies/top_10_clean.csv')
|
[
"blue.star95@outlook.com"
] |
blue.star95@outlook.com
|
0a231f8117213d6f61ad97b649f38245442e0a0c
|
afd3464dd2c290b7db5fe379d4374183ea6bd0c3
|
/catkin_ws/build/pick_objects/catkin_generated/pkg.develspace.context.pc.py
|
fd44bd91cf37e798cac9f2a7cf2459aba475bc25
|
[
"MIT"
] |
permissive
|
thatting/thomas-hatting-home-service-robot
|
7d0750367e5b5bfa48ab697a8fd7796b1338a662
|
481490eec2d61303e694593f8f018858c82eaac3
|
refs/heads/master
| 2020-03-22T18:27:40.537755
| 2019-02-14T13:13:14
| 2019-02-14T13:13:14
| 140,461,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pick_objects"
PROJECT_SPACE_DIR = "/home/nvidia/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"thomashatting@gmail.com"
] |
thomashatting@gmail.com
|
5e92281f35cff75f5d8fd68958f6faad390bb658
|
1711a28e01e40c0164be23536ff109c428f3dd8c
|
/SUMO_compound_mdtraj_analysis.py
|
6d5a65145a08e70043aae6c8b2f867f060261593
|
[] |
no_license
|
sunhuaiyu/mdtraj
|
adafd4b4408b688f23fed659e8fbaefd4ff1bd42
|
d626841025e9f9411e988cee6631edcbf171499d
|
refs/heads/master
| 2020-05-07T20:28:33.381621
| 2019-05-02T00:00:02
| 2019-05-02T00:00:02
| 180,862,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,277
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import mdtraj as md
from glob import glob
from multiprocessing import Pool
def SUMO_ligand_dist(tr):
#coordinates for the Cgamma of SUMO1_F36, SUMO2_F31, or SUMO3_F31:
select_str = '(resname==PHE and (resid==15 or resid==30 or resid==17)) and (name==CG)'
atom_ix = tr.topology.select(select_str)[0]
a = tr.xyz[:, atom_ix]
# ligand all atom coordinatess:
lig = tr.atom_slice(tr.topology.select('chainid==1'))
# ligand center of mass:
b = md.compute_center_of_mass(lig)
# distance between K37/K32_CA and ligand center of mass:
return (((a - b) ** 2).sum(1)) ** 0.5
# read trajectory file in HDF5 format (*.h5), compute SUMO_ligand_dist
def name2traj(file_name):
tr = md.load(file_name)
if tr.n_frames > 10000:
tr = tr[::10]
return tr
# given trajectory file name in HDF5 format, plot SUMO_ligand_dist
def plot_dist(traj_name):
plt.plot(SUMO_ligand_dist(name2traj(traj_name)), linewidth=1)
plt.ylim(0, 4.5)
title = traj_name.split('.')[0]
plt.title(title)
plt.savefig(title + '.jpg', dpi=600)
plt.close()
# calculate fraction of frames where the distance is less than a cut-off
compound = ['PHG00686', 'SEW05414', 'HTS12268', 'BTB13496']
compound2traj_name = {i: glob('SUMO1_2uyz_{}_F*_5000ns.h5'.format(i)) for i in compound}
traj_files = sum(list(compound2traj_name.values()))
# traj_dict contains all loaded trajectories
# dist_dict contains all calculated distances;
# accelerate calculation with multiprocessing
def D(file_name):
tr = name2traj(file_name)
d = SUMO_ligand_dist(tr)
return [tr, d]
DD = Pool(48).map(D, traj_files)
traj_dict = {i[0]:i[1][0] for i in zip(traj_files, DD)}
dist_dict = {i[0]:i[1][1] for i in zip(traj_files, DD)}
# distance (nm) threshold
T = 0.7
# calculate the fraction of trajectories with compound at SIM-binding site
for cp in compound:
all_dist = np.array([dist_dict[i] for i in compound2traj_name[cp]]).ravel()
bound_frames, total_frames = sum(all_dist < T), len(all_dist)
fraction = bound_frames/total_frames
print(cp, round(fraction, 3), total_frames//1000)
# plotting: stack all distance plot together for each compound
for cp in compound:
n = len(compound2traj_name[cp])
fig, axs = plt.subplots(nrows=n, ncols=1, sharex=True)
fig.set_figheight(n)
fig.set_figwidth(4)
axs[0].set_title(cp)
for i in np.arange(n):
dc = dist_dict['SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)]
bound = dc < T
unbound = np.invert(bound)
length = dc.shape[0]
axs[i].plot(np.arange(length)[unbound], dc[unbound],
'C1.', markersize=0.5, alpha=0.6)
axs[i].plot(np.arange(length)[bound], dc[bound],
'C0.', markersize=0.5, alpha=0.6)
axs[i].set_ylim(0, 4.5)
fig.subplots_adjust(hspace=0)
fig.savefig('SUMO1_2uyz_{}_dist_all_traj.jpg'.format(cp),
dpi=600, bbox_inches='tight')
# extract a centroid frame from each traj ending with significant binding;
# for each compound, superpose all centroids along the SIM-binding pocket
# and save as one pdb file
centroids = {cp:[] for cp in compound}
for cp in compound:
n = len(compound2traj_name[cp])
for i in np.arange(n):
file_name = 'SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)
dc = dist_dict[file_name]
bound = dc < T
if sum(bound) > 1000:
tr = traj_dict[file_name][bound]
protein_atoms = tr.topology.select('residue 32 to 56')
compound_atoms = tr.topology.select('chainid==1')
atoms_ix = np.concatenate((protein_atoms, compound_atoms))
tr.superpose(tr, frame=0, atom_indices=atoms_ix)
m = np.empty((tr.n_frames, tr.n_frames)) # rmsd matrix
for i in range(tr.n_frames):
m[i] = md.rmsd(tr, tr, i, atom_indices=atoms_ix)
#compute the centroid frame: the one closest to mean frame
centroid_ix = np.exp(-m/m.std()).sum(1).argmax()
centroids[cp].append(tr[centroid_ix])
print(file_name)
centroids_tr = md.join(centroids[cp])
centroids_tr.superpose(centroids_tr, frame=0, atom_indices=protein_atoms)
centroids_tr.save_pdb('SUMO1_2uyz_{}_bound_centroids.pdb'.format(cp))
# compute RMSD among bound_centroids
from scipy.spatial.distance import squareform
for cp in compound:
tr = md.load('SUMO1_2uyz_{}_bound_centroids.pdb'.format(cp))
m = array([md.rmsd(tr, tr, i, atom_indices=protein_atoms) for i in range(len(tr))])
m = squareform(m, checks=False)
print(cp, min(m), max(m))
# compute atomic distances
T = 0.7
tr2uyz = md.join([md.load('SUMO1_2uyz_{}_400ns.h5'.format(i+1)) for i in range(12)])
cp = 'PHG00686'
d = [dist_dict['SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)] for i in range(12)]
tr1cp = md.join([traj_dict['SUMO1_2uyz_{0}_F{1}_5000ns.h5'.format(cp, i+1)][d[i] < T] for i in range(12)])
def atom_pair_dist3(cp, pair='F36CG_R54CZ'):
top = tr2uyz[0].topology
s = pair.split('_')
pair_ix = top.select_pairs('residue=={0} and name=={1}'.format(s[0][1:3], s[0][3:]),
'residue=={0} and name=={1}'.format(s[1][1:3], s[1][3:]))
dist2uyz = md.compute_distances(tr2uyz, atom_pairs=pair_ix, periodic=False)
dist1cp = md.compute_distances(tr1cp, atom_pairs=pair_ix, periodic=False)
fig = plt.figure(figsize=(10, 4.8))
gs = GridSpec(1, 2, width_ratios=[2, 1])
ax0, ax1 = plt.subplot(gs[0]), plt.subplot(gs[1])
ax0.plot(dist2uyz, 'C1.', markersize=1)
ax0.plot(dist1cp, 'C0.', markersize=1, alpha=0.5)
ax0.tick_params(labelsize=15)
ax1.hist(dist2uyz, color='C1', bins=100, linewidth=1,
orientation='horizontal')
ax1.hist(dist1cp, color='C0', alpha=0.6, bins=100, linewidth=1,
orientation='horizontal')
ax1.tick_params(labelsize=15)
ax1.legend(['no compound', 'with {}'.format(cp)], fontsize=15, frameon=0)
fig.tight_layout()
fig.savefig('SUMO1_2uyz_{0}_dist_{1}.jpg'.format(cp, pair), dpi=600)
|
[
"noreply@github.com"
] |
sunhuaiyu.noreply@github.com
|
47b910274ca6546bd96488e2c3027896b833a188
|
7abd8bbbba8f401c4ce9d9ec550a0cae4a6f19ed
|
/bingads/v12/bulk/entities/__init__.py
|
afc5d3d8bf175347a50c466420cd874f00447f89
|
[
"MIT"
] |
permissive
|
stevenblanton/BingAds-Python-SDK
|
fd2f119db51e1a91962aa5ee4bb86344e58078a8
|
5b6e6499ae1dcc6fb8ba3032ad1a2b6ee63705c9
|
refs/heads/master
| 2020-09-05T12:11:04.168580
| 2019-11-01T15:49:08
| 2019-11-01T15:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
__author__ = 'Bing Ads SDK Team'
__email__ = 'bing_ads_sdk@microsoft.com'
from .common import *
from .bulk_error import *
from .bulk_entity import *
from .bid_suggestion_data import *
from .unknown_bulk_entity import *
from .bulk_account import *
from .bulk_budget import *
from .bulk_campaign import *
from .bulk_ad_group import *
from .bulk_keyword import *
from .bulk_campaign_product_scope import *
from .bulk_ad_group_product_partition import *
from .bulk_campaign_negative_dynamic_search_ad_target import *
from .bulk_ad_group_dynamic_search_ad_target import *
from .bulk_ad_group_negative_dynamic_search_ad_target import *
from .ad_extensions import *
from .bulk_ads import *
from .bulk_negative_keywords import *
from .bulk_negative_sites import *
from .audiences import *
from .target_criterions import *
from .labels import *
from .bulk_offline_conversion import *
from .bulk_experiment import *
|
[
"qitia@microsoft.com"
] |
qitia@microsoft.com
|
61a49f9ce140730c3fb6b664ca5ac5bc8085cfb0
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/media_file_service.py
|
d18d6a8d09b03c92f8310398e3c6a6a1be1ac137
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,355
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v6.resources.types import media_file as gagr_media_file
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.services',
marshal='google.ads.googleads.v6',
manifest={
'GetMediaFileRequest',
'MutateMediaFilesRequest',
'MediaFileOperation',
'MutateMediaFilesResponse',
'MutateMediaFileResult',
},
)
class GetMediaFileRequest(proto.Message):
r"""Request message for
[MediaFileService.GetMediaFile][google.ads.googleads.v6.services.MediaFileService.GetMediaFile]
Attributes:
resource_name (str):
Required. The resource name of the media file
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateMediaFilesRequest(proto.Message):
r"""Request message for
[MediaFileService.MutateMediaFiles][google.ads.googleads.v6.services.MediaFileService.MutateMediaFiles]
Attributes:
customer_id (str):
Required. The ID of the customer whose media
files are being modified.
operations (Sequence[google.ads.googleads.v6.services.types.MediaFileOperation]):
Required. The list of operations to perform
on individual media file.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v6.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MediaFileOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class MediaFileOperation(proto.Message):
r"""A single operation to create media file.
Attributes:
create (google.ads.googleads.v6.resources.types.MediaFile):
Create operation: No resource name is
expected for the new media file.
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_media_file.MediaFile,
)
class MutateMediaFilesResponse(proto.Message):
r"""Response message for a media file mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v6.services.types.MutateMediaFileResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateMediaFileResult',
)
class MutateMediaFileResult(proto.Message):
r"""The result for the media file mutate.
Attributes:
resource_name (str):
The resource name returned for successful
operations.
media_file (google.ads.googleads.v6.resources.types.MediaFile):
The mutated media file with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
media_file = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_media_file.MediaFile,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
4f56cee030454bf7d814b2615a38c73539bcce37
|
d186f9763a16cddc161568728827636a8b68f2f2
|
/src/grpc_service/service_pb2_grpc.py
|
37cda993f81dc828c5dfc5ef4100daddd986874b
|
[] |
no_license
|
xvicmanx/machine-learning
|
12fce38a70b88132d633f8956435d72fc3fee050
|
8389125e8a0f41c3c803bdfa94f5483ab30897d1
|
refs/heads/main
| 2023-02-11T19:35:43.298423
| 2021-01-06T12:59:29
| 2021-01-06T12:59:29
| 308,706,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,434
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import service_pb2 as service__pb2
class MachineLearningStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.PredictSalary = channel.unary_unary(
'/machine_learning.MachineLearning/PredictSalary',
request_serializer=service__pb2.PredictSalaryRequest.SerializeToString,
response_deserializer=service__pb2.PredictSalaryResponse.FromString,
)
self.PredictPurchase = channel.unary_unary(
'/machine_learning.MachineLearning/PredictPurchase',
request_serializer=service__pb2.PredictPurchaseRequest.SerializeToString,
response_deserializer=service__pb2.PredictPurchaseResponse.FromString,
)
self.PredictSegment = channel.unary_unary(
'/machine_learning.MachineLearning/PredictSegment',
request_serializer=service__pb2.PredictSegmentRequest.SerializeToString,
response_deserializer=service__pb2.PredictSegmentResponse.FromString,
)
self.GetOptimalCampaignAdOption = channel.unary_unary(
'/machine_learning.MachineLearning/GetOptimalCampaignAdOption',
request_serializer=service__pb2.GetOptimalCampaignAdOptionRequest.SerializeToString,
response_deserializer=service__pb2.GetOptimalCampaignAdOptionResponse.FromString,
)
self.PredictReviewOutcome = channel.unary_unary(
'/machine_learning.MachineLearning/PredictReviewOutcome',
request_serializer=service__pb2.PredictReviewOutcomeRequest.SerializeToString,
response_deserializer=service__pb2.PredictReviewOutcomeResponse.FromString,
)
self.PredictBankLeaving = channel.unary_unary(
'/machine_learning.MachineLearning/PredictBankLeaving',
request_serializer=service__pb2.PredictBankLeavingRequest.SerializeToString,
response_deserializer=service__pb2.PredictBankLeavingResponse.FromString,
)
self.PredictCatOrDog = channel.unary_unary(
'/machine_learning.MachineLearning/PredictCatOrDog',
request_serializer=service__pb2.PredictCatOrDogRequest.SerializeToString,
response_deserializer=service__pb2.PredictCatOrDogResponse.FromString,
)
class MachineLearningServicer(object):
"""Missing associated documentation comment in .proto file."""
def PredictSalary(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictPurchase(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictSegment(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOptimalCampaignAdOption(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictReviewOutcome(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictBankLeaving(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PredictCatOrDog(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MachineLearningServicer_to_server(servicer, server):
rpc_method_handlers = {
'PredictSalary': grpc.unary_unary_rpc_method_handler(
servicer.PredictSalary,
request_deserializer=service__pb2.PredictSalaryRequest.FromString,
response_serializer=service__pb2.PredictSalaryResponse.SerializeToString,
),
'PredictPurchase': grpc.unary_unary_rpc_method_handler(
servicer.PredictPurchase,
request_deserializer=service__pb2.PredictPurchaseRequest.FromString,
response_serializer=service__pb2.PredictPurchaseResponse.SerializeToString,
),
'PredictSegment': grpc.unary_unary_rpc_method_handler(
servicer.PredictSegment,
request_deserializer=service__pb2.PredictSegmentRequest.FromString,
response_serializer=service__pb2.PredictSegmentResponse.SerializeToString,
),
'GetOptimalCampaignAdOption': grpc.unary_unary_rpc_method_handler(
servicer.GetOptimalCampaignAdOption,
request_deserializer=service__pb2.GetOptimalCampaignAdOptionRequest.FromString,
response_serializer=service__pb2.GetOptimalCampaignAdOptionResponse.SerializeToString,
),
'PredictReviewOutcome': grpc.unary_unary_rpc_method_handler(
servicer.PredictReviewOutcome,
request_deserializer=service__pb2.PredictReviewOutcomeRequest.FromString,
response_serializer=service__pb2.PredictReviewOutcomeResponse.SerializeToString,
),
'PredictBankLeaving': grpc.unary_unary_rpc_method_handler(
servicer.PredictBankLeaving,
request_deserializer=service__pb2.PredictBankLeavingRequest.FromString,
response_serializer=service__pb2.PredictBankLeavingResponse.SerializeToString,
),
'PredictCatOrDog': grpc.unary_unary_rpc_method_handler(
servicer.PredictCatOrDog,
request_deserializer=service__pb2.PredictCatOrDogRequest.FromString,
response_serializer=service__pb2.PredictCatOrDogResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'machine_learning.MachineLearning', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MachineLearning(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def PredictSalary(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictSalary',
service__pb2.PredictSalaryRequest.SerializeToString,
service__pb2.PredictSalaryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictPurchase(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictPurchase',
service__pb2.PredictPurchaseRequest.SerializeToString,
service__pb2.PredictPurchaseResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictSegment(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictSegment',
service__pb2.PredictSegmentRequest.SerializeToString,
service__pb2.PredictSegmentResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOptimalCampaignAdOption(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/GetOptimalCampaignAdOption',
service__pb2.GetOptimalCampaignAdOptionRequest.SerializeToString,
service__pb2.GetOptimalCampaignAdOptionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictReviewOutcome(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictReviewOutcome',
service__pb2.PredictReviewOutcomeRequest.SerializeToString,
service__pb2.PredictReviewOutcomeResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictBankLeaving(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictBankLeaving',
service__pb2.PredictBankLeavingRequest.SerializeToString,
service__pb2.PredictBankLeavingResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PredictCatOrDog(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictCatOrDog',
service__pb2.PredictCatOrDogRequest.SerializeToString,
service__pb2.PredictCatOrDogResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
[
"vic3jo@gmail.com"
] |
vic3jo@gmail.com
|
b8c045ccf9fbfd0be6b2357b5c866a6f5f8c45fb
|
1426511b59ad3e00a3e037ba3377e41828ae4680
|
/ca_unemp/serializers.py
|
eab56c3ab66c86dedab623a052c2279bdcf95514
|
[] |
no_license
|
hillarykhan/ca-unemp-api
|
4776ed104a026c2d39c44dbbfca60d27f57c50a4
|
7b27c4aebdfe72bb0282fc28abb60ede9e6f0813
|
refs/heads/main
| 2023-08-24T00:58:15.603062
| 2021-10-27T04:41:13
| 2021-10-27T04:41:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from rest_framework import serializers
from .models import Unemployment
class StatSerializer(serializers.ModelSerializer):
class Meta:
model = Unemployment
fields = '__all__'
|
[
"khan.hillary@gmail.com"
] |
khan.hillary@gmail.com
|
d827d71d9c05c7c9a359841ae13e780b7c1620e1
|
0e0bd9d0082bf71918db9f6c92c2cefd32fd23bd
|
/guild/commands/runs_import.py
|
354c23dc47578e9820036cf0779f49107bcd69fb
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
christabella/guildai
|
b911d9758296503c431b571dc4696a3690f44b3d
|
10d34eb9aa02aa4a374c340e75b5d44d9f3d8a25
|
refs/heads/master
| 2022-12-17T18:34:45.766299
| 2020-08-31T12:42:25
| 2020-08-31T12:42:25
| 294,189,964
| 0
| 0
|
Apache-2.0
| 2020-09-09T18:02:13
| 2020-09-09T18:02:12
| null |
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
from . import runs_support
def _ac_archive(**_kw):
return click_util.completion_dir()
def import_params(fn):
click_util.append_params(
fn,
[
runs_support.runs_arg,
click.Argument(("archive",)),
click.Option(
("-m", "--move"),
help="Move imported runs rather than copy.",
is_flag=True,
),
click.Option(
("--copy-resources",),
help="Copy resources for each imported run.",
is_flag=True,
),
runs_support.all_filters,
click.Option(
("-y", "--yes"), help="Do not prompt before importing.", is_flag=True
),
],
)
assert fn.__click_params__[-1].name == "runs", fn.__click_params__
fn.__click_params__[-1].autocompletion = _ac_archive
return fn
@click.command("import")
@import_params
@click.pass_context
@click_util.use_args
@click_util.render_doc
def import_runs(ctx, args):
"""Import one or more runs from `ARCHIVE`.
`ARCHIVE` must be a directory that contains exported runs. Archive
directories can be created using ``guild export``.
You may use ``guild runs list --archive ARCHIVE`` to view runs in
`ARCHIVE`.
By default, resources are NOT copied with each imported run, but
their links are maintained. To copy resources, use
`--copy-resources`.
**WARNING**: Use `--copy-resources` with care as each imported run
will contain a separate copy of each resource!
{{ runs_support.runs_arg }}
If a `RUN` argument is not specified, ``:`` is assumed (all runs
are selected).
{{ runs_support.all_filters }}
"""
from . import runs_impl
runs_impl.import_(args, ctx)
|
[
"g@rre.tt"
] |
g@rre.tt
|
2fbd7c9248f1dcc4aa90678c7973c0971038f7b3
|
dbeae28942f79ebe1f844628baf6cb8f7251609b
|
/modules/state.py
|
961e9b0dd1677c68fc8b876bae6fae442c30c3b4
|
[] |
no_license
|
kouheiszk/pokemon-bot
|
3226614ad699dca261f2c97523b70d3c91a08b00
|
ba7404b7f6120581ac6602ca0c00ecbd9e0cbfc1
|
refs/heads/master
| 2020-05-21T10:12:07.376595
| 2016-09-13T10:57:01
| 2016-09-13T10:57:01
| 66,206,829
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from modules.catch import Catch
from modules.entities.badges import Badges
from modules.entities.hatched_eggs import HatchedEggs
from modules.entities.inventory import Inventory
from modules.entities.map_objects import MapObjects
from modules.entities.player import Player
from modules.entities.settings import Settings
class State(object):
def __init__(self):
self.player = Player()
self.inventory = Inventory()
self.badges = Badges()
self.settings = Settings()
self.map_objects = MapObjects()
self.catch = Catch()
self.hatched_eggs = HatchedEggs(self.inventory)
|
[
"kouhei.szk@gmail.com"
] |
kouhei.szk@gmail.com
|
c17cbfb454897e208edc74fb6406665a5bd37389
|
1debb684db5f2434de3793751afc45edcb2d584f
|
/apps/gtask/templatetags/datetime_tags.py
|
701d99da0ef2467c96ac5c4250f7b89bba8ee4e1
|
[] |
no_license
|
rosscdh/SuperDMon
|
2524aaa1429ce82558723ad5ea8833698380fb85
|
d0e6dd2f9d2237320b19b53b9be37c888f8c40ff
|
refs/heads/master
| 2016-09-05T13:33:55.294196
| 2012-02-07T14:52:34
| 2012-02-07T14:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from datetime import datetime
from django import template
register = template.Library()
@register.filter("timestamp")
def timestamp(value):
try:
return datetime.fromtimestamp(value)
except AttributeError:
return datetime.now()
|
[
"ross.crawford@sedo.com"
] |
ross.crawford@sedo.com
|
acaaff5ac222121f65916b2c51dba801a44b99f3
|
37496577a9fa05bf949bd018fca17f0b6d546ecd
|
/client/pdo/client/scripts/AuctionTestCLI.py
|
4a1e9c064ad1516c800d154a615e56b89dbcc513
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"Zlib",
"MIT",
"CC-BY-4.0"
] |
permissive
|
EugeneYYY/private-data-objects
|
cce9250648252f4baf92e0007c9584ac82d46401
|
d96033bbfa9bd3fe72a549487e8e5c83c7c580ca
|
refs/heads/master
| 2020-03-15T07:11:36.278038
| 2018-05-01T21:04:26
| 2018-05-01T22:40:35
| 132,023,932
| 0
| 0
| null | 2018-05-03T16:45:45
| 2018-05-03T16:45:44
| null |
UTF-8
|
Python
| false
| false
| 20,659
|
py
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import argparse
import random
from string import Template
import logging
logger = logging.getLogger(__name__)
import pprint
pp = pprint.PrettyPrinter(indent=4)
import pdo.common.crypto as pcrypto
from pdo.client.SchemeExpression import SchemeExpression
from pdo.common.keys import ServiceKeys
from pdo.contract import ContractCode
from pdo.contract import ContractState
from pdo.contract import Contract
from pdo.contract import register_contract
from pdo.contract import add_enclave_to_contract
from pdo.service_client.enclave import EnclaveServiceClient
from pdo.service_client.provisioning import ProvisioningServiceClient
enclave_services_by_url = {}
enclave_services = {}
participant_keys = {}
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def GetEnclaveServiceByURL(url) :
global enclave_services_by_url, enclave_service
if url not in enclave_services_by_url :
eservice = EnclaveServiceClient(url)
enclave_services_by_url[url] = eservice
enclave_services[eservice.enclave_id] = eservice
return enclave_services_by_url[url]
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def GetKeysForIdentity(config, identity) :
key_config = config['Key']
global participant_keys
if identity not in participant_keys :
#keypath = key_config['SearchPath']
#keyfile = Template(key_config['KeyFileTemplate']).substitute({'identity' : identity })
#participant_keys[identity] = ServiceKeys.read_from_file(keyfile, keypath)
participant_keys[identity] = ServiceKeys.create_service_keys()
return participant_keys[identity]
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def SendMessageAsIdentity(config, contract, invoker_keys, message, fmt = 'python', wait=False) :
ledger_config = config.get('Sawtooth')
contract_config = config.get('Contract')
try :
logger.info('send message %s to contract %s', message, contract.contract_code.name)
enclave_id = random.choice(contract.provisioned_enclaves)
enclave_service = enclave_services[enclave_id]
request = contract.create_update_request(invoker_keys, enclave_service, message)
response = request.evaluate()
logger.debug('result: %s, ', response.result)
except Exception as e :
logger.error('method invocation failed for message %s: %s', message, str(e))
sys.exit(-1)
try :
if wait :
response.submit_update_transaction(ledger_config, wait=30)
else :
response.submit_update_transaction(ledger_config)
contract.set_state(response.encrypted_state)
data_dir = contract_config['DataDirectory']
contract.contract_state.save_to_cache(data_dir=data_dir)
except Exception as e:
logger.error('transaction submission failed for message %s; %s', message, str(e))
sys.exit(-1)
expression = SchemeExpression.ParseExpression(response.result)
if fmt == 'scheme' :
return expression
elif fmt == 'python' :
return expression.value
else :
raise ValueError('unknown format {}'.format(fmt))
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def CreateAndRegisterContract(config, contract_info, creator_keys) :
ledger_config = config.get('Sawtooth')
contract_config = config.get('Contract')
contract_creator_id = creator_keys.identity
contract_name = contract_info['Name']
source_file = contract_info['Source']
search_path = contract_config['SourceSearchPath']
contract_code = ContractCode.create_from_scheme_file(contract_name, source_file, search_path = search_path)
# --------------------------------------------------
logger.info('register the contract')
# --------------------------------------------------
pservice_urls = contract_info.get("ProvisioningServices")
provisioning_services = list(map(lambda url : ProvisioningServiceClient(url), pservice_urls))
provisioning_service_keys = list(map(lambda svc : svc.identity, provisioning_services))
contract_id = register_contract(ledger_config, creator_keys, contract_code, provisioning_service_keys)
logger.info('registered the contract as %s', contract_id)
contract_state = ContractState.create_new_state(contract_id)
contract = Contract(contract_code, contract_state, contract_id, contract_creator_id)
# --------------------------------------------------
logger.info('provision enclaves')
# --------------------------------------------------
eservice_urls = contract_info.get("EnclaveServices")
enclave_services = list(map(lambda url : GetEnclaveServiceByURL(url), eservice_urls))
for eservice in enclave_services :
secret_list = []
for pservice in provisioning_services :
message = pcrypto.string_to_byte_array(eservice.enclave_id + contract_id)
signature = creator_keys.sign(message)
secret = pservice.get_secret(eservice.enclave_id, contract_id, creator_keys.verifying_key, signature)
secret_list.append(secret)
secretinfo = eservice.verify_secrets(contract_id, contract_creator_id, secret_list)
encrypted_state_encryption_key = secretinfo['encrypted_state_encryption_key']
signature = secretinfo['signature']
txnid = add_enclave_to_contract(
ledger_config,
creator_keys,
contract_id,
eservice.enclave_id,
secret_list,
encrypted_state_encryption_key,
signature)
contract.set_state_encryption_key(eservice.enclave_id, encrypted_state_encryption_key)
# --------------------------------------------------
logger.info('create the initial contract state')
# --------------------------------------------------
eservice = random.choice(enclave_services)
initialize_request = contract.create_initialize_request(creator_keys, eservice)
initialize_response = initialize_request.evaluate()
contract.set_state(initialize_response.encrypted_state)
logger.info('initial state created')
# --------------------------------------------------
logger.info('save the initial state in the ledger')
# --------------------------------------------------
txnid = initialize_response.submit_initialize_transaction(ledger_config, wait=30)
return contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CreateAssetContract(config) :
asset_config = config['AssetContract']
contract_config = config['Contract']
asset_creator_identity = asset_config['Creator']
asset_creator_keys = GetKeysForIdentity(config, asset_creator_identity)
contract = CreateAndRegisterContract(config, asset_config, asset_creator_keys)
data_dir = contract_config['DataDirectory']
contract.save_to_file(asset_config['Name'], data_dir = data_dir)
contract.contract_state.save_to_cache(data_dir = data_dir)
return contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CreateAuctionContract(config) :
auction_config = config['AuctionContract']
contract_config = config['Contract']
auction_creator_identity = auction_config['Creator']
auction_creator_keys = GetKeysForIdentity(config, auction_creator_identity)
contract = CreateAndRegisterContract(config, auction_config, auction_creator_keys)
data_dir = contract_config['DataDirectory']
contract.save_to_file(auction_config['Name'], data_dir = data_dir)
contract.contract_state.save_to_cache(data_dir = data_dir)
return contract
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CreateRandomAsset(config, asset_contract, invoker_keys, assetname, value = None) :
params = {}
params['asset'] = "asset_" + assetname
params['value'] = random.randint(0, 100) if value is None else value
message = Template("'(create \"${asset}\" ${value})").substitute(params)
logger.info('create asset %s with value %s', params['asset'], params['value'])
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message)
if result is None :
raise Exception('failed to create random asset')
return params['asset']
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def EscrowAsset(config, asset_contract, invoker_keys, asset, pubkey) :
## ( ((key "auction") (value 5) (owner "<ownerid>")) "<signature>" )
# first pass... escrow the asset and push the transaction
message = "'(escrow \"{0}\" \"{1}\")".format(asset, pubkey)
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message)
# get the escrow attestation for handoff to the auction
message = "'(escrow-attestation \"{0}\")".format(asset)
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message, fmt='scheme')
return (str(result.nth(0)), str(result.nth(1)), str(result.nth(2)))
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def CancelBid(config, auction_contract, asset_contract, invoker_keys) :
try :
message = "'(cancel-bid)"
result = SendMessageAsIdentity(config, auction_contract, invoker_keys, message)
message = "'(cancel-attestation)"
result = SendMessageAsIdentity(config, auction_contract, invoker_keys, message, fmt='scheme')
## should be: (((key "offered") (value X) (owner "<ownerid")) (dependencies) "<signature>")
assetkey = dict(result.nth(0).value)['key']
dependencies = str(result.nth(1))
signature = str(result.nth(2))
message = "'(disburse \"{0}\" {1} {2})".format(assetkey, dependencies, signature)
result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message)
except :
pass
## -----------------------------------------------------------------
## -----------------------------------------------------------------
def LocalMain(config) :
    """Run one end-to-end auction scenario against freshly created contracts.

    Creates an asset contract and an auction contract, mints a random asset
    for the auctioneer and for each participant, escrows the auctioneer's
    asset to prime the auction, submits one escrowed bid per participant,
    closes the bidding, exchanges ownership between the offered asset and
    the winning bid, cancels the remaining bids, dumps the final state,
    and exits the process.

    :param config: parsed configuration dictionary; must contain
        'AssetContract', 'AuctionContract' and 'Participants' sections
    """
    asset_config = config['AssetContract']
    auction_config = config['AuctionContract']
    user_config = config['Participants']

    auction_keys = GetKeysForIdentity(config, auction_config['Creator'])
    asset_keys = GetKeysForIdentity(config, asset_config['Creator'])

    # create the asset contract and retrieve its public signing key; the
    # auction contract needs it later to validate escrow attestations
    asset_contract = CreateAssetContract(config)
    asset_contract_pubkey = SendMessageAsIdentity(config, asset_contract, asset_keys, "'(get-public-signing-key)", fmt='python')

    # ---------- create the asset to use for the auction, minimum bid is 10 ----------
    auction_asset = CreateRandomAsset(config, asset_contract, auction_keys, 'auction', value = 10)

    # ---------- create the assets for each of the identities ----------
    assetmap = {}
    for identity in user_config['Asset'] :
        user_keys = GetKeysForIdentity(config, identity)
        assetmap[identity] = CreateRandomAsset(config, asset_contract, user_keys, identity)

    # ---------- create and initialize the auction contract ----------
    auction_contract = CreateAuctionContract(config)
    auction_contract_pubkey = SendMessageAsIdentity(config, auction_contract, auction_keys, "'(get-public-signing-key)", fmt='python')

    # hand the asset contract's verifying key to the auction contract
    message = "'(initialize \"{0}\")".format(asset_contract_pubkey)
    result = SendMessageAsIdentity(config, auction_contract, auction_keys, message, wait=True)

    # ---------- escrow the auction asset and prime the auction----------
    (ecounter, edependencies, esignature) = EscrowAsset(
        config, asset_contract, auction_keys, auction_asset, str(auction_contract_pubkey))

    message = "'(prime-auction* {0} {1} {2})".format(ecounter, edependencies, esignature)
    result = SendMessageAsIdentity(config, auction_contract, auction_keys, message)

    # ---------- submit bids ----------
    # each participant escrows their own asset to the auction contract and
    # submits the resulting attestation as a bid
    for identity in user_config['Auction'] :
        asset = assetmap[identity]
        user_keys = GetKeysForIdentity(config, identity)

        (ecounter, edependencies, esignature) = EscrowAsset(
            config, asset_contract, user_keys, asset, auction_contract_pubkey)

        message = "'(submit-bid* {0} {1} {2})".format(ecounter, edependencies, esignature)
        result = SendMessageAsIdentity(config, auction_contract, user_keys, message)

    ## =================================================================
    # we have to wait for the transactions to commit before we continue
    #WaitForStateCommit(lwc, PrivateContractTransaction, asset_contract.ContractID, asset_contract.State.ComputeHash())
    #WaitForStateCommit(lwc, PrivateContractTransaction, auction_contract.ContractID, auction_contract.State.ComputeHash())
    ## =================================================================

    # ---------- get the max bid ----------
    message = "'(max-bid)"
    result = SendMessageAsIdentity(config, auction_contract, auction_keys, message)
    logger.info("maximum bid: %s", str(result))

    # ---------- close the bidding and transfer the assets ----------
    message = "'(close-bidding)"
    result = SendMessageAsIdentity(config, auction_contract, auction_keys, message)

    message = "'(exchange-attestation)"
    result = SendMessageAsIdentity(config, auction_contract, auction_keys, message, fmt='scheme')

    ## should be: (((key "offered") (value X) (owner "<ownerid")) ((key "bid") (value X) (owner "<ownerid")) dep sig)
    logger.debug("closed bidding with result: %s", str(result))
    offered = dict(result.nth(0).value)
    maxbid = dict(result.nth(1).value)
    dependencies = str(result.nth(2))
    signature = str(result.nth(3))

    logger.info('exchange ownership of keys %s and %s', offered['key'], maxbid['key'])
    message = "'(exchange-ownership \"{0}\" \"{1}\" {2} {3})".format(offered['key'], maxbid['key'], dependencies, signature)
    result = SendMessageAsIdentity(config, asset_contract, auction_keys, message)

    # ---------- cancel the remaining bids ----------
    # best-effort: CancelBid suppresses failures (e.g. the winning bid)
    for identity in user_config['Auction'] :
        logger.info("attempt to cancel bid for %s", identity)
        user_keys = GetKeysForIdentity(config, identity)
        CancelBid(config, auction_contract, asset_contract, user_keys)

    # ---------- dump the final state of the contract ----------
    result = SendMessageAsIdentity(config, asset_contract, asset_keys, "'(get-state)", fmt='python', wait=True)
    pp.pprint(result)

    print("auction contract id = {0}".format(auction_contract.contract_id))
    print("asset contract id = {0}".format(asset_contract.contract_id))

    sys.exit(0)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## DO NOT MODIFY BELOW THIS LINE
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## -----------------------------------------------------------------
# Default locations for the PDO installation, overridable through the
# environment. NOTE: the directory variables use `or` rather than a
# get() default, so a *set-but-empty* environment variable also falls
# back to the default path; only HOSTNAME uses the get() default.
ContractHost = os.environ.get("HOSTNAME", "localhost")
ContractHome = os.environ.get("CONTRACTHOME") or os.path.realpath("/opt/pdo")
ContractEtc = os.environ.get("CONTRACTETC") or os.path.join(ContractHome, "etc")
ContractKeys = os.environ.get("CONTRACTKEYS") or os.path.join(ContractHome, "keys")
ContractLogs = os.environ.get("CONTRACTLOGS") or os.path.join(ContractHome, "logs")
ContractData = os.environ.get("CONTRACTDATA") or os.path.join(ContractHome, "data")
# script name without directory or extension
ScriptBase = os.path.splitext(os.path.basename(sys.argv[0]))[0]

# substitution map handed to the configuration-file parser; extended with
# command-line values before parsing
config_map = {
    'base' : ScriptBase,
    'data' : ContractData,
    'etc' : ContractEtc,
    'home' : ContractHome,
    'host' : ContractHost,
    'keys' : ContractKeys,
    'logs' : ContractLogs
}
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def Main() :
    """Parse command-line options and configuration files, then run LocalMain.

    Command-line flags override values loaded from the TOML configuration;
    missing sections (Logging, Sawtooth, Key, Contract) receive defaults
    before LocalMain is invoked.
    """
    import pdo.common.config as pconfig
    import pdo.common.logger as plogger

    # parse out the configuration file first
    conffiles = [ 'auction-test.toml' ]
    confpaths = [ ".", "./etc", ContractEtc ]

    parser = argparse.ArgumentParser()

    parser.add_argument('--config', help='configuration file', nargs = '+')
    parser.add_argument('--config-dir', help='configuration file', nargs = '+')

    parser.add_argument('--logfile', help='Name of the log file, __screen__ for standard output', type=str)
    parser.add_argument('--loglevel', help='Logging level', type=str)

    parser.add_argument('--ledger', help='URL for the Sawtooth ledger', type=str)

    parser.add_argument('--asset-contract', help='Name of the asset contract', default="integer-key", type = str)
    parser.add_argument('--asset-identity', help='Identity to use for the asset contract', default="ikey-contract", type=str)

    parser.add_argument('--auction-contract', help='Name of the auction contract', default="auction", type = str)
    parser.add_argument('--auction-identity', help='Identity to use for the auction contract', default="auc-contract", type=str)

    parser.add_argument('--key-dir', help='Directories to search for key files', nargs='+')
    parser.add_argument('--contract-dir', help='Directories to search for contract files', nargs='+')

    options = parser.parse_args()

    # first process the options necessary to load the default configuration
    if options.config :
        conffiles = options.config

    if options.config_dir :
        confpaths = options.config_dir

    # fold contract/identity names into the substitution map used while
    # parsing the configuration files
    global config_map
    config_map['assetidentity'] = options.asset_identity
    config_map['assetcontract'] = options.asset_contract
    config_map['auctionidentity'] = options.auction_identity
    config_map['auctioncontract'] = options.auction_contract

    try :
        config = pconfig.parse_configuration_files(conffiles, confpaths, config_map)
    except pconfig.ConfigurationException as e :
        logger.error(str(e))
        sys.exit(-1)

    # set up the logging configuration
    if config.get('Logging') is None :
        config['Logging'] = {
            'LogFile' : '__screen__',
            'LogLevel' : 'INFO'
        }
    if options.logfile :
        config['Logging']['LogFile'] = options.logfile
    if options.loglevel :
        config['Logging']['LogLevel'] = options.loglevel.upper()

    plogger.setup_loggers(config.get('Logging', {}))

    # set up the ledger configuration
    if config.get('Sawtooth') is None :
        config['Sawtooth'] = {
            'LedgerURL' : 'http://localhost:8008',
        }
    if options.ledger :
        config['Sawtooth']['LedgerURL'] = options.ledger

    # set up the key search paths
    if config.get('Key') is None :
        config['Key'] = {
            'SearchPath' : ['.', './keys', ContractKeys]
        }
    if options.key_dir :
        config['Key']['SearchPath'] = options.key_dir

    # set up the data paths
    if config.get('Contract') is None :
        config['Contract'] = {
            'SourceSearchPath' : [ '.', './contract', os.path.join(ContractHome, 'contracts') ]
        }
    if options.contract_dir :
        config['Contract']['SourceSearchPath'] = options.contract_dir

    # GO!
    LocalMain(config)
## -----------------------------------------------------------------
## Entry points
## -----------------------------------------------------------------
# runs unconditionally at import time; this script has no __main__ guard
Main()
|
[
"byron.marohn@intel.com"
] |
byron.marohn@intel.com
|
58d23eb63af6add22016b753d43de7f6521fbfb1
|
279e26d880c2470d0b60fe55b52f36024ecb28b5
|
/address.py
|
f65092bd69fcdb218f7a868194846dc937236b2d
|
[] |
no_license
|
khang-le/unit5-05
|
0167d40d8070d5889c948a90f13d06ea53581690
|
c9b4afb6f1361dca227d915c7630ff7e5fe3b1cf
|
refs/heads/master
| 2020-09-22T03:51:35.589393
| 2019-11-30T16:27:46
| 2019-11-30T16:27:46
| 225,039,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
#!/usr/bin/env python3
# Created by : Khang Le
# Created on : September 2019
# This program prints out your full shipping address, using default function parameters
def full_address(first_name, last_name, street_address, city, province,
                 postal_code, apt_number=None):
    """Return the upper-cased shipping address assembled from its parts.

    The caller is expected to embed the separating newlines/spaces in the
    arguments themselves (see main()); this function only concatenates the
    pieces and upper-cases the result.

    :param apt_number: optional apartment number; appended when given
    :return: the formatted address in upper case, starting with a newline
    """
    # the original duplicated this whole expression in an `if`/redundant-
    # `elif` pair that differed only by the trailing apartment number; the
    # empty-string joins (`+ ""`) were no-ops and are dropped
    address = ("\n" + first_name + " " + last_name +
               street_address + city + " " +
               province + " " + postal_code)
    if apt_number is not None:
        address = address + " " + apt_number
    return address.upper()
def main():
    """Collect address details from the user and print the shipping label."""
    # gather the raw pieces; trailing newlines/spaces are part of the format
    apt_number = None
    first_name = input("Enter your first name: ")
    last_name = input("Enter your last name: ") + "\n"
    street_address = input("Enter your address: ") + "\n"

    question = input("Do you have an ap.number? (y/n): ")
    if question.upper() in ("Y", "YES"):
        apt_number = input("Enter your apt.number here: ") + "\n"

    city = input("Enter your current city: ")
    province = input("Enter your current province: ") + " "
    postal_code = input("Enter your postal code: ")

    # build the positional argument list once instead of branching twice
    parts = [first_name, last_name, street_address, city, province, postal_code]
    if apt_number is not None:
        parts.append(apt_number)
    address = full_address(*parts)

    print(("Your shipping informations: {}").format(address))


if __name__ == "__main__":
    main()
|
[
"nguyen.khang.le@mths.ca"
] |
nguyen.khang.le@mths.ca
|
993148bc8da60f6cde60e4ddcf631c383dadd161
|
2a42392cf93deaccb39b357411c0b49abec0a132
|
/classcode/anim_and_sound/anim.py
|
840cb919d1038dfaea799ab71a28e4ca7a054444
|
[] |
no_license
|
AKilgore/CS112-Spring2012
|
89aa573b19f1c92055e4832d87c6e5fa0588bccf
|
9fe50b80d71b4dee92101b993c1f58265eb40ee2
|
refs/heads/master
| 2020-12-24T19:27:58.448474
| 2012-04-30T07:23:40
| 2012-04-30T07:23:40
| 3,266,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
#!/usr/bin/env python
import pygame
class AnimationFrames(object):
    """Timed sequence of frame data with optional looping.

    *frames* is an iterable of (duration, data) pairs; *loops* is the number
    of times the sequence repeats, or -1 to loop forever. After the final
    loop has finished, get() keeps returning the last frame's data.
    """

    def __init__(self, frames, loops=-1):
        self._times = []   # cumulative end time of each frame
        self._data = []
        total = 0
        for t, data in frames:
            total += t
            self._times.append(total)
            self._data.append(data)
        self.end = total   # duration of one full pass through the frames
        self.loops = loops

    def get(self, time):
        """Return the frame data active at *time* (same units as durations)."""
        # BUG FIX: original read `time is < self.loops * self.end`, which is
        # a syntax error; while still within the allotted loops, wrap the
        # time into a single pass
        if self.loops == -1 or time < self.loops * self.end:
            time %= self.end
        # past the last loop: hold the final frame
        if time > self.end:
            return self._data[-1]
        # BUG FIX: original compared against the undefined name `t`
        idx = 0
        while self._times[idx] < time:
            idx += 1
        return self._data[idx]
class Animation(object):
    """Plays an AnimationFrames sequence against a spritesheet.

    The frames object yields (x, y) pairs that index into the spritesheet;
    update(dt) advances the clock and caches the current coordinates.
    """

    def __init__(self, spritesheet, frames):
        # accept either an AnimationFrames or a raw (duration, data) list
        if not isinstance(frames, AnimationFrames):
            frames = AnimationFrames(frames)
        self.spritesheet = spritesheet
        self.frames = frames
        self.time = 0
        self.update(0)

    def get_frame_data(self, t):
        # BUG FIX: original referenced the nonexistent attribute
        # `self.frame`; the frames object is stored as `self.frames`,
        # so every construction raised AttributeError via update(0)
        return self.frames.get(t)

    def update(self, dt):
        """Advance the animation clock by *dt* and refresh (x, y)."""
        self.time += dt
        self.x, self.y = self.get_frame_data(self.time)

    def get_current_frame(self):
        # assumes the spritesheet exposes get(x, y) — TODO confirm
        return self.spritesheet.get(self.x, self.y)
|
[
"mak11@hampshire.edu"
] |
mak11@hampshire.edu
|
ce7c48f9f8686e922f04be56fd4bf8ab959eb8de
|
d9d516490b35d4589787dd1c2f02e1cb39967ae4
|
/021 Jogo da adivinhação.py
|
f27f947c56eeb6ea3fe7e4a0cacdc82c2896aca5
|
[] |
no_license
|
Emerson53na/exercicios-python-3
|
e3ec9e88e9d413ee9dee432a2c120447a22a3f3d
|
8f0349a94aca822722c02084c6e3d13cd8c27051
|
refs/heads/master
| 2021-05-19T09:31:31.686547
| 2020-04-22T23:54:41
| 2020-04-22T23:54:41
| 251,631,178
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
from random import choice

# Guessing game: the computer picks a number from 0 to 5 and the player
# tries to guess it before the pick is revealed.
print('=-'*20,'\nVou pensar em um número de 0 a 5.Tente adivinhar...')
print('=-'*20)

palpite = int(input('Em que número eu pensei? '))
escolha = choice([0, 1, 2, 3, 4, 5])

if escolha == palpite:
    print('O número escolhido foi: {}\n\033[32mParabens, você ganhou!\033[m'.format(escolha))
else:
    print('O número escolhido foi: {}\n\033[31mVocê errou!\033[m'.format(escolha))
|
[
"noreply@github.com"
] |
Emerson53na.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.