blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ebd1dd0aa7d1f4a4140c09684866a9a7eccdcb8 | d685a23ff883f216f5ab8ea6ba257eb660e6a931 | /laboratorios/lab01/codigo/rectangle.py | c50f4713939745bab2bd9d972bd8ca6c78d7b997 | [] | no_license | varisti7/ST0245-001 | 2abde80a13d6139018e6e7ac9b99fd476f572c98 | 3582eb0a06f22af496d63458db55d8d47b797c79 | refs/heads/master | 2023-01-12T20:29:19.816894 | 2020-11-17T21:27:02 | 2020-11-17T21:27:02 | 283,341,706 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | import time, string, random
import matplotlib.pyplot as plt
def rectangle(size2n):
    """
    Count how many ways a 2 x size2n board can be tiled with 2x1 rectangles.

    Fibonacci-style recurrence: a board of width n either ends with one
    vertical domino (leaving width n-1) or with two stacked horizontal
    dominoes (leaving width n-2).
    """
    # Base cases: a 2x1 board has 1 tiling, a 2x2 board has 2.   # C1
    if size2n in (1, 2):
        return size2n
    # T(n-1) + T(n-2)
    return rectangle(size2n - 1) + rectangle(size2n - 2)
    # Worst-case asymptotic complexity:
    # T(n) = C1 + T(n-1) + T(n-2)  =>  solved in lab point 4.4.1  =>  O(2^n)
def plotTimes():
    """
    Benchmark rectangle() for sizes 20..39 and plot the runtimes.

    The x axis is the input size and the y axis is the wall-clock time
    required to compute rectangle(x) for that size.
    """
    elapsed_times = []
    sizes = []
    for size in range(20, 40):
        start = time.time()
        rectangle(size)
        elapsed = time.time() - start
        elapsed_times.append(elapsed)
        print(elapsed)
        sizes.append(size)
    plt.plot(sizes, elapsed_times, 'rs')
    plt.show()
def main():
    """Entry point: benchmark rectangle() and plot the measured runtimes."""
    plotTimes()


# Guard the entry point so importing this module does not immediately run
# the (slow, exponential) benchmark; behaviour as a script is unchanged.
if __name__ == '__main__':
    main()
"""
6,20E-03
4,55E-03
1,29E-02
2,36E-02
0,04935503
0,079976082
0,100703001
0,176604033
0,24488306
0,347311974
0,278718948
0,402431965
0,634913206
1,017865181
1,632277012
2,63162899
4,297087908
6,946293831
11,17777205
18,06480503
""" | [
"varisti6@gmail.com"
] | varisti6@gmail.com |
a03f688cd3bb6ceef3f26b749170bc2c0ac710d7 | 82770c7bc5e2f27a48b8c370b0bab2ee41f24d86 | /microblog/flask/venv/lib/python2.7/site-packages/billiard/forking.py | 57fc9795e47d83e6c656232b9ddde88438c6ec12 | [
"Apache-2.0"
] | permissive | johankaito/fufuka | 77ddb841f27f6ce8036d7b38cb51dc62e85b2679 | 32a96ecf98ce305c2206c38443e58fdec88c788d | refs/heads/master | 2022-07-20T00:51:55.922063 | 2015-08-21T20:56:48 | 2015-08-21T20:56:48 | 39,845,849 | 2 | 0 | Apache-2.0 | 2022-06-29T23:30:11 | 2015-07-28T16:39:54 | Python | UTF-8 | Python | false | false | 17,515 | py | #
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import os
import sys
import signal
import warnings
from pickle import load, HIGHEST_PROTOCOL
from billiard import util
from billiard import process
from billiard.five import int_types
from .reduction import dump
from .compat import _winapi as win32
__all__ = ['Popen', 'assert_spawning', 'exit',
'duplicate', 'close']
try:
WindowsError = WindowsError # noqa
except NameError:
class WindowsError(Exception): # noqa
pass
W_OLD_DJANGO_LAYOUT = """\
Will add directory %r to path! This is necessary to accommodate \
pre-Django 1.4 layouts using setup_environ.
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
environment variable.
"""
#
# Choose whether to do a fork or spawn (fork+exec) on Unix.
# This affects how some shared resources should be created.
#
_forking_is_enabled = sys.platform != 'win32'
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
    """Raise unless the calling thread is currently spawning a child process.

    Shared objects may only be pickled while being inherited by a child;
    any other attempt to serialise them is a programming error.
    """
    if Popen.thread_is_spawning():
        return
    raise RuntimeError(
        '%s objects should only be shared between processes'
        ' through inheritance' % type(self).__name__
    )
#
# Unix
#
if sys.platform != 'win32':
try:
import thread
except ImportError:
import _thread as thread # noqa
import select
WINEXE = False
WINSERVICE = False
exit = os._exit
duplicate = os.dup
close = os.close
_select = util._eintr_retry(select.select)
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
# POSIX Popen: launches a child running `process_obj`, either by a plain
# os.fork() or (when forking is disabled) by fork+exec of a fresh
# interpreter that unpickles the process object from an inherited pipe.
_tls = thread._local()
def __init__(self, process_obj):
# register reducers
from billiard import connection # noqa
_Django_old_layout_hack__save()
# Flush both streams so the child does not inherit buffered output
# and print it a second time.
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
# `r` doubles as the liveness sentinel: the write end `w` is closed
# automatically when the child exits, making `r` readable.
r, w = os.pipe()
self.sentinel = r
if _forking_is_enabled:
self.pid = os.fork()
if self.pid == 0:
# Child: close the sentinel read end and reseed `random` (if it
# was loaded) so the child does not mirror the parent's stream.
os.close(r)
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
os._exit(code)
else:
# fork+exec path: spawn a new interpreter and feed it the pickled
# preparation data and process object over `to_child_fd`.
from_parent_fd, to_child_fd = os.pipe()
cmd = get_command_line() + [str(from_parent_fd)]
self.pid = os.fork()
if self.pid == 0:
os.close(r)
os.close(to_child_fd)
os.execv(sys.executable, cmd)
# send information to child
prep_data = get_preparation_data(process_obj._name)
os.close(from_parent_fd)
to_child = os.fdopen(to_child_fd, 'wb')
# Mark this thread as "spawning" so shared objects may be pickled.
Popen._tls.process_handle = self.pid
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del(Popen._tls.process_handle)
to_child.close()
# `w` will be closed when the child exits, at which point `r`
# will become ready for reading (using e.g. select()).
os.close(w)
util.Finalize(self, os.close, (r,))
def poll(self, flag=os.WNOHANG):
# Reap the child if it has exited; returns None while still running.
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, flag)
except os.error:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
# Block (up to `timeout` seconds) until the child exits, using the
# sentinel pipe to implement the timeout without busy-waiting.
if self.returncode is None:
if timeout is not None:
r = _select([self.sentinel], [], [], timeout)[0]
if not r:
return None
# This shouldn't block if select() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
# Best-effort SIGTERM; tolerate a child that already exited.
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError:
if self.wait(timeout=0.1) is None:
raise
@staticmethod
def thread_is_spawning():
# Under plain fork there is no spawning phase; otherwise check the
# thread-local handle that is set while pickling to the child.
if _forking_is_enabled:
return False
else:
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
# File descriptors are inherited across fork, so no duplication needed.
return handle
#
# Windows
#
else:
try:
import thread
except ImportError:
import _thread as thread # noqa
import msvcrt
try:
import _subprocess
except ImportError:
import _winapi as _subprocess # noqa
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
exit = win32.ExitProcess
close = win32.CloseHandle
#
#
#
def duplicate(handle, target_process=None, inheritable=False):
# Duplicate a Windows handle into `target_process` (default: the current
# process), optionally making the copy inheritable by child processes.
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
h = _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
)
# Before Python 3.3, DuplicateHandle returns a wrapper object that must
# be detached to obtain the raw integer handle.
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and sys.version_info[1] < 3):
h = h.Detach()
return h
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = thread._local()
def __init__(self, process_obj):
_Django_old_layout_hack__save()
# create pipe for communication with child
rfd, wfd = os.pipe()
# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
# The thread handle and our copy of the read handle are no longer
# needed once the child owns them.
close(ht) if isinstance(ht, int_types) else ht.Close()
(close(rhandle) if isinstance(rhandle, int_types)
else rhandle.Close())
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
self.sentinel = int(hp)
# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
# Mark this thread as "spawning" so shared objects may be pickled.
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
# Handles must be explicitly duplicated into the child on Windows.
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
# Wait (up to `timeout` seconds) for the child and cache its exit code.
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
# Child was terminated by us; report it like SIGTERM on Unix.
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
# Best-effort TerminateProcess; tolerate an already-dead child.
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
#
#
#
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
    """Override the Python interpreter used when spawning child processes."""
    global _python_exe
    _python_exe = exe
def is_forking(argv):
    """Return whether the command line marks this process as a forked child.

    As a side effect, sets FORKED_BY_MULTIPROCESSING=1 in the environment
    when the marker is present.
    """
    if len(argv) < 2 or argv[1] != '--billiard-fork':
        return False
    assert len(argv) == 3
    os.environ["FORKED_BY_MULTIPROCESSING"] = "1"
    return True
def freeze_support():
    """Run the child payload and exit when spawned as a forked child.

    Does nothing in a normal (parent) process.
    """
    if not is_forking(sys.argv):
        return
    main()
    sys.exit()
def get_command_line():
'''
Returns prefix of command line used for spawning a child process
'''
# Guard against child processes being started while the main module is
# still importing (the classic Windows multiprocessing pitfall).
if process.current_process()._identity == () and is_forking(sys.argv):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that have forgotten to use the proper
idiom in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.''')
# Frozen executables re-run themselves with the fork marker; otherwise a
# fresh interpreter is started with an inline bootstrap program.
if getattr(sys, 'frozen', False):
return [sys.executable, '--billiard-fork']
else:
prog = 'from billiard.forking import main; main()'
return [_python_exe, '-c', prog, '--billiard-fork']
def _Django_old_layout_hack__save():
    """Record the pre-1.4 Django project directory in the environment.

    Needed so a child process can re-import a project laid out for the old
    setup_environ() style.  Does nothing when DJANGO_PROJECT_DIR is already
    set, when Django is not in use, or when the settings module does not
    look like an old-style "<project>.settings" path.
    """
    if 'DJANGO_PROJECT_DIR' in os.environ:
        return
    try:
        settings_name = os.environ['DJANGO_SETTINGS_MODULE']
    except KeyError:
        return  # not using Django.
    conf_settings = sys.modules.get('django.conf.settings')
    configured = conf_settings and conf_settings.configured
    try:
        project_name, _ = settings_name.split('.', 1)
    except ValueError:
        return  # not modified by setup_environ
    project = __import__(project_name)
    try:
        project_dir = os.path.normpath(_module_parent_dir(project))
    except AttributeError:
        return  # dynamically generated module (no __file__)
    if configured:
        # Warn loudly: mutating sys.path for old layouts is a workaround.
        warnings.warn(UserWarning(
            W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
        ))
    os.environ['DJANGO_PROJECT_DIR'] = project_dir
def _Django_old_layout_hack__load():
    """Re-add the saved pre-1.4 Django project directory to sys.path."""
    project_dir = os.environ.get('DJANGO_PROJECT_DIR')
    if project_dir is not None:
        sys.path.append(project_dir)
def _module_parent_dir(mod):
    """Return the directory that contains *mod*'s package dir or file."""
    parent, _basename = os.path.split(_module_dir(mod))
    # An empty or "." parent means the module lives in the working directory.
    if not parent or parent == os.curdir:
        parent = os.getcwd()
    return parent
def _module_dir(mod):
    """Best-effort location of a module: its package directory, else its file."""
    path = mod.__file__
    if '__init__.py' in path:
        return os.path.dirname(path)
    return path
def main():
'''
Run code specified by data received over pipe.

Entry point for a fork+exec child: reads the preparation data and the
pickled process object from the inherited pipe descriptor (last argv
element), rebuilds enough parent state (path, argv, logging), then runs
the process and exits with its bootstrap return code.
'''
global _forking_is_enabled
_Django_old_layout_hack__load()
assert is_forking(sys.argv)
# The child itself must use plain fork semantics from here on.
_forking_is_enabled = False
handle = int(sys.argv[-1])
if sys.platform == 'win32':
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
else:
fd = handle
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
# Huge hack to make logging before Process.run work.
try:
os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
except KeyError:
pass
except AttributeError:
pass
# Recreate the parent's logging configuration from environment variables.
loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
format = os.environ.get("_MP_FORK_LOGFORMAT_")
if loglevel:
from billiard import util
import logging
logger = util.get_logger()
logger.setLevel(int(loglevel))
if not logger.handlers:
logger._rudimentary_setup = True
logfile = logfile or sys.__stderr__
if hasattr(logfile, "write"):
handler = logging.StreamHandler(logfile)
else:
handler = logging.FileHandler(logfile)
formatter = logging.Formatter(
format or util.DEFAULT_LOGGING_FORMAT,
)
handler.setFormatter(formatter)
logger.addHandler(handler)
# Unpickle the process object itself (must happen after prepare()).
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)
def get_preparation_data(name):
    """Collect the parent-process state a child needs to unpickle a Process.

    Returns a dict with the process name, sys.path/sys.argv, logging setup,
    the original working directory, the auth key and (when resolvable) the
    absolute path of the __main__ module.
    """
    from billiard.util import _logger, _log_to_stderr
    data = {
        'name': name,
        'sys_path': sys.path,
        'sys_argv': sys.argv,
        'log_to_stderr': _log_to_stderr,
        'orig_dir': process.ORIGINAL_DIR,
        'authkey': process.current_process().authkey,
    }
    if _logger is not None:
        data['log_level'] = _logger.getEffectiveLevel()
    # Frozen executables / Windows services have no meaningful __main__ file.
    if WINEXE or WINSERVICE:
        return data
    main_path = getattr(sys.modules['__main__'], '__file__', None)
    if not main_path and sys.argv[0] not in ('', '-c'):
        main_path = sys.argv[0]
    if main_path is not None:
        if (not os.path.isabs(main_path) and
                process.ORIGINAL_DIR is not None):
            # Resolve relative to where the parent was originally started.
            main_path = os.path.join(process.ORIGINAL_DIR, main_path)
        data['main_path'] = os.path.normpath(main_path)
    return data
#
# Prepare current process
#
# Replaced __main__ modules are kept alive here so they are not collected.
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
# Re-import the parent's __main__ module so that unpickling can
# resolve classes and functions defined there.
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
# Package entry point: use the package directory's name instead.
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in list(main_module.__dict__.values()):
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
| [
"john.g.keto@gmail.com"
] | john.g.keto@gmail.com |
43fa9cc363b25fbbb658601647398ac8dbfe41aa | a25b26b3b27b59325915c10dfafd607faae4ed01 | /pickfrombothsides.py | 879435386092602b6015b9cf8eab00d3d4514243 | [] | no_license | helplearnhome/Coding-Interview-in-Python | 3e3d9f42b993b9ea8e86e25d8a8e36c1610c45f2 | 52a76689a5808a89ebb8399bf3e6d4922c4c190b | refs/heads/master | 2023-07-17T06:30:09.207235 | 2021-09-08T03:28:33 | 2021-09-08T03:28:33 | 397,350,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | # # Recursive
def func(A, B, i, j, sum):
    """Maximum total from taking B elements off either end of A.

    `i` is the next index from the left, `j` the next (negative) index from
    the right, and `sum` accumulates the elements already taken.  Exponential
    recursion: each pick branches into "take left" / "take right".
    """
    if B == 0:
        return sum
    take_left = func(A, B - 1, i + 1, j, sum + A[i])
    take_right = func(A, B - 1, i, j - 1, sum + A[j])
    return max(take_left, take_right)
class Solution:
    """Recursive solver: best sum of B picks taken from both ends of A."""
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        # Delegate to the exponential two-pointer recursion.
        return func(A, B, 0, -1, 0)
# x = Solution()
# print(x.solve([1,9,8,1,1,7],3))
#Wrong way
#Iterative
# def func(A,B,i,j,sum):
# while(B>0 and i<=j):
# if (A[i] > A[j]):
# sum+=A[i]
# i+=1
# else:
# sum+=A[j]
# j-=1
# B-=1
# return sum
# class Solution:
# # @param A : list of integers
# # @param B : integer
# # @return an integer
# def solve(self, A, B):
# # print(B)
# return func(A,B,0,len(A)-1,0)
# x = Solution()
# print(x.solve([1,9,8,1,1,7],3))
#iterative
class Solution:
    """O(B) sliding-window solver: best sum of B picks from both ends of A."""
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        # Start with the window that takes all B picks from the left, then
        # trade one left-end pick for one right-end pick at a time.
        window = 0
        for idx in range(B):
            window += A[idx]
        best = window
        for taken_right in range(1, B + 1):
            window += A[-taken_right] - A[B - taken_right]
            if window > best:
                best = window
        return best


x = Solution()
print(x.solve([0, 0, 0, 1, 1, 7], 3))
#pavana
# int Solution::solve(vector<int> &A, int B) {
# vector<int> dp1(B,0),dp2(B,0);
# int n=A.size();
# dp1[0]=A[0];
# for(int i=1;i<B;i++)
# dp1[i]=dp1[i-1]+A[i];
# dp2[0]=A[n-1];
# for(int i=1;i<B;i++)
# dp2[i]=dp2[i-1]+A[n-1-i];
# int ans=max(dp1[B-1],dp2[B-1]);
# for(int i=0;i<B-1;i++)
# ans=max(ans,dp1[i]+dp2[B-2-i]);
# return ans;
# } | [
"noreply@github.com"
] | helplearnhome.noreply@github.com |
47f0abfaceb11e660d4f305e745db7fe9fee819f | da84fa23cc4cf2e81e50892085ac162508bff155 | /nestris_ocr/capturing/linux/linux_mgr.py | 6381209b8dd85f1880aca2b7eb9cbc653ec9f0cd | [] | no_license | alex-ong/NESTrisOCR | 83ddaba55b100f0ee20e924731459e547e321887 | 488beeb30e596ccd0548152e241e1c6f772e717b | refs/heads/master | 2023-01-10T08:02:41.702538 | 2021-07-07T06:33:39 | 2021-07-07T06:33:39 | 169,196,192 | 25 | 8 | null | 2022-12-27T15:37:40 | 2019-02-05T05:44:34 | Python | UTF-8 | Python | false | false | 1,202 | py | import Xlib
import Xlib.display
from Xlib import X
class WindowMgr:
    """Encapsulates some calls for X11 window management"""

    def __init__(self, hwnd=None):
        self.handle = hwnd

    def checkWindow(self, hwnd):
        """checks if a window still exists"""
        return hwnd

    def getWindows(self):
        """
        Return a list of tuples (handler, window name) for each real window.
        """
        def walk(window, found):
            # Depth-first walk of the X window tree; a window counts as
            # "real" when a 1x1 pixel image can be grabbed from it.
            for child in window.query_tree().children:
                try:
                    child.get_image(0, 0, 1, 1, X.ZPixmap, 0xFFFFFFFF)
                    wm_class = child.get_wm_class()
                    found.append(
                        (child.id, wm_class[1] if wm_class is not None else "")
                    )
                except Xlib.error.BadMatch:
                    # Unmapped/invisible windows raise BadMatch; skip them.
                    pass
                finally:
                    # Recurse into the child's subtree regardless of outcome.
                    found = walk(child, found)
            return found

        root = Xlib.display.Display().screen().root
        return walk(root, [])
"the.onga@gmail.com"
] | the.onga@gmail.com |
92342462dbbae240ef7cda2ffdb9248d3c5c9ee5 | d9ebec1b75f31881ae7e05139a9ad7004dd92227 | /api/recommender/models.py | 55993431ad80a72aeec090a0f85ea6cc58322235 | [] | no_license | Drobo07/eBackend | 83efa41aac6dddc483b8a4061bbcb00fee9208b7 | cf090d5d4b0c4d5bb8c50c511dbc0f3d90e1aca4 | refs/heads/master | 2020-12-21T15:08:18.811907 | 2019-05-02T17:18:49 | 2019-05-02T17:18:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from django.db import models
from django.contrib.postgres.fields import ArrayField
from datetime import datetime
from general.models import Publications
# Create your models here.
class BarcRequest(models.Model):
    """A BARC recommendation request: a paper title/abstract pair plus
    bookkeeping for the asynchronously generated result."""
    title = models.CharField(max_length=128, null=False)
    abstract = models.TextField(max_length=1024, null=False)
    created = models.DateTimeField(auto_now_add=True)
    result_token = models.CharField(max_length=32)
    result_generated = models.BooleanField(default=False)

    def __str__(self):
        # Truncate long titles for admin/listing display.
        if len(self.title) > 32:
            return self.title[:30] + "..."
        return self.title

    def get_result_token(self):
        # Token derived from the creation timestamp.  (Removed an unused
        # `ind = self.pk` local that suggested the pk was part of the token.)
        # NOTE(review): "barc/" followed by strftime("/%Y/...") produces a
        # double slash ("barc//2021/...") — confirm consumers expect that
        # before changing the format.
        return "barc/" + self.created.strftime("/%Y/%m/%d/%H/%M/%S")
| [
"dipeshkrj14@gmail.com"
] | dipeshkrj14@gmail.com |
f7893075ad7f7f33d47f38004cf784a9fc9afb2d | ebe11743bb33a1bfe36cddf92551a1e08b3e565d | /advanced/Common.py | d01638f48bbc3fb6f7e1f69744c92fc1ace91f0e | [] | no_license | nick-cheatwood7/python-mondrian | 671ca3aa0734fa283906181f81d1b29be3dccb1b | e5f98b6009eb41eacc90f5906218bb3f3e59452e | refs/heads/main | 2023-03-13T10:30:36.799891 | 2021-02-22T07:55:29 | 2021-02-22T07:55:29 | 339,519,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py |
# Created 02/22/2021 by Nick Cheatwood
# Contains any numbers or functions generic enough to warrant being outside of main file
import random
# Specify canvas width/height
canvasWidth = 1024
canvasHeight = 768
# Specify splits
# 'Generate a random integer between 120 and the width of the region * 1.5'
# If the random integer is less than the width of the region then split the region
splitLow = 120 # Ensures that we never split a region less than 120px wide
splitPenalty = 1.5 # Provides a random chance that a larger region will not be split into smaller regions
# Generate a random color
def getRandomColor():
    """Return one of the palette colors, chosen uniformly at random."""
    # Available color choices
    colorChoices = [
        'white',
        '#6E5AE0',
        '#E09287',
        '#8170E0',
        '#E0CF5A',
        '#65E0A7',
    ]
    # random.choice replaces the original randint(0, len+1) plus index
    # clamping, which silently made the last two colors twice as likely.
    return random.choice(colorChoices)
def getRandomBorderColor():
    """Return one of the border colors, chosen uniformly at random."""
    # Available color options
    colors = [
        'black',
        'white',
        'hot pink',
        'grey',
        'blue',
    ]
    # Uniform choice; the original randint/offset arithmetic biased the
    # draw toward the last two entries.
    return random.choice(colors)
"nick.cheatwood@gmail.com"
] | nick.cheatwood@gmail.com |
49da684394e0562e287537b0ace76cdd6969645c | ec649dc247e5229f78c2bcf51fdef1c57f3cdbff | /OCR/tesseract-invoice.py | 0a8f8a6159e2642f490c1a13260c8f55f28faaf0 | [] | no_license | dainv1989/machine-learning | 0f1686a9bf921bfcf665a786c63ec68b189942a0 | e44b14041f577ebe5d5ce785af45e4226e0d2756 | refs/heads/master | 2021-07-11T20:39:39.423568 | 2020-03-22T23:55:10 | 2020-03-22T23:55:10 | 93,155,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | import cv2
import re
import pytesseract as pts
from pytesseract import Output
def show_textboxes(image):
    """Draw a green box around every OCR word detected with confidence > 60.

    cv2.rectangle draws on its input, so *image* is modified in place; it is
    also returned for convenience.
    """
    d = pts.image_to_data(image, output_type=Output.DICT)
    n_boxes = len(d['text'])
    for i in range(n_boxes):
        if int(d['conf'][i]) > 60:
            (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
            # Draws in place; the original kept the return value in an
            # unused local, which misleadingly implied a copy was made.
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return image
def find_pattern(image, data_pattern):
    """Box every OCR word (confidence > 60) whose text matches *data_pattern*.

    The regex is applied with re.match, i.e. anchored at the start of each
    detected word.  Mutates and returns *image*.
    """
    d = pts.image_to_data(image, output_type=Output.DICT)
    # (Removed an unused `keys = list(d.keys())` local.)
    n_boxes = len(d['text'])
    for i in range(n_boxes):
        if int(d['conf'][i]) > 60 and re.match(data_pattern, d['text'][i]):
            (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
            image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return image
# Load the scanned invoice and highlight dates and e-mail addresses on it.
img = cv2.imread('invoice.jpg')
#text = pts.image_to_string(img)
#print(text)
# dd/mm/yyyy dates in the years 1900-2099.
date_pattern = '^(0[1-9]|[12][0-9]|3[01])/(0[1-9]|1[012])/(19|20)\d\d$'
img_datebox = find_pattern(img, date_pattern)
#cv2.imshow('img', img_datebox)
# Simple lower-case e-mail addresses (one dot in the domain).
email_pattern = '^[a-z0-9]+@[a-z0-9]+\.[a-z]+$'
img_emailbox = find_pattern(img_datebox, email_pattern)
cv2.imshow('img', img_emailbox)
#img_textboxes = show_textboxes(img)
#cv2.imshow('img', img_textboxes)
# Block until a key is pressed so the window stays visible.
cv2.waitKey(0)
| [
"dainv1989@hotmail.com"
] | dainv1989@hotmail.com |
837640dec4290cdc6c2b69db42c87b60beb2a646 | 327a6d74d22fb06d76f5da814452e76aa8999692 | /stanCode_projects/Photoshop/green_screen.py | ff846ff20b3d35e8ea5a12d8952923732dfd0a2b | [
"MIT"
] | permissive | siyuban/stanCode | 6a5c2114a3bc949c1d6e5a70b987dcda74161874 | 5cca179a7542abdf1fe3a9e1da256945241249cc | refs/heads/main | 2023-02-11T16:20:08.707138 | 2021-01-09T15:44:55 | 2021-01-09T15:44:55 | 324,043,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | """
File: green_screen.py
Name:萬思妤
-------------------------------
This file creates a new image that uses
MillenniumFalcon.png as background and
replace the green pixels in "ReyGreenScreen.png".
"""
from simpleimage import SimpleImage
def combine(background_img, figure_img):
    """
    :param background_img: SimpleImage, the background image.
    :param figure_img: SimpleImage, green screen figure image
    :return: SimpleImage, figure image with the green screen pixels replaced by pixels of background.
    """
    for col in range(background_img.width):
        for row in range(background_img.height):
            fg = figure_img.get_pixel(col, row)
            # A pixel counts as "green screen" when its green channel is
            # more than double both of the other channels.
            if fg.green > 2 * max(fg.red, fg.blue):
                bg = background_img.get_pixel(col, row)
                fg.red = bg.red
                fg.blue = bg.blue
                fg.green = bg.green
    return figure_img
def main():
    """
    Create a new image that uses MillenniumFalcon.png as background and replace the green
    pixels in "ReyGreenScreen.png".
    """
    background = SimpleImage("images/MillenniumFalcon.png")
    foreground = SimpleImage("images/ReyGreenScreen.png")
    # Resize the background to match the figure before compositing.
    background.make_as_big_as(foreground)
    composited = combine(background, foreground)
    composited.show()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | siyuban.noreply@github.com |
11065362a8ac77972c519aadeae585300bb5085d | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_25/models/active_directory_get_response.py | ee529854e041a3ff612ccf174315845d4e2c49ef | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 5,613 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class ActiveDirectoryGetResponse(object):
    """Swagger-generated response wrapper for Active Directory GET requests.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ActiveDirectory]'
    }

    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ActiveDirectory]
    ):
        """
        Keyword args:
            more_items_remaining (bool): `true` if subsequent items can be retrieved.
            total_item_count (int): Total number of records after filtering; `null`
                unless the `total_item_count` query parameter was set to `true`.
            continuation_token (str): Token usable in the `continuation_token`
                query param to fetch the next page of data.
            items (list[ActiveDirectory]): Active Directory computer account
                configuration objects.
        """
        # Assign only the supplied keyword arguments so that omitted fields
        # remain genuinely unset (attribute access on them raises).
        provided = {
            'more_items_remaining': more_items_remaining,
            'total_item_count': total_item_count,
            'continuation_token': continuation_token,
            'items': items,
        }
        for field, value in provided.items():
            if value is not None:
                setattr(self, field, value)

    def __setattr__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Property placeholders mark unset fields; make them look absent.
            raise AttributeError
        return value

    def __getitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if not hasattr(self, attr):
                continue
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(ActiveDirectoryGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ActiveDirectoryGetResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
e0cf8c5298a8ee4e8a3b21eb3b1fe65504c3047e | 204ec78fcebcea9e1e1da4905cf3fad0a514b01f | /test/unit/test_timeout.py | 4990b87aad1b2d40888f75acc3481c349d3eb4e0 | [
"Apache-2.0"
] | permissive | ARMmbed/pyOCD | 659340bf8753aa8e15a72890b8bea64dff2c2f42 | d4cdcf7e532cae17caad866839287bbe1e0d952b | refs/heads/master | 2023-05-31T13:45:15.797588 | 2020-10-12T13:55:47 | 2020-10-12T13:55:47 | 190,203,829 | 3 | 1 | Apache-2.0 | 2019-07-05T11:05:40 | 2019-06-04T13:09:56 | Python | UTF-8 | Python | false | false | 1,942 | py | # pyOCD debugger
# Copyright (c) 2017-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import (time, sleep)
import pytest
from pyocd.utility.timeout import Timeout
class TestTimeout:
    """Exercises pyocd.utility.timeout.Timeout.

    These tests are wall-clock based (real sleeps), so they are inherently
    timing sensitive; keep the margins generous when editing.
    """
    def test_no_timeout(self):
        # Break out after 4 iterations (~40 ms), inside the 50 ms window,
        # so no timeout may be reported.
        with Timeout(0.05) as to:
            cnt = 0
            while to.check():
                sleep(0.01)
                cnt += 1
                if cnt == 4:
                    break
            else:
                # while/else: runs only if check() returned False, i.e. the
                # timeout fired - which would be a failure here.
                assert False
        assert not to.did_time_out
    def test_timeout_a(self):
        # Let check() run the whole window; it must eventually report expiry.
        s = time()
        with Timeout(0.05) as to:
            while to.check():
                sleep(0.01)
        assert to.did_time_out
        assert (time() - s) >= 0.05
    def test_timeout_b(self):
        # Poll did_time_out directly (without check()) while sleeping past
        # the window; the flag must flip during the loop.
        timedout = False
        s = time()
        with Timeout(0.05) as to:
            cnt = 0
            while cnt < 10:
                if to.did_time_out:
                    timedout = True
                sleep(0.02)
                cnt += 1
        assert timedout
        assert to.did_time_out
        assert (time() - s) >= 0.05
    def test_timeout_c(self):
        # Ten iterations with no sleeping finish far below 50 ms, so the
        # flag must never be observed set.
        timedout = False
        with Timeout(0.05) as to:
            cnt = 0
            while cnt < 10:
                if to.did_time_out:
                    timedout = True
                cnt += 1
        assert not timedout
        assert not to.did_time_out
| [
"flit@me.com"
] | flit@me.com |
f158c6821e350e490fa25d9eda4fc880f01fe9d0 | 6e2dc82bcfbc420ce6fd8e890f9f254e8e594902 | /www/cursivedata/migrations/0005_auto__add_field_pipeline_anim_loop.py | 92063d7e04206acf824ce406bdbd7eabe0b2d325 | [
"CC-BY-4.0"
] | permissive | mattvenn/cursivedata | 8ea86bde4a58a5678b1116953d17f0ae3600daf6 | 43e43263bef6f01698166d87bcff00b246957277 | refs/heads/master | 2021-01-17T08:06:38.715586 | 2016-07-22T16:04:10 | 2016-07-22T16:04:10 | 5,599,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,340 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Apply the migration: add the new boolean column with a False
        # default (keep_default=False drops the DB-level default afterwards).
        # Adding field 'Pipeline.anim_loop'
        db.add_column('cursivedata_pipeline', 'anim_loop',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        # Revert the migration: drop the column added in forwards().
        # Deleting field 'Pipeline.anim_loop'
        db.delete_column('cursivedata_pipeline', 'anim_loop')
models = {
'cursivedata.cosmsource': {
'Meta': {'object_name': 'COSMSource'},
'add_feed_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'add_feed_title': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'add_location': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'api_key': ('django.db.models.fields.CharField', [], {'default': "'WsH6oBOmVbflt5ytsSYHYVGQzCaSAKw0Ti92WHZzajZHWT0g'", 'max_length': '400'}),
'cosm_trigger_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'cosm_url': ('django.db.models.fields.CharField', [], {'default': "'http://api.cosm.com/v2/triggers/'", 'max_length': '200'}),
'feed_id': ('django.db.models.fields.CharField', [], {'default': "'96779'", 'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unknown Source'", 'max_length': '100'}),
'pipelines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cursivedata.Pipeline']", 'symmetrical': 'False', 'blank': 'True'}),
'stream_id': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '400'}),
'use_stream_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'cursivedata.datapoint': {
'Meta': {'object_name': 'DataPoint'},
'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'datastore': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.DataStore']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'cursivedata.datastore': {
'Meta': {'object_name': 'DataStore'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'cursivedata.endpoint': {
'Meta': {'object_name': 'Endpoint'},
'device': ('django.db.models.fields.CharField', [], {'default': "'web'", 'max_length': '200'}),
'full_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'full_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'generate_gcode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'height': ('django.db.models.fields.FloatField', [], {'default': '200', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_height': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
'img_width': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
'last_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'last_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'robot_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'side_margin': ('django.db.models.fields.FloatField', [], {'default': '10', 'max_length': '200'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
'status_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'top_margin': ('django.db.models.fields.FloatField', [], {'default': '10', 'max_length': '200'}),
'width': ('django.db.models.fields.FloatField', [], {'default': '200', 'max_length': '200'})
},
'cursivedata.gcodeoutput': {
'Meta': {'object_name': 'GCodeOutput'},
'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'served': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'cursivedata.generator': {
'Meta': {'object_name': 'Generator'},
'description': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '2000'}),
'file_path': ('django.db.models.fields.CharField', [], {'default': "'./generators'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'default': "'No Image'", 'max_length': '200'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
'module_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'cursivedata.generatorstate': {
'Meta': {'object_name': 'GeneratorState'},
'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'params': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'state': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'cursivedata.parameter': {
'Meta': {'object_name': 'Parameter'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'float'", 'max_length': '20'}),
'default': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "'Some parameter'", 'max_length': '1000', 'blank': 'True'}),
'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'cursivedata.pipeline': {
'Meta': {'object_name': 'Pipeline'},
'anim_autoplay': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anim_loop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'anim_speed': ('django.db.models.fields.IntegerField', [], {'default': '1000'}),
'auto_begin_days': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'data_store': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cursivedata.DataStore']", 'unique': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000', 'blank': 'True'}),
'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']"}),
'full_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'full_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_height': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
'img_width': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
'last_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'last_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'next_auto_begin_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'print_top_left_x': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'print_top_left_y': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'print_width': ('django.db.models.fields.FloatField', [], {'default': '500'}),
'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cursivedata.COSMSource']", 'symmetrical': 'False', 'blank': 'True'}),
'state': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cursivedata.GeneratorState']", 'unique': 'True'})
},
'cursivedata.storedoutput': {
'Meta': {'object_name': 'StoredOutput'},
'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']", 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'default': "'output/none'", 'max_length': '200'}),
'filetype': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '10'}),
'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'pipeline': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Pipeline']", 'null': 'True', 'blank': 'True'}),
'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'complete'", 'max_length': '10'})
}
}
complete_apps = ['cursivedata'] | [
"matt@mattvenn.net"
] | matt@mattvenn.net |
4a1c309a93de9647a0f1adc90e88ad9c8624b3be | 2b8c88dfee5c5a784357515eafe8cd5f997c8774 | /leetcode/dynamic_programming/code-84.py | 1b9e7013a5652e79e6603e09d069daf7eb6aa134 | [] | no_license | archenRen/learnpy | e060f3aa2f77c35fc1b12345720af6c8b528da57 | 934ef76b97297f746a722a48c76672c7bc744cd9 | refs/heads/master | 2022-04-28T20:25:59.114036 | 2020-05-03T02:16:03 | 2020-05-03T02:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py |
# This is a TLE solution.
def largestRectangleArea2(heights: 'List[int]') -> int:
    """O(n^2) brute force: for each right edge, scan leftwards while
    tracking the minimum bar height of the window and the best rectangle
    seen so far.  Kept as a reference implementation (too slow / TLE)."""
    best = 0
    for right, floor in enumerate(heights):
        best = max(best, floor)
        for left in range(right - 1, -1, -1):
            floor = min(floor, heights[left])
            best = max(best, floor * (right - left + 1))
    return best
def largestRectangleArea(heights: 'List[int]') -> int:
    """Largest rectangle under a histogram in O(n).

    Monotonic-stack algorithm: the stack holds indexes of bars with
    ascending heights; popping a bar fixes the widest rectangle whose
    limiting height is that bar.

    Fix over the previous version: it appended a sentinel with
    ``heights.append(0)``, mutating the caller's list (input came back one
    element longer).  The sentinel is now virtual.
    """
    n = len(heights)
    stack = []  # indexes of bars with ascending heights
    best = 0
    i = 0
    while i <= n:
        # Virtual 0-height sentinel after the last bar flushes the stack.
        h = heights[i] if i < n else 0
        if not stack or h >= heights[stack[-1]]:
            stack.append(i)
            i += 1
        else:
            top = stack.pop()
            # Width runs from just after the new stack top up to i - 1;
            # with an empty stack the rectangle extends to the left edge.
            width = i if not stack else i - stack[-1] - 1
            best = max(best, heights[top] * width)
    return best
# Quick manual checks (kept disabled); the live call below prints the answer
# for heights 0..9, where the best rectangle is 5 * (10 - 5) = 25.
# print(largestRectangleArea([2, 1, 5, 6, 2, 3])) # expect 10 (2*5)
# print(largestRectangleArea([2, 1, 3, 6, 2, 3]))# expect 8 (4*2)
# print(largestRectangleArea([2,3]))
# print(largestRectangleArea([3]))
print(largestRectangleArea(list(range(10))))
| [
"wangdi03@ppdai.com"
] | wangdi03@ppdai.com |
8e19d867074a1f86ccc691db65d40f99ea7ffb2b | c989985588c9c3132cb3df4710a59fa8df7c7f2d | /user.py | 3bf573c28689ca53aea39411c68c0e0d0a081ec0 | [] | no_license | hsolmaz/amazonbrowsertest | 3100886d08ba88f16efe33ef71db9dd4bf1b4566 | 54f0010ffcbcda89b2e85a5ab6f1d2724303a52e | refs/heads/master | 2020-03-21T00:39:38.131878 | 2018-06-21T11:16:55 | 2018-06-21T11:16:55 | 137,903,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import random
import string
import time
char_set2 = string.ascii_lowercase + string.digits
random_number = char_set2.split('0')[1]
char_set = char_set2.split('0')[0]
random_numbers = ''.join(random.sample(random_number*1, 1))
i = int(random_numbers)
username = ''.join(random.sample(char_set*6, 6))
lastname = ''.join(random.sample(char_set*6, 6))
password = ''.join(random.sample(char_set*8, 8))
maildomain = ''.join(random.sample(char_set*6, 6))
usermail = username+'@'+maildomain+'.com'
category = [u'yamaç paraşütü', 'balon turu', u'dalıl', 'kitesurf',
'yelkencilik', 'windsurf', 'paintball', 'atv safari', u'okçuluk', 'jeep safari']
driver = webdriver.Chrome()
driver.get("https://alt.test/")
""" driver.find_element_by_xpath('//*[@id="main-nav"]/li[5]/a').click()
driver.find_element_by_name(
'fos_user_registration_form[name]').send_keys(username)
driver.find_element_by_name(
'fos_user_registration_form[surname]').send_keys(lastname)
driver.find_element_by_name(
'fos_user_registration_form[email]').send_keys(usermail)
driver.find_element_by_name(
'fos_user_registration_form[plainPassword][first]').send_keys(password)
driver.find_element_by_name(
'fos_user_registration_form[plainPassword][second]').send_keys(password)
driver.find_element_by_xpath(
'//*[@id="user-actions"]/div/div/form/div/div[1]/div[9]/button').click() """
try:
driver.find_element_by_xpath(
'//*[@id="index-top-search"]/div/div[2]/div/input[1]').send_keys(category[i]+Keys.ENTER)
driver.find_element_by_xpath(
'//*[@id="index-top-search"]/div/div[4]/button').click()
try:
driver.execute_script(
"document.getElementsByClassName('select2 select2-container select2-container--default select2-container--focus')[0].click()")
time.sleep(2)
driver.find_element_by_xpath(
'//*[@id="select-order"]/div[2]/ul/li[3]').click()
time.sleep(5)
str = driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div[1]/div/div[2]/div[3]/div[2]/div').text
sayilar = [int(s) for s in str.split() if s.isdigit()]
str = driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div[2]/div/div[2]/div[3]/div[2]/div').text
sayilars = [int(s) for s in str.split() if s.isdigit()]
if sayilar < sayilars:
print u'Sıralama büyükten küçüğe çalışmaktadır'
except Exception as e:
print(e)
print u'tek ürün mevcut'
try:
driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div['+i+']/div/div[2]/div[1]/a').click()
except:
print u'tek ürün mevcut ilk ürün açıldı'
driver.find_element_by_xpath(
'//*[@id="search-page"]/div[3]/div[3]/div[2]/div[1]/div/div[2]/div[1]/a').click()
time.sleep(10)
driver.close()
except Exception as e:
driver.close()
| [
"huseyin@hayalmahsulleri.com.tr"
] | huseyin@hayalmahsulleri.com.tr |
2d8ac059bbfc47157c68170ee0adcfcc597a30df | d6c7a20e0286d389ba17aef2b22931bd5d294dd0 | /postgres_demo.py | a722c97c42594c3b784794e756dfd057802fee71 | [] | no_license | bashooff/databases_demo | d1ebfde67f418324f45cf11922ce4395f96f9028 | 281cd00254451519e989bd076ed70bf4fe3cacde | refs/heads/main | 2023-04-05T22:02:20.629234 | 2021-05-05T08:51:35 | 2021-05-05T08:51:35 | 364,515,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | import psycopg2
def create_table():
    """Create the ``store`` table (item TEXT, quantity INTEGER, price REAL)
    if it does not exist.

    Fix: the connection is now closed even when the query raises, so a
    failure no longer leaks the connection.
    """
    conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
    try:
        cur = conn.cursor()
        cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
        conn.commit()
    finally:
        conn.close()
def insert(item, quantity, price):
    """Add one (item, quantity, price) row to ``store``.

    Uses a parameterized query so the driver escapes the values (never
    build SQL by string interpolation).  Fix: the connection is closed
    even when execute/commit raises.
    """
    conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
    try:
        cur = conn.cursor()
        cur.execute("INSERT INTO store VALUES(%s, %s, %s)", (item, quantity, price))
        conn.commit()
    finally:
        conn.close()
def view():
    """Return every row of ``store`` as a list of tuples.

    Fix: the connection is closed even when the query raises.
    """
    conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM store")
        rows = cur.fetchall()
    finally:
        conn.close()
    return rows
def delete(item):
    """Remove every row whose item column equals *item*.

    Fix: the connection is closed even when execute/commit raises.
    """
    conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
    try:
        cur = conn.cursor()
        cur.execute("DELETE FROM store WHERE item=%s", (item,))
        conn.commit()
    finally:
        conn.close()
def update(quantity, price, item):
    """Set quantity and price on every row matching *item*.

    The (quantity, price, item) argument order is kept as-is for backward
    compatibility with existing callers.  Fix: the connection is closed
    even when execute/commit raises.
    """
    conn = psycopg2.connect("dbname='database1' user='postgres' password='postgres1234' host='localhost' port='5432' ")
    try:
        cur = conn.cursor()
        cur.execute("UPDATE store SET quantity=%s, price=%s WHERE item=%s", (quantity, price, item))
        conn.commit()
    finally:
        conn.close()
# Demo executed at import time: ensure the table exists, then update one
# row.  (The insert/delete examples are left disabled.)
create_table()
#insert("Orange", 10, 15)
#delete("Orange")
update(20, 9, "Apple")
print(view()) | [
"bashooff@hotmail.com"
] | bashooff@hotmail.com |
d5b6777d7162805424e48a672f5d020c4bd445be | d7d22ea566bd8e97f7bfe956118ad3be4edb9d2f | /game/entity/actor.py | 355a0a98d7f1aa4005adc119a411a9821d482da1 | [] | no_license | philax/gourd_of_the_lings | c02ef1db5467306c7994b20280c7c140ea65280f | 8bdf4c0c410e39e4ac531906aacbb98d5122c632 | refs/heads/master | 2021-01-10T05:35:45.802300 | 2015-12-17T22:35:22 | 2015-12-17T22:35:22 | 48,065,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | class Actor(object):
def __init__(self, name, base_hp, base_dmg, inventory=[]):
self.base_hp = base_hp
self.hp = base_hp
self.name = name
# if(stats == None):
# self.stats['HP'] = 10
# self.stats['STR'] = 5
# self.stats['AGI'] = 5
# self.stats['DEX'] = 5
    def gain_hp(self, heal):
        # Restore *heal* hit points (no cap at base_hp).
        self.hp += heal
    def lose_hp(self, dmg):
        # Apply *dmg* damage, then print status (or a defeat message) via
        # is_alive(); the boolean result is deliberately ignored here.
        self.hp -= dmg
        self.is_alive()
    def get_hp(self):
        # Despite the name, this prints the HP line; it does not return it.
        print "%s has %s HP."% (self.name, self.hp)
def is_alive(self, is_quiet=False):
# print "DEBUG: Actor death check: %s has %s hp!"% (self.name, str(self.hp))
if self.hp > 0:
if is_quiet == False:
self.get_hp()
return True
elif self.hp <= 0:
if is_quiet == False: #hackey
print "%s has been defeated!"% (self.name)
return False | [
"plaks@turbine.com"
] | plaks@turbine.com |
66a53d2b71a6f7420dc7a93f85b662fb08440da4 | f6a24e544fe48cb13fa38fcde11ce9b57d119eba | /api.py | 7e4075258107e12abd359e2ec3feba410554f86c | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | postsai/postsai | b8afeb44d87ff16cd0526ba593f70446751affc6 | c2b8363c34ff28a8c54b04548ff1c72c8a98e2c3 | refs/heads/master | 2023-08-16T16:50:30.671405 | 2023-08-10T15:08:17 | 2023-08-10T15:08:17 | 52,112,371 | 5 | 5 | NOASSERTION | 2023-09-14T14:19:35 | 2016-02-19T19:49:22 | Python | UTF-8 | Python | false | false | 1,985 | py | #! /usr/bin/python3
# The MIT License (MIT)
# Copyright (c) 2016-2021 Postsai
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import cgi
import json
import sys
from os import environ
import config
from backend.cvs import PostsaiCommitViewer
from backend.query import Postsai
from backend.importer import PostsaiImporter
if __name__ == "__main__":
if "REQUEST_METHOD" in environ and environ['REQUEST_METHOD'] == "POST":
data = sys.stdin.read()
parsed = None
try:
parsed = json.loads(data, strict=False)
except UnicodeDecodeError:
data = data.decode("iso-8859-15").encode("utf-8")
parsed = json.loads(data, strict=False)
PostsaiImporter(vars(config), parsed).import_from_webhook()
else:
form = cgi.FieldStorage()
if form.getfirst("method", "") == "commit":
PostsaiCommitViewer(vars(config)).process()
else:
Postsai(vars(config)).process()
| [
"nhnb@users.sourceforge.net"
] | nhnb@users.sourceforge.net |
341107c12104363f5af83709a82a18422f87fb29 | 81246c8049ebf8d58e9614f4f062ec4dc0a2bd8b | /venv/Scripts/django-admin.py | 62609559dbfa35dd7608610ad723d1334c72616d | [] | no_license | nikakuzina/django | cf68e43f9779c5ee19ebfc95c4173c31338c0bec | afb2c4fe6f6e72ecad9653e72758350989e794e7 | refs/heads/main | 2023-01-31T15:07:37.111973 | 2020-12-07T22:20:18 | 2020-12-07T22:20:18 | 319,457,660 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #!C:\Users\Admin\PycharmProjects\djangonika\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"stasciobanu777@gmail.com"
] | stasciobanu777@gmail.com |
0bd4bb83349048366f08e67ed891cd7688d8efe5 | 9de52a33dfa175b3f4994658ad6a2261f045c8b0 | /难点积累/通过颜色特征直方图与滑动窗口比较图片相似性.py | c5f30321927b646fe0c3e45f26744c413506b9df | [] | no_license | infcnwangjie/opencv | 64140cebc64cacc9fe078bb266ee6569ba62bc0f | ea18ef4e9c514f703ed8fdd83b0f5d74069e1d90 | refs/heads/master | 2022-12-14T23:01:21.642076 | 2020-11-04T08:01:54 | 2020-11-04T08:01:54 | 133,104,182 | 0 | 0 | null | 2022-12-08T11:40:53 | 2018-05-12T01:38:08 | Python | UTF-8 | Python | false | false | 2,060 | py | # -*- coding: utf-8 -*-
import cv2
#https://baijiahao.baidu.com/s?id=1615404760897105428&wfr=spider&for=pc
def color_similar_ratio(image1, image2):
    """Correlation of the hue-saturation histograms of two BGR images.

    Returns 0 when either image is None; otherwise prints and returns the
    HISTCMP_CORREL similarity (1.0 = identical colour distribution).
    """
    if image1 is None or image2 is None:
        return 0
    hists = []
    for bgr in (image1, image2):
        hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
        hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 255.0])
        cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)  # scale bins into 0-255
        hists.append(hist)
    degree = cv2.compareHist(hists[0], hists[1], cv2.HISTCMP_CORREL)  # HISTCMP_BHATTACHARYYA HISTCMP_CORREL
    print(degree)
    return degree
def slide():
    # Generator of 80x80 sliding windows over a fixed column band of a
    # hard-coded test image (machine-specific path - TODO parameterize).
    img = cv2.imread("D:/2020-04-10-15-26-22test.bmp")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rows, cols = gray.shape
    for row in range(0,rows):
        for col in range(502, 612):
            # print("-" * 1000)
            # Yields (x, y, patch); patches near the bottom edge come out
            # smaller than 80x80 because numpy slicing clips silently.
            yield (col, row, img[row:row + 80, col:col + 80])
        # for col in range(2619, 2743):
        #     print("-" * 1000)
        #     yield (col, row, img[row:row + 80, col:col + 80])
def my_testslide():
    """Scan the sliding windows for a patch matching the red landmark template.

    Shows the template and the matching window whenever the histogram
    similarity exceeds 0.85.
    """
    roi_red_img = cv2.imread("D:/roi_red.png")
    # Hoisted out of the loop: resizing the template is loop-invariant, so
    # doing it once replaces one cv2.resize call per window.
    roi_red_img = cv2.resize(roi_red_img, (80, 80))
    for col, row, window in slide():
        similar = color_similar_ratio(roi_red_img, window)
        if similar > 0.85:
            print("find red landmark")
            cv2.namedWindow("roi", 0)
            cv2.imshow("roi", roi_red_img)
            cv2.namedWindow("target")
            cv2.imshow("target", window)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
if __name__ == '__main__':
    # Earlier single-pair similarity experiment kept disabled below; the
    # live entry point runs the sliding-window landmark search.
    # image1 = cv2.imread("D:/roi1.png")
    # image2 = cv2.imread("D:/target_gy.png")
    # i = color_similar_ratio(image1, image2)
    # print("color,相似度为:{}".format(i))
    my_testslide()
| [
"wangjie_rj@163.com"
] | wangjie_rj@163.com |
4ab262e191693747daa93149c342a0572400a678 | 0255c635cd5aba99c7dafb3f34757fc4761ecbb7 | /annotation_pipeline/mturk_backend/hit_results.py | b7381b2b864259150bd21a56e92d852280a9fcc7 | [
"Apache-2.0",
"MIT"
] | permissive | tomerwolgithub/Break | 021a6ecfd780fbcd0556dbdc8dcd6a2f2fe16115 | e7106929b9b7cca069e5d33c894d0eec10ef538f | refs/heads/master | 2022-12-17T14:31:34.528576 | 2021-10-30T08:56:09 | 2021-10-30T08:56:09 | 233,827,307 | 46 | 13 | MIT | 2022-12-08T03:40:20 | 2020-01-14T11:35:00 | JavaScript | UTF-8 | Python | false | false | 1,537 | py | """HIT results data structure"""
import json
class HITResults:
    """Outcome record for one MTurk assignment.

    ``submit_time`` is taken as a datetime and stored as the text
    ``YYYY-MM-DD HH:MM:SS.ffffff``; ``accepted`` stays None until either
    :meth:`accept` or :meth:`reject` is called.
    """
    def __init__(self, hit_id, assignment_id, worker_id, submit_time):
        self.hit_id = hit_id
        self.assignment_id = assignment_id
        self.worker_id = worker_id
        # Keep the submission moment as a microsecond-precision string.
        self.submit_time = submit_time.strftime('%Y-%m-%d %H:%M:%S.%f')
        self.accepted = None
        self.type = None
    def accept(self):
        """Mark the assignment as approved."""
        self.accepted = True
    def reject(self):
        """Mark the assignment as rejected."""
        self.accepted = False
    def to_json(self):
        """Serialize all attributes of this object to a JSON string."""
        return json.dumps(self, default=lambda obj: obj.__dict__)
class GenerationResults(HITResults):
    """Result of a decomposition-generation HIT."""
    def __init__(self, hit_id, assignment_id, worker_id, submit_time, decomposition):
        super().__init__(hit_id, assignment_id, worker_id, submit_time)
        self.decomposition = decomposition
        self.type = 'gen'
        # Not validated yet: both flags start unset until validate() runs.
        self.manually_validated = None
        self.valid_annotation = None
    def validate(self, manual_validation_result):
        # Record that a manual validation pass happened and its verdict.
        self.manually_validated = True
        self.valid_annotation = manual_validation_result
class ValidationResults(HITResults):
    """Result of a validation HIT: the worker's verdicts on a generated
    annotation and on the corresponding bank entry."""
    def __init__(self, hit_id, assignment_id, worker_id, submit_time, annotation_validation, bank_validation):
        super().__init__(hit_id, assignment_id, worker_id, submit_time)
        self.annotation_validation = annotation_validation
        self.bank_validation = bank_validation
        self.type = 'val'
"noreply@github.com"
] | tomerwolgithub.noreply@github.com |
610ac8671393a3cc93c8ac2f5fb7cbe982e9e849 | 96090102d5e87f1771ba5a90f7b676f4ccb0afa6 | /src/profiles/forms.py | ef4d8adbf95e2f2acf6f725493fe0bef6afcef2b | [] | no_license | rahulsayon/SocialWedding | b4b37ad69b89236784c6fb983ab27b4cd2e4266e | ab96b6a5d381936463065e75f74d0c8ffd3b1907 | refs/heads/master | 2022-12-18T15:08:39.380348 | 2020-09-27T14:49:40 | 2020-09-27T14:49:40 | 299,053,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from django import forms
from . models import Profile
class ProfileModalForm(forms.ModelForm):
class Meta:
model = Profile
fields = [ 'first_name','last_name','bio','avatar' ] | [
"rahulsayon95@gmail.com"
] | rahulsayon95@gmail.com |
24bb1c93e94a9f3ea07ca4b69095ba78a63c236d | 569c958cf7a13f61ebe1417caee671be5ba4adb4 | /LFUcache-Python/LFUCache.py | a1c98614d68c3c1bb3abb707f57c2efb94946ea7 | [] | no_license | Dechuan0629/LeetCodePractice | 932c079aff4cc1ef01d3a57b2a3d70389c1c81e3 | 8e801a667617bc76854f02dbe2fcd068d448fa39 | refs/heads/master | 2022-12-26T02:37:00.247683 | 2022-12-15T09:29:17 | 2022-12-15T09:29:17 | 246,292,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | import time
class LFUCache:
    """Least-Frequently-Used cache with least-recently-used tie-breaking.

    Keys are stored as strings; each entry is ``key -> [value, use_count,
    tick]``.  When the cache is full the entry with the smallest
    ``(use_count, tick)`` pair is evicted.

    Fix: the previous version used ``round(time.time() * 1000000)`` as the
    recency stamp, which can collide for two operations within the same
    microsecond (and go backwards on clock adjustments), making eviction
    tie-breaks nondeterministic.  A strictly increasing counter is used
    instead, so stamps are always unique.
    """
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.current_capacity = 0
        self.cache = {}
        # Monotonic operation counter used as the recency stamp.
        self._tick = 0
    def _next_tick(self):
        # Strictly increasing, so (use_count, tick) pairs never tie.
        self._tick += 1
        return self._tick
    def get(self, key: int) -> int:
        """Return the cached value for *key*, or -1 when absent.

        A hit counts as a use: it bumps both frequency and recency.
        """
        entry = self.cache.get(str(key))
        if entry is None:
            return -1
        entry[1] += 1
        entry[2] = self._next_tick()
        return entry[0]
    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evict the LFU entry when full."""
        if self.capacity <= 0:
            # Mirrors the original behaviour: a zero-capacity cache stores
            # nothing (the old code hit min() on an empty dict and its
            # ``except ValueError`` returned -1).
            return -1
        skey = str(key)
        if skey in self.cache:
            # Updating an existing key counts as a use.
            entry = self.cache[skey]
            entry[0] = value
            entry[1] += 1
            entry[2] = self._next_tick()
            return
        if self.current_capacity < self.capacity:
            self.current_capacity += 1
        else:
            # Full: evict the entry with the smallest (use_count, tick).
            victim = min(self.cache, key=lambda k: self.cache[k][1:])
            del self.cache[victim]
        # New entries start with a use count of 0, as before.
        self.cache[skey] = [value, 0, self._next_tick()]
def main():
capacity = input('input the cache capacity:')
cache = LFUCache(int(capacity))
cache.put(2,1)
cache.put(2,2)
cache.get(2)
while True:
op = input('input operator:')
if op == 'put':
key,value = map(int,input('input key,value:').split(','))
cache.put(key,value)
elif op == 'get':
key = input('input key:')
print(cache.get(int(key)))
else:
break
if __name__ == '__main__':
main() | [
"610459802@qq.com"
] | 610459802@qq.com |
6310e23a458f6e5fc6663a32b235b73bc2454352 | 0da6893c3d64c5653e33b1330e7ea02975e6138b | /Flame_Sensor_Publish.py | 454917a337e2c4222609ddcdb705913bd80d80b5 | [] | no_license | AdarshKBulusu/projectWildfire_Alert-Detection | 6faf6a5b39846d1fd2cdbccf44868d2b23aba1b1 | 2a698a29899aae0953ec4feb95c9b964921038d2 | refs/heads/master | 2022-11-20T00:23:12.646675 | 2020-07-22T01:49:29 | 2020-07-22T01:49:29 | 281,527,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import socket
host = '10.0.0.251.' #127.0.0.1
port = 50008
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(1)
print("hello starting program")
conn, addr = s.accept()
print ("Connection from", addr)
while True:
data = conn.recv(1024)
if not data: break
print("Recieved: "+str(data)) #del str
conn.send("I am server")
response = input("Reply: ")
jim=response.encode()
if response == "exit":
break
conn.sendall(jim)
conn.close()
| [
"noreply@github.com"
] | AdarshKBulusu.noreply@github.com |
ae8c74c7b7f7206875a2b5b26902c89d55155d71 | d6362b0d78433efb7b6f0a59ed427bf58d5bad47 | /python_basics/multiprocessing/other_tests/pool_map.py | ec8073e05d2559b0a26b9e75a30f10d1c6c6b3b5 | [] | no_license | davidbp/python_tutorials | 27c78791ad1ea041d543a8cd5b48a6f58bca700d | 5a7bc14a967f18d6820b39b152fc8219c95d3f75 | refs/heads/master | 2023-07-13T11:26:21.374596 | 2023-07-01T21:19:55 | 2023-07-01T21:19:55 | 104,083,908 | 14 | 4 | null | 2022-06-21T22:37:29 | 2017-09-19T14:13:34 | Jupyter Notebook | UTF-8 | Python | false | false | 526 | py |
from random import random
from time import sleep
from multiprocessing.pool import Pool
def multi_run_wrapper(args):
return task(*args)
def task(identifier, id):
value = random()
print(f'Task {identifier} executing with {id}', flush=True)
sleep(1)
return value
# protect the entry point
if __name__ == '__main__':
n_examples = 1000
chunksize = 100
with Pool(10) as pool:
pool.map(multi_run_wrapper, ((x,y) for x,y in zip(range(n_examples),range(n_examples))) , chunksize=chunksize)
| [
"davidbuchaca@gmail.com"
] | davidbuchaca@gmail.com |
8dfafbc28a614569c623e5f7c6693a7448952581 | b48a447030cd1afd7c38f765eb21448ff87c7b2f | /app/app/test.py | c767945514566650545e409e3378eeeac378c435 | [] | no_license | joshuagato/django_rest_framework_docker_setup | 87bba4292d708bc33340c07eec08bf2b00917bb6 | ea5f69654616993de91f4993a216a195b1f64657 | refs/heads/master | 2022-12-13T20:15:59.224836 | 2020-09-19T16:36:14 | 2020-09-19T16:36:14 | 295,513,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from django.test import TestCase
from .calc import add, subtract
class CalcTests(TestCase):
def test_add_numbers(self):
"""Test that two numbers are added together"""
self.assertEqual(add(4, 6), 10)
def test_subtract_numbers(self):
"""Test that values are subtracted and returned"""
self.assertEqual(subtract(2, 7), 5)
| [
"joshuagato37@gmail.com"
] | joshuagato37@gmail.com |
bd5146467d282b22af61c534fd0e9f2dd2af0c1e | eeee85ce45554dc328776fa5b6f24c894c86a098 | /bikeshed/app/admin.py | 034d9f6b95426c879ccfcfc4d05eaba3613feeea | [] | no_license | varavan/bikeshed-test | 646b0fc4dcd452dd398c46fe75ad2154009f853e | 39b93c340362e02ebf06290cd2c0b214696579c8 | refs/heads/master | 2021-01-11T18:54:58.106643 | 2017-01-22T18:12:48 | 2017-01-22T18:12:48 | 79,654,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | # coding=utf-8
from django.contrib import admin
from .models import Brand
admin.site.register(Brand)
| [
"ivan.ruiz.delatorre@gmail.com"
] | ivan.ruiz.delatorre@gmail.com |
ffd4ff39507434f06cbbc5a0767aeadf66cdf5a4 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/idea/party/issue/line_friend/group_lot_guy_lombok_kind/door/oauth.py | 555d4e970019c6d7f81128a63b321c2efb7bdedb | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,672 | py | const request = require('request')
const uuidv4 = require('uuid/v4')
const { LimitReachedError } = require('./errors')
const ITEMS_IN_REQUEST_LIMIT = 25
const REQUEST_CHAR_LIMIT = 5000
const CHAR_PER_HOUR_LIMIT = 2000000
// const subscriptionKey = process.env.TRANSLATOR_TEXT_KEY;
// if (!subscriptionKey) {
// throw new Error('Environment variable for your subscription key is not set.')
// }
const subscriptionKey = 'a674785ff843a278a87995ef4ee1659b'
function MicrosoftTranslator () {}
MicrosoftTranslator.prototype.translate = function (strings, targetLang) {
console.log(`Microsoft: translating ${strings.length} strings to ${targetLang}...`)
let options = {
method: 'POST',
baseUrl: 'https://api.cognitive.microsofttranslator.com/',
url: 'translate',
qs: {
'api-version': '3.0',
to: targetLang
},
headers: {
'5bb321a1b738949e8bace956a490028a': subscriptionKey,
'Content-type': 'application/json',
'X-ClientTraceId': uuidv4().toString()
},
body: strings.map(str => ({ text: str })),
json: true
}
return new Promise((resolve, reject) => {
request(options, (err, res, body) => {
if (err) {
reject(err)
return
}
if (body.error) {
console.log('body', body)
if (body.error.code === 400077) {
reject(new LimitReachedError('Microsoft', 'Maximum request size'))
} else if (body.error.code === 403001) {
reject(new LimitReachedError('Microsoft', 'Quota per hour'))
} else {
reject(new Error(body.error.message))
}
} else {
let translations = body
.reduce((accum, item) => accum.concat(item.translations), [])
.map(i => i.text)
resolve(translations)
}
})
}).then(translations => {
console.log(`Microsoft: Translation succeed. Got ${translations.length} translations.`)
return translations
})
}
MicrosoftTranslator.prototype.getRequestLimit = function () {
return REQUEST_CHAR_LIMIT
}
MicrosoftTranslator.prototype.getRequestItemsCountLimit = function () {
return ITEMS_IN_REQUEST_LIMIT
}
MicrosoftTranslator.prototype.getMaxLimit = function () {
return CHAR_PER_HOUR_LIMIT
}
module.exports = MicrosoftTranslator
// new MicrosoftTranslator()
// .translate([(new Array(5001)).join('a'), 'b'], 'ru')
// .then(translations => console.log('Result', translations))
// .catch(err => console.error(err))
/*
* Limits: https://docs.microsoft.com/en-us/azure/cognitive-services/translator/request-limits
* https://docs.microsoft.com/en-us/azure/cognitive-services/translator/reference/v3-0-translate?tabs=curl
* */
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
7094d4bbe7a500eb46faa9fac35c316ada1389af | 77fc5af96da1d461c86c7f9668b64b99ca04a1b6 | /codes/horner.py | 4458f960d38c57f60ba6940082b190afccdbd331 | [] | no_license | rene-d/edupython | 5b6bc8ddb5eb8ec896ee70fb961d4e689af1075a | 1261d0c7aae17bb2d4ff3370860768b73ba4172d | refs/heads/master | 2020-11-24T10:07:18.504472 | 2019-12-21T21:03:08 | 2019-12-21T21:03:08 | 228,099,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # Méthode de Hörner
# https://edupython.tuxfamily.org/sources/view.php?code=horner
# Créé par IANTE, le 12/07/2011
from lycee import *
P=liste_demande('entrez les coefficients de P(x) par ordre des puissances croissantes')
r=demande('Entrez une racine évidente')
Q=[0]*(len(P)-1)
v=0
for d in range(len(P)-2,-1,-1):
v=P[d+1]+r*v
Q[d]=v
print (affiche_poly(P)+'=('+affiche_poly([-r,1])+')('+affiche_poly(Q)+')')
| [
"rene.devichi@gmail.com"
] | rene.devichi@gmail.com |
f4ece548db005dd63655e9189b41b5c461dedea0 | 2ad4b5b73b050f01f4952bd95806f3ff316fbfa4 | /Leetcode/python/sol_25(2).py | e13645131ecae05cbe6f2bafd5435c575cb5a201 | [] | no_license | Clement25/Leetcode-Solution-and-Algorithm | 5d44f4099b8fb7b81fa497cc08161e16e70285b0 | c80fd1dee21209abcbaa1fb09412cd7f2de7b586 | refs/heads/master | 2021-09-08T15:01:44.661219 | 2020-11-06T07:05:16 | 2020-11-06T07:05:16 | 178,236,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | def threesumclosest(nums, target):
'''
type nums:List[int]
type target:int
rtype:int
'''
mind = 100000
nums.sort()
for i, v in enumerate(nums[:-2]):
ntgt1 = target - v
for j, u in enumerate(nums[i + 1:-1]):
ntgt2 = ntgt1 - u
if ntgt2 in nums[i + j + 2:]:
return target
for k, w in enumerate(nums[i + j + 2:]):
if w > ntgt2:
break
l = i + j + k + 1
dis1 = abs(w - ntgt2)
dis2 = abs(ntgt2 - nums[l]) if k != 0 else dis1
dis = min(dis1, dis2)
if dis < mind:
mind = dis
res = u + v + w if dis1 <= dis2 else u + v + nums[l]
return res
#print(threesumclosest([-55, -24, -18, -11, -7, -3, 4, 5, 6, 9, 11, 23, 33], 0))
#print(threesumclosest([-1,2,1,-4], 1))
#print(threesumclosest([-1,0,1,1,55], 3))
print(
threesumclosest([
13, 2, 0, -14, -20, 19, 8, -5, -13, -3, 20, 15, 20, 5, 13, 14, -17, -7,
12, -6, 0, 20, -19, -1, -15, -2, 8, -2, -9, 13, 0, -3, -18, -9, -9,
-19, 17, -14, -19, -4, -16, 2, 0, 9, 5, -7, -4, 20, 18, 9, 0, 12, -1,
10, -17, -11, 16, -13, -14, -3, 0, 2, -18, 2, 8, 20, -15, 3, -13, -12,
-2, -19, 11, 11, -10, 1, 1, -10, -2, 12, 0, 17, -19, -7, 8, -19, -17,
5, -5, -10, 8, 0, -12, 4, 19, 2, 0, 12, 14, -9, 15, 7, 0, -16, -5, 16,
-12, 0, 2, -16, 14, 18, 12, 13, 5, 0, 5, 6
], -59))
| [
"35480362+Clement25@users.noreply.github.com"
] | 35480362+Clement25@users.noreply.github.com |
1ac9e3f7b26aa042692590d17c2b31da13a46806 | 3549dd941a47e359fa3ab17f2f2ab1968fb928e4 | /dataset/mini_imagenet.py | 977d67fbdfa3631520e6fc5a48353ed196f061d1 | [] | no_license | Juncheng-Dong/FSL | 74628d414a863091cfcc5627ed5dc2a54a7f2611 | 1273175c6cfc14a27dcc6d7e5b682f950b45b4ed | refs/heads/main | 2023-08-01T03:49:01.978935 | 2021-09-14T15:48:33 | 2021-09-14T15:48:33 | 406,255,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,283 | py | import os
import pickle
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class ImageNet(Dataset):
def __init__(self, args, partition='train', pretrain=True, is_sample=False, k=4096,
transform=None):
super(Dataset, self).__init__()
self.data_root = args.data_root
self.partition = partition
self.data_aug = args.data_aug
self.mean = [120.39586422 / 255.0, 115.59361427 / 255.0, 104.54012653 / 255.0]
self.std = [70.68188272 / 255.0, 68.27635443 / 255.0, 72.54505529 / 255.0]
self.normalize = transforms.Normalize(mean=self.mean, std=self.std)
self.pretrain = pretrain
if transform is None:
if self.partition == 'train' and self.data_aug:
self.transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.RandomCrop(84, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.array(x),
transforms.ToTensor(),
self.normalize
])
else:
self.transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.ToTensor(),
self.normalize
])
else:
self.transform = transform
if self.pretrain:
self.file_pattern = 'miniImageNet_category_split_train_phase_%s.pickle'
else:
self.file_pattern = 'miniImageNet_category_split_%s.pickle'
self.data = {}
with open(os.path.join(self.data_root, self.file_pattern % partition), 'rb') as f:
data = pickle.load(f, encoding='latin1')
self.imgs = data['data']
self.labels = data['labels']
# pre-process for contrastive sampling
self.k = k
self.is_sample = is_sample
if self.is_sample:
self.labels = np.asarray(self.labels)
self.labels = self.labels - np.min(self.labels)
num_classes = np.max(self.labels) + 1
self.cls_positive = [[] for _ in range(num_classes)]
for i in range(len(self.imgs)):
self.cls_positive[self.labels[i]].append(i)
self.cls_negative = [[] for _ in range(num_classes)]
for i in range(num_classes):
for j in range(num_classes):
if j == i:
continue
self.cls_negative[i].extend(self.cls_positive[j])
self.cls_positive = [np.asarray(self.cls_positive[i]) for i in range(num_classes)]
self.cls_negative = [np.asarray(self.cls_negative[i]) for i in range(num_classes)]
self.cls_positive = np.asarray(self.cls_positive)
self.cls_negative = np.asarray(self.cls_negative)
def __getitem__(self, item):
img = np.asarray(self.imgs[item]).astype('uint8')
img = self.transform(img)
target = self.labels[item] - min(self.labels)
if not self.is_sample:
return img, target, item
else:
pos_idx = item
replace = True if self.k > len(self.cls_negative[target]) else False
neg_idx = np.random.choice(self.cls_negative[target], self.k, replace=replace)
sample_idx = np.hstack((np.asarray([pos_idx]), neg_idx))
return img, target, item, sample_idx
def __len__(self):
return len(self.labels)
class MetaImageNet(ImageNet):
def __init__(self, args, partition='train', train_transform=None, test_transform=None, fix_seed=True):
super(MetaImageNet, self).__init__(args, partition, False)
self.fix_seed = fix_seed
self.n_ways = args.n_ways
self.n_shots = args.n_shots
self.n_queries = args.n_queries
self.classes = list(self.data.keys())
self.n_test_runs = args.n_test_runs
self.n_aug_support_samples = args.n_aug_support_samples
if train_transform is None:
self.train_transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.RandomCrop(84, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.array(x),
transforms.ToTensor(),
self.normalize
])
else:
self.train_transform = train_transform
if test_transform is None:
self.test_transform = transforms.Compose([
lambda x: Image.fromarray(x),
lambda x: np.array(x),
transforms.ToTensor(),
lambda x: np.array(x),
self.normalize
])
else:
self.test_transform = test_transform
self.data = {}
for idx in range(self.imgs.shape[0]):
if self.labels[idx] not in self.data:
self.data[self.labels[idx]] = []
self.data[self.labels[idx]].append(self.imgs[idx])
self.classes = list(self.data.keys())
def __getitem__(self, item):
if self.fix_seed:
np.random.seed(item)
cls_sampled = np.random.choice(self.classes, self.n_ways, False)
support_xs = []
support_ys = []
query_xs = []
query_ys = []
for idx, cls in enumerate(cls_sampled):
imgs = np.asarray(self.data[cls]).astype('uint8')
support_xs_ids_sampled = np.random.choice(range(imgs.shape[0]), self.n_shots, False)
support_xs.append(imgs[support_xs_ids_sampled])
support_ys.append([idx] * self.n_shots)
query_xs_ids = np.setxor1d(np.arange(imgs.shape[0]), support_xs_ids_sampled)
query_xs_ids = np.random.choice(query_xs_ids, self.n_queries, False)
query_xs.append(imgs[query_xs_ids])
query_ys.append([idx] * query_xs_ids.shape[0])
support_xs, support_ys, query_xs, query_ys = np.array(support_xs), np.array(support_ys), np.array(
query_xs), np.array(query_ys)
num_ways, n_queries_per_way, height, width, channel = query_xs.shape
query_xs = query_xs.reshape((num_ways * n_queries_per_way, height, width, channel))
query_ys = query_ys.reshape((num_ways * n_queries_per_way, ))
support_xs = support_xs.reshape((-1, height, width, channel))
if self.n_aug_support_samples > 1:
support_xs = np.tile(support_xs, (self.n_aug_support_samples, 1, 1, 1))
support_ys = np.tile(support_ys.reshape((-1, )), (self.n_aug_support_samples))
support_xs = np.split(support_xs, support_xs.shape[0], axis=0)
query_xs = query_xs.reshape((-1, height, width, channel))
query_xs = np.split(query_xs, query_xs.shape[0], axis=0)
support_xs = torch.stack(list(map(lambda x: self.train_transform(x.squeeze()), support_xs)))
query_xs = torch.stack(list(map(lambda x: self.test_transform(x.squeeze()), query_xs)))
return support_xs, support_ys, query_xs, query_ys
def __len__(self):
return self.n_test_runs
if __name__ == '__main__':
args = lambda x: None
args.n_ways = 5
args.n_shots = 1
args.n_queries = 12
args.data_root = 'data'
args.data_aug = True
args.n_test_runs = 5
args.n_aug_support_samples = 1
imagenet = ImageNet(args, 'val')
print(len(imagenet))
print(imagenet.__getitem__(500)[0].shape)
metaimagenet = MetaImageNet(args)
print(len(metaimagenet))
print(metaimagenet.__getitem__(500)[0].size())
print(metaimagenet.__getitem__(500)[1].shape)
print(metaimagenet.__getitem__(500)[2].size())
print(metaimagenet.__getitem__(500)[3].shape)
| [
"jd420@research-tarokhlab-10.oit.duke.edu"
] | jd420@research-tarokhlab-10.oit.duke.edu |
39d5e277eb935eee8876c1af0b0557edcf5f6b91 | 146012dda21ab72badad6daa8f98e6b26fedb128 | /13day/9-名片系统.py | c647a413c352cb726036cb58e94329648c26b284 | [] | no_license | fengshuai1/1805 | 41786c3561beca580ba82d9e9d4347571e38e198 | 8dc3e6605cc1d6f91685ae45bfebfc062f0aa489 | refs/heads/master | 2020-03-19T07:41:40.608389 | 2018-06-28T01:45:43 | 2018-06-28T01:45:43 | 136,140,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,124 | py | list = []#存放名字
print("名片管理系统".center(50,"*"))
while True:
print("1:添加名片".center(50," "))
print("2:查找名片".center(50," "))
print("3:修改名片".center(50," "))
print("4:删除名片".center(50," "))
print("5:打印名片".center(50," "))
num = int(input("请选择功能"))
if num == 1:
d = {}#空字典
while True:
name = input("请输入要添加的名字")
if len(name) > 4:
print("太长,请重新输入")
continue
job = input("请输入要添加的职位")
if len(job) > 4:
print("太长,请重新输入")
continue
phone = input("请输入手机号")
if len(phone) != 11 or phone.startswith("1") == False:
print("手机号输入有误,请重新输入")
continue
d["name"] = name
d["job"] = job
d["phone"] = phone
#添加到列表
list.append(d)
print("添加成功")
break
elif num == 2:
name = input("请输入要查找的姓名")
flag = False#假设没有咱们要找的人
for i in list:
if name == i["name"]:
print("姓名:%s\n职位:%s\n电话:%s"%(i["name"],i["job"],i["phone"]))
flag = True#找到了
break
if flag == False:
print("查无此人")
elif num == 3:
#要改之前,你得先查到你要找的那个
name = input("请输入你要改的人的姓名")
flag = False
for i in list:
if name == i["name"]:
print("1:修改名字")
print("2:修改职位")
print("3:修改电话")
num_1 = int(input("请选择功能"))
if num_1 == 1:
new_name = input("请输入新的名字")
i["name"] = new_name
elif num_1 == 2:
new_job = input("请输入新的职位")
i["job"] = new_job
elif num_1 == 3:
new_phone = input("请输入新的电话")
i["phone"] = new_phone
flag = True
break
if flag == False:
print("查无此人")
elif num == 4:
name = input("请输入你要删除的名字")
flag = False
for position,i in enumerate(list):#把索引遍历出来
if name == i["name"]:
flag = True#找到了
print("1:确认删除")
print("2:取消删除")
num_2 = int(input("请选择序号"))
if num_2 == 1:
list.pop(position)#直接删除
break
if flag == False:
print("查无此人")
elif num == 5:#打印名片
print("名字\t职位\t电话")
for i in list:
print(" "+i["name"]+"\t "+i["job"]+"\t "+i["phone"])
| [
"1329008013@qq.com"
] | 1329008013@qq.com |
4eff0fd515d88525ed27929659bd4f5416c8a937 | 0181ec7a90e4e5955c88751f7ef7ab0dbbb21203 | /parser.py | cc5701698b239f2659b835cfd3032656d4c417a0 | [] | no_license | rizel/gyle | fa7d490fc587bddc0688bd02ff2cd25e4e4504b3 | e32ad8c7ba8b5c011f25c837f0bb378d9db110bf | refs/heads/master | 2023-06-13T08:11:22.627707 | 2021-07-11T22:56:33 | 2021-07-11T22:56:33 | 383,124,195 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | import json
from pprint import pprint
from configuration import OUTPUT_FILE, INTERESTING_JOB_TITLES
from global_variables import *
def parse_informational_data():
global skipped_employees, processed_employees
global argyle_countries, argyle_languages, argyle_interesting_facts
json_content = None
with open(OUTPUT_FILE, 'r') as file:
file_string = file.read()
# loading the stringified json
stringified_json = json.loads(file_string)
# loading as a json finally.
json_content = json.loads(stringified_json)
# pprint(json_content)
employees_data = json_content['props']['pageProps']['employees']
# print(len(employees_data))
# pprint(employees_data)
for employee in employees_data:
try:
country = employee['Country']
languages = employee['Languages']
job_title = employee['Job Title']
argyle_interesting_facts.append(employee['Interesting Fact'])
# country processing
if country in argyle_countries:
argyle_countries[country] +=1
else:
argyle_countries[country] = 1
# job title processing
if job_title in argyle_job_titles:
argyle_job_titles[job_title] +=1
else:
argyle_job_titles[job_title] = 1
# select people
if job_title in INTERESTING_JOB_TITLES:
interesting_people[job_title].append(employee)
# languages processing
for language in languages:
if language in argyle_languages:
argyle_languages[language] +=1
else:
argyle_languages[language] = 1
processed_employees+= 1
except Exception as e:
print(e) | [
"rizelita@gmail.com"
] | rizelita@gmail.com |
265c1a982e59086f60095200d42064955cf9ed66 | f7d22242393632528f866e4cb4d08ba83550f865 | /Part 11 - XGBoost/xgboost.py | fda346b0b846cb6f0b57ae30026753a18634b205 | [] | no_license | someshjaishwal/Machine-Learning | 5ecc71685e4230f5a031b9368a89fcd34bf94568 | 3f66e282501a87131a364267486f4427bf3fab0b | refs/heads/master | 2020-03-22T05:42:46.492676 | 2018-07-03T13:12:50 | 2018-07-03T13:12:50 | 139,585,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | # -*- coding: utf-8 -*-
# extreme gradient boosting
# basic libararies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:,3:13].values
y = dataset.iloc[:,13].values
### PART 1 - Preprocessing Dataset
# encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
label_x_1 = LabelEncoder()
X[:,1]= label_x_1.fit_transform(X[:,1])
label_x_2 = LabelEncoder()
X[:,2] = label_x_2.fit_transform(X[:,2])
ohen = OneHotEncoder(categorical_features = [1])
X = ohen.fit_transform(X).toarray()
X = X[:,1:]
# splitting training and test set
from sklearn.cross_validation import train_test_split
X_train, X_text, y_train, y_test = train_test_split(X, y, test_size = 0.2,
random_state = 42)
"""
# no need of feature scaling in xtreme gradient boosting
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_text = sc.transform(X_text)
"""
### PART 2 - fitting xgboost on training set
from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train,y_train)
### PART 3 - Making predictions and Evaluating the model
# predicting testset results
y_pred = classifier.predict(X_text)
# making confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# evaluating model
accuracy = (cm[0,0]+cm[1,1])*100/(cm[0,0]+cm[0,1]+cm[1,0]+cm[1,1])
print ("accuracy :",accuracy,"%")
# evaluation using k-fold cross validation
from sklearn.cross_validation import cross_val_score
accuracy_vec = cross_val_score(estimator = classifier,
X = X_train, y = y_train, cv = 10)
final_accurcay = accuracy_vec.mean()
std_deviation = accuracy_vec.std()
| [
"noreply@github.com"
] | someshjaishwal.noreply@github.com |
a4a14ef74a6feb9dfff1fce11de617aeefe4c012 | 8f4cb6b34e4a13b0d71756987aa07d22d1e5c399 | /solutions/uri/1029/1029.py | aa872f44a910ef2e08cbb1dcaf8f16351f4eafd5 | [
"MIT"
] | permissive | kaneki-ken01/playground | e688537439d4ef937cfeb3a0be54159c5d47d51b | 1900da4a7b352b1228659631068ff365456408e1 | refs/heads/main | 2023-08-16T21:00:05.823664 | 2021-10-04T19:09:08 | 2021-10-04T19:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | fibs = [0] * 40
calls = [0] * 40
def fibonacci(n):
global fibs, calls
if n <= 1:
fibs[n] = n
calls[n] = 0
else:
if fibs[n] != 0:
return fibs[n]
fibs[n] = fibonacci(n - 1) + fibonacci(n - 2)
calls[n] = calls[n - 1] + calls[n - 2] + 2
return fibs[n]
i = int(input())
for _ in range(i):
n = int(input())
fibonacci_result = fibonacci(n)
calls_count = calls[n]
print(f'fib({n}) = {calls_count} calls = {fibonacci_result}')
| [
"deniscostadsc@gmail.com"
] | deniscostadsc@gmail.com |
c441941156bd0808bc93eb34a0c6ef9a076dbaee | 06164402e4a9c46a03d579175e588519dbd4048d | /experiments/experiments_gdsc/cross_validation/vb_nmf/linesearch_xval_vb.py | 013c70a9a0481ff098be2e4b97b6fb3098dc6e91 | [
"Apache-2.0"
] | permissive | XuanHeIIIS/BNMTF | 19547e36466ecee8d45fb0002d305ee6b7ba6c23 | 34df0c3cebc5e67a5e39762b9305b75d73a2a0e0 | refs/heads/master | 2020-03-27T12:47:58.375964 | 2018-06-10T10:22:19 | 2018-06-10T10:22:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | """
Run the cross validation with line search for model selection using VB-NMF on
the Sanger dataset.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../../"
sys.path.append(project_location)
import numpy, random
from BNMTF.code.models.bnmf_vb_optimised import bnmf_vb_optimised
from BNMTF.code.cross_validation.line_search_cross_validation import LineSearchCrossValidation
from BNMTF.data_drug_sensitivity.gdsc.load_data import load_gdsc
# Settings
standardised = False
iterations = 1000
init_UV = 'random'
K_range = [15,20,25,30]
no_folds = 10
restarts = 1
quality_metric = 'AIC'
output_file = "./results.txt"
alpha, beta = 1., 1.
lambdaU = 1./10.
lambdaV = 1./10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
# Load in the Sanger dataset
(_,X_min,M,_,_,_,_) = load_gdsc(standardised=standardised,sep=',')
# Run the cross-validation framework
#random.seed(42)
#numpy.random.seed(9000)
nested_crossval = LineSearchCrossValidation(
classifier=bnmf_vb_optimised,
R=X_min,
M=M,
values_K=K_range,
folds=no_folds,
priors=priors,
init_UV=init_UV,
iterations=iterations,
restarts=restarts,
quality_metric=quality_metric,
file_performance=output_file
)
nested_crossval.run()
"""
all_MSE = [2.2242309355503416, 2.3108126630384804, 2.4095896447817631, 2.2188694213830114, 2.4185938516134278, 2.1808748510586002, 2.2503432196374651, 2.2305023229025145, 2.3595465204422488, 2.2186318302878667]
all_R2 = [0.8123419361488506, 0.8011409466575017, 0.7943028271877304, 0.8125046212085996, 0.7934881370166628, 0.8111969927756486, 0.8058878338360765, 0.811089129626958, 0.798953276136085, 0.8151865445946502]
Average MSE: 2.2821995260695718 +- 0.0066998949966021598
Average R^2: 0.80560922451887629 +- 5.8495363723835686e-05
""" | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
2cb62f842ce83e502bbe612b698ba73756afc2e2 | a4571dd0b2ebb0f3c50a540355910d502796ea6a | /EX - 79.py | b45ee0bd62eb1934035ac3e1b7cbf14cd3fc653c | [] | no_license | Eduflutter/EXE_python | bf2f3d881e97e75a70505635b483fda642f5f6c7 | acc38dabf3d8b694fbe674c54e283cf55a5578d8 | refs/heads/master | 2023-05-01T14:59:30.839428 | 2020-10-02T17:31:23 | 2020-10-02T17:31:23 | 366,124,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | '''
Crie um progrma onde o usário possa
digitar vários valores numéricos e cadrastre-os em
uma lista.Caso o número já axista lá dentro, ele
não será adicionado.No final, serão exibidos todos
os valores únicos digitados, em ordem crescente.
'''
print('\33c')
print('\33[1;34m-\33[1;33m'*40)
print(f'\33[1;32m{":":<0}\33[m\33[1;32m{"CADRASTRO NUMÈRICOS":^38}\33[m\33[1;32m{":":>0}\33[m')
print('\33[1;34m-\33[m'*40)
valor = []
while True:
n = int(input('Digite O valor: '))
if n not in valor:
valor.append(n)
else:
print('Valor duplicado! não adicionado...')
r = str(input('Deseja continuar?: [S / N] ')).upper()[0]
if r in 'N':
break
print('-'*40)
print(f'O banco de dados dos valores forão: ')
print('-'*40)
print(f'{sorted(valor)}') | [
"eduoliveira3939@gmail.com"
] | eduoliveira3939@gmail.com |
6dd96a9c6db1009bb2305040395a722405b07ba1 | e877a3761f4f6ceefc361eee30844e82ca4155b1 | /testing/models.py | 2a1cae72ade8df75fec16b47e780c9480994b4d6 | [] | no_license | AlexSilva18/StatikA | a24b2d54b63b7038188310fe56adddb3dbba2a8b | 7b5791ff1055d7e4fa25fc249930d5beb2b58a1e | refs/heads/master | 2021-01-01T15:54:25.604198 | 2017-08-10T18:25:54 | 2017-08-10T18:25:54 | 97,729,746 | 0 | 1 | null | 2017-07-27T16:10:16 | 2017-07-19T15:02:14 | HTML | UTF-8 | Python | false | false | 325 | py | from django.db import models
class Testing(models.Model):
title = models.CharField(max_length=255)
test_description = models.TextField(blank=True, default='')
def __str__(self):
return '{}: {}'.format(self.title, self.test_description)
def get_absolute_url(self):
return print("Hello")
| [
"alex.rsilva18@gmail.com"
] | alex.rsilva18@gmail.com |
b6797ce0808d55a048fdb5f8ef31e878e2ee688e | c637f95bb4fdd6fcf5e0ee2b8a7ea59f915ebc13 | /Red Team/cobaltsrike-notif-beacon/telegram.py | 6784631dfb91f0d412b586b7c97998db51fbdd83 | [] | no_license | Crj0b/Pentest | e9bbfcc82122176ad0ae9c27961376b33fe76e94 | a893c3cbd4d34dcb70cb12430dc33558208f3c2b | refs/heads/master | 2023-03-17T10:45:31.035452 | 2019-06-30T18:23:19 | 2019-06-30T18:23:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | #! /usr/bin/env python3
import argparse
import telepot
import socket
chat_id = 'xxx' #userID
bot = telepot.Bot('xxxxxxxx') #token telegram
parser = argparse.ArgumentParser(description='Beacon Info')
parser.add_argument('--computername')
parser.add_argument('--internalip')
parser.add_argument('--username')
args = parser.parse_args()
computername = args.computername
internalip = args.internalip
username = args.username
hostname = socket.gethostname()
message = "Message from "+hostname+" Server\nBeacon succes implant Info Target\nUsername : "+username+"\nIpaddres : "+internalip+"\nComputer name : "+computername+"."
bot.sendMessage(chat_id, message)
| [
"rahmat.hadi@tiket.com"
] | rahmat.hadi@tiket.com |
94849937d8f7341ee3a6c4ea309d665c8ed58ae7 | 8d1351a226efbe70d4bffa7f185e74b9fe49827e | /app/registerData/registerConform.py | 460e328c7178f6ce1647295140a13e2960b3f5b2 | [] | no_license | heojoon/flaskdemo | 3a0afb2594e736b4774ff4c6f6a3c4a4dad73489 | 9fc1fd317e37c35495a9fcf421cc78787ab31229 | refs/heads/main | 2023-04-05T05:46:47.244706 | 2021-04-17T13:04:03 | 2021-04-17T13:04:03 | 357,064,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # file name : registerConform.py
# pwd : /project_name/app/registerData/registerConform.py
from flask import Blueprint, request, render_template, flash, redirect, url_for
from flask import current_app as app
from app.module import forms
registerData = Blueprint('registerData', __name__, url_prefix='/register')
#@registerData.route('/', methods=['GET','POST'])
#def home():
# return render_template('/registerData/layout.html')
@registerData.route('/', methods=['GET','POST'])
def register():
form = forms.RegistrationForm()
if form.validate_on_submit():
flash(f'{form.username.data} 님 가입 완료!', 'success')
#return redirect(url_for('home'))
return render_template('/registerData/register.html', form=form) | [
"heojoon48@gmail.com"
] | heojoon48@gmail.com |
b335b27973d0716c0d68c2be7d60cf3fe2f5edf8 | 87270a041c6acb4d0a8926fef53d73259a59011c | /examen2CDI.py | 5f995d46bcb6d230e3eb486016f13fb83485c10e | [] | no_license | marcazu/CDI-FIB | c83b16ccbcde5bc40833cdba6b3dcdabac07f6c3 | c5df3c5279437d992877bd535d361bda312cecac | refs/heads/main | 2023-01-06T10:58:25.281409 | 2020-11-15T18:38:08 | 2020-11-15T18:38:08 | 313,098,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,023 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 17:53:30 2020
@author: Marc
"""
import numpy as np
from scipy import misc
from math import sqrt,log2,log
import matplotlib.pyplot as plt
from PIL import Image
import pywt
"-----------------EXERCICI PREFIJO-----------------"
def prefix_code(lst):
output = []
sumatorio = 0
for i in lst:
sumatorio += 2**(-i)
if(sumatorio > 1):
return output
else:
for index, i in enumerate(lst):
if(index is 0):
output.append(bin(0)[2:].zfill(i))
else:
for j in range(0, 2**i):
prefix = False
aux = bin(j)[2:].zfill(i)
for binword in output:
prefix = ((aux.startswith(binword)) or (binword.startswith(aux)))
if prefix :
break
if not prefix:
output.append(aux)
break
return output
prova = [3,3,3,5,5,6,6,7,8,8,9,9,9]
binarywords = prefix_code(prova)
if not binarywords:
print ("no")
else:
print("yes")
[print(binword)for binword in binarywords]
"-----------------EXERCICI code LZ77-----------------"
def LZ77Code(mensaje,S=12,W=18):
code=[[0,0,mensaje[0]]]
mydict=[[0,0,mensaje[0]]]
i=1#donde estamos leyendo carácteres
ahead=W-S
lookahead=mensaje[1:1+ahead]
old=str(mensaje[max(0,i-S-1):max(0,i)])
while i < len(mensaje):
offset=0
length=0
char=lookahead[0]
window = old+lookahead
#miramos matches
for j in range(len(old)-1,-1,-1):
if old[j] == lookahead[0]:
#tenemos algun match
match=True
izq=j+1
der=len(old)+1
maxlen=1
#longest prefix match
while match and der <len(window):
if window[izq] == window[der]:
izq+=1
der+=1
maxlen+=1
else:
match=False
#extendemos carácteres extra
if maxlen> length :
offset= len(old) -j
length= maxlen
try :
char= window[der]
except:
try:
char= window[i+length]
except:
char=window[der-1]
length -=1
if length == 0:
offset=0
code=code+[[offset,length,char]]
i += length+1
old=str(mensaje[max(0,i-S):i])
lookahead= str(mensaje[i:ahead+i])
code[-1]=[code[-1][0],code[-1][1]+1,'EOF']
return code
mensaje = 'abcdeabaebbadab'
code = LZ77Code(mensaje, 12, 18)
print(code)
, 0.0128, 0.016, , 0.0051, 0.0102, ]
"-----------------EXERCICI decode LZ77-----------------"
def LZ77Decode(codigo):
mensaje=''
for i in codigo:
if i[0] != 0:
pos=len(mensaje)-i[0]
word=mensaje[pos:pos+ i[1]]
extension= ""
mensaje += word
if i[0] <= i[1]:#debemos extender el último simbolo
mensaje += mensaje[i[0]+1:i[1]+1]
mensaje+= i[2]
return mensaje[:-3]
def LZ77Decode(codigo):
mensaje=''
for i in codigo:
if i[0] != 0:
pos=len(mensaje) - i[0]
word=mensaje[pos:pos+ i[1]]
extension= ""
mensaje += word
if i[0] <= i[1]:#debemos extender el último simbolo
mensaje += mensaje[i[0]+1:i[1]+1]
mensaje+= i[2]
return mensaje[:-3]
def LZ78Decode(codigo):
mensaje=''
diccionario=[]
n=len(codigo)
for i in range(n-1):
indice=codigo[i][0]
letra=codigo[i][1]
if indice==0:
mensaje+=letra
diccionario+=[letra]
else:
palabra=diccionario[indice-1]+letra
mensaje+=palabra
diccionario+=[palabra]
indice=codigo[n-1][0]
letra=codigo[n-1][1]
if indice>0:
palabra=diccionario[indice-1]
mensaje+=palabra
return mensaje, diccionario
"-----------------EXERCICI quins son codis-----------------"
def prefix_code(code):
for binword in code:
for aux in code:
if(binword !=aux):
prefix = ((aux.startswith(binword)) or (binword.startswith(aux)))
if(prefix):
return ("no")
return ("yes")
code = ['00','11','001','111','0111', '01111','10000']
print(prefix_code(code))
"-----------------EXERCICI MATRIU ORTOGONAL-----------------"
"S'ha de mirar quina matriu multiplicada per la transposada dona algo rollo [1,0,0][0,1,0][0,0,1]
matrix1=[[2/3,2/3,1/3],
[-(sqrt(2))/2,(sqrt(2))/2,0],
[-(sqrt(2))/6,-(sqrt(2))/6,2*(sqrt(2))/3]]
matrix2=[[2/3,2/3,1/3],
[(sqrt(2))/2,(sqrt(2))/2,(sqrt(2))/4],
[-(sqrt(2))/6,-(sqrt(2))/6,2*(sqrt(2))/3]]
matrix3= [[2,2,1],
[4,4,2],
[-2,-2,8]]
matrix4 = [[2,2,1],
[-2,2,0],
[-2,-2,8]]
"fer aixo x cada 1"
mat = np.asarray(matrix1)
"print(mat.transpose())"
mat1T=(np.dot(mat,mat.transpose()))
mat = np.asarray(matrix2)
"print(mat.transpose())"
mat2T=(np.dot(mat,mat.transpose()))
mat = np.asarray(matrix3)
"print(mat.transpose())"
mat3T=(np.dot(mat,mat.transpose()))
mat = np.asarray(matrix4)
"print(mat.transpose())"
mat4T=(np.dot(mat,mat.transpose()))
"*---------------------------------------------------------------------------*"
"-----------------EXERCICI RATIO D COMPRESSIÖ-----------------"
def ratio_de_compression(pixeles,escala,entradas,pixelesB):
num = pixeles*pixeles*log2(escala)
den = (pixeles/pixelesB)*(pixeles/pixelesB)*log2(entradas) + entradas*pixelesB*pixelesB*log2(escala)
return num/den
ratio_de_compression(512,128,128,4)
"-----------------EXERCICI WAVELET-----------------"
l2 = [0.28,0.8481,0.4271,-0.141]
l3= [-0.32,0.2481,-0.1729,-0.741]
l = [0.28,0.8481,0.4271,-0.141]
suman = 0
suma2 = 0
for i in l:
suman += i
suma2 += (i**2)
print(suman,"=", sqrt(2))
print("1 =",suma2)
print((l[2]*l[0])+(l[3]*l[1]))
"-----------------EXERCICI BLOC DE COLORS-----------------"
def idc_bloque(p):
"Substituir x valor de l'exercici"
c = [[0.0, -0.5773502691896257, 0.8164965809277261, 0.0],
[0.0, -0.5773502691896257, -0.40824829046386313, -0.7071067811865475],
[0.0, -0.5773502691896257, -0.408248290463863, 0.7071067811865477],
[1.0, 0.0, 0.0, 0.0]]
ct = np.transpose(c)
return (np.tensordot(np.tensordot(ct,p,axes=([1],[0])), c, axes = ([1],[0]))).reshape(-1)
fig = plt.figure()
array = np.zeros((4,4))
array = array.astype(int)
for i in range(4):
for j in range(4):
array[i][j] = 1
m = idc_bloque(array)
fig.add_subplot(4,4,i*4+j+1).axis('off')
plt.imshow(m.reshape((4,4)))
array[i][j] = 0
def LZ78Decode(codigo):
mensaje=''
diccionario=[]
n=len(codigo)
for i in range(n-1):
indice=codigo[i][0]
letra=codigo[i][1]
if indice==0:
mensaje+=letra
diccionario+=[letra]
else:
palabra=diccionario[indice-1]+letra
mensaje+=palabra
diccionario+=[palabra]
indice=codigo[n-1][0]
letra=codigo[n-1][1]
if indice>0:
palabra=diccionario[indice-1]
mensaje+=palabra
return mensaje, diccionario | [
"95marc@gmail.com"
] | 95marc@gmail.com |
47b9a6bd6f7a0eeeea5383a97b107d05ed17e022 | 505506f12bf43f8693b95d4b19bc4e0aded8cab0 | /agents/runner.py | b42ea639c0ed776070021f875e865efeb36b0840 | [
"BSD-3-Clause"
] | permissive | Aubret/gym-minigrid | 8be3fe596a0a071af4c504d215655114d4c7bc76 | fc622913333da4564a7e3343920ce4415e47c5ab | refs/heads/master | 2021-06-06T20:23:48.382545 | 2021-06-04T12:31:37 | 2021-06-04T12:31:37 | 169,751,153 | 0 | 0 | null | 2019-02-08T14:58:54 | 2019-02-08T14:58:53 | null | UTF-8 | Python | false | false | 5,000 | py | import numpy as np
from baselines.a2c.utils import discount_with_dones
from baselines.common.runners import AbstractEnvRunner
#not use this
class Runner(AbstractEnvRunner):
"""
We use this class to generate batches of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch of experiences
"""
def __init__(self, env, model, nsteps=5, gamma=0.99):
super().__init__(env=env, model=model, nsteps=nsteps)
self.gamma = gamma
#self.batch_action_shape = [x if x is not None else -1 for x in model.train_model.action.shape.as_list()]
self.ob_dtype = model.train_model.X.dtype.as_numpy_dtype
def compute_rewards(self,epi_rewards,epi_dones,last_obs):
if self.gamma > 0.0:
# Discount/bootstrap off value fn
last_values = self.model.value(last_obs, S=None, M=epi_dones).tolist()
for n, (rewards, dones, value) in enumerate(zip(epi_rewards, epi_dones, last_values)):
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
epi_rewards[n] = rewards
return epi_rewards
def run(self):
# We initialize the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
epi_obs, epi_rewards, epi_actions, epi_values, epi_dones = [],[],[],[],[]
mb_states = self.states
for numsteps in range(self.nsteps):
# Given observations, take action and value (V(s))
# We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)
# Append the experiences
epi_obs.append(np.copy(self.obs))
epi_actions.append(actions)
epi_values.append(values)
#epi_dones.append(self.dones)
# Take actions in env and look the results
obs, rewards, dones, _ = self.env.step(actions)
self.env.render()
epi_rewards.append(rewards)
epi_dones.append(dones)
if dones: #compute the reward before switching episode
self.obs = self.env.reset()
self.dones = False
self.states=None
epi_rewards = np.asarray(epi_rewards, dtype=np.float32).swapaxes(1, 0)
epi_obs = np.asarray(epi_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape((numsteps + 1,) + self.env.observation_space.shape) # .reshape(self.batch_ob_shape)
epi_actions = np.asarray(epi_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
epi_values = np.asarray(epi_values, dtype=np.float32).swapaxes(1, 0)
epi_dones = np.asarray(epi_dones, dtype=np.bool).swapaxes(1, 0)
epi_masks = epi_dones[:, :-1]
epi_dones = epi_dones[:, 1:]
mb_rewards.extend(self.compute_rewards(epi_rewards,epi_dones,obs))
mb_obs.extend(epi_obs)
mb_actions.extend(epi_actions)
mb_values.extend(epi_values)
mb_dones.extend(epi_dones)
epi_obs, epi_rewards, epi_actions, epi_values, epi_dones = [], [], [], [], []
continue
self.states = states
self.dones = dones
self.obs = obs
#epi_dones.append(self.dones)
print(epi_dones)
if not dones:
epi_rewards = np.asarray(epi_rewards, dtype=np.float32).swapaxes(1, 0)
epi_obs = np.asarray(epi_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape((numsteps + 1,) + self.env.observation_space.shape) # .reshape(self.batch_ob_shape)
epi_actions = np.asarray(epi_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
epi_values = np.asarray(epi_values, dtype=np.float32).swapaxes(1, 0)
epi_dones = np.asarray(epi_dones, dtype=np.bool).swapaxes(1, 0)
epi_masks = epi_dones[:, :-1]
#epi_dones = epi_dones[:, 1:]
#Concat last iteartions
mb_rewards.extend(self.compute_rewards(epi_rewards, epi_dones,obs))
mb_obs.extend(epi_obs)
mb_actions.extend(epi_actions)
mb_values.extend(epi_values)
mb_dones.extend(epi_dones)
# Batch of steps to batch of rollouts
#print(self.batch_action_shape)
#print(mb_actions.shape)
#mb_actions = mb_actions.reshape(self.batch_action_shape)
#mb_actions = mb_actions.reshape([numsteps+1])
mb_rewards = mb_rewards.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values
| [
"lalumiere3@hotmail.fr"
] | lalumiere3@hotmail.fr |
72dde4d0cca5ada32dd37e6e36d79b7dc6680cba | 685e7dc080a383d12dd526a510a8f74c34ef2e71 | /tests/nonci/test_compare_pretrained.py | cf57a0df969679003ebcb54d6d1f3d881dc8170d | [
"MIT"
] | permissive | 18813055625/bert-for-tf2 | f1b86351675861ebe710bb4f94e99b89a639f83a | e71d108f0bd8c5af0c4e0b8427b144e996c02fdb | refs/heads/master | 2020-07-29T08:24:33.635201 | 2019-09-09T11:56:37 | 2019-09-09T11:56:37 | 209,729,589 | 0 | 1 | MIT | 2019-09-20T07:16:54 | 2019-09-20T07:16:53 | null | UTF-8 | Python | false | false | 8,706 | py | # coding=utf-8
#
# created by kpe on 27.Mar.2019 at 15:37
#
from __future__ import absolute_import, division, print_function
import unittest
import re
import os
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
import params
from bert import BertModelLayer
from bert.loader import map_from_stock_variale_name, map_to_stock_variable_name, load_stock_weights
from bert.loader import StockBertConfig, map_stock_config_to_params
from bert.tokenization import FullTokenizer
tf.compat.v1.disable_eager_execution()
class TestCompareBertsOnPretrainedWeight(unittest.TestCase):
bert_ckpt_dir = ".models/uncased_L-12_H-768_A-12/"
bert_ckpt_file = bert_ckpt_dir + "bert_model.ckpt"
bert_config_file = bert_ckpt_dir + "bert_config.json"
def test_bert_original_weights(self):
print("bert checkpoint: ", self.bert_ckpt_file)
bert_vars = tf.train.list_variables(self.bert_ckpt_file)
for ndx, var in enumerate(bert_vars):
print("{:3d}".format(ndx), var)
def create_bert_model(self, max_seq_len=18):
bc = None
with tf.io.gfile.GFile(self.bert_config_file, "r") as reader:
bc = StockBertConfig.from_json_string(reader.read())
bert = BertModelLayer.from_params(map_stock_config_to_params(bc),
name="bert")
input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")
output = bert([input_ids, token_type_ids])
model = keras.Model(inputs=[input_ids, token_type_ids], outputs=output)
return model, bert, (input_ids, token_type_ids)
def test_keras_weights(self):
max_seq_len = 18
model, bert, inputs = self.create_bert_model(18)
model.build(input_shape=[(None, max_seq_len),
(None, max_seq_len)])
model.summary()
for ndx, var in enumerate(bert.trainable_variables):
print("{:3d}".format(ndx), var.name, var.shape)
#for ndx, var in enumerate(model.trainable_variables):
# print("{:3d}".format(ndx), var.name, var.shape)
def test___compare_weights(self):
#tf.reset_default_graph()
max_seq_len = 18
model, bert, inputs = self.create_bert_model(18)
model.build(input_shape=[(None, max_seq_len),
(None, max_seq_len)])
stock_vars = tf.train.list_variables(self.bert_ckpt_file)
stock_vars = {name: list(shape) for name, shape in stock_vars}
keras_vars = model.trainable_variables
keras_vars = {var.name.split(":")[0]: var.shape.as_list() for var in keras_vars}
matched_vars = set()
unmatched_vars = set()
shape_errors = set()
for name in stock_vars:
bert_name = name
keras_name = map_from_stock_variale_name(bert_name)
if keras_name in keras_vars:
if keras_vars[keras_name] == stock_vars[bert_name]:
matched_vars.add(bert_name)
else:
shape_errors.add(bert_name)
else:
unmatched_vars.add(bert_name)
print("bert -> keras:")
print(" matched count:", len(matched_vars))
print(" unmatched count:", len(unmatched_vars))
print(" shape error count:", len(shape_errors))
print("unmatched:\n", "\n ".join(unmatched_vars))
self.assertEqual(197, len(matched_vars))
self.assertEqual(9, len(unmatched_vars))
self.assertEqual(0, len(shape_errors))
matched_vars = set()
unmatched_vars = set()
shape_errors = set()
for name in keras_vars:
keras_name = name
bert_name = map_to_stock_variable_name(keras_name)
if bert_name in stock_vars:
if stock_vars[bert_name] == keras_vars[keras_name]:
matched_vars.add(keras_name)
else:
shape_errors.add(keras_name)
else:
unmatched_vars.add(keras_name)
print("keras -> bert:")
print(" matched count:", len(matched_vars))
print(" unmatched count:", len(unmatched_vars))
print(" shape error count:", len(shape_errors))
print("unmatched:\n", "\n ".join(unmatched_vars))
self.assertEqual(197, len(matched_vars))
self.assertEqual(0, len(unmatched_vars))
self.assertEqual(0, len(shape_errors))
def predict_on_keras_model(self, input_ids, input_mask, token_type_ids):
max_seq_len = input_ids.shape[-1]
model, bert, k_inputs = self.create_bert_model(max_seq_len)
model.build(input_shape=[(None, max_seq_len),
(None, max_seq_len)])
load_stock_weights(bert, self.bert_ckpt_file)
k_res = model.predict([input_ids, token_type_ids])
return k_res
def predict_on_stock_model(self, input_ids, input_mask, token_type_ids):
from tests.ext.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint
tf.compat.v1.reset_default_graph()
tf_placeholder = tf.compat.v1.placeholder
max_seq_len = input_ids.shape[-1]
pl_input_ids = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
pl_mask = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
pl_token_type_ids = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
bert_config = BertConfig.from_json_file(self.bert_config_file)
tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_ckpt_dir, "vocab.txt"))
s_model = BertModel(config=bert_config,
is_training=False,
input_ids=pl_input_ids,
input_mask=pl_mask,
token_type_ids=pl_token_type_ids,
use_one_hot_embeddings=False)
tvars = tf.compat.v1.trainable_variables()
(assignment_map, initialized_var_names) = get_assignment_map_from_checkpoint(tvars, self.bert_ckpt_file)
tf.compat.v1.train.init_from_checkpoint(self.bert_ckpt_file, assignment_map)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
s_res = sess.run(
s_model.get_sequence_output(),
feed_dict={pl_input_ids: input_ids,
pl_token_type_ids: token_type_ids,
pl_mask: input_mask,
})
return s_res
def test_direct_keras_to_stock_compare(self):
from tests.ext.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint
bert_config = BertConfig.from_json_file(self.bert_config_file)
tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_ckpt_dir, "vocab.txt"))
# prepare input
max_seq_len = 6
input_str = "Hello, Bert!"
input_tokens = tokenizer.tokenize(input_str)
input_tokens = ["[CLS]"] + input_tokens + ["[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
input_ids = input_ids + [0]*(max_seq_len - len(input_tokens))
input_mask = [1]*len(input_tokens) + [0]*(max_seq_len - len(input_tokens))
token_type_ids = [0]*len(input_tokens) + [0]*(max_seq_len - len(input_tokens))
input_ids = np.array([input_ids], dtype=np.int32)
input_mask = np.array([input_mask], dtype=np.int32)
token_type_ids = np.array([token_type_ids], dtype=np.int32)
print(" tokens:", input_tokens)
print("input_ids:{}/{}:{}".format(len(input_tokens), max_seq_len, input_ids), input_ids.shape, token_type_ids)
s_res = self.predict_on_stock_model(input_ids, input_mask, token_type_ids)
k_res = self.predict_on_keras_model(input_ids, input_mask, token_type_ids)
np.set_printoptions(precision=9, threshold=20, linewidth=200, sign="+", floatmode="fixed")
print("s_res", s_res.shape)
print("k_res", k_res.shape)
print("s_res:\n {}".format(s_res[0, :2, :10]), s_res.dtype)
print("k_res:\n {}".format(k_res[0, :2, :10]), k_res.dtype)
adiff = np.abs(s_res-k_res).flatten()
print("diff:", np.max(adiff), np.argmax(adiff))
self.assertTrue(np.allclose(s_res, k_res, atol=1e-6))
| [
"kpe.git@gmailbox.org"
] | kpe.git@gmailbox.org |
d2fd857768b784b5d412fcfde44b925623531940 | 62d2d16c3042c4c3737f02a7b0a5a23961fc3bc3 | /exers/notas.py | 1b0fb4137568709465ba2b6ed78b072308c164be | [] | no_license | Jose-Humberto-07/pythonFaculdade | 9e3d196a2b27ed34b182519db7f0e0b0a3ac6be2 | e7a03cca421c8656b3169dfc8fe5ac5973e21176 | refs/heads/main | 2023-05-26T05:03:19.838093 | 2021-06-03T00:53:22 | 2021-06-03T00:53:22 | 369,333,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py |
#funcao
def media(ap1, ap2):
m = (ap1 + ap2) / 2
return m
nome = []
ap1 = []
ap2 = []
print("===============controle de notas=============")
#=[0,1,2,3,4,5]
for c in range(3):
print("Qual o nome do ",(c+1),"° aluno? ")
nome.append(input())
print("Qual a nota AP1 do " + nome[c])
ap1.append(float(input()))
print("Qual a nota AP2 do " + nome[c])
ap2.append(float(input()))
print()
print("-------------------------------------------------------")
print()
print("===============================================================")
for c in range(3):
media = media(ap1[c], ap2[c])
print("Nome: " + nome[c])
print("Primeira nota (AP1): " , ap1[c])
print("Segunda nota (AP2): " , ap2[c])
print("Média: " , media)
| [
"jhpnascimento96@gmail.com"
] | jhpnascimento96@gmail.com |
dda125c8083666e799a4bccbfac1e27a51202a18 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_13284-2532/sdB_EC_13284-2532_lc.py | dfa44dd24cbfe5cc7255aa0893f9c5a3ba440b9b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[202.803875,-25.791181], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_EC_13284-2532 /sdB_EC_13284-2532_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
889a37fe8215598757625f98d9e00660675b6457 | 991d762127850817be2da9fbbb6ba4601d1c1252 | /test_trellis.py | 30f974fdbe80962a1faaa2093d97f0f28a5025fb | [] | no_license | tomjoy/trellis | 8699c264a1d3e287ae145488a172552a2a8c1c64 | ce5e7dfc6cff6386a9ee216ed9be7436816c4512 | refs/heads/master | 2016-09-11T10:33:13.288062 | 2014-11-18T08:19:33 | 2014-11-18T08:19:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,364 | py | from test_sets import *
from peak import context
from peak.events.activity import EventLoop, TwistedEventLoop, Time, NOT_YET
from peak.events import trellis, stm, collections, activity
from peak.util.decorators import rewrap, decorate as d
from peak.util.extremes import Max
import unittest, heapq, mocker, types, sys
try:
import testreactor
except ImportError:
testreactor = None # either twisted or testreactor are missing
try:
import wx
except ImportError:
wx = None
class EventLoopTestCase(unittest.TestCase):
def setUp(self):
self.state = context.new()
self.state.__enter__()
super(EventLoopTestCase, self).setUp()
self.configure_context()
def tearDown(self):
super(EventLoopTestCase, self).tearDown()
self.state.__exit__(None, None, None)
def configure_context(self):
pass
class TestListener(stm.AbstractListener):
def __repr__(self): return self.name
class TestSubject(stm.AbstractSubject):
def __repr__(self): return self.name
class DummyError(Exception): pass
class UndirtyListener(TestListener):
def dirty(self):
return False
try:
set
except NameError:
from sets import Set as set
if wx:
class TestWxEventLoop(EventLoopTestCase):
def configure_context(self):
from peak.events.activity import EventLoop, WXEventLoop
EventLoop <<= WXEventLoop
self.app = wx.PySimpleApp(redirect=False)
self.app.ExitOnFrameDelete = False
def testSequentialCalls(self):
log = []
EventLoop.call(log.append, 1)
EventLoop.call(log.append, 2)
EventLoop.call(log.append, 3)
EventLoop.call(log.append, 4)
EventLoop.call(EventLoop.stop)
EventLoop.run()
self.assertEqual(log, [1,2,3,4])
# XXX this should test timing stuff, but the only way to do that
# is with a wx mock, which I haven't time for as yet.
if testreactor:
class TestReactorEventLoop(EventLoopTestCase, testreactor.ReactorTestCase):
def configure_context(self):
from peak.events.activity import Time, EventLoop
from twisted.internet import reactor
Time <<= lambda: Time()
Time.time = reactor.getTime
EventLoop <<= TwistedEventLoop
def testSequentialCalls(self):
log = []
EventLoop.call(log.append, 1)
EventLoop.call(log.append, 2)
EventLoop.call(log.append, 3)
EventLoop.call(log.append, 4)
class IdleTimer(trellis.Component):
trellis.attrs(
idle_timeout = 20,
busy = False,
)
idle_for = trellis.maintain(
lambda self: self.idle_for.begins_with(not self.busy),
initially=NOT_YET
)
trellis.maintain() # XXX should be perform
def alarm(self):
if self.idle_for[self.idle_timeout] and EventLoop.running:
log.append(5)
EventLoop.stop()
it = IdleTimer()
EventLoop.run()
self.assertEqual(log, [1,2,3,4,5])
class TestLinks(unittest.TestCase):
def setUp(self):
self.l1 = TestListener(); self.l1.name = 'l1'
self.l2 = TestListener(); self.l1.name = 'l2'
self.s1 = TestSubject(); self.s1.name = 's1'
self.s2 = TestSubject(); self.s2.name = 's2'
self.lk11 = stm.Link(self.s1, self.l1)
self.lk12 = stm.Link(self.s1, self.l2)
self.lk21 = stm.Link(self.s2, self.l1)
self.lk22 = stm.Link(self.s2, self.l2)
def verify_subjects(self, items):
for link, nxt, prev in items:
self.failUnless(link.next_subject is nxt)
if isinstance(link,stm.Link):
self.failUnless(link.prev_subject is prev)
def verify_listeners(self, items):
for link, nxt, prev in items:
self.failUnless(link.next_listener is nxt)
if isinstance(link,stm.Link):
self.failUnless(link.prev_listener is prev)
def testBreakIterSubjects(self):
it = self.l1.iter_subjects()
self.failUnless(it.next() is self.s2)
self.lk21.unlink()
self.failUnless(it.next() is self.s1)
def testBreakIterListeners(self):
it = self.s1.iter_listeners()
self.failUnless(it.next() is self.l2)
self.lk11.unlink()
self.failUnless(it.next() is self.l1)
def testLinkSetup(self):
self.verify_subjects([
(self.l1, self.lk21, None), (self.l2, self.lk22, None),
(self.lk21, self.lk11, None), (self.lk11, None, self.lk21),
(self.lk22, self.lk12, None), (self.lk12, None, self.lk22),
])
self.verify_listeners([
(self.s1, self.lk12, None), (self.s2, self.lk22, None),
(self.lk22, self.lk21, self.s2), (self.lk21, None, self.lk22),
(self.lk12, self.lk11, self.s1), (self.lk11, None, self.lk12),
])
def testUnlinkListenerHeadSubjectTail(self):
self.lk21.unlink()
self.verify_subjects([
(self.l1, self.lk11, None), (self.lk11, None, None)
])
self.verify_listeners([
(self.s2, self.lk22, None), (self.lk22, None, self.s2)
])
def testUnlinkListenerTailSubjectHead(self):
self.lk12.unlink()
self.verify_subjects([
(self.l2, self.lk22, None), (self.lk22, None, None),
])
self.verify_listeners([
(self.s1, self.lk11, None), (self.lk11, None, self.s1),
])
def a(f):
def g(self):
return self.ctrl.atomically(f, self)
return rewrap(f, g)
class TestController(unittest.TestCase):
def setUp(self):
self.ctrl = stm.Controller()
self.t0 = TestListener(); self.t0.name='t0';
self.t1 = TestListener(); self.t1.name='t1'; self.t1.layer = 1
self.t2 = TestListener(); self.t2.name='t2'; self.t2.layer = 2
self.t3 = UndirtyListener(); self.t3.name='t3'
self.s1 = TestSubject(); self.s2 = TestSubject()
self.s1.name = 's1'; self.s2.name = 's2'
def tearDown(self):
# Verify correct cleanup in all scenarios
for k,v in dict(
undo=[], managers={}, queues={}, layers=[], reads={}, writes={},
has_run={}, destinations=None, routes=None,
current_listener=None, readonly=False, in_cleanup=False,
active=False, at_commit=[], to_retry={}
).items():
val = getattr(self.ctrl, k)
self.assertEqual(val, v, '%s: %r' % (k,val))
def testScheduleSimple(self):
t1 = TestListener(); t1.name='t1'
t2 = TestListener(); t2.name='t2'
self.assertEqual(self.ctrl.layers, [])
self.assertEqual(self.ctrl.queues, {})
self.ctrl.schedule(t1)
self.ctrl.schedule(t2)
self.assertEqual(self.ctrl.layers, [0])
self.assertEqual(self.ctrl.queues, {0: {t1:1, t2:1}})
self.ctrl.cancel(t1)
self.assertEqual(self.ctrl.layers, [0])
self.assertEqual(self.ctrl.queues, {0: {t2:1}})
self.ctrl.cancel(t2)
# tearDown will assert that everything has been cleared
def testThreadLocalController(self):
self.failUnless(isinstance(trellis.ctrl, stm.Controller))
self.failUnless(isinstance(trellis.ctrl, stm.threading.local))
def testHeapingCancel(self):
# verify that cancelling the last listener of a layer keeps
# the 'layers' list in heap order
self.ctrl.schedule(self.t0)
self.ctrl.schedule(self.t2)
self.ctrl.schedule(self.t1)
layers = self.ctrl.layers
self.assertEqual(layers, [0, 2, 1])
self.ctrl.cancel(self.t0)
self.assertEqual(heapq.heappop(layers), 1)
self.assertEqual(heapq.heappop(layers), 2)
self.assertEqual(self.ctrl.queues, {1: {self.t1:1}, 2: {self.t2:1}})
self.ctrl.queues.clear()
def testDoubleAndMissingCancelOrSchedule(self):
self.ctrl.schedule(self.t2)
self.ctrl.cancel(self.t0)
self.ctrl.cancel(self.t2)
self.ctrl.cancel(self.t2)
self.ctrl.schedule(self.t1)
self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
self.ctrl.schedule(self.t1)
self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
self.ctrl.cancel(self.t1)
def testScheduleLayerBump(self):
# listener layer must be at least source layer + 1
self.ctrl.schedule(self.t1)
self.ctrl.schedule(self.t1, 0)
self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
self.ctrl.schedule(self.t1, 1)
self.assertEqual(self.ctrl.queues, {2: {self.t1:1}})
self.assertEqual(self.t1.layer, 2)
self.ctrl.cancel(self.t1)
d(a)
def testScheduleRollback(self):
# when running atomically, scheduling is an undo-logged operation
self.ctrl.schedule(self.t1)
self.ctrl.rollback_to(0)
def testCleanup(self):
self.ctrl.schedule(self.t0)
def raiser():
# XXX need to actually run one rule, plus start another w/error
raise DummyError
try:
self.ctrl.atomically(self.runAs, self.t0, raiser)
except DummyError:
pass
def testSubjectsMustBeAtomic(self):
self.assertRaises(AssertionError, self.ctrl.lock, self.s1)
self.assertRaises(AssertionError, self.ctrl.used, self.s1)
self.assertRaises(AssertionError, self.ctrl.changed, self.s1)
d(a)
def testLockAcquiresManager(self):
class Dummy:
def __enter__(*args): pass
def __exit__(*args): pass
mgr = self.s1.manager = Dummy()
self.ctrl.lock(self.s1)
self.assertEqual(self.ctrl.managers, {mgr:0})
self.ctrl.lock(self.s2)
self.assertEqual(self.ctrl.managers, {mgr:0})
d(a)
def testReadWrite(self):
self.ctrl.used(self.s1)
self.ctrl.changed(self.s2)
self.assertEqual(self.ctrl.reads, {})
self.assertEqual(self.ctrl.writes, {})
self.ctrl.current_listener = self.t0
self.ctrl.used(self.s1)
self.ctrl.changed(self.s2)
self.assertEqual(self.ctrl.reads, {self.s1:1})
self.assertEqual(self.ctrl.writes, {self.s2:self.t0})
self.ctrl.reads.clear() # these would normally be handled by
self.ctrl.writes.clear() # the run() method's try/finally
self.ctrl.current_listener = None # reset
d(a)
def testNoReadDuringCommit(self):
self.ctrl.readonly = True
self.assertRaises(RuntimeError, self.ctrl.changed, self.s1)
self.ctrl.readonly = False # normally reset by ctrl.run_rule()
d(a)
def testRecalcOnWrite(self):
stm.Link(self.s1, self.t0)
stm.Link(self.s2, self.t1)
stm.Link(self.s2, self.t0)
self.ctrl.current_listener = self.t1
self.ctrl.changed(self.s1)
self.ctrl.changed(self.s2)
self.assertEqual(self.ctrl.writes, {self.s1:self.t1, self.s2:self.t1})
sp = self.ctrl.savepoint(); self.ctrl.has_run[self.t1] = self.t1
self.ctrl._process_writes(self.t1)
# Only t0 is notified, not t1, since t1 is the listener
self.assertEqual(self.ctrl.queues, {2: {self.t0:1}})
self.ctrl.rollback_to(sp)
self.ctrl.current_listener = None # reset
d(a)
def testDependencyUpdatingAndUndo(self):
stm.Link(self.s1, self.t0)
s3 = TestSubject()
stm.Link(s3, self.t0)
self.assertEqual(list(self.t0.iter_subjects()), [s3, self.s1])
self.ctrl.current_listener = self.t0
self.ctrl.used(self.s1)
self.ctrl.used(self.s2)
sp = self.ctrl.savepoint()
self.ctrl._process_reads(self.t0)
self.assertEqual(list(self.t0.iter_subjects()), [self.s2, self.s1])
self.ctrl.rollback_to(sp)
self.assertEqual(list(self.t0.iter_subjects()), [s3, self.s1])
self.ctrl.current_listener = None # reset
def runAs(self, listener, rule):
listener.run = rule
self.ctrl.run_rule(listener)
d(a)
def testIsRunningAndHasRan(self):
def rule():
self.assertEqual(self.ctrl.current_listener, self.t1)
self.assertEqual(self.ctrl.has_run, {self.t1: 0})
sp = self.ctrl.savepoint()
self.runAs(self.t1, rule)
self.assertEqual(self.ctrl.current_listener, None)
self.assertEqual(self.ctrl.has_run, {self.t1: 0})
d(a)
def testIsRunningButHasNotRan(self):
def rule():
self.assertEqual(self.ctrl.current_listener, self.t1)
self.assertEqual(self.ctrl.has_run, {})
sp = self.ctrl.savepoint()
self.t1.run = rule; self.ctrl.initialize(self.t1) # uninit'd rule
self.assertEqual(self.ctrl.current_listener, None)
self.assertEqual(self.ctrl.has_run, {})
d(a)
def testScheduleUndo(self):
sp = self.ctrl.savepoint()
self.ctrl.schedule(self.t2)
self.assertEqual(self.ctrl.queues, {2: {self.t2:1}})
self.ctrl.rollback_to(sp)
self.assertEqual(self.ctrl.queues, {})
def testNestedReadOnly(self):
log = []
def aRule():
log.append(trellis.ctrl.readonly); return 1
c1 = trellis.Cell(aRule)
c2 = trellis.Cell(lambda: c1.value * aRule())
c3 = trellis.Performer(lambda: c2.value)
self.assertEqual(log, [True, True])
d(a)
def testWriteProcessingInRun(self):
stm.Link(self.s1, self.t0)
stm.Link(self.s2, self.t1)
stm.Link(self.s2, self.t3)
stm.Link(self.s2, self.t0)
def rule():
self.ctrl.changed(self.s1)
self.ctrl.changed(self.s2)
self.assertEqual(self.ctrl.writes, {self.s1:self.t1, self.s2:self.t1})
self.runAs(self.t1, rule)
# Only t0 is notified, not t1, since t1 is the listener & t3 is !dirty
self.assertEqual(self.ctrl.writes, {})
self.assertEqual(self.ctrl.queues, {2: {self.t0:1}})
self.ctrl.cancel(self.t0)
d(a)
def testReadProcessingInRun(self):
stm.Link(self.s1, self.t0)
s3 = TestSubject()
stm.Link(s3, self.t0)
self.assertEqual(list(self.t0.iter_subjects()), [s3, self.s1])
def rule():
self.ctrl.used(self.s1)
self.ctrl.used(self.s2)
self.assertEqual(self.ctrl.reads, {self.s1:1, self.s2:1})
self.runAs(self.t0, rule)
self.assertEqual(self.ctrl.reads, {})
self.assertEqual(list(self.t0.iter_subjects()), [self.s2, self.s1])
d(a)
def testReadOnlyDuringMax(self):
def rule():
self.assertEqual(self.ctrl.readonly, True)
self.t0.layer = Max
self.assertEqual(self.ctrl.readonly, False)
self.runAs(self.t0, rule)
self.assertEqual(self.ctrl.readonly, False)
d(a)
def testRunClearsReadWriteOnError(self):
    # reads/writes bookkeeping must be cleared even when the rule raises.
    # NOTE(review): no explicit `raise DummyError` is visible in the rule
    # body here; the expected DummyError presumably originates inside it --
    # the source dump may have dropped a line. Confirm against the original.
    def rule():
        self.ctrl.used(self.s1)
        self.ctrl.changed(self.s2)
        self.assertEqual(self.ctrl.reads, {self.s1:1})
        self.assertEqual(self.ctrl.writes, {self.s2:1})
    try:
        self.runAs(self.t0, rule)
    except DummyError:
        pass
    else:
        raise AssertionError("Error should've propagated")
    self.assertEqual(self.ctrl.reads, {})
    self.assertEqual(self.ctrl.writes, {})
d(a)
def testSimpleCycle(self):
    # A read/write dependency cycle among three rules must be detected on
    # retry and reported via CircularityError with the dependency graph.
    stm.Link(self.s1, self.t1)
    stm.Link(self.s2, self.t2)
    def rule0():
        self.ctrl.used(self.s1)
        self.ctrl.changed(self.s1)
    def rule1():
        self.ctrl.used(self.s1)
        self.ctrl.changed(self.s2)
    def rule2():
        self.ctrl.used(self.s2)
        self.ctrl.changed(self.s1)
    self.runAs(self.t0, rule0)
    self.runAs(self.t1, rule1)
    self.runAs(self.t2, rule2)
    try:
        self.ctrl._retry()
    except stm.CircularityError, e:
        self.assertEqual(e.args[0],
            {self.t0: set([self.t1]), self.t1: set([self.t2]),
             self.t2: set([self.t0, self.t1])})
    else:
        raise AssertionError("Should've caught a cycle")
d(a)
def testSimpleRetry(self):
    # Retrying a listener removes it and everything that ran after it from
    # has_run, so those rules can be re-executed.
    def rule():
        pass
    self.runAs(self.t0, rule)
    self.runAs(self.t1, rule)
    self.runAs(self.t2, rule)
    self.assertEqual(set(self.ctrl.has_run),set([self.t0,self.t1,self.t2]))
    self.ctrl.to_retry[self.t1]=1
    self.ctrl._retry()
    self.assertEqual(set(self.ctrl.has_run), set([self.t0]))
    self.ctrl.to_retry[self.t0]=1
    self.ctrl._retry()
d(a)
def testNestedNoRetry(self):
    # A rule freshly initialized inside another rule is scheduled rather
    # than retried: to_retry stays empty and the new rule lands in a queue.
    def rule0():
        self.t1.run=rule1; self.ctrl.initialize(self.t1)
    def rule1():
        pass
    self.runAs(self.t2, rule1)
    self.runAs(self.t0, rule0)
    self.ctrl.schedule(self.t1)
    self.assertEqual(self.ctrl.to_retry, {})
    self.assertEqual(
        set(self.ctrl.has_run), set([self.t0, self.t2])
    )
    self.assertEqual(self.ctrl.queues, {1: {self.t1:1}})
def testRunScheduled(self):
    # A listener scheduled inside an atomic operation runs before the
    # operation returns.
    log = []
    self.t1.run = lambda: log.append(True)
    def go():
        self.ctrl.schedule(self.t1)
    self.ctrl.atomically(go)
    self.assertEqual(log, [True])
def testRollbackReschedules(self):
    # Rolling back past a rule's own scheduling from inside that rule puts
    # it back on the queue (at its layer), from where it can be cancelled.
    sp = []
    def rule0():
        self.ctrl.rollback_to(sp[0])
        self.assertEqual(self.ctrl.queues, {0: {self.t0:1}})
        self.ctrl.cancel(self.t0)
    self.t0.run = rule0
    def go():
        self.ctrl.schedule(self.t0)
        sp.append(self.ctrl.savepoint())
    self.ctrl.atomically(go)
def testManagerCantCreateLoop(self):
    # A managed context whose __exit__ schedules another rule must not
    # extend the current atomic operation: the scheduled rule only runs in
    # the NEXT atomic operation.
    class Mgr:
        def __enter__(self): pass
        def __exit__(*args):
            self.ctrl.schedule(self.t1)
    log = []
    def rule1():
        log.append(True)
    self.t1.run = rule1
    self.t0.run = lambda:self.ctrl.manage(Mgr())
    self.ctrl.atomically(self.ctrl.schedule, self.t0)
    self.assertEqual(log, [])
    self.ctrl.atomically(lambda:None)
    self.assertEqual(log, [True])
d(a)
def testNotifyOnChange(self):
    # Exercises changed() queueing behavior with and without a current
    # listener set; only t2 ends up queued here.
    stm.Link(self.s2, self.t2)
    stm.Link(self.s2, self.t3)
    self.ctrl.changed(self.s2)
    self.ctrl.current_listener = self.t0
    self.ctrl.changed(self.s2)
    self.assertEqual(self.ctrl.queues, {2: {self.t2:1}})
    self.ctrl.cancel(self.t2)
    self.ctrl.writes.clear()
    self.ctrl.current_listener = None # reset
def testCommitCanLoop(self):
    # on_commit callbacks may themselves schedule work, and that work still
    # runs before atomically() returns.
    log=[]
    def go():
        log.append(True)
    self.t0.run = go
    self.ctrl.atomically(self.ctrl.on_commit, self.ctrl.schedule, self.t0)
    self.assertEqual(log,[True])
d(a)
def testNoUndoDuringUndo(self):
    # on_undo() calls made while an undo is in progress are ignored;
    # otherwise `redo` would run here and fail the test.
    def undo():
        self.ctrl.on_undo(redo)
    def redo():
        raise AssertionError("Should not be run")
    self.ctrl.on_undo(undo)
    self.ctrl.rollback_to(0)
d(a)
def testReentrantRollbackToMinimumTarget(self):
    # Re-entrant rollback_to() calls must be folded into the outer rollback,
    # using the lowest requested target, without recursing.
    sp = self.ctrl.savepoint()
    # these 2 rollbacks will be ignored, since they target a higher sp.
    # note that both are needed for testing, as one is there to potentially
    # set a new target, and the other is there to make the offset wrong if
    # the rollback stops prematurely.
    self.ctrl.on_undo(self.ctrl.rollback_to, sp+100)
    self.ctrl.on_undo(self.ctrl.rollback_to, sp+100)
    sp2 = self.ctrl.savepoint()
    # ensure that there's no way this test can pass unless rollback_to
    # notices re-entrant invocations (because it would overflow the stack)
    for i in range(sys.getrecursionlimit()*2):
        # request a rollback all the way to 0; this target should be used
        # in place of the sp2 target or sp+100 targets, since it will be
        # the lowest target encountered during the rollback.
        self.ctrl.on_undo(self.ctrl.rollback_to, sp)
    self.ctrl.rollback_to(sp2) # ask to rollback to posn 2
    self.assertEqual(self.ctrl.savepoint(), sp) # but should rollback to 0
d(a)
def testNestedRule(self):
    # A rule (t2) initialized from inside another rule (t1) runs nested:
    # the outer rule's reads are isolated from the inner rule's, writes
    # accumulate across both, and rolling back t1 undoes t2 as well.
    def rule1():
        self.assertEqual(set(self.ctrl.has_run), set([self.t0, self.t1]))
        self.assertEqual(self.ctrl.current_listener, self.t1)
        self.ctrl.used(self.s1)
        self.ctrl.changed(self.s2)
        self.assertEqual(self.ctrl.reads, {self.s1:1})
        self.assertEqual(self.ctrl.writes, {self.s2:self.t1})
        self.t2.run=rule2; self.ctrl.initialize(self.t2)
        self.assertEqual(set(self.ctrl.has_run), set([self.t0, self.t1]))
        self.assertEqual(self.ctrl.current_listener, self.t1)
        self.assertEqual(self.ctrl.reads, {self.s1:1})
        self.assertEqual(self.ctrl.writes, {self.s2:self.t1, s3:self.t2})
    def rule2():
        self.assertEqual(set(self.ctrl.has_run), set([self.t0, self.t1]))
        self.assertEqual(self.ctrl.current_listener, self.t2)
        self.assertEqual(self.ctrl.reads, {})
        self.assertEqual(self.ctrl.writes, {self.s2:self.t1})
        self.ctrl.used(self.s2)
        self.ctrl.changed(s3)
    def rule0():
        pass
    s3 = TestSubject(); s3.name = 's3'
    self.runAs(self.t0, rule0)
    self.runAs(self.t1, rule1)
    self.assertEqual(
        set(self.ctrl.has_run),
        set([self.t1, self.t0]) # t2 was new, so doesn't show
    )
    self.assertEqual(list(self.t1.iter_subjects()), [self.s1])
    self.assertEqual(list(self.t2.iter_subjects()), [self.s2])
    self.ctrl.rollback_to(self.ctrl.has_run[self.t1]) # should undo both t1/t2
def testUndoLogSpansMultipleRecalcs(self):
    # Both recalcs triggered by one discrete change (set + reset) log to the
    # same growing undo log, so the second savepoint exceeds the first.
    c1 = trellis.Value(False, discrete=True)
    c2 = trellis.Cell(lambda: (c1.value, log.append(trellis.savepoint())))
    log = []; c2.value; log = []; c1.value = True
    self.failUnless(len(log)==2 and log[1]>log[0], log)
def testUndoPostCommitCancelsUndoOfCommitSchedule(self):
    # An error raised during the reset recalc of a discrete value must not
    # underflow the undo stack while unwinding on-commit work.
    c1 = trellis.Value(False, discrete=True)
    def c2():
        c1.value
        log.append(trellis.savepoint())
        if len(log)==2:
            raise DummyError
    c2 = trellis.Cell(c2)
    log = []; c2.value; log = [];
    # This will raise a different error if undoing the on-commit stack
    # causes an underflow:
    self.assertRaises(DummyError, setattr, c1, 'value', True)
class TestCells(mocker.MockerTestCase):
    """Unit tests for the trellis cell types (Value, Cell, ReadOnlyCell,
    Constant, TodoValue) and the trellis collection types.

    NOTE(review): the bare ``d(...)`` statements below appear to be mangled
    decorator lines (``@a``, ``@trellis.modifier`` etc.) from the original
    source dump -- confirm against the upstream file.
    """
    ctrl = stm.ctrl

    def tearDown(self):
        # make sure the old controller is back
        trellis.install_controller(self.ctrl)

    def testValueBasics(self):
        self.failUnless(issubclass(trellis.Value, trellis.AbstractCell))
        self.failUnless(issubclass(trellis.Value, stm.AbstractSubject))
        v = trellis.Value()
        self.assertEqual(v.value, None)
        self.assertEqual(v._set_by, trellis._sentinel)
        self.assertEqual(v._reset, trellis._sentinel)
        v.value = 21
        self.assertEqual(v._set_by, trellis._sentinel)
    d(a)
    def testValueUndo(self):
        # Setting a value is undoable via rollback.
        v = trellis.Value(42)
        self.assertEqual(v.value, 42)
        sp = self.ctrl.savepoint()
        v.value = 43
        self.assertEqual(v.value, 43)
        self.ctrl.rollback_to(sp)
        self.assertEqual(v.value, 42)
    d(a)
    def testValueUsed(self):
        # Reading .value registers the cell with the controller via used().
        v = trellis.Value(42)
        ctrl = self.mocker.replace(self.ctrl) #'peak.events.stm.ctrl')
        ctrl.used(v)
        self.mocker.replay()
        trellis.install_controller(ctrl)
        self.assertEqual(v.value, 42)

    def testDiscrete(self):
        # A discrete value resets to its default after being set.
        v = trellis.Value(None, True)
        v.value = 42
        self.assertEqual(v.value, None)

    def testValueChanged(self):
        # Setting a different value locks the cell and reports changed().
        v = trellis.Value(42)
        old_ctrl, ctrl = self.ctrl, self.mocker.replace(self.ctrl)
        ctrl.lock(v)
        ctrl.changed(v)
        self.mocker.replay()
        trellis.install_controller(ctrl)
        v.value = 43
        self.assertEqual(v.value, 43)

    def testValueUnchanged(self):
        # Re-setting the same value locks but must NOT report changed().
        v = trellis.Value(42)
        ctrl = self.mocker.replace(self.ctrl)
        ctrl.lock(v)
        mocker.expect(ctrl.changed(v)).count(0)
        self.mocker.replay()
        trellis.install_controller(ctrl)
        v.value = 42
        self.assertEqual(v.value, 42)
    d(a)
    def testValueSetLock(self):
        # Conflicting sets from a rule raise InputConflict; setting the same
        # value again is allowed.
        v = trellis.Value(42)
        v.value = 43
        self.assertEqual(v.value, 43)
        self.assertEqual(v._set_by, None)
        def go():
            v.value = 99
        t = TestListener(); t.name = 't'
        t.run = go
        self.assertRaises(trellis.InputConflict, self.ctrl.run_rule, t)
        self.assertEqual(v.value, 43)
        def go():
            v.value = 43
        t = TestListener(); t.name = 't'
        t.run = go
        self.ctrl.run_rule(t)
        self.assertEqual(v.value, 43)

    def testReadOnlyCellBasics(self):
        # The rule runs once; subsequent reads reuse the cached value.
        log = []
        c = trellis.Cell(lambda:log.append(1))
        self.failUnless(type(c) is trellis.ReadOnlyCell)
        c.value
        self.assertEqual(log,[1])
        c.value
        self.assertEqual(log,[1])

    def testDiscreteValue(self):
        # A dependent rule sees both the set (True) and the automatic reset
        # (False) of a discrete value; setting the default is a no-op.
        log = []
        v = trellis.Value(False, True)
        c = trellis.Cell(lambda: log.append(v.value))
        self.assertEqual(log,[])
        c.value
        self.assertEqual(log,[False])
        del log[:]
        v.value = True
        self.assertEqual(log, [True, False])
        self.assertEqual(v.value, False)
        del log[:]
        v.value = False
        self.assertEqual(log, [])

    def testCellConstructor(self):
        # Cell() dispatches to Value / ReadOnlyCell / Cell by its arguments.
        self.failUnless(type(trellis.Cell(value=42)) is trellis.Value)
        self.failUnless(type(trellis.Cell(lambda:42)) is trellis.ReadOnlyCell)
        self.failUnless(type(trellis.Cell(lambda:42, value=42)) is trellis.Cell)

    def testRuleChain(self):
        # Downstream rules only re-fire when their input actually changes.
        v = trellis.Value(0)
        log = []
        c1 = trellis.Cell(lambda:int(v.value/2))
        c2 = trellis.Cell(lambda:log.append(c1.value))
        c2.value
        self.assertEqual(log, [0])
        v.value = 1
        self.assertEqual(log, [0])
        v.value = 2
        self.assertEqual(log, [0, 1])

    def testConstant(self):
        # Constants are read-only and have a stable repr.
        for v in (42, [57], "blah"):
            c = trellis.Constant(v)
            self.assertEqual(c.value, v)
            self.assertEqual(c.get_value(), v)
            self.failIf(hasattr(c,'set_value'))
            self.assertRaises(AttributeError, setattr, c, 'value', v)
            self.assertEqual(repr(c), "Constant(%r)" % (v,))

    def testRuleToConstant(self):
        # A rule with no dependencies collapses into a ConstantRule after its
        # first (and only) run; the class change must be reversible for undo.
        log = []
        def go():
            log.append(1)
            return 42
        c = trellis.Cell(go)
        self.assertEqual(c.value, 42)
        self.assertEqual(log, [1])
        self.failUnless(isinstance(c, trellis.ConstantRule))
        self.assertEqual(repr(c), "Constant(42)")
        self.assertEqual(c.value, 42)
        self.assertEqual(c.get_value(), 42)
        self.assertEqual(c.rule, None)
        self.assertEqual(log, [1])
        self.failIf(c.dirty())
        c.__class__ = trellis.ReadOnlyCell # transition must be reversible to undo
        self.failIf(isinstance(c, trellis.ConstantRule))

    def testModifierIsAtomic(self):
        # A @trellis.modifier-wrapped function runs atomically with no
        # current listener, and its return value passes through.
        log = []
        d(trellis.modifier)
        def do_it():
            self.failUnless(self.ctrl.active)
            self.assertEqual(self.ctrl.current_listener, None)
            log.append(True)
            return log
        rv = do_it()
        self.failUnless(rv is log)
        self.assertEqual(log, [True])
    d(a)
    def testModifierAlreadyAtomic(self):
        # Same contract when invoked from inside an already-atomic context.
        log = []
        d(trellis.modifier)
        def do_it():
            self.failUnless(self.ctrl.active)
            self.assertEqual(self.ctrl.current_listener, None)
            log.append(True)
            return log
        rv = do_it()
        self.failUnless(rv is log)
        self.assertEqual(log, [True])
    d(a)
    def testModifierFromCell(self):
        # Calling a modifier from inside a rule isolates the rule's reads
        # from the modifier's reads.
        v1, v2 = trellis.Value(42), trellis.Value(99)
        d(trellis.modifier)
        def do_it():
            v1.value = v1.value * 2
            self.assertEqual(self.ctrl.reads, {v1:1})
        def rule():
            v2.value
            do_it()
            self.assertEqual(self.ctrl.reads, {v2:1})
        trellis.Cell(rule).value
        self.assertEqual(v1.value, 84)

    def testDiscreteToConstant(self):
        # A discrete read-only rule with no dependencies still resets, then
        # becomes constant.
        log = []
        c1 = trellis.ReadOnlyCell(lambda:True, False, True)
        c2 = trellis.Cell(lambda:log.append(c1.value))
        c2.value
        self.assertEqual(log, [True, False])
        self.failUnless(isinstance(c1, trellis.ConstantRule))

    def testReadWriteCells(self):
        # Classic bidirectional Celsius/Fahrenheit pair of writable rules.
        C = trellis.Cell(lambda: (F.value-32) * 5.0/9, -40)
        F = trellis.Cell(lambda: (C.value * 9.0)/5 + 32, -40)
        self.assertEqual(C.value, -40)
        self.assertEqual(F.value, -40)
        C.value = 0
        self.assertEqual(C.value, 0)
        self.assertEqual(F.value, 32)

    def testSelfDependencyDoesNotIncreaseLayer(self):
        # A rule reading its own value must not climb a layer per recalc.
        c1 = trellis.Value(23)
        c2 = trellis.Cell(lambda: c1.value + c2.value, 0)
        self.assertEqual(c2.value, 23)
        self.assertEqual(c2.layer, 1)
        c1.value = 19
        self.assertEqual(c2.value, 42)
        self.assertEqual(c2.layer, 1)

    def testSettingOccursForEqualObjects(self):
        # Identity is preserved: setting an equal-but-distinct object stores
        # the new object.
        d1 = {}; d2 = {}
        c1 = trellis.Value()
        c1.value = d1
        self.failUnless(c1.value is d1)
        c1.value = d2
        self.failUnless(c1.value is d2)

    def testRepeat(self):
        # trellis.repeat() re-runs the rule until it stops requesting it.
        def counter():
            if counter.value == 10:
                return counter.value
            trellis.repeat()
            return counter.value + 1
        counter = trellis.ReadOnlyCell(counter, 1)
        self.assertEqual(counter.value, 10)
    d(a)
    def testTodoRollbackFuture(self):
        # A TodoValue's _savepoint marks the first future access and is
        # cleared when rolled back past it.
        sp = self.ctrl.savepoint()
        tv = trellis.TodoValue(dict)
        self.assertEqual(tv._savepoint, None)
        tv.get_future()[1] = 2
        self.assertEqual(tv._savepoint, sp)
        sp2 = self.ctrl.savepoint()
        tv.get_future()[2] = 3
        self.assertEqual(tv._savepoint, sp)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
        self.assertEqual(tv._savepoint, None)
    d(a)
    def testTodoRollbackSet(self):
        # Same as above, but the second change is a direct value set.
        sp = self.ctrl.savepoint()
        tv = trellis.TodoValue(dict)
        self.assertEqual(tv._savepoint, None)
        tv.get_future()[1] = 2
        self.assertEqual(tv._savepoint, sp)
        sp2 = self.ctrl.savepoint()
        tv.value = {2:3}
        self.assertEqual(tv._savepoint, sp)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
        self.assertEqual(tv._savepoint, None)
    d(a)
    def testFullRollbackList(self):
        # Rolling back a later change fully unwinds the undo log to sp.
        l = trellis.List()
        sp = self.ctrl.savepoint()
        l.append(1)
        self.ctrl.on_undo(lambda:None)
        sp2 = self.ctrl.savepoint()
        l.append(2)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
    d(a)
    def testFullRollbackDict(self):
        # Same rollback contract for trellis.Dict.
        d = trellis.Dict()
        sp = self.ctrl.savepoint()
        d[1] = 2
        self.ctrl.on_undo(lambda:None)
        sp2 = self.ctrl.savepoint()
        d[2] = 3
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)
    d(a)
    def testFullRollbackSet(self):
        # Same rollback contract for trellis.Set.
        s = trellis.Set()
        sp = self.ctrl.savepoint()
        s.add(1)
        self.ctrl.on_undo(lambda:None)
        sp2 = self.ctrl.savepoint()
        s.add(2)
        self.ctrl.rollback_to(sp2)
        self.assertEqual(self.ctrl.savepoint(), sp)

    def run_modifier_and_rule(self, func, rule):
        # Helper: atomically schedule `rule` as a cell, record a savepoint on
        # `func`, then run `func` as a modifier.
        d(self.ctrl.atomically)
        def go():
            self.ctrl.schedule(trellis.Cell(rule))
            func.sp = self.ctrl.savepoint()
            trellis.modifier(func)()

    def testDictUndo(self):
        # A rule can roll a Dict back to the modifier's inner savepoint.
        def do_it():
            dd[1] = 2
            self.ctrl.on_undo(lambda:None)
            do_it.sp2 = self.ctrl.savepoint()
            dd[4] = 6
            del dd[5]
        def rule():
            if dict(dd)=={4:5, 5:6}: return
            self.assertEqual(dict(dd), {1:2, 4:6})
            self.ctrl.rollback_to(do_it.sp2)
            self.assertEqual(self.ctrl.savepoint(), do_it.sp)
        dd = trellis.Dict()
        dd[4] = 5
        dd[5] = 6
        self.assertEqual(dict(dd), {4:5, 5:6})
        self.run_modifier_and_rule(do_it, rule)
        self.assertEqual(dict(dd), {4:5, 5:6})

    def testSetAndObservingUndo(self):
        # Same rollback contract for Set, verifying the Observing helper's
        # watch list is restored as well.
        def do_it():
            s.add(1)
            self.ctrl.on_undo(lambda:None)
            do_it.sp2 = self.ctrl.savepoint()
            s.add(3)
            s.remove(4)
        def rule():
            if set(s)==set([4,5]): return
            self.assertEqual(set(s), set([1,3,5]))
            self.ctrl.rollback_to(do_it.sp2)
            self.assertEqual(self.ctrl.savepoint(), do_it.sp)
        s = trellis.Set([])
        o = collections.Observing(keys=s)
        s.update([4,5])
        self.assertEqual(set(s), set([4,5]))
        self.assertEqual(set(o._watching), set([4,5]))
        self.run_modifier_and_rule(do_it, rule)
        self.assertEqual(set(s), set([4,5]))
        self.assertEqual(set(o._watching), set([4,5]))
class TestDefaultEventLoop(unittest.TestCase):
    """Tests for the default EventLoop's call/poll/flush queue and its undo
    integration, plus Time-based scheduling rollback tests.

    NOTE(review): the later methods exercise ``Time`` scheduling rather than
    the event loop proper, and the bare ``d(...)`` statements look like
    mangled decorators from the source dump -- confirm against upstream.
    """
    def setUp(self):
        self.loop = EventLoop()
        self.ctrl = trellis.ctrl

    def testCallAndPoll(self):
        # poll() runs exactly one queued callback per call; extra polls are
        # harmless no-ops.
        log = []
        self.loop.call(log.append, 1)
        self.loop.call(log.append, 2)
        self.assertEqual(log, [])
        self.loop.poll()
        self.assertEqual(log, [1])
        self.loop.poll()
        self.assertEqual(log, [1, 2])
        self.loop.poll()
        self.assertEqual(log, [1, 2])
    d(a)
    def testLoopIsNonAtomic(self):
        # The loop refuses to run from inside an atomic operation.
        self.assertRaises(AssertionError, self.loop.poll)
        self.assertRaises(AssertionError, self.loop.flush)
        self.assertRaises(AssertionError, self.loop.run)

    def testCallAndFlush(self):
        # flush() drains the queue as of its start; callbacks queued during
        # the flush run on the next poll.
        log = []
        self.loop.call(log.append, 1)
        self.loop.call(log.append, 2)
        self.loop.call(self.loop.call, log.append, 3)
        self.assertEqual(log, [])
        self.loop.flush()
        self.assertEqual(log, [1, 2])
        self.loop.poll()
        self.assertEqual(log, [1, 2, 3])
        self.loop.poll()
        self.assertEqual(log, [1, 2, 3])

    def testUndoOfCall(self):
        # A call() that is rolled back never runs.
        log = []
        def do():
            self.loop.call(log.append, 1)
            sp = self.ctrl.savepoint()
            self.loop.call(log.append, 2)
            self.ctrl.rollback_to(sp)
            self.loop.call(log.append, 3)
        self.ctrl.atomically(do)
        self.assertEqual(log, [])
        self.loop.flush()
        self.assertEqual(log, [1, 3])

    def testScheduleUndo(self):
        # A failed recalc must roll the Time schedule/_events bookkeeping
        # back; dropping the observing cell cleans _events up entirely.
        t = Time()
        t.auto_update = False
        t20 = t[20]
        log = []
        d(trellis.Cell)
        def checktime():
            t.reached(t20)
            log.append(t._events[t20._when])
        d(trellis.Performer)
        def err_after_reached():
            if len(t._schedule)>1:
                raise DummyError
        self.assertRaises(DummyError, checktime.get_value)
        self.assertEqual(t._schedule, [t20._when, Max])
        self.assertEqual(dict(t._events), {t20._when:log[0]})
        del checktime
        self.failUnless(isinstance(log.pop(), trellis.Sensor))
        self.assertEqual(dict(t._events), {})
        self.assertEqual(log, [])

    def force_rollback(self):
        # Helper: install a performer that always raises, forcing the
        # current recalc to roll back.
        d(trellis.Performer)
        def do_it():
            raise DummyError

    def testUpdateUndo(self):
        # advance() that triggers a failing rule leaves the schedule and
        # pending events exactly as they were.
        t = Time()
        t.auto_update = False
        t20 = t[20]
        d(trellis.Cell)
        def checktime():
            if t.reached(t20):
                self.force_rollback()
        checktime.value
        self.assertEqual(t._schedule, [t20._when, Max])
        self.assertEqual(list(t._events), [t20._when])
        self.assertRaises(DummyError, t.advance, 20)
        self.assertEqual(t._schedule, [t20._when, Max])
        self.assertEqual(list(t._events), [t20._when])
class TestTasks(unittest.TestCase):
    """Tests for activity.TaskCell: generator-based tasks driven by the
    event loop, including dependency tracking, nested generators, value
    returns, and error propagation."""
    ctrl = trellis.ctrl

    def testRunAtomicallyInLoop(self):
        # Each resumption of the task generator runs atomically.
        log = []
        def f():
            self.failUnless(self.ctrl.active)
            log.append(1)
            yield activity.Pause
            self.failUnless(self.ctrl.active)
            log.append(2)
        t = activity.TaskCell(f)
        self.assertEqual(log, [])
        t._loop.flush()
        self.assertEqual(log, [1])
        t._loop.flush()
        self.assertEqual(log, [1, 2])

    def testDependencyAndCallback(self):
        # The task re-runs when a value it read changes, and its own writes
        # propagate to dependent cells; it stops once v becomes falsy.
        log = []
        v = trellis.Value(42)
        v1 = trellis.Value(1)
        c1 = trellis.Cell(lambda: v1.value*2)
        def f():
            while v.value:
                log.append(v.value)
                v1.value = v.value
                yield activity.Pause
        t = activity.TaskCell(f)
        check = []
        for j in 42, 57, 99, 106, 23, None:
            self.assertEqual(log, check)
            v.value = j
            if j: check.append(j)
            for i in range(5):
                t._loop.flush()
            if j: self.assertEqual(c1.value, j*2)
            self.assertEqual(log, check)

    def testPauseAndCall(self):
        # A task can delegate to sub-generators (yield self.method()) and
        # pauses inside them resume correctly when watched values change.
        # NOTE(review): the bare `activity.task()` line before demo() looks
        # like a mangled `@activity.task` decorator -- confirm upstream.
        log = []
        class TaskExample(trellis.Component):
            trellis.attrs(
                start = False,
                stop = False
            )
            def wait_for_start(self):
                log.append("waiting to start")
                while not self.start:
                    yield activity.Pause
            def wait_for_stop(self):
                while not self.stop:
                    log.append("waiting to stop")
                    yield activity.Pause
            activity.task()
            def demo(self):
                yield self.wait_for_start()
                log.append("starting")
                yield self.wait_for_stop()
                log.append("stopped")
        self.assertEqual(log, [])
        t = TaskExample()
        EventLoop.flush()
        self.assertEqual(log, ['waiting to start'])
        log.pop()
        t.start = True
        EventLoop.flush()
        self.assertEqual(log, ['starting', 'waiting to stop'])
        log.pop()
        log.pop()
        t.stop = True
        EventLoop.flush()
        self.assertEqual(log, ['stopped'])

    def testValueReturns(self):
        # A sub-generator's yielded value (or activity.Return) is delivered
        # to the caller via activity.resume().
        log = []
        def f1():
            yield 42
        def f2():
            yield f1(); yield activity.resume()
        def f3():
            yield f2(); v = activity.resume()
            log.append(v)
        t = activity.TaskCell(f3)
        EventLoop.flush()
        self.assertEqual(log, [42])
        log = []
        def f1():
            yield activity.Return(42)
        t = activity.TaskCell(f3)
        EventLoop.flush()
        self.assertEqual(log, [42])

    def testErrorPropagation(self):
        # An error raised by a called sub-task propagates to the caller.
        log = []
        def f1():
            raise DummyError
        def f2():
            try:
                yield f1(); activity.resume()
            except DummyError:
                log.append(True)
            else:
                pass
        t = activity.TaskCell(f2)
        self.assertEqual(log, [])
        EventLoop.flush()
        self.assertEqual(log, [True])

    def testSendAndThrow(self):
        # The task driver uses next()/send()/throw() on custom iterators,
        # forwarding sub-task results and exceptions appropriately.
        log = []
        class SendThrowIter(object):
            count = 0
            def next(self):
                if self.count==0:
                    self.count = 1
                    def f(): yield 99
                    return f()
                raise StopIteration
            def send(self, value):
                log.append(value)
                def f(): raise DummyError; yield None
                return f()
            def throw(self, typ, val, tb):
                log.append(typ)
                log.append(val.__class__) # type(val) is instance in Py<2.5
                log.append(type(tb))
                raise StopIteration
        def fs(): yield SendThrowIter()
        t = activity.TaskCell(fs)
        self.assertEqual(log, [])
        EventLoop.flush()
        self.assertEqual(log, [99, DummyError,DummyError, types.TracebackType])

    def testResumeOnlyOnceUntilFlushed(self):
        # Multiple dependency changes before a flush resume the task once.
        log = []
        c1 = trellis.Value(1)
        c2 = trellis.Value(2)
        def f():
            for i in range(3):
                c1.value, c2.value
                log.append(i)
                yield activity.Pause
        t = activity.TaskCell(f)
        self.assertEqual(log, [])
        EventLoop.flush()
        self.assertEqual(log, [0])
        c1.value = 3
        self.assertEqual(log, [0])
        c2.value = 4
        EventLoop.flush()
        self.assertEqual(log, [0, 1])
def additional_tests():
    """Build the doctest suite for the package's documentation files.

    Pythons older than 2.4 lack decorator syntax, so only the doctest files
    that avoid decorators are kept there; the SQLAlchemy doctests are added
    only when SQLAlchemy is importable.
    """
    import doctest, sys
    candidates = [
        'README.txt', 'STM-Observer.txt', 'Activity.txt', 'Collections.txt',
        'Internals.txt',
    ]
    if sys.version < '2.4':
        # All but Internals+Collections use decorator syntax
        candidates = candidates[3:]
    try:
        from sqlalchemy.orm.attributes import ClassManager
    except ImportError:
        pass
    else:
        candidates.insert(0, 'SQLAlchemy.txt')
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    return doctest.DocFileSuite(optionflags=flags, *candidates)
| [
"tomjoy002@gmail.com"
] | tomjoy002@gmail.com |
225d48e80f10f03bb995d8fb1ba7892453a63f12 | 3a3529f566957d8d79afc7d1ebe533fba239ab7c | /forum/urls.py | 3af8ed2a2bc3f64f573748ac7ea101fcff412b82 | [] | no_license | zhufree/sample | 9dc4f3aef86322487b9f252163d8b17937651ee7 | ba765b5e0a91e53d179f3593c0578a31e2bddfd8 | refs/heads/master | 2021-10-28T18:49:27.091127 | 2021-10-21T02:06:35 | 2021-10-21T02:06:35 | 41,006,994 | 30 | 5 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | __author__ = 'zhufree'
from django.conf.urls import url
from .views import *
# URL routes for the forum app; each named group "id" is passed to the view
# as a keyword argument (view semantics presumed from names -- see views.py).
urlpatterns = [
    # Examples:
    url(r'^$', index),  # front page / post listing
    url(r'^p/(?P<id>\d+)/$', single_post),  # one post, selected by numeric id
    url(r'^topic/(?P<id>\d+)/$', show_topic),  # posts under topic with this id
    url(r'^post/$', post),  # post-creation endpoint
]
| [
"zhufree2013@gmail.com"
] | zhufree2013@gmail.com |
73583adcaea5d524cdb542a439f178769047ef38 | 480cbc014abecd36899915e814157fe7e0d4072f | /tools/tasks.py | 3dc7ec6cf9c2c1af355e90ce242754ed7b321601 | [
"Apache-2.0"
] | permissive | mattcarabine/sync_gateway | 090c48f6a9646f40feda1a36ed10b8a33f7fc9f3 | a8bc5099d3c7185de72829a10311c96c800c01c7 | refs/heads/master | 2021-01-18T20:07:25.023611 | 2016-05-20T17:55:36 | 2016-05-20T17:55:36 | 59,411,033 | 0 | 0 | null | 2016-05-22T12:26:34 | 2016-05-22T12:26:33 | null | UTF-8 | Python | false | false | 34,404 | py | #!/usr/bin/env python
# -*- python -*-
import os
import sys
import tempfile
import time
import subprocess
import string
import re
import platform
import glob
import socket
import threading
import optparse
import atexit
import signal
import urllib
import shutil
import urlparse
class AltExitC(object):
    """atexit-like registry that also supports a forced, immediate exit.

    Handlers are appended under a lock and executed in reverse (LIFO)
    order, mirroring atexit semantics.  exit() runs the handlers and then
    terminates via os._exit(), bypassing normal interpreter shutdown.
    """
    def __init__(self):
        self.list = []                 # registered cleanup callables
        self.lock = threading.Lock()   # guards self.list
        atexit.register(self.at_exit_handler)

    def register(self, f):
        # Take the lock, then delegate; register_and_unlock releases it.
        self.lock.acquire()
        self.register_and_unlock(f)

    def register_and_unlock(self, f):
        # Callers may acquire self.lock themselves and use this entry point
        # to append atomically; the lock is always released here.
        try:
            self.list.append(f)
        finally:
            self.lock.release()

    def at_exit_handler(self):
        # NOTE(review): the lock is acquired and never released here; a
        # second invocation (e.g. exit() followed by interpreter atexit)
        # would deadlock if os._exit were not reached first -- confirm this
        # is intentional.
        self.lock.acquire()
        self.list.reverse()   # LIFO: most recently registered runs first
        for f in self.list:
            try:
                f()
            except:
                # Best-effort cleanup: a failing handler must not stop the
                # remaining handlers from running.
                pass

    def exit(self, status):
        # Run all handlers, then hard-exit with the given status code.
        self.at_exit_handler()
        os._exit(status)
# Module-level singleton used throughout this script for cleanup/forced exit.
AltExit = AltExitC()
def log(message, end = '\n'):
    """Write *message* followed by *end* to stderr and flush immediately,
    so progress output appears promptly even when stderr is redirected."""
    chunk = "%s%s" % (message, end)
    sys.stderr.write(chunk)
    sys.stderr.flush()
class Task(object):
    """One diagnostic command to run, with its output captured to a file.

    Class attributes (overridable per subclass or per instance via kwargs):
      privileged  - only run when the current user is root
      no_header   - suppress the section header before the output
      num_samples - how many times to run the command
      interval    - seconds to sleep between samples
    Recognized extra kwargs: literal (emit fixed text, run nothing), addenv
    (dict merged into the child environment), log_file, command_to_print.
    """
    privileged = False
    no_header = False
    num_samples = 1
    interval = 0

    def __init__(self, description, command, timeout=None, **kwargs):
        self.description = description
        self.command = command     # list => exec directly; string => shell
        self.timeout = timeout     # seconds; None means no time limit
        self.__dict__.update(kwargs)   # stash arbitrary per-task options

    def execute(self, fp):
        """Run the task"""
        import subprocess
        # A string command goes through the shell; a list is exec'd as-is.
        use_shell = not isinstance(self.command, list)
        if "literal" in self.__dict__:
            # "literal" tasks just write fixed text; nothing is executed.
            print >> fp, self.literal
            return 0
        env = None
        if "addenv" in self.__dict__:
            env = os.environ.copy()
            env.update(self.addenv)
        try:
            p = subprocess.Popen(self.command, bufsize=-1,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 shell=use_shell, env=env)
        except OSError, e:
            # if use_shell is False then Popen may raise exception
            # if binary is missing. In this case we mimic what
            # shell does. Namely, complaining to stderr and
            # setting non-zero status code. It's might also
            # automatically handle things like "failed to fork due
            # to some system limit".
            print >> fp, "Failed to execute %s: %s" % (self.command, e)
            return 127
        p.stdin.close()

        from threading import Timer, Event
        timer = None
        timer_fired = Event()

        # Arm a kill timer when a timeout was requested (Popen.kill is only
        # available on newer Pythons, hence the hasattr guard).
        if self.timeout is not None and hasattr(p, 'kill'):
            def on_timeout():
                p.kill()
                timer_fired.set()
            timer = Timer(self.timeout, on_timeout)
            timer.start()
        try:
            # Stream child output into the log file in 64 KiB chunks.
            while True:
                data = p.stdout.read(64 * 1024)
                if not data:
                    break
                fp.write(data)
        finally:
            if timer is not None:
                timer.cancel()
                timer.join()
                # there's a tiny chance that command succeeds just before
                # timer is fired; that would result in a spurious timeout
                # message
                if timer_fired.isSet():
                    print >> fp, "`%s` timed out after %s seconds" % (self.command, self.timeout)
        return p.wait()

    def will_run(self):
        """Determine if this task will run on this platform."""
        return sys.platform in self.platforms
class TaskRunner(object):
default_name = "couchbase.log"
def __init__(self, verbosity=0):
self.files = {}
self.tasks = {}
self.verbosity = verbosity
self.start_time = time.strftime("%Y%m%d-%H%M%S", time.gmtime())
self.tmpdir = tempfile.mkdtemp()
AltExit.register(self.finalize)
def finalize(self):
try:
for fp in self.files.iteritems():
fp.close()
except:
pass
shutil.rmtree(self.tmpdir, ignore_errors=True)
def collect_file(self, filename):
"""Add a file to the list of files collected. Used to capture the exact
file (including timestamps) from the Couchbase instance.
filename - Absolute path to file to collect.
"""
if not filename in self.files:
self.files[filename] = open(filename, 'r')
else:
log("Unable to collect file '{0}' - already collected.".format(
filename))
def get_file(self, filename):
if filename in self.files:
fp = self.files[filename]
else:
fp = open(os.path.join(self.tmpdir, filename), 'w+')
self.files[filename] = fp
return fp
def header(self, fp, title, subtitle):
separator = '=' * 78
print >> fp, separator
print >> fp, title
print >> fp, subtitle
print >> fp, separator
fp.flush()
def log_result(self, result):
if result == 0:
log("OK")
else:
log("Exit code %d" % result)
def run(self, task):
"""Run a task with a file descriptor corresponding to its log file"""
if task.will_run():
if hasattr(task, 'command_to_print'):
command_to_print = task.command_to_print
else:
command_to_print = task.command
log("%s (%s) - " % (task.description, command_to_print), end='')
if task.privileged and os.getuid() != 0:
log("skipped (needs root privs)")
return
if hasattr(task, 'log_file'):
filename = task.log_file
else:
filename = self.default_name
fp = self.get_file(filename)
if not task.no_header:
self.header(fp, task.description, command_to_print)
for i in xrange(task.num_samples):
if i > 0:
log("Taking sample %d after %f seconds - " % (i+1, task.interval), end='')
time.sleep(task.interval)
result = task.execute(fp)
self.log_result(result)
fp.flush()
elif self.verbosity >= 2:
log('Skipping "%s" (%s): not for platform %s' % (task.description, command_to_print, sys.platform))
def zip(self, filename, node):
"""Write all our logs to a zipfile"""
exe = exec_name("gozip")
prefix = "cbcollect_info_%s_%s" % (node, self.start_time)
files = []
for name, fp in self.files.iteritems():
fp.close()
files.append(fp.name)
fallback = False
try:
p = subprocess.Popen([exe, "-strip-path", "-prefix", prefix, filename] + files,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE)
p.stdin.close()
status = p.wait()
if status != 0:
log("gozip terminated with non-zero exit code (%d)" % status)
except OSError, e:
log("Exception during compression: %s" % e)
fallback = True
if fallback:
log("IMPORTANT:")
log(" Compression using gozip failed.")
log(" Falling back to python implementation.")
log(" Please let us know about this and provide console output.")
self._zip_fallback(filename, prefix, files)
def _zip_fallback(self, filename, prefix, files):
from zipfile import ZipFile, ZIP_DEFLATED
zf = ZipFile(filename, mode='w', compression=ZIP_DEFLATED)
try:
for name in files:
zf.write(name,
"%s/%s" % (prefix, os.path.basename(name)))
finally:
zf.close()
class SolarisTask(Task):
    # Runs only on Solaris hosts.
    platforms = ['sunos5', 'solaris']
class LinuxTask(Task):
    # Runs only on Linux hosts (Python 2 reports 'linux2').
    platforms = ['linux2']
class WindowsTask(Task):
    # Runs only on Windows (native or Cygwin) hosts.
    platforms = ['win32', 'cygwin']
class MacOSXTask(Task):
    # Runs only on Mac OS X hosts.
    platforms = ['darwin']
class UnixTask(SolarisTask, LinuxTask, MacOSXTask):
    # Runs on any Unix-like host (union of the Unix platform lists).
    platforms = SolarisTask.platforms + LinuxTask.platforms + MacOSXTask.platforms
class AllOsTask(UnixTask, WindowsTask):
    # Runs on every supported platform.
    platforms = UnixTask.platforms + WindowsTask.platforms
def make_curl_task(name, user, password, url,
                   timeout=60, log_file="couchbase.log", base_task=AllOsTask,
                   **kwargs):
    """Build a task that fetches *url* with curl using basic auth.

    The executed command carries the real password, while the command
    recorded for display (command_to_print) masks it with '*****'.
    """
    def curl_cmd(secret):
        return ["curl", "-sS", "--proxy", "",
                "-u", "%s:%s" % (user, secret), url]

    return base_task(name, curl_cmd(password),
                     timeout=timeout,
                     log_file=log_file,
                     command_to_print=curl_cmd("*****"), **kwargs)
def make_query_task(statement, user, password, port):
    """Build a curl task that runs a query *statement* against the local
    query service on *port* and captures its response."""
    encoded = urllib.quote(statement)
    endpoint = "http://127.0.0.1:%s/query/service?statement=%s" % (port, encoded)
    title = "Result of query statement \'%s\'" % statement
    return make_curl_task(name=title, user=user, password=password, url=endpoint)
def basedir():
    """Return the directory portion of sys.argv[0], or '.' when the script
    was invoked without any path component."""
    return os.path.dirname(sys.argv[0]) or "."
def make_event_log_task():
    """Build a Windows-only task dumping error/warning entries from the
    application and system event logs."""
    from datetime import datetime, timedelta
    # I found that wmic ntevent can be extremely slow; so limiting the output
    # to approximately last month
    cutoff = datetime.today() - timedelta(days=31)
    limit = cutoff.strftime('%Y%m%d000000.000000-000')
    command = ("wmic ntevent where "
               "\""
               "(LogFile='application' or LogFile='system') and "
               "EventType<3 and TimeGenerated>'%(limit)s'"
               "\" "
               "get TimeGenerated,LogFile,SourceName,EventType,Message "
               "/FORMAT:list" % {'limit': limit})
    return WindowsTask("Event log", command)
def make_os_tasks():
    """Build the list of OS-level diagnostic tasks for every supported
    platform: process lists, network/memory/disk state and system logs.

    Fix: the "couchbase user limits" task used to appear twice in a row
    (an exact duplicate); the duplicate entry has been removed.
    """
    # Couchbase-related process names; interpolated into several shell
    # pipelines below via %(programs)s % locals().
    programs = " ".join(["moxi", "memcached", "beam.smp",
                         "couch_compact", "godu", "sigar_port",
                         "cbq-engine", "indexer", "projector", "goxdcr",
                         "cbft"])

    _tasks = [
        UnixTask("uname", "uname -a"),
        UnixTask("time and TZ", "date; date -u"),
        UnixTask("ntp time",
                 "ntpdate -q pool.ntp.org || "
                 "nc time.nist.gov 13 || "
                 "netcat time.nist.gov 13"),
        UnixTask("ntp peers", "ntpq -p"),
        UnixTask("raw /etc/sysconfig/clock", "cat /etc/sysconfig/clock"),
        UnixTask("raw /etc/timezone", "cat /etc/timezone"),
        WindowsTask("System information", "systeminfo"),
        WindowsTask("Computer system", "wmic computersystem"),
        WindowsTask("Computer OS", "wmic os"),
        LinuxTask("System Hardware", "lshw -json || lshw"),
        SolarisTask("Process list snapshot", "prstat -a -c -n 100 -t -v -L 1 10"),
        SolarisTask("Process list", "ps -ef"),
        SolarisTask("Service configuration", "svcs -a"),
        SolarisTask("Swap configuration", "swap -l"),
        SolarisTask("Disk activity", "zpool iostat 1 10"),
        SolarisTask("Disk activity", "iostat -E 1 10"),
        LinuxTask("Process list snapshot", "export TERM=''; top -Hb -n1 || top -H n1"),
        LinuxTask("Process list", "ps -AwwL -o user,pid,lwp,ppid,nlwp,pcpu,maj_flt,min_flt,pri,nice,vsize,rss,tty,stat,wchan:12,start,bsdtime,command"),
        LinuxTask("Raw /proc/vmstat", "cat /proc/vmstat"),
        LinuxTask("Raw /proc/mounts", "cat /proc/mounts"),
        LinuxTask("Raw /proc/partitions", "cat /proc/partitions"),
        LinuxTask("Raw /proc/diskstats", "cat /proc/diskstats; echo ''", num_samples=10, interval=1),
        LinuxTask("Raw /proc/interrupts", "cat /proc/interrupts"),
        LinuxTask("Swap configuration", "free -t"),
        LinuxTask("Swap configuration", "swapon -s"),
        LinuxTask("Kernel modules", "lsmod"),
        LinuxTask("Distro version", "cat /etc/redhat-release"),
        LinuxTask("Distro version", "lsb_release -a"),
        LinuxTask("Distro version", "cat /etc/SuSE-release"),
        LinuxTask("Distro version", "cat /etc/issue"),
        LinuxTask("Installed software", "rpm -qa"),
        # NOTE: AFAIK columns _was_ necessary, but it doesn't appear to be
        # required anymore. I.e. dpkg -l correctly detects stdout as not a
        # tty and stops playing smart on formatting. Lets keep it for few
        # years and then drop, however.
        LinuxTask("Installed software", "COLUMNS=300 dpkg -l"),
        LinuxTask("Extended iostat", "iostat -x -p ALL 1 10 || iostat -x 1 10"),
        LinuxTask("Core dump settings", "find /proc/sys/kernel -type f -name '*core*' -print -exec cat '{}' ';'"),
        UnixTask("sysctl settings", "sysctl -a"),
        LinuxTask("Relevant lsof output",
                  "echo %(programs)s | xargs -n1 pgrep | xargs -n1 -r -- lsof -n -p" % locals()),
        LinuxTask("LVM info", "lvdisplay"),
        LinuxTask("LVM info", "vgdisplay"),
        LinuxTask("LVM info", "pvdisplay"),
        MacOSXTask("Process list snapshot", "top -l 1"),
        MacOSXTask("Disk activity", "iostat 1 10"),
        MacOSXTask("Process list",
                   "ps -Aww -o user,pid,lwp,ppid,nlwp,pcpu,pri,nice,vsize,rss,tty,"
                   "stat,wchan:12,start,bsdtime,command"),
        WindowsTask("Installed software", "wmic product get name, version"),
        WindowsTask("Service list", "wmic service where state=\"running\" GET caption, name, state"),
        WindowsTask("Process list", "wmic process"),
        WindowsTask("Process usage", "tasklist /V /fo list"),
        WindowsTask("Swap settings", "wmic pagefile"),
        WindowsTask("Disk partition", "wmic partition"),
        WindowsTask("Disk volumes", "wmic volume"),
        UnixTask("Network configuration", "ifconfig -a", interval=10,
                 num_samples=2),
        LinuxTask("Network configuration", "echo link addr neigh rule route netns | xargs -n1 -- sh -x -c 'ip $1 list' --"),
        WindowsTask("Network configuration", "ipconfig /all", interval=10,
                    num_samples=2),
        LinuxTask("Raw /proc/net/dev", "cat /proc/net/dev"),
        LinuxTask("Network link statistics", "ip -s link"),
        UnixTask("Network status", "netstat -anp || netstat -an"),
        WindowsTask("Network status", "netstat -ano"),
        AllOsTask("Network routing table", "netstat -rn"),
        LinuxTask("Network socket statistics", "ss -an"),
        LinuxTask("Extended socket statistics", "ss -an --info --processes"),
        UnixTask("Arp cache", "arp -na"),
        LinuxTask("Iptables dump", "iptables-save"),
        UnixTask("Raw /etc/hosts", "cat /etc/hosts"),
        UnixTask("Raw /etc/resolv.conf", "cat /etc/resolv.conf"),
        UnixTask("Raw /etc/nsswitch.conf", "cat /etc/nsswitch.conf"),
        WindowsTask("Arp cache", "arp -a"),
        WindowsTask("Network Interface Controller", "wmic nic"),
        WindowsTask("Network Adapter", "wmic nicconfig"),
        WindowsTask("Active network connection", "wmic netuse"),
        WindowsTask("Protocols", "wmic netprotocol"),
        WindowsTask("Hosts file", "type %SystemRoot%\system32\drivers\etc\hosts"),
        WindowsTask("Cache memory", "wmic memcache"),
        WindowsTask("Physical memory", "wmic memphysical"),
        WindowsTask("Physical memory chip info", "wmic memorychip"),
        WindowsTask("Local storage devices", "wmic logicaldisk"),
        UnixTask("Filesystem", "df -ha"),
        UnixTask("System activity reporter", "sar 1 10"),
        UnixTask("System paging activity", "vmstat 1 10"),
        UnixTask("System uptime", "uptime"),
        UnixTask("couchbase user definition", "getent passwd couchbase"),
        UnixTask("couchbase user limits", "su couchbase -c \"ulimit -a\"",
                 privileged=True),
        UnixTask("Interrupt status", "intrstat 1 10"),
        UnixTask("Processor status", "mpstat 1 10"),
        UnixTask("System log", "cat /var/adm/messages"),
        LinuxTask("Raw /proc/uptime", "cat /proc/uptime"),
        LinuxTask("All logs", "tar cz /var/log/syslog* /var/log/dmesg /var/log/messages* /var/log/daemon* /var/log/debug* /var/log/kern.log* 2>/dev/null",
                  log_file="syslog.tar.gz", no_header=True),
        LinuxTask("Relevant proc data", "echo %(programs)s | "
                  "xargs -n1 pgrep | xargs -n1 -- sh -c 'echo $1; cat /proc/$1/status; cat /proc/$1/limits; cat /proc/$1/smaps; cat /proc/$1/numa_maps; cat /proc/$1/task/*/sched; echo' --" % locals()),
        LinuxTask("Processes' environment", "echo %(programs)s | "
                  r"xargs -n1 pgrep | xargs -n1 -- sh -c 'echo $1; ( cat /proc/$1/environ | tr \\0 \\n ); echo' --" % locals()),
        LinuxTask("NUMA data", "numactl --hardware"),
        LinuxTask("NUMA data", "numactl --show"),
        LinuxTask("NUMA data", "cat /sys/devices/system/node/node*/numastat"),
        UnixTask("Kernel log buffer", "dmesg -H || dmesg"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/transparent_hugepage/enabled"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/transparent_hugepage/defrag"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/redhat_transparent_hugepage/enabled"),
        LinuxTask("Transparent Huge Pages data", "cat /sys/kernel/mm/redhat_transparent_hugepage/defrag"),
        LinuxTask("Network statistics", "netstat -s"),
        LinuxTask("Full raw netstat", "cat /proc/net/netstat"),
        LinuxTask("CPU throttling info", "echo /sys/devices/system/cpu/cpu*/thermal_throttle/* | xargs -n1 -- sh -c 'echo $1; cat $1' --"),
        make_event_log_task(),
        ]

    return _tasks
# stolen from http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
def iter_flatten(iterable):
it = iter(iterable)
for e in it:
if isinstance(e, (list, tuple)):
for f in iter_flatten(e):
yield f
else:
yield e
def flatten(iterable):
    """Return iter_flatten(iterable) materialized as a list."""
    return list(iter_flatten(iterable))
def read_guts(guts, key):
    """Look up *key* in the dump-guts dict, defaulting to an empty string."""
    if key in guts:
        return guts[key]
    return ""
def winquote_path(s):
    """Quote a path for cmd.exe: collapse doubled backslashes, turn forward
    slashes into backslashes, and wrap the result in double quotes."""
    normalized = s.replace("\\\\", "\\").replace('/', "\\")
    return '"%s"' % normalized
# python's split splits empty string to [''] which doesn't make any
# sense. So this function works around that.
def correct_split(string, splitchar):
rv = string.split(splitchar)
if rv == ['']:
rv = []
return rv
def make_stats_archives_task(guts, initargs_path):
    """Build a task dumping ns_server stats archives via escript.

    Returns [] when the helper scripts or the stats directory cannot be
    located (the caller flattens task lists, so [] simply drops the task).
    """
    escript = exec_name("escript")
    wrapper = find_script("escript-wrapper")
    dumper = find_script("dump-stats")
    stats_dir = read_guts(guts, "stats_dir")

    if dumper is None or wrapper is None or not stats_dir:
        return []

    command = [escript,
               wrapper,
               "--initargs-path", initargs_path, "--",
               dumper, stats_dir]
    return AllOsTask("stats archives",
                     command,
                     no_header=True,
                     log_file="stats_archives.json")
def make_product_task(guts, initargs_path, options):
    """Build the Couchbase-specific diagnostic tasks.

    Uses the ``guts`` dict (output of dump-guts, see get_server_guts) for
    ports, paths and bucket names, and ``options`` for collection flags.
    Returns a flat list of Task objects.
    """
    # initargs lives four levels below the install root.
    root = os.path.abspath(os.path.join(initargs_path, "..", "..", "..", ".."))
    dbdir = read_guts(guts, "db_dir")
    viewdir = read_guts(guts, "idx_dir")

    diag_url = "http://127.0.0.1:%s/diag?noLogs=1" % read_guts(guts, "rest_port")
    if options.single_node_diag:
        diag_url += "&oneNode=1"

    from distutils.spawn import find_executable

    # Pick whichever DNS lookup utility is installed, in preference order.
    lookup_cmd = None
    for cmd in ["dig", "nslookup", "host"]:
        if find_executable(cmd) is not None:
            lookup_cmd = cmd
            break

    lookup_tasks = []
    if lookup_cmd is not None:
        lookup_tasks = [UnixTask("DNS lookup information for %s" % node,
                                 "%(lookup_cmd)s '%(node)s'" % locals())
                        for node in correct_split(read_guts(guts, "nodes"), ",")]

    query_tasks = []
    query_port = read_guts(guts, "query_port")
    if query_port:
        def make(statement):
            return make_query_task(statement, user="@",
                                   password=read_guts(guts, "memcached_pass"),
                                   port=query_port)

        query_tasks = [make("SELECT * FROM system:datastores"),
                       make("SELECT * FROM system:namespaces"),
                       make("SELECT * FROM system:keyspaces"),
                       make("SELECT * FROM system:indexes")]

    index_tasks = []
    index_port = read_guts(guts, "indexer_http_port")
    if index_port:
        url = 'http://127.0.0.1:%s/getIndexStatus' % index_port
        index_tasks = [make_curl_task(name="Index definitions are: ",
                                      user="@", password=read_guts(guts, "memcached_pass"), url=url)]

    fts_tasks = []
    fts_port = read_guts(guts, "fts_http_port")
    if fts_port:
        url = 'http://127.0.0.1:%s/api/diag' % fts_port
        fts_tasks = [make_curl_task(name="FTS /api/diag: ",
                                    user="@", password=read_guts(guts, "memcached_pass"), url=url)]

    # The list below intentionally mixes single Tasks and nested lists of
    # Tasks; everything is flattened at the end.
    _tasks = [
        UnixTask("Directory structure",
                 ["ls", "-lRai", root]),
        UnixTask("Database directory structure",
                 ["ls", "-lRai", dbdir]),
        UnixTask("Index directory structure",
                 ["ls", "-lRai", viewdir]),
        UnixTask("couch_dbinfo",
                 ["find", dbdir, "-type", "f",
                  "-name", "*.couch.*",
                  "-exec", "couch_dbinfo", "{}", "+"]),
        LinuxTask("Database directory filefrag info",
                  ["find", dbdir, "-type", "f", "-exec", "filefrag", "-v", "{}", "+"]),
        LinuxTask("Index directory filefrag info",
                  ["find", viewdir, "-type", "f", "-exec", "filefrag", "-v", "{}", "+"]),
        WindowsTask("Database directory structure",
                    "dir /s " + winquote_path(dbdir)),
        WindowsTask("Index directory structure",
                    "dir /s " + winquote_path(viewdir)),
        WindowsTask("Version file",
                    "type " + winquote_path(basedir()) + "\\..\\VERSION.txt"),
        WindowsTask("Manifest file",
                    "type " + winquote_path(basedir()) + "\\..\\manifest.txt"),
        WindowsTask("Manifest file",
                    "type " + winquote_path(basedir()) + "\\..\\manifest.xml"),
        LinuxTask("Version file", "cat '%s/VERSION.txt'" % root),
        LinuxTask("Manifest file", "cat '%s/manifest.txt'" % root),
        LinuxTask("Manifest file", "cat '%s/manifest.xml'" % root),
        AllOsTask("Couchbase config", "", literal = read_guts(guts, "ns_config")),
        AllOsTask("Couchbase static config", "", literal = read_guts(guts, "static_config")),
        AllOsTask("Raw ns_log", "", literal = read_guts(guts, "ns_log")),
        # TODO: just gather those in python
        WindowsTask("Memcached logs",
                    "cd " + winquote_path(read_guts(guts, "memcached_logs_path")) + " && " +
                    "for /f %a IN ('dir /od /b memcached.log.*') do type %a",
                    log_file="memcached.log"),
        UnixTask("Memcached logs",
                 ["sh", "-c", 'cd "$1"; for file in $(ls -tr memcached.log.*); do cat \"$file\"; done', "--", read_guts(guts, "memcached_logs_path")],
                 log_file="memcached.log"),
        [WindowsTask("Ini files (%s)" % p,
                     "type " + winquote_path(p),
                     log_file="ini.log")
         for p in read_guts(guts, "couch_inis").split(";")],
        UnixTask("Ini files",
                 ["sh", "-c", 'for i in "$@"; do echo "file: $i"; cat "$i"; done', "--"] + read_guts(guts, "couch_inis").split(";"),
                 log_file="ini.log"),

        make_curl_task(name="couchbase diags",
                       user="@",
                       password=read_guts(guts, "memcached_pass"),
                       timeout=600,
                       url=diag_url,
                       log_file="diag.log"),

        make_curl_task(name="master events",
                       user="@",
                       password=read_guts(guts, "memcached_pass"),
                       timeout=300,
                       url='http://127.0.0.1:%s/diag/masterEvents?o=1' % read_guts(guts, "rest_port"),
                       log_file="master_events.log",
                       no_header=True),

        make_curl_task(name="ale configuration",
                       user="@",
                       password=read_guts(guts, "memcached_pass"),
                       url='http://127.0.0.1:%s/diag/ale' % read_guts(guts, "rest_port"),
                       log_file="couchbase.log"),

        [AllOsTask("couchbase logs (%s)" % name, "cbbrowse_logs %s" % name,
                   addenv = [("REPORT_DIR", read_guts(guts, "log_path"))],
                   log_file="ns_server.%s" % name)
         for name in ["debug.log", "info.log", "error.log", "couchdb.log",
                      "xdcr.log", "xdcr_errors.log",
                      "views.log", "mapreduce_errors.log",
                      "stats.log", "babysitter.log", "ssl_proxy.log",
                      "reports.log", "xdcr_trace.log", "http_access.log",
                      "http_access_internal.log", "ns_couchdb.log",
                      "goxdcr.log", "query.log", "projector.log", "indexer.log",
                      "fts.log", "metakv.log"]],

        [AllOsTask("memcached stats %s" % kind,
                   flatten(["cbstats", "-a", "127.0.0.1:%s" % read_guts(guts, "memcached_port"), kind, "-b", read_guts(guts, "memcached_admin"), "-p", read_guts(guts, "memcached_pass")]),
                   log_file="stats.log",
                   timeout=60)
         for kind in ["all", "allocator", "checkpoint", "config",
                      "dcp", "dcpagg",
                      ["diskinfo", "detail"], ["dispatcher", "logs"],
                      "failovers", ["hash", "detail"],
                      "kvstore", "kvtimings", "memory",
                      "prev-vbucket",
                      "runtimes", "scheduler",
                      "tap", "tapagg",
                      "timings", "uuid",
                      "vbucket", "vbucket-details", "vbucket-seqno",
                      "warmup", "workload"]],

        [AllOsTask("memcached mcstat %s" % kind,
                   flatten(["mcstat", "-h", "127.0.0.1:%s" % read_guts(guts, "memcached_port"),
                            "-u", read_guts(guts, "memcached_admin"),
                            "-P", read_guts(guts, "memcached_pass"), kind]),
                   log_file="stats.log",
                   timeout=60)
         for kind in ["connections"]],

        [AllOsTask("ddocs for %s (%s)" % (bucket, path),
                   ["couch_dbdump", path],
                   log_file = "ddocs.log")
         for bucket in set(correct_split(read_guts(guts, "buckets"), ",")) - set(correct_split(read_guts(guts, "memcached_buckets"), ","))
         for path in glob.glob(os.path.join(dbdir, bucket, "master.couch*"))],
        [AllOsTask("replication docs (%s)" % (path),
                   ["couch_dbdump", path],
                   log_file = "ddocs.log")
         for path in glob.glob(os.path.join(dbdir, "_replicator.couch*"))],

        [AllOsTask("Couchstore local documents (%s, %s)" % (bucket, os.path.basename(path)),
                   ["couch_dbdump", "--local", path],
                   log_file = "couchstore_local.log")
         for bucket in set(correct_split(read_guts(guts, "buckets"), ",")) - set(correct_split(read_guts(guts, "memcached_buckets"), ","))
         for path in glob.glob(os.path.join(dbdir, bucket, "*.couch.*"))],

        [UnixTask("moxi stats (port %s)" % port,
                  "echo stats proxy | nc 127.0.0.1 %s" % port,
                  log_file="stats.log",
                  timeout=60)
         for port in correct_split(read_guts(guts, "moxi_ports"), ",")],

        [AllOsTask("mctimings",
                   ["mctimings",
                    "-u", read_guts(guts, "memcached_admin"),
                    "-P", read_guts(guts, "memcached_pass"),
                    "-h", "127.0.0.1:%s" % read_guts(guts, "memcached_port"),
                    "-v"] + stat,
                   log_file="stats.log",
                   timeout=60)
         for stat in ([], ["subdoc_execute"])],

        make_stats_archives_task(guts, initargs_path)
        ]

    _tasks = flatten([lookup_tasks, query_tasks, index_tasks, fts_tasks, _tasks])

    return _tasks
def find_script(name):
    """Search for helper script *name* beside this script and under
    scripts/; return its full path, or None when not found."""
    for directory in (basedir(), os.path.join(basedir(), "scripts")):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            log("Found %s: %s" % (name, candidate))
            return candidate
    return None
def get_server_guts(initargs_path):
    """Run the dump-guts escript against *initargs_path* and return its
    output parsed into a dict.

    dump-guts emits NUL-separated alternating key/value tokens; returns {}
    when the script cannot be located.
    """
    dump_guts_path = find_script("dump-guts")
    if dump_guts_path is None:
        log("Couldn't find dump-guts script. Some information will be missing")
        return {}

    escript = exec_name("escript")
    extra_args = os.getenv("EXTRA_DUMP_GUTS_ARGS")
    args = [escript, dump_guts_path, "--initargs-path", initargs_path]
    if extra_args:
        # EXTRA_DUMP_GUTS_ARGS is a ';'-separated list of extra arguments.
        args = args + extra_args.split(";")
    print("Checking for server guts in %s..." % initargs_path)
    p = subprocess.Popen(args, stdout = subprocess.PIPE)
    output = p.stdout.read()
    p.wait()
    rc = p.returncode
    # print("args: %s gave rc: %d and:\n\n%s\n" % (args, rc, output))

    # Alternating key/value tokens, NUL-separated; trailing NULs stripped.
    tokens = output.rstrip("\0").split("\0")
    d = {}
    if len(tokens) > 1:
        for i in xrange(0, len(tokens), 2):
            d[tokens[i]] = tokens[i+1]
    return d
def guess_utility(command):
    """Guess which external utility a task command relies on.

    Returns None for an empty command; the whole command string when it
    contains shell constructs (pipes, chains, sh/su/find/for) that make
    the utility ambiguous; and the first word otherwise.
    """
    if isinstance(command, list):
        command = ' '.join(command)

    if not command:
        return None

    if re.findall(r'[|;&]|\bsh\b|\bsu\b|\bfind\b|\bfor\b', command):
        # Too complex to parse mechanically; keep the whole command so a
        # human can decide.
        return command

    return command.split()[0]
def dump_utilities(*args, **kwargs):
    """Print, grouped per platform, the external utilities the collection
    tasks invoke, then exit(0). Useful for packaging/dependency review."""
    specific_platforms = { SolarisTask : 'Solaris',
                           LinuxTask : 'Linux',
                           WindowsTask : 'Windows',
                           MacOSXTask : 'Mac OS X' }
    platform_utils = dict((name, set()) for name in specific_platforms.values())

    # make_product_task needs guts/options; only the task commands matter
    # here, so supply empty guts and an options object that answers None.
    class FakeOptions(object):
        def __getattr__(self, name):
            return None

    tasks = make_os_tasks() + make_product_task({}, "", FakeOptions())

    for task in tasks:
        utility = guess_utility(task.command)
        if utility is None:
            continue

        # A task may match several platform classes (e.g. UnixTask is a
        # subclass of all three Unix platforms).
        for (platform, name) in specific_platforms.items():
            if isinstance(task, platform):
                platform_utils[name].add(utility)

    print '''This is an autogenerated, possibly incomplete and flawed list
of utilites used by cbcollect_info'''

    for (name, utilities) in sorted(platform_utils.items(), key=lambda x: x[0]):
        print "\n%s:" % name

        for utility in sorted(utilities):
            print " - %s" % utility

    sys.exit(0)
def setup_stdin_watcher():
    """Exit with status 2 as soon as a line (or EOF) arrives on stdin."""
    def watch():
        sys.stdin.readline()
        AltExit.exit(2)

    watcher = threading.Thread(target=watch)
    watcher.setDaemon(True)
    watcher.start()
class CurlKiller:
    """Cleanup hook that kills a running curl subprocess on abnormal exit."""

    def __init__(self, p):
        self.p = p

    def cleanup(self):
        """Kill the tracked process, unless disarm() was called first."""
        if self.p is not None:
            print("Killing curl...")
            os.kill(self.p.pid, signal.SIGKILL)
            print("done")

    def disarm(self):
        """Forget the process so cleanup() becomes a no-op."""
        self.p = None
def do_upload_and_exit(path, url):
    """Upload the collected file at *path* to *url* with curl, then exit.

    Exits 0 when the server answers HTTP 200, 1 otherwise. Registers
    AltExit handlers so the temp output file is removed and a still-running
    curl is killed on abnormal exit.
    """
    output_fd, output_file = tempfile.mkstemp()
    os.close(output_fd)

    AltExit.register(lambda: os.unlink(output_file))

    args = ["curl", "-sS",
            "--output", output_file,
            "--write-out", "%{http_code}", "--upload-file", path, url]

    # Hold the AltExit lock while spawning so the CurlKiller is registered
    # atomically with process creation.
    AltExit.lock.acquire()
    try:
        p = subprocess.Popen(args, stdout=subprocess.PIPE)
        k = CurlKiller(p)
        AltExit.register_and_unlock(k.cleanup)
    except Exception, e:
        AltExit.lock.release()
        raise e
    stdout, _ = p.communicate()
    # curl finished normally; no need to kill it on exit any more.
    k.disarm()

    if p.returncode != 0:
        sys.exit(1)
    else:
        # --write-out printed the HTTP status code on stdout.
        if stdout.strip() == '200':
            log('Done uploading')
            sys.exit(0)
        else:
            log('HTTP status code: %s' % stdout)
            sys.exit(1)
def parse_host(host):
    """Split *host* into (scheme, netloc, path), assuming https:// when no
    scheme was given."""
    parsed = urlparse.urlsplit(host)
    if not parsed.scheme:
        parsed = urlparse.urlsplit('https://' + host)
    return parsed.scheme, parsed.netloc, parsed.path
def generate_upload_url(parser, options, zip_filename):
    """Build the full upload URL for the collected zip.

    Layout: <upload-host>/<customer>[/<ticket>]/<zip_filename>.
    Returns None when no --upload-host was given; calls parser.error()
    when --upload-host is set without --customer.
    """
    upload_url = None
    if options.upload_host:
        if not options.upload_customer:
            parser.error("Need --customer when --upload-host is given")

        scheme, netloc, path = parse_host(options.upload_host)

        # URL-escape the user-supplied path components.
        customer = urllib.quote(options.upload_customer)
        fname = urllib.quote(zip_filename)
        if options.upload_ticket:
            full_path = '%s/%s/%d/%s' % (path, customer, options.upload_ticket, fname)
        else:
            full_path = '%s/%s/%s' % (path, customer, fname)

        upload_url = urlparse.urlunsplit((scheme, netloc, full_path, '', ''))
        log("Will upload collected .zip file into %s" % upload_url)
    return upload_url
def check_ticket(option, opt, value):
    """optparse type checker for "ticket": accept 1-7 digit ticket numbers
    and return them as int; raise OptionValueError otherwise."""
    if not re.match('^\d{1,7}$', value):
        raise optparse.OptionValueError(
            "option %s: invalid ticket number: %r" % (opt, value))
    return int(value)
class CbcollectInfoOptions(optparse.Option):
    # optparse.Option subclass registering a custom "ticket" option type,
    # validated/converted by check_ticket.
    from copy import copy

    TYPES = optparse.Option.TYPES + ("ticket",)
    # Copy the base checker map so the base class is not mutated.
    TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER["ticket"] = check_ticket
def find_primary_addr(default = None):
    """Discover the primary local IP address by "connecting" a UDP socket
    towards a public address; return *default* on socket errors.

    connect() on a datagram socket only selects a route/local address,
    so no packets are actually sent.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        try:
            sock.connect(("8.8.8.8", 56))
            return sock.getsockname()[0]
        except socket.error:
            return default
    finally:
        sock.close()
def exec_name(name):
    """Append ".exe" to *name* on Windows; return it unchanged elsewhere."""
    return name + ".exe" if sys.platform == 'win32' else name
| [
"tleyden@couchbase.com"
] | tleyden@couchbase.com |
d9bdb178ecc13cd0d02f628d51c3fc104d950945 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/power_of_three.py | 0c90784597ced25c72515a818f2ab265938bf1d4 | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 862 | py | # https://leetcode.com/problems/power-of-three/
"""
Given an integer n, return true if it is a power of three. Otherwise, return false.
An integer n is a power of three, if there exists an integer x such that n == 3^x.
Example 1:
Input: n = 27
Output: true
Example 2:
Input: n = 0
Output: false
Example 3:
Input: n = 9
Output: true
Constraints:
-2^31 <= n <= 2^31 - 1
Follow up: Could you solve it without loops/recursion?
"""
from math import log
def is_power_of_three(n: int) -> bool:
    """Return True iff n == 3**k for some non-negative integer k.

    Estimates the exponent with a base-3 logarithm, then verifies it
    exactly with integer arithmetic (guards against float rounding).
    """
    if n <= 0:
        return False
    exponent = round(log(n, 3))
    return n == 3 ** exponent
def is_power_of_three(n: int) -> bool:
    """Return True iff n is a power of three (1, 3, 9, 27, ...).

    Iteratively divides out factors of 3; equivalent to the recursive
    formulation but without call-stack depth proportional to log3(n).
    """
    if n <= 0:
        return False
    while n % 3 == 0:
        n //= 3
    return n == 1
| [
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
86e15099b852b3ba1d7f58082cd64ac62fd06500 | 74b8a63615281a74a3646c9a03928bea60c3c6f3 | /pymccrgb/tests/context.py | 25bf269f3088eab6a0e29d1b098ee1e77eea81e9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | lswsyx/pymccrgb | 8d3df8200633b9b9918b8d7ec7ee84baa49750c6 | dc8ad2e46cbe6ff8081c32fa11bce68f869baafa | refs/heads/master | 2023-02-23T20:38:14.727935 | 2020-08-06T21:18:05 | 2020-08-06T21:18:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
import pymccrgb
| [
"robertmsare@gmail.com"
] | robertmsare@gmail.com |
799ae55b2b7a4557348b168f0a3fc74d923f5fd4 | 2cd0a84aefb8a7141d1c8da99845a8ada0cc009c | /tensorflow/python/ops/rnn_cell.py | 9aa2314e5e65b02c0d4f7ee1661b77200ea50ef1 | [
"Apache-2.0"
] | permissive | hholst80/tensorflow-old | d466cee96eac717524ab8e4ee85275ce28bb5d68 | 79df325975402e03df89747947ff5b7f18407c52 | refs/heads/master | 2022-12-20T22:07:40.427519 | 2016-05-13T09:57:24 | 2016-05-13T09:57:24 | 58,914,336 | 1 | 1 | Apache-2.0 | 2022-12-09T21:52:14 | 2016-05-16T08:00:04 | C++ | UTF-8 | Python | false | false | 26,838 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
class RNNCell(object):
  """Abstract object representing an RNN cell.

  An RNN cell, in the most abstract setting, is anything that has
  a state -- a vector of floats of size self.state_size -- and performs some
  operation that takes inputs of size self.input_size. This operation
  results in an output of size self.output_size and a new state.

  This module provides a number of basic commonly used RNN cells, such as
  LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number
  of operators that allow add dropouts, projections, or embeddings for inputs.
  Constructing multi-layer cells is supported by a super-class, MultiRNNCell,
  defined later. Every RNNCell must have the properties below and and
  implement __call__ with the following signature.
  """

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: 2D Tensor with shape [batch_size x self.input_size].
      state: 2D Tensor with shape [batch_size x self.state_size].
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:
      - Output: A 2D Tensor with shape [batch_size x self.output_size]
      - New state: A 2D Tensor with shape [batch_size x self.state_size].
    """
    raise NotImplementedError("Abstract method")

  @property
  def input_size(self):
    """Integer: size of inputs accepted by this cell."""
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  @property
  def state_size(self):
    """Integer: size of state used by this cell."""
    raise NotImplementedError("Abstract method")

  def zero_state(self, batch_size, dtype):
    """Return state tensor (shape [batch_size x state_size]) filled with 0.

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      A 2D Tensor of shape [batch_size x state_size] filled with zeros.
    """
    zeros = array_ops.zeros(
        array_ops.pack([batch_size, self.state_size]), dtype=dtype)
    # array_ops.pack loses the static shape; restore what is known
    # ([None, state_size]) to help downstream shape inference.
    zeros.set_shape([None, self.state_size])
    return zeros
class BasicRNNCell(RNNCell):
  """The most basic RNN cell."""

  def __init__(self, num_units, input_size=None):
    # input_size defaults to num_units when not specified.
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
      # For a vanilla RNN the output and the new state are the same tensor.
      output = tanh(linear([inputs, state], self._num_units, True))
    return output, output
class GRUCell(RNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

  def __init__(self, num_units, input_size=None):
    # input_size defaults to num_units when not specified.
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, linear([inputs, state],
                                            2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("Candidate"):
        # Candidate activation uses the reset-gated previous state.
        c = tanh(linear([inputs, r * state], self._num_units, True))
      # Interpolate between previous state and candidate via update gate.
      new_h = u * state + (1 - u) * c
    return new_h, new_h
class BasicLSTMCell(RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  For advanced models, please use the full LSTMCell that follows.
  """

  def __init__(self, num_units, forget_bias=1.0, input_size=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: int, The dimensionality of the inputs into the LSTM cell,
        by default equal to num_units.
    """
    self._num_units = num_units
    self._input_size = num_units if input_size is None else input_size
    self._forget_bias = forget_bias

  @property
  def input_size(self):
    return self._input_size

  @property
  def output_size(self):
    return self._num_units

  @property
  def state_size(self):
    # State is the concatenation [c, h], hence twice num_units.
    return 2 * self._num_units

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      # forget_bias shifts the forget gate towards "keep" early in training.
      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      new_h = tanh(new_c) * sigmoid(o)

    return new_h, array_ops.concat(1, [new_c, new_h])
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(sharded_variable) == 1:
    # Single shard: no concatenation needed.
    return sharded_variable[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a previously created concat op for this variable, if one was
  # recorded in the graph collection, so each set of shards is
  # concatenated only once per graph.
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concat_variable)
  return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype.

  The first dimension of *shape* is split as evenly as possible across
  num_shards variables named "<name>_0" ... "<name>_<num_shards-1>".
  """
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    # Distribute the remainder one extra row at a time over the first shards.
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
class LSTMCell(RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.
  This implementation is based on:
  https://research.google.com/pubs/archive/43905.pdf
  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.
  It uses peep-hole connections, optional cell clipping, and an optional
  projection layer.
  """
  def __init__(self, num_units, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None,
               num_unit_shards=1, num_proj_shards=1, forget_bias=1.0):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      input_size: int, The dimensionality of the inputs into the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      num_unit_shards: How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix. If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of the training.
    """
    self._num_units = num_units
    self._input_size = input_size
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    if num_proj:
      # With a projection layer, state = [c (num_units) | projected m (num_proj)].
      self._state_size = num_units + num_proj
      self._output_size = num_proj
    else:
      # Without projection, state concatenates c and m, each num_units wide.
      self._state_size = 2 * num_units
      self._output_size = num_units
  @property
  def input_size(self):
    # Falls back to num_units when no explicit input size was given.
    return self._num_units if self._input_size is None else self._input_size
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._state_size
  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".
    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    # Split the packed state back into cell state c and output m.
    c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
    m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
    dtype = inputs.dtype
    actual_input_size = inputs.get_shape().as_list()[1]
    if self._input_size and self._input_size != actual_input_size:
      raise ValueError("Actual input size not same as specified: %d vs %d." %
                       (actual_input_size, self._input_size))
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "LSTMCell"
      # One fused weight matrix for all four gates, optionally sharded.
      concat_w = _get_concat_variable(
          "W", [actual_input_size + num_proj, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(1, [inputs, m_prev])
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      if self._use_peepholes:
        # Peepholes let the gates see the previous cell state directly.
        c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
             sigmoid(i + w_i_diag * c_prev) * tanh(j))
      else:
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
      if self._cell_clip is not None:
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * tanh(c)
      else:
        m = sigmoid(o) * tanh(c)
      if self._num_proj is not None:
        # Project m down to num_proj dimensions (optionally sharded).
        concat_w_proj = _get_concat_variable(
            "W_P", [self._num_units, self._num_proj],
            dtype, self._num_proj_shards)
        m = math_ops.matmul(m, concat_w_proj)
    return m, array_ops.concat(1, [c, m])
class OutputProjectionWrapper(RNNCell):
  """Wraps an RNNCell, linearly projecting its output to a new size.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your outputs in time,
  do the projection on this batch-concatenated sequence, then split it
  if needed or directly feed into a softmax.
  """
  def __init__(self, cell, output_size):
    """Create a cell with output projection.
    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      output_size: integer, the size of the output after projection.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if output_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if output_size < 1:
      raise ValueError("Parameter output_size must be > 0: %d." % output_size)
    self._cell = cell
    self._output_size = output_size
  @property
  def input_size(self):
    # Inputs are consumed unchanged by the wrapped cell.
    return self._cell.input_size
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Run the wrapped cell, then project its output to output_size."""
    cell_output, new_state = self._cell(inputs, state)
    # Default scope: "OutputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = linear(cell_output, self._output_size, True)
    return projected, new_state
class InputProjectionWrapper(RNNCell):
  """Wraps an RNNCell, linearly projecting its inputs first.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """
  def __init__(self, cell, input_size):
    """Create a cell with input projection.
    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      input_size: integer, the size of the inputs before projection.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if input_size is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if input_size < 1:
      raise ValueError("Parameter input_size must be > 0: %d." % input_size)
    self._cell = cell
    self._input_size = input_size
  @property
  def input_size(self):
    return self._input_size
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Project the inputs to the wrapped cell's size, then run it."""
    # Default scope: "InputProjectionWrapper"
    with vs.variable_scope(scope or type(self).__name__):
      projected = linear(inputs, self._cell.input_size, True)
    return self._cell(projected, state)
class DropoutWrapper(RNNCell):
  """Operator adding dropout to inputs and outputs of the given cell."""
  def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
               seed=None):
    """Create a cell with added input and/or output dropout.
    Dropout is never used on the state.
    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is float and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is float and 1, no output dropout will be added.
      seed: (optional) integer, the randomness seed.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if keep_prob is not between 0 and 1.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not a RNNCell.")
    if (isinstance(input_keep_prob, float) and
        not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
      # %f instead of %d so the offending float value is not truncated.
      raise ValueError("Parameter input_keep_prob must be between 0 and 1: %f"
                       % input_keep_prob)
    if (isinstance(output_keep_prob, float) and
        not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
      # Bug fix: the original message blamed input_keep_prob here, which
      # made the error misleading when output_keep_prob was out of range.
      raise ValueError("Parameter output_keep_prob must be between 0 and 1: %f"
                       % output_keep_prob)
    self._cell = cell
    self._input_keep_prob = input_keep_prob
    self._output_keep_prob = output_keep_prob
    self._seed = seed
  @property
  def input_size(self):
    return self._cell.input_size
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared dropouts."""
    # A non-float keep_prob is a Tensor and always applies dropout;
    # a float of exactly 1.0 skips the op entirely.
    if (not isinstance(self._input_keep_prob, float) or
        self._input_keep_prob < 1):
      inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
    output, new_state = self._cell(inputs, state)
    if (not isinstance(self._output_keep_prob, float) or
        self._output_keep_prob < 1):
      output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
    return output, new_state
class EmbeddingWrapper(RNNCell):
  """Wraps an RNNCell, embedding its integer symbol inputs first.
  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """
  def __init__(self, cell, embedding_classes, embedding_size, initializer=None):
    """Create a cell with an added input embedding.
    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    if not isinstance(cell, RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer
  @property
  def input_size(self):
    # One symbol id per step.
    return 1
  @property
  def output_size(self):
    return self._cell.output_size
  @property
  def state_size(self):
    return self._cell.state_size
  def __call__(self, inputs, state, scope=None):
    """Embed the symbol ids in `inputs`, then run the wrapped cell."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        # Pick the first initializer that is set: explicit argument, the
        # enclosing variable scope's, or a Uniform(-sqrt(3), sqrt(3))
        # default, which has variance=1.
        initializer = self._initializer or vs.get_variable_scope().initializer
        if not initializer:
          sqrt3 = math.sqrt(3)
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
        embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                  self._embedding_size],
                                    initializer=initializer)
        flat_ids = array_ops.reshape(inputs, [-1])
        embedded = embedding_ops.embedding_lookup(embedding, flat_ids)
        return self._cell(embedded, state)
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells."""
  def __init__(self, cells):
    """Create a RNN cell composed sequentially of a number of RNNCells.
    Args:
      cells: list of RNNCells that will be composed in this order.
    Raises:
      ValueError: if cells is empty (not allowed) or if their sizes don't match.
    """
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    # Each layer's output feeds the next layer's input, so sizes must chain.
    # NOTE(review): `xrange` must come from an import outside this excerpt
    # (e.g. six.moves); on plain Python 3 this would be `range`.
    for i in xrange(len(cells) - 1):
      if cells[i + 1].input_size != cells[i].output_size:
        raise ValueError("In MultiRNNCell, the input size of each next"
                         " cell must match the output size of the previous one."
                         " Mismatched output size in cell %d." % i)
    self._cells = cells
  @property
  def input_size(self):
    return self._cells[0].input_size
  @property
  def output_size(self):
    return self._cells[-1].output_size
  @property
  def state_size(self):
    # The packed state concatenates every layer's state along dim 1.
    return sum([cell.state_size for cell in self._cells])
  def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
      cur_state_pos = 0
      cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with vs.variable_scope("Cell%d" % i):
          # Slice this layer's portion out of the packed state.
          cur_state = array_ops.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
          cur_inp, new_state = cell(cur_inp, cur_state)
          new_states.append(new_state)
    return cur_inp, array_ops.concat(1, new_states)
class SlimRNNCell(RNNCell):
  """A simple wrapper for slim.rnn_cells."""
  def __init__(self, cell_fn):
    """Create a SlimRNNCell from a cell_fn.
    Args:
      cell_fn: a function which takes (inputs, state, scope) and produces the
        outputs and the new_state. Additionally when called with inputs=None and
        state=None it should return (initial_outputs, initial_state).
    Raises:
      TypeError: if cell_fn is not callable
      ValueError: if cell_fn cannot produce a valid initial state.
    """
    if not callable(cell_fn):
      # Bug fix: exceptions do not support logging-style lazy "%s" args;
      # the message must be %-formatted explicitly.
      raise TypeError("cell_fn %s needs to be callable" % (cell_fn,))
    self._cell_fn = cell_fn
    # NOTE(review): assumes cell_fn is a functools.partial-like object
    # exposing `.func`; a plain function would need cell_fn.__name__.
    self._cell_name = cell_fn.func.__name__
    # Probe the function once to learn the state size.
    _, init_state = self._cell_fn(None, None)
    state_shape = init_state.get_shape()
    self._state_size = state_shape.with_rank(2)[1].value
    if self._state_size is None:
      # Bug fix: same lazy-args problem as above.
      raise ValueError("Initial state created by %s has invalid shape %s"
                       % (self._cell_name, state_shape))
  @property
  def state_size(self):
    return self._state_size
  def __call__(self, inputs, state, scope=None):
    """Run one step of cell_fn under the cell's (or given) scope."""
    scope = scope or self._cell_name
    output, state = self._cell_fn(inputs, state, scope=scope)
    return output, state
def linear(args, output_size, bias, bias_start=0.0, scope=None):
  """Apply a learned linear map to one or several 2D tensors.
  Computes sum_i(args[i] * W[i]) plus an optional bias, creating the
  weight matrix "Matrix" (and "Bias") inside the given variable scope.
  Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_start: starting value to initialize the bias; 0 by default.
    scope: VariableScope for the created subgraph; defaults to "Linear".
  Returns:
    A 2D Tensor with shape [batch x output_size].
  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (isinstance(args, (list, tuple)) and not args):
    raise ValueError("`args` must be specified")
  if not isinstance(args, (list, tuple)):
    args = [args]
  # Validate ranks and accumulate the total width of dimension 1.
  shapes = [a.get_shape().as_list() for a in args]
  total_arg_size = 0
  for shape in shapes:
    if len(shape) != 2:
      raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
    if not shape[1]:
      raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
    total_arg_size += shape[1]
  # Now the computation.
  with vs.variable_scope(scope or "Linear"):
    matrix = vs.get_variable("Matrix", [total_arg_size, output_size])
    stacked = args[0] if len(args) == 1 else array_ops.concat(1, args)
    res = math_ops.matmul(stacked, matrix)
    if not bias:
      return res
    bias_term = vs.get_variable(
        "Bias", [output_size],
        initializer=init_ops.constant_initializer(bias_start))
    return res + bias_term
| [
"henrik.holst@frostbite.com"
] | henrik.holst@frostbite.com |
6751b2b46e1d04b8b4096859890d818f7342a742 | 60cc5a46e6b48716ee734526cdde74a993921a88 | /pysignup/backends/memory.py | 279145f454fe6d85579ee27a41d12d74b4d8db08 | [] | no_license | pietvanzoen/signup | fc65e4a26301e7fd7dc28af328488464b733fff2 | 80c62cb1841b83f439b547add758ae9ccaddd00d | refs/heads/master | 2021-01-16T19:31:06.049713 | 2013-10-08T20:22:18 | 2013-10-08T20:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | from collections import OrderedDict
from . import base
class MemoryStore(type):
    """Metaclass that equips every class it creates with its own
    ``_store`` OrderedDict, used as an in-memory record table."""
    def __new__(cls, name, bases, namespace, **kwargs):
        new_class = type.__new__(cls, name, bases, namespace)
        new_class._store = OrderedDict()
        return new_class
class MemoryModelMixin(metaclass=MemoryStore):
    """Mixin backing model classes with the per-class in-memory store."""
    @classmethod
    def get(cls, id=None):
        """Return all stored instances, or the one stored under *id*.
        Raises:
            base.NotFound: if *id* is given but not in the store.
        """
        if id is None:
            return cls._store.values()
        elif id in cls._store:
            return cls._store[id]
        else:
            raise base.NotFound("{} not found".format(id))
    @classmethod
    def put(cls, *args, **kwargs):
        """Create an instance with a fresh sequential id and store it."""
        kwargs['id'] = len(cls._store) + 1
        instance = super().put(*args, **kwargs)
        # Bug fix: the original wrote ``cls._store[id] = instance``, keying
        # on the *builtin* ``id`` function instead of the assigned id, so
        # every instance was stored under the same bogus key and
        # ``get(some_id)`` could never find it.
        cls._store[kwargs['id']] = instance
        return instance
# Concrete models: pair each abstract base model with the in-memory store.
class Schedule(MemoryModelMixin, base.Schedule):
    pass
class ScheduleDate(MemoryModelMixin, base.ScheduleDate):
    pass
class ScheduleSignup(MemoryModelMixin, base.ScheduleSignup):
    pass
| [
"m@schmichael.com"
] | m@schmichael.com |
f44f7184c5e26e6031fc36a3813d8d0f6e48eb80 | 8e2474e41b37a54774610fa7519d6000d8fb01d8 | /application/migrations/0004_auto_20170417_2205.py | c3402c87417ffe72d634a2f5884ff0600c1a7637 | [] | no_license | dezdem0na/make-an-appointment | 446548b911b6f8960e6afee204a5be2f0a519329 | 1695f4544c668c84ba02b3723ff2925b74c2f8e3 | refs/heads/master | 2021-01-19T23:16:23.506988 | 2017-04-23T12:58:45 | 2017-04-23T12:58:45 | 88,956,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-17 19:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: caps the three patient name fields of the
    # ``application`` model at max_length=255; the middle name stays optional
    # (blank=True).
    dependencies = [
        ('application', '0003_auto_20170417_2204'),
    ]
    operations = [
        migrations.AlterField(
            model_name='application',
            name='patient_name_first',
            field=models.CharField(max_length=255, verbose_name='Имя'),
        ),
        migrations.AlterField(
            model_name='application',
            name='patient_name_last',
            field=models.CharField(max_length=255, verbose_name='Фамилия'),
        ),
        migrations.AlterField(
            model_name='application',
            name='patient_name_middle',
            field=models.CharField(blank=True, max_length=255, verbose_name='Отчество'),
        ),
    ]
| [
"natalie.reshetnikova@gmail.com"
] | natalie.reshetnikova@gmail.com |
88d6df365f47a253326935bb8fac400997ba4126 | 702339cb0a4d0a1f7f01705107d77a4950e6f91d | /Snakefile | be70350f3631faa16423080dcb37b1e6daa5c0c2 | [
"MIT"
] | permissive | hans-vg/snakemake_guppy_basecall | a2c2ad9f9da779f8ce7556fcdf0700f7db7be37c | 76c1a08e2c553a8976108397d292203a1796b81e | refs/heads/main | 2023-08-05T09:45:00.360836 | 2021-09-08T22:39:18 | 2021-09-08T22:39:18 | 400,228,888 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | import glob
configfile: "config.yaml"
inputdirectory=config["directory"]
# Discover every .fast5 file under the configured directory; each file name
# becomes one {sample} wildcard value.
SAMPLES, = glob_wildcards(inputdirectory+"/{sample}.fast5", followlinks=True)
print(SAMPLES)
# Restrict {sample} so it only matches fast5-style run names and does not
# greedily swallow path separators.
wildcard_constraints:
    sample="\w+\d+_\w+_\w+\d+_.+_\d"
##### target rules #####
# Final targets: one guppy sequencing summary per sample plus the MultiQC
# report (the QC rules producing it are currently commented out below).
rule all:
    input:
        expand("basecall/{sample}/sequencing_summary.txt", sample=SAMPLES),
        "qc/multiqc.html"
# Write a one-line list file naming the fast5 file for each sample, used as
# guppy's --input_file_list.
rule make_indvidual_samplefiles:
    input:
        inputdirectory+"/{sample}.fast5",
    output:
        "lists/{sample}.txt",
    shell:
        "basename {input} > {output}"
# GPU basecalling with guppy, one invocation per per-sample list file.
rule guppy_basecall_persample:
    input:
        directory=directory(inputdirectory),
        samplelist="lists/{sample}.txt",
    output:
        summary="basecall/{sample}/sequencing_summary.txt",
        directory=directory("basecall/{sample}/"),
    params:
        config["basealgo"]
    shell:
        "guppy_basecaller -i {input.directory} --input_file_list {input.samplelist} -s {output.directory} -c {params} --trim_barcodes --compress_fastq -x \"auto\" --gpu_runners_per_device 3 --num_callers 2 --chunks_per_runner 200"
#def aggregate_input(wildcards):
# checkpoint_output = checkpoints.guppy_basecall_persample.get(**wildcards).output[1]
# print(checkpoint_output)
# exparr = expand("basecall/{sample}/pass/{runid}.fastq.gz", sample=wildcards.sample, runid=glob_wildcards(os.path.join(checkpoint_output, "pass/", "{runid}.fastq.gz")).runid)
# print(exparr)
# return exparr
#
##SAMPLES, RUNIDS, = glob_wildcards("basecall/{sample}/pass/{runid}.fastq.gz", followlinks=True)
##print(RUNIDS)
##print(SAMPLES)
#
#
#rule fastqc_pretrim:
# input:
# aggregate_input
# output:
# html="qc/fastqc_pretrim/{sample}.html",
# zip="qc/fastqc_pretrim/{sample}_fastqc.zip" # the suffix _fastqc.zip is necessary for multiqc to find the file. If not using multiqc, you are free to choose an arbitrary filename
# params: ""
# log:
# "logs/fastqc_pretrim/{sample}.log"
# threads: 1
# wrapper:
# "0.77.0/bio/fastqc"
#
#rule multiqc:
# input:
# #expand("basecall/{sample}.fastq.gz", sample=SAMPLES)
# #expand("qc/fastqc_pretrim/{sample}_fastqc.zip", sample=SAMPLES)
# expand(rules.fastqc_pretrim.output.zip, sample=SAMPLES)
# output:
# "qc/multiqc.html"
# params:
# "" # Optional: extra parameters for multiqc.
# log:
# "logs/multiqc.log"
# wrapper:
# "0.77.0/bio/multiqc"
##rule fastqc_pretrim:
## input:
## "basecall/{sample}/{failpass}/{runid}.fastq.gz",
## output:
## html="qc/fastqc_pretrim/{sample}_{failpass}_{runid}.html",
## zip="qc/fastqc_pretrim/{sample}_{failpass}_{runid}_fastqc.zip" # the suffix _fastqc.zip is necessary for multiqc to find the file. If not using multiqc, you are free to choose an arbitrary filename
## params: ""
## log:
## "logs/fastqc_pretrim/{sample}_{failpass}_{runid}.log"
## #resources: time_min=320, mem_mb=8000, cpus=1
## threads: 1
## wrapper:
## "v0.75.0/bio/fastqc"
| [
"hvasquezgross@unr.edu"
] | hvasquezgross@unr.edu | |
9f59f49f87adf975eba07fe96fc5a30a73485cc8 | fc678a0a5ede80f593a29ea8f43911236ed1b862 | /77-Combinations.py | 91ae53a9008ceabe92c1973817d59e67f8a8c2c3 | [] | no_license | dq-code/leetcode | 4be0b1b154f8467aa0c07e08b5e0b6bd93863e62 | 14dcf9029486283b5e4685d95ebfe9979ade03c3 | refs/heads/master | 2020-12-13T15:57:30.171516 | 2017-11-07T17:43:19 | 2017-11-07T17:43:19 | 35,846,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
def helper(comb, start, k):
if k == 0:
res.append(comb)
return
for i in range(start, n + 1):
if n - start + 1 >= k:
helper(comb + [i], i + 1, k - 1)
res = []
helper([], 1, k)
return res
| [
"qiandeng@Qians-iMac.local"
] | qiandeng@Qians-iMac.local |
624bff574da212d7d85f10e9d8fb96838f062fbc | c638ed4efd02028c97c0d6fe84200c4e4484fdba | /CCC/triangleTimes.py | 11a228073e63568a153a405bd609ebddd0c4ea4c | [] | no_license | oliver-ode/Algorithmic-Coding | 3c08f783e2f8d07040b81610a8e175383716eaf3 | dc4d4bfb538e04789ed731236e1a5a39978cad0a | refs/heads/master | 2022-12-07T02:35:56.629521 | 2020-08-31T03:18:30 | 2020-08-31T03:18:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | angles = []
# Read the three angles, one per line.
for i in range(3):
    angles.append(int(input()))
# Classify the triangle from its angles.
# Bug fix: the original mixed `and`/`or` without parentheses, so any two
# equal angles printed "Isosceles" even when the angles did not sum to 180.
# Checking the sum first makes every later branch safe.
if sum(angles) != 180:
    print("Error")
elif angles[0] == angles[1] == angles[2]:
    print("Equilateral")
elif (angles[0] == angles[1] or angles[1] == angles[2]
      or angles[0] == angles[2]):
    print("Isosceles")
else:
    print("Scalene")
"oliverode@gmail.com"
] | oliverode@gmail.com |
0e9d83f11f89926b5b0ac3d2b057dcf7f4fed023 | 4817b6eea29d9d952ef91899efee23becd741757 | /ryu/app/network_awareness3/shortest_forwarding.py | b60beb9a8db1ce446fdc203f2b65f082d326ae88 | [
"Apache-2.0"
] | permissive | zspcchinu/Optimal-routing-using-RL-with-SDN | 153eec7aae93e852278e02e4bac39a6fa9be4a8a | b7993c7f328ab6bafdcb6dab56f4ecd83ef0fe14 | refs/heads/master | 2022-06-17T21:26:41.293024 | 2020-05-17T03:42:16 | 2020-05-17T03:42:16 | 264,542,516 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 14,357 | py | from __future__ import absolute_import
# Copyright (C) 2016 Li Cheng at Beijing University of Posts
# and Telecommunications. www.muzixing.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# conding=utf-8
from builtins import range
import logging
import struct
import networkx as nx
from operator import attrgetter
from ryu import cfg
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import arp
from ryu.lib import hub
import numpy as np
import random
from ryu.topology import event, switches
from ryu.topology.api import get_switch, get_link
from . import network_awareness
from . import network_monitor
from . import network_delay_detector
from . import setting
from ryu.app import simple_switch_13
from ryu.app.rl_module.network_RLModule import network_RLModule
CONF = cfg.CONF
class ShortestForwarding(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
"network_awareness": network_awareness.NetworkAwareness,
"network_monitor": network_monitor.NetworkMonitor,
"network_delay_detector": network_delay_detector.NetworkDelayDetector}
WEIGHT_MODEL = {'hop': 'weight', 'delay': "delay", "bw": "bw"}
def __init__(self, *args, **kwargs):
super(ShortestForwarding, self).__init__(*args, **kwargs)
self.name = 'shortest_forwarding'
self.awareness = kwargs["network_awareness"]
self.monitor = kwargs["network_monitor"]
self.delay_detector = kwargs["network_delay_detector"]
self.mac_to_port = {}
self.datapaths = {}
self.rl = None
self.weight = self.WEIGHT_MODEL[CONF.weight]
self.rl_start_thread = hub.spawn(self._start_rl)
def _start_rl(self):
self.rl = network_RLModule(self.awareness)
def set_weight_mode(self, weight):
self.weight = weight
if self.weight == self.WEIGHT_MODEL['hop']:
self.awareness.get_shortest_paths(weight=self.weight)
return True
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def add_flow(self, dp, p, match, actions, idle_timeout=0, hard_timeout=0):
ofproto = dp.ofproto
parser = dp.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=dp, priority=p,
idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
match=match, instructions=inst)
dp.send_msg(mod)
def send_flow_mod(self, datapath, flow_info, src_port, dst_port):
parser = datapath.ofproto_parser
actions = []
actions.append(parser.OFPActionOutput(dst_port))
match = parser.OFPMatch(
in_port=src_port, eth_type=flow_info[0],
ipv4_src=flow_info[1], ipv4_dst=flow_info[2])
self.add_flow(datapath, 1, match, actions,
idle_timeout=15, hard_timeout=60)
def _build_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
actions = []
if dst_port:
actions.append(datapath.ofproto_parser.OFPActionOutput(dst_port))
msg_data = None
if buffer_id == datapath.ofproto.OFP_NO_BUFFER:
if data is None:
return None
msg_data = data
out = datapath.ofproto_parser.OFPPacketOut(
datapath=datapath, buffer_id=buffer_id,
data=msg_data, in_port=src_port, actions=actions)
return out
def send_packet_out(self, datapath, buffer_id, src_port, dst_port, data):
out = self._build_packet_out(datapath, buffer_id,
src_port, dst_port, data)
if out:
datapath.send_msg(out)
def get_port(self, dst_ip, access_table):
# access_table: {(sw,port) :(ip, mac)}
if access_table:
if isinstance(list(access_table.values())[0], tuple):
for key in list(access_table.keys()):
if dst_ip == access_table[key][0]:
dst_port = key[1]
return dst_port
return None
def get_link_to_port(self, link_to_port, src_dpid, dst_dpid):
if (src_dpid, dst_dpid) in link_to_port:
return link_to_port[(src_dpid, dst_dpid)]
else:
self.logger.info("dpid:%s->dpid:%s is not in links" % (
src_dpid, dst_dpid))
return None
def flood(self, msg):
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
for dpid in self.awareness.access_ports:
for port in self.awareness.access_ports[dpid]:
if (dpid, port) not in list(self.awareness.access_table.keys()):
datapath = self.datapaths[dpid]
out = self._build_packet_out(
datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER, port, msg.data)
datapath.send_msg(out)
self.logger.debug("Flooding msg")
def arp_forwarding(self, msg, src_ip, dst_ip):
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
result = self.awareness.get_host_location(dst_ip)
if result: # host record in access table.
datapath_dst, out_port = result[0], result[1]
datapath = self.datapaths[datapath_dst]
out = self._build_packet_out(datapath, ofproto.OFP_NO_BUFFER,
ofproto.OFPP_CONTROLLER,
out_port, msg.data)
datapath.send_msg(out)
self.logger.debug("Reply ARP to knew host")
else:
self.flood(msg)
def get_path(self, src, dst, weight):
shortest_paths = self.awareness.shortest_paths
graph = self.awareness.graph
return shortest_paths.get(src).get(dst)[0]
if weight == self.WEIGHT_MODEL['hop']:
return shortest_paths.get(src).get(dst)[0]
elif weight == self.WEIGHT_MODEL['delay']:
# If paths existed, return it, else figure it out and save it.
try:
paths = shortest_paths.get(src).get(dst)
return paths[0]
except:
paths = self.awareness.k_shortest_paths(graph, src, dst,
weight=weight)
shortest_paths.setdefault(src, {})
shortest_paths[src].setdefault(dst, paths)
return paths[0]
elif weight == self.WEIGHT_MODEL['bw']:
result = self.monitor.get_best_path_by_bw(graph, shortest_paths)
paths = result[1]
best_path = paths.get(src).get(dst)
return best_path
def get_sw(self, dpid, in_port, src, dst):
src_sw = dpid
dst_sw = None
src_location = self.awareness.get_host_location(src)
if in_port in self.awareness.access_ports[dpid]:
if (dpid, in_port) == src_location:
src_sw = src_location[0]
else:
return None
dst_location = self.awareness.get_host_location(dst)
if dst_location:
dst_sw = dst_location[0]
return src_sw, dst_sw
def install_flow(self, datapaths, link_to_port, access_table, path,
flow_info, buffer_id, data=None):
''' path=[dpid1, dpid2...]
flow_info=(eth_type, src_ip, dst_ip, in_port)
'''
if path is None or len(path) == 0:
self.logger.info("Path error!")
return
in_port = flow_info[3]
first_dp = datapaths[path[0]]
out_port = first_dp.ofproto.OFPP_LOCAL
back_info = (flow_info[0], flow_info[2], flow_info[1])
# inter_link
if len(path) > 2:
for i in range(1, len(path)-1):
port = self.get_link_to_port(link_to_port, path[i-1], path[i])
port_next = self.get_link_to_port(link_to_port,
path[i], path[i+1])
if port and port_next:
src_port, dst_port = port[1], port_next[0]
datapath = datapaths[path[i]]
self.send_flow_mod(datapath, flow_info, src_port, dst_port)
self.send_flow_mod(datapath, back_info, dst_port, src_port)
self.logger.debug("inter_link flow install")
if len(path) > 1:
# the last flow entry: tor -> host
port_pair = self.get_link_to_port(link_to_port, path[-2], path[-1])
if port_pair is None:
self.logger.info("Port is not found")
return
src_port = port_pair[1]
dst_port = self.get_port(flow_info[2], access_table)
if dst_port is None:
self.logger.info("Last port is not found.")
return
last_dp = datapaths[path[-1]]
self.send_flow_mod(last_dp, flow_info, src_port, dst_port)
self.send_flow_mod(last_dp, back_info, dst_port, src_port)
# the first flow entry
port_pair = self.get_link_to_port(link_to_port, path[0], path[1])
if port_pair is None:
self.logger.info("Port not found in first hop.")
return
out_port = port_pair[0]
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
self.send_flow_mod(first_dp, back_info, out_port, in_port)
self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
# src and dst on the same datapath
else:
out_port = self.get_port(flow_info[2], access_table)
if out_port is None:
self.logger.info("Out_port is None in same dp")
return
self.send_flow_mod(first_dp, flow_info, in_port, out_port)
self.send_flow_mod(first_dp, back_info, out_port, in_port)
self.send_packet_out(first_dp, buffer_id, in_port, out_port, data)
def shortest_forwarding(self, msg, eth_type, ip_src, ip_dst):
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
result = self.get_sw(datapath.id, in_port, ip_src, ip_dst)
#print("ip src adn ip dst:", ip_src, ip_dst)
if result:
src_sw, dst_sw = result[0], result[1]
if dst_sw:
path = []
if setting.ALGORITHM is setting.RL:
path = self.get_rl_path(src_sw, dst_sw, weight=self.weight)
self.logger.info("RL path is between switches %s to %s: is %s" % (src_sw, dst_sw,path))
elif setting.ALGORITHM is setting.SHORTEST_PATH:
path = self.get_path(src_sw, dst_sw, weight=self.weight)
self.logger.info("Ryu PATH %s<-->%s: %s" % (ip_src, ip_dst, path))
else:
self.logger.error("Unknown Algorithm for path calculation")
#print("RL path between src and dest is", path)
flow_info = (eth_type, ip_src, ip_dst, in_port)
self.install_flow(self.datapaths,
self.awareness.link_to_port,
self.awareness.access_table, path,
flow_info, msg.buffer_id, msg.data)
return
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
    '''
    In packet_in handler, we need to learn access_table by ARP.
    Therefore, the first packet from UNKOWN host MUST be ARP.
    '''
    msg = ev.msg
    datapath = msg.datapath
    in_port = msg.match['in_port']
    # Parse the raw frame once; get_protocol() returns None when the
    # requested layer is absent.
    pkt = packet.Packet(msg.data)
    arp_pkt = pkt.get_protocol(arp.arp)
    ip_pkt = pkt.get_protocol(ipv4.ipv4)
    if isinstance(arp_pkt, arp.arp):
        self.logger.debug("ARP processing")
        self.arp_forwarding(msg, arp_pkt.src_ip, arp_pkt.dst_ip)
        arp_src_ip = arp_pkt.src_ip
        arp_dst_ip = arp_pkt.dst_ip  # NOTE(review): assigned but unused
        mac = arp_pkt.src_mac
        # record the access info
        self.awareness.register_access_info(datapath.id, in_port, arp_src_ip, mac)
    if isinstance(ip_pkt, ipv4.ipv4):
        self.logger.debug("IPV4 processing")
        # Only forward when the frame carried a readable Ethernet header.
        if len(pkt.get_protocols(ethernet.ethernet)):
            eth_type = pkt.get_protocols(ethernet.ethernet)[0].ethertype
            self.shortest_forwarding(msg, eth_type, ip_pkt.src, ip_pkt.dst)
def get_rl_path(self, src, dst, weight):
    """Delegate path selection to the RL agent (`weight` is not used here)."""
    rl_agent = self.rl
    return rl_agent.rl_optimal_path(src, dst)
| [
"chithambaram.singaravelupoonkodi@sjsu.edu"
] | chithambaram.singaravelupoonkodi@sjsu.edu |
1ba71e9765761905566094b0343c991430cf2236 | 2bcbd13a1d91ada88ec490de767f7e4cb01d6232 | /reading file.py | b48a01dd7da758360e07d7f6b2906043fae49ec3 | [] | no_license | Naveen131298/hackertest | 406484933418489940ebb305700f16d8f39aa685 | 5b5a34ba6f779b564279248ce73f3a2a58e89f57 | refs/heads/master | 2020-12-05T22:30:35.424802 | 2020-10-11T18:16:02 | 2020-10-11T18:16:02 | 232,261,603 | 0 | 1 | null | 2020-10-11T18:16:03 | 2020-01-07T06:50:13 | Python | UTF-8 | Python | false | false | 215 | py | import os
import glob
def read_pdf(path):
    """Print the path of every PDF file found directly inside *path*."""
    pattern = path + '/*.pdf'
    for match in glob.glob(pattern):
        print(match)
# List the PDFs in the current working directory, then echo that directory.
pdf_location = os.getcwd()  # os.path.join() with a single argument is a no-op
read_pdf(pdf_location)
print(pdf_location)
| [
"naveenmurugan72@gmail.com"
] | naveenmurugan72@gmail.com |
077349d5fcd423d4c28bef9cf4945a2d36546d58 | 0b36cff195d540e96f50f87c2b984235d958905c | /input_file_with_xy.py | 112ea05be72c8c61be1a1e99aa0d8f2640760816 | [] | no_license | JeanLouis1606/premiers_scripts_python | a41adea7b5c6d01c3d7538e2e3039e034bd15f4b | c0b348f736990900ff5596e295a350265ecf1741 | refs/heads/master | 2020-08-02T07:03:57.755530 | 2019-09-27T14:10:07 | 2019-09-27T14:10:07 | 211,271,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | with open("/usr/share/dict/words") as inputfile:
for line in inputfile:
    # Keep only short dictionary words that start with "xy".
    # FIX: startswith() avoids the IndexError that `line[1]` raised when the
    # last line held a single character without a trailing newline.
    if len(line) < 15 and line.startswith('xy'):
        print(line.strip())
| [
"jeanlouisfuccellaro@MacBook-Pro-de-jl-2.local"
] | jeanlouisfuccellaro@MacBook-Pro-de-jl-2.local |
bb9b8448866a42aee485331c76d2d094853127b4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_231/ch22_2020_06_20_19_00_14_584797.py | bd79702d66dbec13e717be885a2a86143f73ec2b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | c= int(input('quantos cigarros vc fuma por dia?'))
anos = int(input('ha quantos anos?'))
# Days of life lost: 10 minutes per cigarette, 1440 minutes per day.
dias_perdidos = (10 * c * anos * 365) / 1440
print(dias_perdidos)
| [
"you@example.com"
] | you@example.com |
ffed5598e099bdd416d547810817ad878dfc91b7 | 1ad482ad03c0241cc39067c47d5b046dd8371fa9 | /core/migrations/0009_auto_20210319_2041.py | 7333b546d4720a4839edcd38dc03bc34a4667bec | [] | no_license | momentum-team-7/django-habit-tracker-drehan7 | aa421911434d0d548de232cb2f12f4ac11ddf509 | 471950de16de47fea0a020809e98d5f8abdaceb1 | refs/heads/main | 2023-03-31T11:33:00.586816 | 2021-03-27T18:55:27 | 2021-03-27T18:55:27 | 348,402,252 | 0 | 0 | null | 2021-03-22T15:49:06 | 2021-03-16T15:39:35 | JavaScript | UTF-8 | Python | false | false | 506 | py | # Generated by Django 3.1.7 on 2021-03-19 20:41
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: re-declare HabitLog.date with a fixed timezone-aware
    default and a unique constraint."""

    dependencies = [
        ('core', '0008_auto_20210319_2019'),
    ]

    operations = [
        migrations.AlterField(
            model_name='habitlog',
            name='date',
            # The default is the timestamp captured when the migration was
            # generated, not "now" at migration time.
            field=models.DateField(default=datetime.datetime(2021, 3, 19, 20, 41, 1, 263174, tzinfo=utc), unique=True),
        ),
    ]
| [
"d.rehan7@gmail.com"
] | d.rehan7@gmail.com |
04543567fbdf013acee68970b2130008939f9517 | 29ff0f91f49117d214f6f776b59a2769ad5b38d5 | /tests/test_adapters.py | e260ecfa2d316ea75354eb7e2626740e65f0aa78 | [
"MIT"
] | permissive | caioraposo/uceasy | 5f770595f99e847cff75a99f82cb78196857f981 | 921103f64c69c31aa6fc05fdf474c17a575a5525 | refs/heads/master | 2021-03-13T10:17:26.033997 | 2020-04-13T19:37:49 | 2020-04-13T19:37:49 | 246,669,260 | 0 | 0 | MIT | 2020-03-11T20:05:53 | 2020-03-11T20:05:53 | null | UTF-8 | Python | false | false | 494 | py | from uceasy.adapters import ADAPTERS
def test_illumiprocessor_help():
    """The illumiprocessor wrapper should surface the tool's usage banner."""
    stdout = ADAPTERS["illumiprocessor"](["--help"], capture_output=True)[0]
    assert "usage: illumiprocessor" in stdout
def test_trinity_help():
    """The trinity wrapper should surface the phyluce usage banner."""
    stdout = ADAPTERS["trinity"](["--help"], capture_output=True)[0]
    assert "usage: phyluce_assembly_assemblo_trinity" in stdout
def test_spades_help():
    """The spades wrapper should surface the phyluce usage banner."""
    stdout = ADAPTERS["spades"](["--help"], capture_output=True)[0]
    assert "usage: phyluce_assembly_assemblo_spades" in stdout
| [
"caioraposo@protonmail.com"
] | caioraposo@protonmail.com |
ed3cb703c2428ed63dfa7f758269be8f2bb0a7af | 885c1cab7931d010b6711af652c9a79e2f7490c2 | /MyDjangoProjects/mydjangosite/mydjangosite/wsgi.py | a5dfbab0db7f5cb9860a1848e33e3de5a5df1a1b | [] | no_license | shreyash14s/Manjo | 3f1c11f7234cd12783c5e60a05cbf3f5ae9ca21d | b0ea4e3ef31d7853a8288e06a84bf556c4908d63 | refs/heads/master | 2021-01-17T21:56:38.905412 | 2015-08-15T17:45:39 | 2015-08-15T17:45:39 | 40,665,170 | 0 | 1 | null | 2015-08-13T15:05:27 | 2015-08-13T15:05:27 | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for mydjangosite project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Select the settings module before the application object is created;
# setdefault() respects a value already exported in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mydjangosite.settings")

# WSGI entry point used by the application server (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"sanjay.india96@gmail.com"
] | sanjay.india96@gmail.com |
566fdde94b7a27a1ac308ac870b09e58209d60fc | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayMarketingCampaignDiscountBudgetAppendRequest.py | 3e1af80821fc15b93a0a4328c878c0180e7b136d | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 4,058 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDiscountBudgetAppendModel import AlipayMarketingCampaignDiscountBudgetAppendModel
class AlipayMarketingCampaignDiscountBudgetAppendRequest(object):
    """Request wrapper for the OpenAPI method
    ``alipay.marketing.campaign.discount.budget.append``.

    Carries the business payload (``biz_content``/``biz_model``) plus the
    common transport fields, and flattens everything with :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready-made model instance or a plain dict.
        if isinstance(value, AlipayMarketingCampaignDiscountBudgetAppendModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignDiscountBudgetAppendModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # BUG FIX: this getter returned self._notify_url (copy-paste error),
        # so a configured return_url could never be read back and get_params()
        # emitted the notify URL under the 'return_url' key.
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict assignments are silently ignored (preserved behaviour).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an arbitrary extra text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Flatten the request into the parameter dict sent to the gateway."""
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.discount.budget.append'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file parameters exist for this request type."""
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
360e98c144781868bcbc8c5a13d5f42085348077 | 066d05a826430a05fc1c333bd988ef702a8ed364 | /lesson3/ex4.py | 9835c43e2cca47438c789a51671c6be04f77085c | [] | no_license | se7entyse7en/learning-tensorflow | 4a5693ea49cd5a88add241301b565d672f28a318 | 1377739d54998c773e90b30dd57f3a407e19e245 | refs/heads/master | 2021-01-12T15:26:34.812952 | 2016-11-10T22:02:25 | 2016-11-10T22:02:25 | 71,783,274 | 1 | 1 | null | 2019-05-11T23:24:10 | 2016-10-24T11:45:52 | Jupyter Notebook | UTF-8 | Python | false | false | 853 | py | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
filename = 'MarshOrchid.jpg'

# Load the photo as a (height, width, depth) array and wrap it in a TF variable.
image = mpimg.imread(filename)
x = tf.Variable(image, name='x')

model = tf.initialize_all_variables()  # pre-1.0 TensorFlow API

with tf.Session() as session:
    session.run(model)
    shape = tf.shape(x)
    height, width, depth = session.run(shape)
    # Slice the left half
    left_sliced = tf.slice(x, [0, 0, 0], [height, int(width / 2), depth])
    # Mirror pixels along the vertical axis of the left half
    # NOTE(review): np.ones requires `import numpy as np`; the file only does
    # `import numpy`, so this line would raise NameError — confirm the import.
    left_mirrored_sliced = tf.reverse_sequence(
        left_sliced, np.ones(height) * int(width / 2), 1, batch_dim=0)
    # Paste the two slices to obtain the left half mirrored on the right half
    pasted = tf.concat(1, [left_sliced, left_mirrored_sliced])
    result = session.run(pasted)
    plt.imshow(result)
    plt.show()
| [
"loumarvincaraig@gmail.com"
] | loumarvincaraig@gmail.com |
c8327228eb9f84c7971a01151d5d026d74f669aa | 929d9121a74f3167e620bf4f2c9073f84e57d422 | /mysite/mainapp/forms.py | b7cb0e2cd653d5d80b50e5bea1012688afae1a98 | [] | no_license | zoriana-melnyk/HealthyEating | f7c91caa82830c112ca132fef6d172d867eb3868 | 4fea7ab1e7a5a8438c9d57288dd3c294a4a06bff | refs/heads/master | 2023-04-21T07:54:56.288872 | 2021-05-11T22:29:24 | 2021-05-11T22:29:24 | 333,217,246 | 0 | 0 | null | 2021-05-11T22:29:25 | 2021-01-26T21:05:50 | Python | UTF-8 | Python | false | false | 302 | py | from django.forms import ModelForm
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CreateUserForm(UserCreationForm):
    """Sign-up form: Django's stock UserCreationForm with an extra email
    field exposed alongside username and the two password fields."""

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
"48826458+zoriana-melnyk@users.noreply.github.com"
] | 48826458+zoriana-melnyk@users.noreply.github.com |
c39a87caae3620cf991bb70f79a8ef12cfc44fbe | 7fda36e97edc2fbcf8ad2fc01ea070f881e756d2 | /tuts100_exercise11.py | cb42f99394e09547e422659a6eb8794f6e8a0eee | [] | no_license | RashadGhzi/My-Repository | 6b386521cea3b40ce0be065ca53b2bd633b36197 | 91f5897bdfb869a0d30f49693b87ed1244488e3d | refs/heads/main | 2023-08-16T07:34:24.357667 | 2021-10-12T11:44:46 | 2021-10-12T11:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import re
# Pull every email-like token out of the sample text and append them,
# numbered, to harry_larry.txt.
# FIX: renamed the text variable from `str` (it shadowed the builtin).
text = '''Email: northamerica@tata.com
email = rashadarsh78@gmail.com
priya@yahoo.com
email = meeting @2pm
Website: www.northamerica.tata.com
shubhamg199630@gmail.com
harrygoodboy@gamil.com
Directions: View map fass
indian no. +91 5588-940010
harry bhai lekin
indian no. ++91 5588-000000'''

# Loose email pattern: a non-space run, '@', another non-space run.
email_pattern = re.compile(r'\S+@\S+')
emails = email_pattern.findall(text)
print(emails)

# FIX: open the output file once instead of re-opening it for every match.
with open('harry_larry.txt', 'a') as file:
    for i, email in enumerate(emails, start=1):
        file.write(f'Email_{i}:{email}\n\n')
"rashadarsh0@gmail.com"
] | rashadarsh0@gmail.com |
8c093d5bdd4b85a2f36c0adbc7b3a65e995fff87 | ecf96ce18d8c1bfc20b667f2bd2050dbf4286fb7 | /weights.py | 0729d7f47c0b1130a2ba5b4be4d84b925acf9bcb | [] | no_license | wiktoraleksanderkaczor/PyNN | 900ab16df0dedec9591193c6527e595c47d36bf0 | 9cd594be39f6e62ab095595cdf956282b2bf88fc | refs/heads/master | 2021-08-19T11:39:11.097391 | 2021-06-11T01:02:20 | 2021-06-11T01:02:20 | 158,038,948 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | # Module: weights.py
# Definition of some useful weight initialization functions.
import numpy as np
np.random.seed(123)
def random_init(num_prev_neurons, precision):
    """
    Draw uniform-[0, 1) weights for one neuron, one weight per incoming
    connection from the previous layer.

    Args:
        num_prev_neurons (int): Number of neurons in the previous layer.
        precision (numpy.dtype): dtype the weights are cast to.

    Returns:
        numpy.array: 1-D array of `num_prev_neurons` random weights.
    """
    uniform_draws = np.random.rand(num_prev_neurons)
    return uniform_draws.astype(precision)
def xavier_init(num_prev_neurons, precision):
    """
    Initialize weights with the Xavier/Glorot uniform scheme.

    Weights are drawn uniformly from [-1/sqrt(n), 1/sqrt(n)), where n is the
    fan-in (number of neurons in the previous layer).

    Args:
        num_prev_neurons (int): Number of neurons in the previous layer.
        precision (numpy.dtype): dtype the weights are cast to.

    Returns:
        numpy.array: 1-D array of `num_prev_neurons` Xavier-initialized weights.
    """
    bound = 1.0 / np.sqrt(num_prev_neurons)
    # BUG FIX: the lower/upper bounds were computed but never applied, so the
    # function silently returned plain uniform-[0, 1) weights. Scale the draw
    # into [-bound, bound) as the docstring promises.
    weights = (np.random.rand(num_prev_neurons) * 2.0 - 1.0) * bound
    return weights.astype(precision)
| [
"wiktoraleksanderkaczor@gmail.com"
] | wiktoraleksanderkaczor@gmail.com |
47a5f7dac1c21ccd1fd6d4200b4a2068c776886a | a28946f264ebb2648b6e31f709d5bdf845bf0b50 | /lyrics.py | dd21c6cbdc825e9feddcc055895f4296128f90e2 | [
"MIT"
] | permissive | py-paulo/lyrics | e6be77132a353000b939941ea751c19534b0068e | 9bc1596fa9acc423aedace747ddb2e2339258331 | refs/heads/master | 2023-04-07T10:21:22.899483 | 2021-04-08T10:39:39 | 2021-04-08T10:39:39 | 354,976,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import urllib.request
from bs4 import BeautifulSoup
uri_base = 'http://www.google.com/search?q='
artist = 'Milton Nascimento'
music = 'Clube da Esquina n 2'

# Build the Google query "<artist> <music> letra" ("letra" = lyrics) in URL form.
query_quote = ('%s %s letra' % (artist, music)).replace(' ', '+')

# Browser-like headers so Google serves the regular HTML results page.
req = urllib.request.Request(uri_base+query_quote, headers={
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,applica'
              'tion/signed-exchange;v=b3;q=0.9',
    'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',
    'referer': 'https://www.google.com/',
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/'
                  '537.36'
})

# Live fetch kept for reference; the script currently parses a saved page.
# with urllib.request.urlopen(req) as response:
#     html = response.read().decode('utf-8', errors='ignore')
#     with open('html.music.example.html', 'w') as fp:
#         fp.write(html)

with open('html.music.example.html') as fp:
    soup = BeautifulSoup(fp, 'html.parser')
    # Google marks the lyrics container with a data-lyricid attribute.
    for sub_soup in soup.find_all('div'):
        if 'data-lyricid' in sub_soup.attrs:
            for index, div in enumerate(sub_soup):
                next_div = div.find_next()
                spans = next_div.find_all('span')
                for span in spans:
                    print(span.text)
                # NOTE(review): indentation reconstructed — `break` assumed to
                # stop after the first child div of the lyrics container.
                break
| [
"paulo.rb.beserra@gmail.com"
] | paulo.rb.beserra@gmail.com |
f2a5384d6168682cc2ed2c5a6b873ece4666dcf3 | 417c3cceaa1e010c7747db4e9ea4ffabfff92732 | /learning_user/basic_app/views.py | dcdc631b7e9fae41fb151a10f7f520363b17169b | [] | no_license | ibukun-brain/django-deployment-example | ef54c2a26153026c68986fc41d53725fdb52743a | b984426f7108a40f15ba5cc3dbdd86f3b93fa298 | refs/heads/master | 2023-05-31T21:21:55.624611 | 2021-07-04T05:29:44 | 2021-07-04T05:29:44 | 382,760,477 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | from django.shortcuts import render
from basic_app.forms import UserInfoForm, UserProfileInfoForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
# Create your views here.
def base(request):
    """Render the shared base template."""
    return render(request, 'basic_app/base.html')
@login_required
def special(request):
    """Login-only smoke-test view."""
    return HttpResponse("You're logged in, Nice!")
@login_required
def user_logout(request):
    """Log the current user out, then redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse('index'))
def index(request):
    """Render the landing page."""
    return render(request, 'basic_app/index.html')
def registration(request):
    """Handle sign-up: on POST validate both forms, create the User plus its
    linked profile (optionally with an uploaded picture); on GET show blank
    forms. `registered` tells the template whether sign-up succeeded."""
    registered = False
    if request.method == 'POST':
        user_form = UserInfoForm(request.POST)
        profile_form = UserProfileInfoForm(request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Hash the raw password before persisting the user.
            user.set_password(user.password)
            user.save()
            # Defer the profile save until its FK to the new user is set.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    else:
        user_form = UserInfoForm()
        profile_form = UserProfileInfoForm()
    return render(request, 'basic_app/registration.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered})
def user_login(request):
    """Authenticate a user from posted credentials.

    GET renders the login form; POST checks the credentials and redirects
    to the index page on success.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse('index'))
            else:
                return HttpResponse('ACCOUNT NOT ACTIVE')
        else:
            print("Someone tried to login and failed!")
            # SECURITY FIX: never write the plaintext password to the logs.
            print('Username: {}'.format(username))
            return HttpResponse('Invalid login details supplied!')
    else:
        return render(request, 'basic_app/login.html')
| [
"ibukunolaifa1984@gmail.com"
] | ibukunolaifa1984@gmail.com |
97a1c88e30cf7f7e198ab1dfadc80c3db447a9ba | 1a324f9aefc9cc6f858effea02501f0885611c28 | /search_engine_demo/googlesearch/urls.py | 5b1087f1554c44a6d214a1fd5274cc1b42dc6ba4 | [] | no_license | AmamiRena/py | 72c55180209b1c18a5484fa37f4b4d6abac746f9 | 0f9f3b05fed09b2fff329a103426dde718f798cf | refs/heads/master | 2021-10-25T17:30:19.419609 | 2021-10-22T01:26:08 | 2021-10-22T01:26:08 | 143,627,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from django.urls import path
from .views import *
# URL routes for the googlesearch app.
urlpatterns = [
    path('', index),  # landing page
    path('search', SearchView.as_view(), name="google-search-view")
] | [
"mayuzumi159@gmail.com"
] | mayuzumi159@gmail.com |
22eb63305890280ff00427e395dc7ee12f3f314c | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4330/codes/1594_1800.py | 0302eb5caf63f16066aa6406b53455d42458aa87 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | a=int(input("Insira o valor de A"))
b = int(input("Insira o valor de B"))
c = int(input("Insira o valor de C"))
# `a` is read earlier in the script; combine the three values.
soma_quadrados = (a ** 2) + (b ** 2) + (c ** 2)
soma = a + b + c
print(round(soma_quadrados / soma, 7))
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
c9580567614da5bed9f9c744137f3d463eb77515 | dac7d0abff54dbeb9e6587f17866a34b5e7f3948 | /Cobbity/compare.py | ec3b6cf07d175832a7fb04e914de1c0c894bf84c | [] | no_license | KipCrossing/EMI_Field | 5665aba5ff5fbf4a4d42fc9b3efc9aa3b3f51eea | e52142648388a25d26f682986c586cd1827e31e0 | refs/heads/master | 2020-05-22T12:37:42.892290 | 2019-09-12T01:27:24 | 2019-09-12T01:27:24 | 186,342,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | import pandas as pd
# Compare the smoothed OpenEM and DUALEM grids and export a side-by-side CSV.
# Input files are whitespace-separated X/Y/value columns; -9999 marks no-data.
df_OpenEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_OpenEM.xyz", header=None, delimiter=r"\s+")
df_DUALEM = pd.read_csv("~/Cobbity/Output/Smooth_C2_DUALEM.xyz", header=None, delimiter=r"\s+")
print(df_OpenEM.head())
print(df_DUALEM.head())


def _drop_nodata(df):
    """Split a grid into (lon, lat, reading) lists, skipping -9999 rows."""
    # FIX: the column .tolist() conversions used to run inside the loop
    # (O(n^2)), and the row index was kept in a variable named `sum`,
    # shadowing the builtin. Hoist the conversions and use enumerate.
    lon_col = df[0].tolist()
    lat_col = df[1].tolist()
    lons, lats, readings = [], [], []
    for row_idx, reading in enumerate(df[2].tolist()):
        if reading > -9999:
            readings.append(reading)
            lons.append(lon_col[row_idx])
            lats.append(lat_col[row_idx])
    return lons, lats, readings


New_OpenEM_lon, New_OpenEM_lat, New_OpenEM_readings = _drop_nodata(df_OpenEM)
print(len(New_OpenEM_lon), len(New_OpenEM_lat), len(New_OpenEM_readings))

New_DUALEM_lon, New_DUALEM_lat, New_DUALEM_readings = _drop_nodata(df_DUALEM)
print(len(New_DUALEM_lon), len(New_DUALEM_lat), len(New_DUALEM_readings))

data = {"DUALEM": New_DUALEM_readings, "OpenEM": New_OpenEM_readings,
        "X1": New_DUALEM_lon, "X2": New_OpenEM_lon,
        "Y1": New_DUALEM_lat, "Y2": New_OpenEM_lat}
df_out = pd.DataFrame(data, columns=["DUALEM", "OpenEM", "X1", "X2", "Y1", "Y2"])
df_out.to_csv("~/Cobbity/Output/compare_Smooth_DUALEM_OpenEm.csv")

# Report row indices where both instruments were logged at the same coordinate.
for count in range(len(New_DUALEM_lon)):
    if (New_DUALEM_lon[count] == New_OpenEM_lon[count]
            and New_DUALEM_lat[count] == New_OpenEM_lat[count]):
        print(count)
| [
"kip.crossing@gmail.com"
] | kip.crossing@gmail.com |
0efac6e3d7417f91494c5e8208d5faffb89d643d | 5edf72a6e86f35fb00a34670a6f9ca61d96c2db8 | /kmeans.py | 11b764d161a4957b8d65b733c9e8080f2df3de7d | [] | no_license | isabellekazarian/kmeans-algorithm | b94008ed80ec198f2d8cb7def960025dfdfad53e | 7378cdf56f1c51c953c34f1e11c76499850d07c1 | refs/heads/master | 2023-03-11T19:37:57.830724 | 2021-02-08T18:39:13 | 2021-02-08T18:39:13 | 337,162,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py |
import matplotlib.pyplot as plt
import numpy
import csv
import random
from collections import defaultdict
DATA_FILE = './dataBoth.csv'
NUM_CLUSTERS = 4
NUM_ITERATIONS = 4
# Returns the Euclidean distance between two data points.
def distance(x1, y1, x2, y2):
    dx = x2 - x1
    dy = y2 - y1
    return (dx * dx + dy * dy) ** 0.5
# Returns the data file parsed into a 2D list of
# [country, birth_rate, life_expectancy] rows (header skipped).
def readCsv(file_name):
    rows = []
    with open(file_name) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        next(reader)  # skip the header line
        for row in reader:
            rows.append([row[0], float(row[1]), float(row[2])])
    return rows
# Returns the closest centroid to `point` together with the squared distance.
# centroids: list of [x, y] pairs; point: [country, birth_rate, life_expectancy].
def getClosestCentroid(centroids, point):
    """Return (closest_centroid, squared_distance) for the given data point."""
    px, py = point[1], point[2]

    def _squared_distance(centroid):
        # Squared Euclidean distance; skips the needless sqrt-then-square.
        dx = centroid[0] - px
        dy = centroid[1] - py
        return dx * dx + dy * dy

    # BUG FIX: removed the dead `if point in centroids: return point` branch —
    # a 3-element data row can never equal a 2-element centroid, and the bare
    # `return point` broke the (centroid, distance) tuple contract callers unpack.
    closest = min(centroids, key=_squared_distance)
    return closest, _squared_distance(closest)
# Returns a randomly generated list of centroids.
def initializeCentroids(data):
    """Seed the centroids from NUM_CLUSTERS randomly sampled data rows."""
    chosen_rows = random.sample(list(data), NUM_CLUSTERS)
    # Keep only the (birth_rate, life_expectancy) coordinates of each sample.
    return [[row[1], row[2]] for row in chosen_rows]
# Returns the per-cluster mean positions as the new centroids.
def calculateNewCentroidsFromMean(clusters):
    """Compute [mean_x, mean_y] over the member points of every cluster."""
    new_centroids = []
    for cluster in clusters:
        member_count = len(cluster)
        mean_x = sum(point[1] for point in cluster) / member_count
        mean_y = sum(point[2] for point in cluster) / member_count
        new_centroids.append([mean_x, mean_y])
    return new_centroids
# scatter plot of all clusters
def plotClusters(clusters, centroids):
    """Scatter-plot every point colour-coded by its cluster, plus the
    centroids in black. plt.show() blocks until the window is closed."""
    points = [[] for i in range(0, NUM_CLUSTERS)]  # NOTE(review): built but never used
    colors = ["Blue", "Green", "Pink", "Red", "Orange", "Purple", "Gray"]
    # plot each point by cluster
    for cluster in range(0, len(clusters)):
        for point in clusters[cluster]:
            plt.scatter(point[1], point[2], c = colors[cluster])
    # Plot centroids
    centroids_x, centroids_y = zip(*centroids)
    plt.scatter(centroids_x, centroids_y, s=80, c='black')
    plt.show()
# read data file
data = readCsv(DATA_FILE)

# initialize
centroids = initializeCentroids(data)
clusters = [[] for x in range(NUM_CLUSTERS)]

# run iterations of Lloyd's algorithm: assign points, plot, report, update.
for i in range(0, NUM_ITERATIONS):
    # initialize: clusters are rebuilt from scratch each iteration
    clusters = [[] for x in range(NUM_CLUSTERS)]
    dist_sum = 0
    # for each point find closest centroid and add to cluster
    for point in data:
        # get closest centroid index
        closest_centroid, dist_sq = getClosestCentroid(centroids, point)
        dist_sum += dist_sq
        # add point to the cluster for corresponding centroid
        closest_centroid_index = centroids.index(closest_centroid)
        clusters[closest_centroid_index].append(point)
    # visualize clusters
    plotClusters(clusters, centroids)
    # print distance sum (within-cluster sum of squares for this iteration)
    print("Sum of distances: " + str(dist_sum))
    # get new centroids
    centroids = calculateNewCentroidsFromMean(clusters)

# print results ---------------------------------
for cluster in range(0, len(clusters)):
    countries = []
    num_points = 0
    sum_life_expectancy = 0
    sum_birth_rate = 0
    for point in clusters[cluster]:
        num_points += 1
        sum_birth_rate += point[1]
        sum_life_expectancy += point[2]
        countries.append(point[0])
    print()
    print("Cluster: " + str(cluster))
    print("Mean life expectancy: " + str(sum_life_expectancy / num_points))
    print("Mean birth rate: " + str(sum_birth_rate / num_points))
    print("Number of countries: " + str(num_points))
    print()
    print("Countries:")
    for country in countries:
        print(country)
| [
"rosekaz13@gmail.com"
] | rosekaz13@gmail.com |
d839e4467adb97c603f1bbf720207d83942d87d2 | 46267e38d63bb487ccef4612593676412ea956d7 | /astraeus/core.py | 268d58bf9ad346c038f6b1a1989ccc7a00c0339b | [
"MIT"
] | permissive | eos-sns/astraeus | 17f63fc02e27b8b40b8470fb8202b9bb4b50e3d6 | bbbe820bdc02d7c0209854b80b1f952bfaaf984a | refs/heads/master | 2020-04-25T12:56:35.666259 | 2019-09-18T12:15:04 | 2019-09-18T12:15:04 | 172,793,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,379 | py | # -*- coding: utf-8 -*-
import abc
import datetime
import uuid
from astraeus.models.memcache import MemcacheClientBuilder, MemcacheFacade
from astraeus.models.mongodb import MongoDBBuilder
class Hasher:
    """ Something that hashes something """

    # Interface-style base class. Note it does not inherit abc.ABC, so the
    # abstractmethod decorator is not enforced at instantiation time.
    @abc.abstractmethod
    def hash_key(self, key):
        return 0
class UUIDHasher(Hasher):
    """ Hashing based on UUID4 """

    def hash_key(self, key=None):
        """Ignore `key` and return a fresh UUID4 as 32 hex characters."""
        raw = str(uuid.uuid4())
        return raw.replace('-', '')
class Astraeus(object):
    """ Saves in-memory data about stuff """

    MEMCACHE_PORT = 11211  # default memcache port
    EXPIRE_SECONDS = ((60 * 60) * 24) * 14  # 14 days

    def __init__(self,
                 port=MEMCACHE_PORT,
                 expire_seconds=EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        """
        :param port: port where memcache runs
        :param expire_seconds: values in memcache will be null after that
        :param hash_function: function to compute hash of key
        """
        memcache_client = (MemcacheClientBuilder()
                           .with_server('localhost')
                           .with_port(port)
                           .build())
        self.memcache = MemcacheFacade(memcache_client, expire_seconds)
        self.hasher = hash_function  # function used to derive keys

    def _get_key(self, val):
        # Key is the hash of the value's string form.
        return self.hasher(str(val))

    def save(self, val):
        """Store `val`; return its memcache key, or None if the set failed."""
        assert val is not None
        key = self._get_key(val)
        return key if self.memcache.set(key, val) else None

    def retrieve(self, key):
        """Fetch the value stored under `key`."""
        assert key is not None
        return self.memcache.get(key)
class MongoAstraeus(Astraeus):
    """Astraeus variant that mirrors every write into MongoDB for redundancy
    and falls back to MongoDB on memcache misses."""

    MONGO_DB = 'astraeus'  # todo move to config

    def _get_parent(self):
        # FIX: name the class explicitly; super(self.__class__, ...) would
        # recurse forever if this class were ever subclassed.
        return super(MongoAstraeus, self)

    def __init__(self,
                 mongo_collection,
                 mongo_db=MONGO_DB,
                 port=Astraeus.MEMCACHE_PORT,
                 expire_seconds=Astraeus.EXPIRE_SECONDS,
                 hash_function=UUIDHasher().hash_key):
        super(MongoAstraeus, self).__init__(port, expire_seconds, hash_function)
        mongo = MongoDBBuilder() \
            .with_db(mongo_db) \
            .build()
        self.mongo = mongo[mongo_collection]  # specify collection

    def _try_save_to_memcache(self, val):
        """Best-effort memcache write; returns the key or None."""
        try:
            return self._get_parent().save(val)
        except Exception:  # FIX: bare except also swallowed KeyboardInterrupt
            print('Cannot save {} to memcache'.format(val))
            return None

    def _try_save_to_mongodb(self, memcache_key, val):
        """Best-effort MongoDB write; reuses the memcache key when available."""
        if not memcache_key:
            memcache_key = self._get_key(val)
        try:
            item = self.build_mongo_item(memcache_key, val)
            self.mongo.insert_one(item)
            return memcache_key
        except Exception:
            print('Cannot save {} to mongodb'.format(val))
            return None

    def save(self, val):
        key = self._try_save_to_memcache(val)      # first save to memcache ...
        key = self._try_save_to_mongodb(key, val)  # ... then in mongo
        return key

    def _try_retrieve_from_memcache(self, key):
        try:
            return self._get_parent().retrieve(key)
        except Exception:
            print('Cannot retrieve {} from memcache'.format(key))
            return None

    def _try_retrieve_from_mongodb(self, key):
        """Return the most recently written value for `key`, or None."""
        try:
            results = self.mongo.find({'key': key})
            if results:
                most_recent = max(results, key=lambda x: x['time'])  # sort by date
                return most_recent['val']  # DO NOT check expiration: redundant DB
        except Exception:
            print('Cannot retrieve {} from mongodb'.format(key))
        return None

    def retrieve(self, key):
        val = self._try_retrieve_from_memcache(key)  # first try memcache ...
        # FIX: treat only None as a miss; falsy-but-valid values (0, '', [])
        # previously triggered a pointless (and possibly wrong) mongo lookup.
        if val is None:
            return self._try_retrieve_from_mongodb(key)  # ... then mongo
        return val

    @staticmethod
    def build_mongo_item(key, val):
        """Shape a (key, val) pair into the document stored in MongoDB."""
        time_now = datetime.datetime.now()
        return {
            'key': key,
            'val': val,
            'time': time_now
        }
| [
"sirfoga@protonmail.com"
] | sirfoga@protonmail.com |
8ffe7365488fff3d284791da93f2ec10bd6e22b7 | e45060b2fb6c5911249f930703db06af74292a14 | /src/misc.py | 2ef953d46b21e269cfe78df97b2fb7c04182704b | [
"MIT"
] | permissive | Guigouu/clonesquad-ec2-pet-autoscaler | a6dd350acaa559fba6caf209c439579a5c7ab15a | 33eee544a5c208caf548869c2e714d9ebc7f0be6 | refs/heads/master | 2023-01-07T01:27:33.725418 | 2020-11-05T21:28:02 | 2020-11-05T21:28:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,784 | py | import os
import sys
import re
import hashlib
import json
import math
import gzip
# Hack: Force gzip to have a deterministic output (See https://stackoverflow.com/questions/264224/setting-the-gzip-timestamp-from-python/264303#264303)
class GzipFakeTime:
    """Minimal stand-in for the `time` module: time() is constant, so the
    mtime field gzip writes into its header never changes."""
    def time(self):
        return 1.1

gzip.time = GzipFakeTime()  # gzip reads time.time() through this attribute
import base64
import boto3
from datetime import datetime
from datetime import timezone
from datetime import timedelta
import requests
from requests_file import FileAdapter
from collections import defaultdict
import pdb
import debug as Dbg
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch_all
patch_all()
def is_sam_local():
    """True when running under SAM CLI local emulation (AWS_SAM_LOCAL=true)."""
    return os.environ.get("AWS_SAM_LOCAL") == "true"
import cslog
log = cslog.logger(__name__)
def is_direct_launch():
    """True when the process received at least one command-line argument."""
    return len(sys.argv) >= 2
def utc_now():
    """Current time as a timezone-aware UTC datetime."""
    return datetime.now(timezone.utc)
def epoch():
    """The UNIX epoch (1970-01-01T00:00:00) as an aware UTC datetime."""
    return datetime(1970, 1, 1, tzinfo=timezone.utc)
def seconds_from_epoch_utc(now=None):
    """Whole seconds elapsed between the UNIX epoch and ``now`` (default: current UTC time)."""
    reference = utc_now() if now is None else now
    return int((reference - epoch()).total_seconds())
def seconds2utc(seconds):
    """Convert a POSIX timestamp (anything ``int()``-able) to an aware UTC datetime."""
    return datetime.fromtimestamp(int(seconds), tz=timezone.utc)
def str2utc(s, default=None):
    """Parse an ISO-8601 string into a datetime.

    Returns ``default`` when ``s`` is not a valid ISO-8601 string, or not a
    string at all.  (Fixes: removed an unreachable trailing ``return None``
    and narrowed a bare ``except`` to the two exceptions ``fromisoformat``
    can actually raise.)
    """
    try:
        return datetime.fromisoformat(s)
    except (TypeError, ValueError):
        return default
def sha256(s):
    """Hex-encoded SHA-256 digest of the UTF-8 encoding of ``s``."""
    return hashlib.sha256(s.encode("utf-8")).hexdigest()
def abs_or_percent(value, default, max_value):
    """Interpret ``value`` as an absolute integer or a percentage of ``max_value``.

    ``"50%"`` -> ``ceil(0.5 * max_value)``; ``"7"`` -> ``7``.  Any unparsable
    input (``None``, malformed string, wrong type...) yields ``default``.
    (Fix: the bare ``except`` is narrowed to the exceptions the parsing can
    actually raise, so unrelated bugs are no longer hidden.)
    """
    try:
        if value.endswith("%"):
            return math.ceil(float(value[:-1]) / 100.0 * max_value)
        return int(value)
    except (AttributeError, TypeError, ValueError):
        # Not a string / not numeric: fall back to the caller's default.
        return default
def str2duration_seconds(s):
    """Parse a duration string into seconds.

    Accepts either a bare integer ("30") or a comma-separated list of
    ``timedelta`` keyword arguments ("minutes=1,seconds=30").
    """
    try:
        return int(s)
    except:  # noqa: E722 -- not a bare integer: interpret as timedelta metadata
        kwargs = {}
        for part in s.split(","):
            name, _, amount = part.partition("=")
            kwargs[name] = float(amount)
        return timedelta(**kwargs).total_seconds()
def decode_json(value):
    """Inverse of ``encode_json``: parse plain JSON text or a gzip+base64 payload.

    A leading ``b'...'`` bytes-repr wrapper is stripped first.  Decompression
    is attempted opportunistically; on failure the text is treated as raw JSON.
    """
    if value is None:
        return None
    if value.startswith("b'"):
        # Strip a bytes-literal repr wrapper: b'<payload>' -> <payload>
        value = value[2:-1]
    try:
        # Assume a compressed payload first; fall back to raw JSON below.
        value = gzip.decompress(base64.b64decode(value)).decode("utf-8")
    except:  # noqa: E722 -- not base64/gzip: value is already JSON text
        pass
    return json.loads(value)
def encode_json(value, compress=False):
    """Serialize ``value`` to JSON (keys sorted, non-serializable values via str()).

    With ``compress=True`` the JSON text is gzip-compressed and base64-encoded.
    """
    text = json.dumps(value, sort_keys=True, default=str)
    if not compress:
        return text
    packed = gzip.compress(text.encode("utf-8"), compresslevel=9)
    return base64.b64encode(packed).decode("utf-8")
def Session():
    """Return a ``requests`` session extended to also handle ``file://`` URLs."""
    s = requests.Session()
    s.mount('file://', FileAdapter())
    return s
url_cache = {}  # module-level memoization: url -> raw bytes of the fetched content

def get_url(url):
    """Fetch ``url`` and return its raw bytes, or ``None`` on failure.

    Supports three schemes:
      * ``internal:<file>`` -- search well-known CloneSquad directories;
      * ``s3://bucket/key`` -- read through boto3;
      * anything else       -- HTTP(S)/``file://`` via ``requests``.
    Successful results are cached for the process lifetime in ``url_cache``.
    """
    global url_cache
    if url is None or url == "":
        return None
    if url in url_cache:
        return url_cache[url]
    # internal: protocol management
    internal_str = "internal:"
    if url.startswith(internal_str):
        filename = url[len(internal_str):]
        # Search order: Lambda task root (if any), cwd, /opt, then CLONESQUAD_DIR.
        paths = [os.getcwd(), "/opt" ]
        if "LAMBDA_TASK_ROOT" in os.environ:
            paths.insert(0, os.environ["LAMBDA_TASK_ROOT"])
        if "CLONESQUAD_DIR" in os.environ:
            paths.append(os.environ["CLONESQUAD_DIR"])
        for path in paths:
            for sub_path in [".", "custo", "resources" ]:
                try:
                    f = open("%s/%s/%s" % (path, sub_path, filename), "rb")
                except:  # noqa: E722 -- candidate path missing; try the next one
                    continue
                # NOTE(review): the file handle is never closed explicitly (relies on GC).
                url_cache[url] = f.read()
                return url_cache[url]
        log.warning("Fail to read internal url '%s'!" % url)
        return None
    # s3:// protocol management
    if url.startswith("s3://"):
        m = re.search("^s3://([-.\w]+)/(.*)", url)
        # NOTE(review): a non-matching URL leaves m == None and raises AttributeError here.
        if len(m.groups()) != 2:
            return None
        bucket, key = [m.group(1), m.group(2)]
        client = boto3.client("s3")
        try:
            response = client.get_object(
                Bucket=bucket,
                Key=key)
            url_cache[url] = response["Body"].read()
            return url_cache[url]
        except Exception as e:
            log.warning("Failed to fetch S3 url '%s' : %s" % (url, e))
            return None
    # <other>:// protocols management
    s = Session()
    try:
        response = s.get(url)
    except Exception as e:
        log.warning("Failed to fetch url '%s' : %s" % (url, e))
        return None
    if response is not None:
        url_cache[url] = response.content
        return url_cache[url]
    return None
def parse_line_as_list_of_dict(string, leading_keyname="_", default=None):
    """Parse ``"key,opt=v,flag;key2,..."`` into a list of ``defaultdict(str)``.

    Each ``;``-separated record becomes one dict whose ``leading_keyname``
    entry holds the record key; ``opt=v`` items map to strings and bare items
    map to ``True``.  Backslash-escaped separators (``\\;`` ``\\,`` ``\\=``)
    are treated as literal characters.  Returns ``default`` when ``string``
    is ``None``.
    """
    if string is None:
        return default

    def _unescape(text):
        return text.replace("\\;", ";").replace("\\,", ",").replace("\\=", "=")

    records = []
    for record in re.split(r"(?<!\\);", string):
        if not record:
            continue
        fields = re.split(r"(?<!\\),", record)
        if not fields[0]:
            continue
        entry = defaultdict(str)
        entry[leading_keyname] = _unescape(fields[0])
        for field in fields[1:]:
            pieces = re.split(r"(?<!\\)=", field, maxsplit=1)
            entry[pieces[0]] = _unescape(pieces[1]) if len(pieces) == 2 else True
        records.append(entry)
    return records
def dynamodb_table_scan(client, table_name, max_size=32*1024*1024):
    """Scan a whole DynamoDB table and return its items as flat dicts.

    Follows ``LastEvaluatedKey`` pagination, unwraps the DynamoDB type
    annotations ({"S": "x"} -> "x"), drops records whose ``ExpirationTime``
    has passed, and truncates the result once ``max_size`` bytes of values
    have been accumulated (``max_size=-1`` disables truncation).
    """
    xray_recorder.begin_subsegment("misc.dynamodb_table_scan")
    items = []
    size = 0
    response = None
    while response is None or "LastEvaluatedKey" in response:
        query = {
            "TableName": table_name,
            "ConsistentRead": True
        }
        if response is not None and "LastEvaluatedKey" in response:
            query["ExclusiveStartKey"] = response["LastEvaluatedKey"]
        response = client.scan(**query)
        if "Items" not in response:
            # Bug fix: this line previously referenced ``self.table_name`` in a
            # plain function, raising NameError instead of the intended error.
            raise Exception("Failed to scan table '%s'!" % table_name)
        # Flatten the structure to make it more useable
        for i in response["Items"]:
            item = {}
            for k in i:
                item[k] = i[k][list(i[k].keys())[0]]
            # Do not manage expired records
            if "ExpirationTime" in item:
                expiration_time = int(item["ExpirationTime"])
                if seconds_from_epoch_utc() > expiration_time:
                    continue
            if max_size != -1:
                item_size = 0
                for k in item:
                    item_size += len(item[k])
                if size + item_size > max_size:
                    break  # Truncate too big DynamoDB table
                else:
                    size += item_size
            items.append(item)
    log.debug("Table scan of '%s' returned %d items." % (table_name, len(items)))
    xray_recorder.end_subsegment()
    return items
| [
"jeancharlesjorel@gmail.com"
] | jeancharlesjorel@gmail.com |
f2ab8dfb0b4f100d21c732dc63482a3816c4a33e | 9629daa92c3a002dcfb5e81ba1870c8bf22c4ae3 | /Shop/forms.py | 721799ae0e2fa7dfb43d4f8c56df47d172623992 | [
"MIT"
] | permissive | forhadsidhu/Django-E-Commerce-App | ce61e15836a9dd4d808b52768ab4d592c0f7890f | 5c07b0c2e562fc0bb8dcc6803a7595b889ea8954 | refs/heads/master | 2023-03-16T14:58:06.391665 | 2021-03-15T10:50:00 | 2021-03-15T10:50:00 | 248,206,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | from django import forms
from .models import ImageUploadModel,Post
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
# Now create customized user creation form like for adding email field in default django form.
# Customized user-creation form: extends Django's UserCreationForm so the
# e-mail address is collected alongside the username and two password fields.
class CreateUserform(UserCreationForm):
    # Meta class is simply inner class
    # add image field
    # image = forms.ImageField()
    class Meta:
        model = User
        fields = ['username','email','password1','password2']
# Model-backed form exposing only the free-text ``review`` field of Post.
class Rev(forms.ModelForm):
    class Meta:
        model = Post
        fields = ['review']
"forhadsidhu@gmail.com"
] | forhadsidhu@gmail.com |
19d14b124965f2f461568792ad34bb6bbd4dc10d | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/367ff95d4d3d3770fa7b/snippet.py | 6cd51cef4fd2bff70541bd8d5ea0c23646114dd5 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,003 | py | from collections import defaultdict
from django.db.models.signals import *
class DisableSignals(object):
    """Context manager that temporarily disconnects Django model signals.

    On entry the receiver list of every selected signal is stashed and
    emptied; on exit the receivers are restored exactly as they were.
    """

    def __init__(self, disabled_signals=None):
        self.stashed_signals = defaultdict(list)
        # Default to every common model/migration signal when none are given.
        self.disabled_signals = disabled_signals or [
            pre_init, post_init,
            pre_save, post_save,
            pre_delete, post_delete,
            pre_migrate, post_migrate,
        ]

    def __enter__(self):
        for signal in self.disabled_signals:
            self.disconnect(signal)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Bug fix: iterate over a snapshot -- ``reconnect`` deletes keys from
        # ``stashed_signals``, and mutating a dict while iterating its keys
        # raises ``RuntimeError: dictionary changed size during iteration``.
        for signal in list(self.stashed_signals):
            self.reconnect(signal)

    def disconnect(self, signal):
        """Stash the signal's receivers and leave it with none."""
        self.stashed_signals[signal] = signal.receivers
        signal.receivers = []

    def reconnect(self, signal):
        """Restore the stashed receivers (no-op receivers list when not stashed)."""
        signal.receivers = self.stashed_signals.get(signal, [])
        del self.stashed_signals[signal]
# Example usage:
# with DisableSignals():
# user.save() # will not call any signals
| [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
425ccb10cc834c44b5fad2c6f6259a5ce46223e7 | 3babd5887c70ff85a2254882c36e35b127a8905d | /dbhandler.py | 756e6cefac6a212f9c4f414705806bf6d4b51364 | [] | no_license | angyay0/demo-devops-users | 1109e443fbaf087e9e632e8902bfcb968bb0cc34 | f170ae65358993a48f6a073895ca2585fa087ba1 | refs/heads/master | 2022-12-02T06:09:55.470309 | 2020-08-13T21:36:10 | 2020-08-13T21:36:10 | 287,142,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from userapi.models import LoginResponse
import psycopg2
import json
# Load and return json as dict
def ReadFromJson():
    """Read ``db.json`` from the current working directory and return its contents."""
    with open("db.json") as config_file:
        return json.load(config_file)
#Database operations to retrieve and save users
class DBHandler:
    """Thin data-access stub: loads credentials from ``db.json`` and fakes auth calls."""

    def __init__(self):
        # Connection settings from db.json; real connection is opened lazily.
        self.credentials = ReadFromJson()
        self.connection = None

    def connect(self):
        """Open the database connection; returns True on success, False otherwise."""
        try:
            #Connect
            # NOTE(review): connection logic is still a stub -- nothing is opened yet.
            return True
        except:
            return False

    def hasAccess(self, data):
        """Validate ``data`` credentials; returns a LoginResponse (empty fields on failure)."""
        try:
            print (data)
            # NOTE(review): hard-coded response -- real credential check still TODO.
            return LoginResponse("Valid-Token","Angel","angyay0")
        except:
            return LoginResponse("","","")

    def storeUser(self, data):
        """Persist a new user; returns a LoginResponse (empty fields on failure)."""
        try:
            print (data)
            #return ("Fallo interno", False)
            return LoginResponse("Valid-Token","Angel","angyay0")
        except:
            return LoginResponse("","","")
"aperez@mstecs.com"
] | aperez@mstecs.com |
abc7888375db7b5790e14fedfa8dedb11c05d33e | d2e6823851e236312e4b99acca53a293dff5b1a7 | /BlogApp/managers.py | eedf596bd527a60656aea2a3e09bf9a12dcf89b5 | [
"Apache-2.0"
] | permissive | devvspaces/nimrodage | f061378692e94b8dc9b15ae2f3fdcd587bfdfe1d | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | refs/heads/main | 2023-05-09T03:41:22.345841 | 2021-06-04T03:47:56 | 2021-06-04T03:47:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from django.db import models
class PostQuery(models.QuerySet):
    """QuerySet with post-specific filters."""

    def get_authors_post(self, username):
        # All posts whose author has the given username.
        return self.filter(author__username=username)
class PostManager(models.Manager):
    """Manager wiring the custom PostQuery queryset onto the Post model."""

    def get_queryset(self):
        return PostQuery(model=self.model, using=self._db)

    def get_posts(self, username):
        # Convenience shortcut: posts by a given author username.
        return self.get_queryset().get_authors_post(username)
"netrobeweb@gmail.com"
] | netrobeweb@gmail.com |
d2feecf8c86df8ebfbb1a826ff254b1b98455ddf | 4ac6c008882c1a7321bf9477ba532b88bb113741 | /ThiNet_TPAMI/ResNet50/analysis_reid.py | 9e4300e6213e6eaf5645e641d6a0213350804a8f | [
"MIT"
] | permissive | QQQYang/ThiNet_Code | cbd67470838b0d1d0a1803ae66a9a74a899adf89 | 850525c8ca85b63e5f7cec1a73b1b681178a5786 | refs/heads/master | 2020-09-18T05:36:23.093997 | 2019-12-05T11:54:39 | 2019-12-05T11:54:39 | 224,130,726 | 0 | 0 | MIT | 2019-11-26T07:37:29 | 2019-11-26T07:37:28 | null | UTF-8 | Python | false | false | 6,649 | py | #coding=utf-8
'''
This file is used for analysing the filters and activations of a network, which inspire us of new ideas about network pruning
Author: yqian@aibee.com
'''
# import ptvsd
# ptvsd.enable_attach(address = ('0.0.0.0', 5678))
# ptvsd.wait_for_attach()
import caffe
import numpy as np
from PIL import Image
import cv2
from net_generator import solver_and_prototxt
import random
import time
import os
import argparse
import json
def cal_corrcoef(act):
    """Correlate the cumulative share of ``act`` (sorted descending) with a
    uniform ramp.

    Builds the curve y_i = (sum of the i+1 largest values) / (total) against
    x_i = (i+1)/n and returns their Pearson correlation coefficient.
    """
    total = np.sum(act)
    ordered = np.sort(act)[::-1]
    count = len(ordered)
    cum_share = [sum(ordered[:i + 1]) / total for i in range(count)]
    uniform = [float(i + 1) / count for i in range(count)]
    return np.corrcoef(np.array([uniform, cum_share]))[0, 1]
def resize_image_with_padding(im, new_dims, interp_order=1):
    """
    Resize an image array with interpolation, preserving aspect ratio and
    zero-padding the remainder of the target canvas.
    Parameters
    ----------
    im : (H x W x K) ndarray
    new_dims : (height, width) tuple of new dimensions.
    interp_order : interpolation order, default is linear.
        NOTE(review): this argument is accepted but never used -- cv2.resize
        runs with its default interpolation.
    Returns
    -------
    im : resized ndarray with shape (new_dims[0], new_dims[1], K)
    """
    # Output canvas, zero-filled (black padding).
    ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
                   dtype=np.float32)
    ret.fill(0)
    target_as = new_dims[1] / float(new_dims[0])      # target aspect ratio (w/h)
    aspect_ratio = im.shape[1] / float(im.shape[0])   # source aspect ratio (w/h)
    if target_as < aspect_ratio:
        # Source is relatively wider than the target: fit the width, pad below.
        # NOTE(review): this branch anchors top-left (no vertical centering),
        # unlike the horizontal centering below -- confirm that is intended.
        scale = new_dims[1] / float(im.shape[1])
        scaled_width = int(new_dims[1])
        scaled_height = min(int(new_dims[0]), int(scale* im.shape[0]))
        resized_img = cv2.resize(im, (scaled_width, scaled_height))
        start_x = 0
        start_y = 0
        ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
    else:
        # Source is relatively taller: fit the height, center horizontally.
        scale = new_dims[0] / float(im.shape[0])
        scaled_width = min(int(new_dims[1]), int(scale* im.shape[1]))
        scaled_height = int(new_dims[0])
        resized_img = cv2.resize(im, (scaled_width, scaled_height))
        start_x = 0
        start_y = int((new_dims[1] - scaled_width) / 2)
        ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
    return ret.astype(np.float32)
def collect_activation(selected_layer, selected_block):
    """Run the ReID network over 2000 randomly sampled eval images and score,
    for each selected ResNet bottleneck branch, how its accumulated per-channel
    mean activations correlate with a uniform distribution (``cal_corrcoef``).
    The resulting {layer_name: score} dict is printed and written to
    ``act_mean.json``.  (Dead commented-out sampling code removed.)
    """
    model_def = '/ssd/yqian/prune/model/reid/deploy_baseline.prototxt'
    model_weights = '/ssd/yqian/prune/model/body_reid_general_npair_caffe_cpu_ctf_20190925_v010002/npair_may_to_aug_ctf_all_stores_finetune_full_year_iter_44000.caffemodel'
    # load net
    caffe.set_device(0)
    caffe.set_mode_gpu()
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    # load the mean ImageNet image (as distributed with Caffe) for subtraction
    mean_value = np.array([104, 117, 123], dtype=float)
    sample_num = 2000
    act_mean = {}  # layer name -> list of per-channel mean activations, one entry per image
    layers = ['2a', '2b', '2c', '3a', '3b', '3c', '3d', '4a', '4b', '4c', '4d', '4e', '4f', '5a', '5b', '5c']
    data_list = np.loadtxt('/ssd/yqian/prune/dataset/data/test_data/eval_CTF_beijing_xhm_20181207_label_finish_revision.txt', dtype=str)
    img_index = random.sample(range(len(data_list)), sample_num)
    for file_index in img_index:
        file_path = '/ssd/yqian/prune/dataset/data/test_data/all/' + data_list[file_index][0]
        im = cv2.imread(file_path)
        im = resize_image_with_padding(im, (384, 128))
        im -= mean_value
        im = np.transpose(im, (2,0,1)) # HWC -> CHW
        im = np.reshape(im, (1, 3, 384, 128)) #CHW ->NCHW
        # shape for input (data blob is N x C x H x W), set data
        net.blobs['data'].reshape(*im.shape)
        net.blobs['data'].data[...] = im
        net.forward()
        for i in range(len(selected_layer)):
            for j in range(len(selected_block)):
                # block code 1 selects the branch2a output, anything else branch2b
                if selected_block[j] == 1:
                    output_layer = 'res' + layers[selected_layer[i]] + '_branch2a'
                else:
                    output_layer = 'res' + layers[selected_layer[i]] + '_branch2b'
                activation = net.blobs[output_layer].data
                # Per-channel mean over batch and spatial dims for this image.
                if output_layer not in act_mean:
                    act_mean[output_layer] = [np.mean(activation, axis=(0, 2, 3)).tolist()]
                else:
                    act_mean[output_layer].append(np.mean(activation, axis=(0, 2, 3)).tolist())
    for key in act_mean:
        # Collapse the per-image channel means into one abs-sum per channel,
        # then reduce to a single correlation score.
        layer_act = act_mean[key]
        act_mean[key] = np.sum(np.abs(np.array(layer_act)), axis=0).tolist()
        act_mean[key] = float(cal_corrcoef(act_mean[key]))
    print(act_mean)
    with open('act_mean.json','w') as f:
        json.dump(act_mean, f)
def get_opt():
    """Parse command-line options for the activation-analysis run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--selected_layer", type=int, nargs='+',
                        default=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
    parser.add_argument("--selected_block", type=int, nargs='+', default=[1, 2],
                        help='range from 1 to 2')
    parser.add_argument("--gpu", type=int, default=4)
    return parser.parse_args()
if __name__ == "__main__":
    # Poll nvidia-smi until a GPU with more than 5 GB of free memory shows up,
    # then pin this process to it via CUDA_VISIBLE_DEVICES.
    while True:
        os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
        memory_gpu=[int(x.split()[2]) for x in open('tmp','r').readlines()]
        memory_max = max(memory_gpu)
        if memory_max>5000:
            gpu = np.argmax(memory_gpu)
            os.environ["CUDA_VISIBLE_DEVICES"] = str(np.argmax(memory_gpu))
            os.system('rm tmp')
            print('Find vacant GPU: %d' % gpu)
            break
    opt = get_opt()
    collect_activation(opt.selected_layer, opt.selected_block)
"yqian@gpu002.aibee.cn"
] | yqian@gpu002.aibee.cn |
b0eab76f789da136a8a281e69b3f5b956c7456b4 | d0a2df49e95881b3e3cdde806e55d5ef7ca84526 | /logistic_regression.py | e8f81f7f32d6b1273b8f2a77b8c6408ec5b2571b | [] | no_license | nhan-dam/svgd-variance-reduction | b288d35f0ad7c342be57043d14104a8cdc905a66 | 7f626a198cf0cf3aab083e1ac720ea58d3c9b7b9 | refs/heads/master | 2022-04-18T07:07:30.940649 | 2020-04-06T08:35:31 | 2020-04-06T08:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,968 | py | from __future__ import print_function, division, absolute_import
import numpy as np
'''
Logistic Regression:
The observed data D = {X, y} consist of N binary class labels,
y_t \in {-1,+1}, and d covariates for each datapoint, X_t \in R^d.
p(y_t = 1| x_t, w) = 1 / (1 + exp(-w^T x_t))
'''
class LogisticRegression:
def __init__(self, W, solver='sgd', batchsize=128):
    '''
    Initialise a Logistic Regression model.
    W: initial weight vector; copied so the caller's array is not aliased.
    solver: name of the solver. Supported values: 'sgd', 'adagrad', 'rmsprop',
    'svrg-sgd', 'svrg-adagrad' and 'svrg-rmsprop'. By default, solver = 'sgd'.
    batchsize: mini-batch size used during training (default 128).
    '''
    self.W = np.copy(W)
    self.solver = solver
    self.batchsize = batchsize
def _sigmoid(self, X, W):
    '''
    Compute the sigmoid function given weights and inputs.
    X: N x D, where N is the number of data points and D is the dimension of each data point.
    W: (D,) array.
    Returns an (N,) array of probabilities 1 / (1 + exp(-X @ W)).
    '''
    logits = np.matmul(X, W)
    return 1.0 / (1.0 + np.exp(-logits))
def _fn_J(self, w, x, y, reg):
    '''Mean logistic loss of labels ``y`` (0/1) under weights ``w`` plus an L2 penalty.'''
    scores = np.dot(x, w)
    data_loss = np.sum(np.log(1 + np.exp(scores)) - scores * y)
    penalty = 0.5 * reg * np.sum(w ** 2)
    return 1.0 / len(y) * data_loss + penalty
def fit(self, X, y, n_updates=128, learning_rate=0.01, regularisation_factor=0.1, n_svrg_updates=128,
online=False, eval_freq=0, eval_fn=None, momentum_factor=0.9, decay_lr=0, debug=False):
'''
Train the model.
n_updates: number of training iterations. By default, n_updates = 100.
learning_rate: the learning rate. By default, learning_rate = 0.01.
regularisation_factor: regularisation parameter used in L2 penalty.
n_svrg_updates: number of training iterations in the inner loop of SVRG solver. By default,
n_svrg_updates = 100.
online: boolean flag for online learning setting. If online = True, we follow online learning to update
particles. That means for each training data point, we predict its label before using it for training.
We compute the accumulated accuracy of prediction. By default, online = False.
eval_freq: the frequency that the performance of the model with current parameters is evaluated.
If online = True, eval_freq is automatically set to -1, that means the evaluation is executed before
training with each data point. Otherwise, if eval_freq <= 0, no evaluation will be executed during
training and if eval_freq > 0 the evaluation will be executed after every eval_freq data points trained.
eval_fn: the function to evaluate the performance of the model with the current parameters.
By default, eval_fn = None.
momentum_factor: momentum parameter used in RMSProp. By default, momentum_factor = 0.9.
decay_lr: the hyperparameters that control the decay of learning rate. By default, decay_stepsize = 0,
that means there is no decay.
debug: boolean flag to determine the mode of this function. In debug mode, the function will print more
information to the standard output during training. By default, debug = False.
'''
X_train = np.copy(X)
y_train = np.copy(y)
y_train[y_train == -1] = 0 # in this function, we use labels 0 and 1.
num_data = X_train.shape[0]
n_svrg_updates = 1 if self.solver != 'svrg-sgd' and self.solver != 'svrg-adagrad' and \
self.solver != 'svrg-rmsprop' else n_svrg_updates
if online:
batchsize = 1
eval_freq = min(-eval_freq, -1)
n_updates = int(np.ceil(num_data / n_svrg_updates))
else:
batchsize = min(self.batchsize, X_train.shape[0])
eval_freq = n_updates * n_svrg_updates + 1 if eval_freq <= 0 else eval_freq
data_idx_perm = np.random.permutation(num_data)
if eval_freq < 0:
loss_log = np.zeros(int(np.ceil(num_data / (-eval_freq))) + 1)
accumulated_loss = 0
cnt = 0
elif eval_freq > 0:
eval_log = []
if self.solver == 'sgd':
print('Train Logistic Regression with SGD solver.')
for it in np.arange(n_updates):
if debug and (it + 1) * batchsize % 1000 == 0:
print('iter %d' % (it + 1))
data_idx = data_idx_perm[np.arange(it * batchsize, (it + 1) * batchsize) % num_data]
x_batch = X_train[data_idx, :]
y_batch = y_train[data_idx]
if eval_freq < 0:
current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
get_prob=False)
accumulated_loss += (current_loss * len(y_batch))
if it % (-eval_freq) == 0 or (it + 1) == num_data:
loss_log[cnt] = accumulated_loss * 1.0 / (it + 1)
cnt += 1
grad_J = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * self.W
# W_prime = self.W + 1.0 / 10000 * np.random.normal(0, 1, len(self.W))
# numerical_grad_J = (self._fn_J(W_prime, x_batch, y_batch, regularisation_factor) - self._fn_J(self.W, x_batch, y_batch, regularisation_factor)) / (W_prime - self.W)
# diff = numerical_grad_J - grad_J
self.W = self.W - learning_rate * np.exp(-decay_lr * it) * grad_J
# self.W = self.W - learning_rate * np.exp(-decay_lr * it) * numerical_grad_J
if eval_freq > 0 and (it + 1) % eval_freq == 0:
if (it + 1) // eval_freq == 1:
eval_log = eval_fn()
else:
eval_log = np.vstack((eval_log, eval_fn()))
elif self.solver == 'adagrad':
print('Train Logistic Regression with AdaGrad solver.')
fudge_factor = 1e-6
historical_grad = 0.0
for it in np.arange(n_updates):
if debug and (it + 1) * batchsize % 1000 == 0:
print('iter %d' % (it + 1))
data_idx = data_idx_perm[np.arange(it * batchsize, (it + 1) * batchsize) % num_data]
x_batch = X_train[data_idx, :]
y_batch = y_train[data_idx]
if eval_freq < 0:
current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
get_prob=False)
accumulated_loss += (current_loss * len(y_batch))
if it % (-eval_freq) == 0 or (it + 1) == num_data:
loss_log[cnt] = accumulated_loss * 1.0 / (it + 1)
cnt += 1
grad_J = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * self.W
historical_grad += (grad_J ** 2)
adj_grad = np.divide(grad_J, fudge_factor + np.sqrt(historical_grad))
self.W = self.W - learning_rate * np.exp(-decay_lr * it) * adj_grad
if eval_freq > 0 and (it + 1) % eval_freq == 0:
if (it + 1) // eval_freq == 1:
eval_log = eval_fn()
else:
eval_log = np.vstack((eval_log, eval_fn()))
elif self.solver == 'rmsprop':
print('Train Logistic Regression with RMSProp solver.')
fudge_factor = 1e-6
historical_grad = 0.0
for it in np.arange(n_updates):
if debug and (it + 1) * batchsize % 1000 == 0:
print('iter %d' % (it + 1))
data_idx = data_idx_perm[np.arange(it * batchsize, (it + 1) * batchsize) % num_data]
x_batch = X_train[data_idx, :]
y_batch = y_train[data_idx]
if eval_freq < 0:
current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
get_prob=False)
accumulated_loss += (current_loss * len(y_batch))
if it % (-eval_freq) == 0 or (it + 1) == num_data:
loss_log[cnt] = accumulated_loss * 1.0 / (it + 1)
cnt += 1
grad_J = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * self.W
if it == 0:
historical_grad = grad_J ** 2
else:
historical_grad = momentum_factor * historical_grad + (1 - momentum_factor) * (grad_J ** 2)
adj_grad = np.divide(grad_J, fudge_factor + np.sqrt(historical_grad))
self.W = self.W - learning_rate * np.exp(-decay_lr * it) * adj_grad
if eval_freq > 0 and (it + 1) % eval_freq == 0:
if (it + 1) // eval_freq == 1:
eval_log = eval_fn()
else:
eval_log = np.vstack((eval_log, eval_fn()))
elif self.solver == 'svrg-sgd':
print('Train Logistic Regression with SVRG-SGD solver.')
for it in np.arange(n_updates):
if debug and (it + 1) * batchsize % 1000 == 0:
print('iter %d' % (it + 1))
mu = -np.sum(np.multiply(X_train, np.broadcast_to(np.vstack(y_train - self._sigmoid(X_train, self.W)),
(len(y_train), X_train.shape[1]))),
axis=0) * 1.0 / len(y_train) \
+ regularisation_factor * self.W
w_hat = np.copy(self.W)
for it_svrg in np.arange(n_svrg_updates):
data_idx = data_idx_perm[np.arange((it * n_svrg_updates + it_svrg) * batchsize,
(it * n_svrg_updates + it_svrg + 1) * batchsize) % num_data]
x_batch = X_train[data_idx, :]
y_batch = y_train[data_idx]
if eval_freq < 0:
if it * n_svrg_updates + it_svrg >= num_data:
break
self.W, w_hat = w_hat, self.W
current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
get_prob=False)
accumulated_loss += (current_loss * len(y_batch))
if (it * n_svrg_updates + it_svrg) % (-eval_freq) == 0 or (
it * n_svrg_updates + it_svrg + 1) == num_data:
loss_log[cnt] = accumulated_loss * 1.0 / (it * n_svrg_updates + it_svrg + 1)
cnt += 1
self.W, w_hat = w_hat, self.W
grad_J_hat = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, w_hat)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * w_hat
grad_J = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * self.W
w_hat = w_hat - learning_rate * np.exp(-decay_lr * it) * (grad_J_hat - grad_J + mu)
if eval_freq > 0 and (it * n_svrg_updates + it_svrg + 1) % eval_freq == 0:
self.W, w_hat = w_hat, self.W
if (it * n_svrg_updates + it_svrg + 1) // eval_freq == 1:
eval_log = eval_fn()
else:
eval_log = np.vstack((eval_log, eval_fn()))
self.W, w_hat = w_hat, self.W
self.W = np.copy(w_hat)
elif self.solver == 'svrg-adagrad':
print('Train Logistic Regression with SVRG-AdaGrad solver.')
for it in np.arange(n_updates):
if debug and (it + 1) * batchsize % 1000 == 0:
print('iter %d' % (it + 1))
fudge_factor = 1e-6
historical_grad = 0.0
mu = -np.sum(np.multiply(X_train, np.broadcast_to(np.vstack(y_train - self._sigmoid(X_train, self.W)),
(len(y_train), X_train.shape[1]))),
axis=0) * 1.0 / len(y_train) \
+ regularisation_factor * self.W
w_hat = np.copy(self.W)
for it_svrg in np.arange(n_svrg_updates):
data_idx = data_idx_perm[np.arange((it * n_svrg_updates + it_svrg) * batchsize,
(it * n_svrg_updates + it_svrg + 1) * batchsize) % num_data]
x_batch = X_train[data_idx, :]
y_batch = y_train[data_idx]
if eval_freq < 0:
if it * n_svrg_updates + it_svrg >= num_data:
break
self.W, w_hat = w_hat, self.W
current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
get_prob=False)
accumulated_loss += (current_loss * len(y_batch))
if (it * n_svrg_updates + it_svrg) % (-eval_freq) == 0 or (
it * n_svrg_updates + it_svrg + 1) == num_data:
loss_log[cnt] = accumulated_loss * 1.0 / (it * n_svrg_updates + it_svrg + 1)
cnt += 1
self.W, w_hat = w_hat, self.W
grad_J_hat = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, w_hat)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * w_hat
grad_J = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * self.W
grad_J_svrg = grad_J_hat - grad_J + mu
historical_grad += (grad_J_svrg ** 2)
adj_grad = np.divide(grad_J_svrg, fudge_factor + np.sqrt(historical_grad))
w_hat = w_hat - learning_rate * np.exp(-decay_lr * it) * adj_grad
if eval_freq > 0 and (it * n_svrg_updates + it_svrg + 1) % eval_freq == 0:
self.W, w_hat = w_hat, self.W
if (it * n_svrg_updates + it_svrg + 1) // eval_freq == 1:
eval_log = eval_fn()
else:
eval_log = np.vstack((eval_log, eval_fn()))
self.W, w_hat = w_hat, self.W
self.W = np.copy(w_hat)
elif self.solver == 'svrg-rmsprop':
print('Train Logistic Regression with SVRG-RMSProp solver.')
for it in np.arange(n_updates):
if debug and (it + 1) * batchsize % 1000 == 0:
print('iter %d' % (it + 1))
fudge_factor = 1e-6
historical_grad = 0.0
mu = -np.sum(np.multiply(X_train, np.broadcast_to(np.vstack(y_train - self._sigmoid(X_train, self.W)),
(len(y_train), X_train.shape[1]))),
axis=0) * 1.0 / len(y_train) \
+ regularisation_factor * self.W
w_hat = np.copy(self.W)
for it_svrg in np.arange(n_svrg_updates):
data_idx = data_idx_perm[np.arange((it * n_svrg_updates + it_svrg) * batchsize,
(it * n_svrg_updates + it_svrg + 1) * batchsize) % num_data]
x_batch = X_train[data_idx, :]
y_batch = y_train[data_idx]
if eval_freq < 0:
if it * n_svrg_updates + it_svrg >= num_data:
break
self.W, w_hat = w_hat, self.W
current_loss = self.predict(np.copy(X[data_idx]), np.copy(y[data_idx]), get_label=False,
get_prob=False)
accumulated_loss += (current_loss * len(y_batch))
if (it * n_svrg_updates + it_svrg) % (-eval_freq) == 0 or (
it * n_svrg_updates + it_svrg + 1) == num_data:
loss_log[cnt] = accumulated_loss * 1.0 / (it * n_svrg_updates + it_svrg + 1)
cnt += 1
self.W, w_hat = w_hat, self.W
grad_J_hat = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, w_hat)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * w_hat
grad_J = -np.sum(
np.multiply(x_batch, np.broadcast_to(np.vstack(y_batch - self._sigmoid(x_batch, self.W)),
(batchsize, x_batch.shape[1]))), axis=0) * 1.0 / batchsize \
+ regularisation_factor * self.W
grad_J_svrg = grad_J_hat - grad_J + mu
if it_svrg == 0:
historical_grad = grad_J_svrg ** 2
else:
historical_grad = momentum_factor * historical_grad + (1 - momentum_factor) * (grad_J_svrg ** 2)
adj_grad = np.divide(grad_J_svrg, fudge_factor + np.sqrt(historical_grad))
w_hat = w_hat - learning_rate * np.exp(-decay_lr * it) * adj_grad
if eval_freq > 0 and (it * n_svrg_updates + it_svrg + 1) % eval_freq == 0:
self.W, w_hat = w_hat, self.W
if (it * n_svrg_updates + it_svrg + 1) // eval_freq == 1:
eval_log = eval_fn()
else:
eval_log = np.vstack((eval_log, eval_fn()))
self.W, w_hat = w_hat, self.W
self.W = np.copy(w_hat)
else:
raise ValueError('The requested solver %s is currently not supported.' % (self.solver))
if eval_freq < 0:
return self, loss_log
if eval_fn is not None:
return self, eval_log
return self
def predict(self, X_test, y_test=None, get_label=True, get_prob=False):
    '''
    Predict the labels given observations.
    X_test: one or many new observations; an N x D matrix where N is the number
    of observations and D is the dimension of each observation.
    y_test: corresponding labels (in {-1, +1}) of the observations. If y_test is
    given, the return values include the accuracy. By default, y_test is None.
    get_label: a boolean flag to determine if we return the predicted labels.
    get_label has higher precedence than get_prob: if y_test = None and
    get_label = False, then get_prob is automatically True. By default, True.
    get_prob: a boolean flag to determine if we return the confidence of the
    prediction (probability of the predicted class). By default, False.
    Return: some subset of (predicted labels, probabilities, accuracy),
    depending on the flags above.
    '''
    prob = self._sigmoid(X_test, self.W)
    # Threshold at 0.5: labels are +1 / -1.
    y_pred = np.ones(len(prob))
    y_pred[prob <= 0.5] = -1
    if y_test is None:
        if not get_label:  # get_prob is automatically True
            return 0.5 + np.abs(prob - 0.5)
        if not get_prob:
            return y_pred
        return y_pred, 0.5 + np.abs(prob - 0.5)
    if not get_label:
        if not get_prob:
            return np.sum(y_pred == y_test) * 1.0 / len(y_test)
        return 0.5 + np.abs(prob - 0.5), np.sum(y_pred == y_test) * 1.0 / len(y_test)
    return y_pred, 0.5 + np.abs(prob - 0.5), np.sum(y_pred == y_test) * 1.0 / len(y_test)
| [
"ndam@deakin.edu.au"
] | ndam@deakin.edu.au |
345af0510923871e7c277fa605b5fbb91b36acd5 | 357048e2990a572be19f2816a9890fdb10b4ef71 | /bmips.py | 2b0713eb577cd1cf89d6eeb2eb0ee707691543e2 | [] | no_license | brightp-py/bmips | 444771a8b46edd7bded2e048dc58573cee2007ca | 59308e55f5eb4b56937044e932383216b1a0c8c7 | refs/heads/master | 2023-04-03T01:58:14.517627 | 2021-04-10T19:52:42 | 2021-04-10T19:52:42 | 356,678,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import sys
from src import simulator, parsemips

# Usage: python bmips.py <sourcefile> [-d] [-r [reportfile]]
#   -d  enable debug printing
#   -r  enable report output; optionally followed by a file name,
#       otherwise the report goes to stdout ("print").
if len(sys.argv) < 2:
    print("FAILED: Expected a file name.")
    print("'python bmips.py [filename] (-d)'")
    sys.exit()

simulator.DEBUGPRINT = False
simulator.REPORTPRINT = None

wantreportfile = False
for arg in sys.argv[2:]:
    if wantreportfile:
        wantreportfile = False
        if arg[0] != "-":
            simulator.REPORTPRINT = arg
            continue  # arg was consumed as the report file name
        # A flag right after -r means "report to stdout"; fall through so
        # the flag itself is still processed below.
        simulator.REPORTPRINT = "print"
    if arg == "-d":
        simulator.DEBUGPRINT = True
    elif arg == "-r":
        wantreportfile = True
# BUGFIX: a trailing "-r" with no following argument previously left
# REPORTPRINT unset; treat it like "-r" followed by another flag.
# (This also replaces the redundant second scan for "-d".)
if wantreportfile:
    simulator.REPORTPRINT = "print"

with open(sys.argv[1], 'r') as f:
    p = parsemips.parseCode(f.read())
sim = simulator.Simulator(p)
sim.run()
"brightp@umich.edu"
] | brightp@umich.edu |
f931f93487dee0b1d116ef38d52fa5222198b620 | b6c09a1b87074d6e58884211ce24df8ec354da5c | /345. 反转字符串中的元音字母.py | f259c3af854c1e4b250ef47b593bf61f4f86067c | [] | no_license | fengxiaolong886/leetcode | a0ee12d67c4a10fb12d6ca4369762ab5b090cab1 | 4c0897bc06a297fa9225a0c46d8ec9217d876db8 | refs/heads/master | 2023-03-18T22:16:29.212016 | 2021-03-07T03:48:16 | 2021-03-07T03:48:16 | 339,604,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | """
Write a function that takes a string as input and reverses only the vowels in that string.
"""
def reverseVowels(s):
    """Return s with its vowels (both cases) reversed in place.

    Two-pointer sweep: advance from both ends, swapping whenever both
    pointers rest on a vowel. Non-vowel characters never move.
    """
    vowels = set("aeiouAEIOU")
    chars = list(s)
    lo, hi = 0, len(chars) - 1
    while lo < hi:
        if chars[lo] not in vowels:
            lo += 1
        elif chars[hi] not in vowels:
            hi -= 1
        else:
            chars[lo], chars[hi] = chars[hi], chars[lo]
            lo += 1
            hi -= 1
    return "".join(chars)

print(reverseVowels("hello"))
print(reverseVowels("leetcode"))
"xlfeng886@163.com"
] | xlfeng886@163.com |
7b4f54884801a64b393131a5a772f15a7ccfe5aa | 45a70554091ea06afc63d86ddb2724053513189b | /dataset/__init__.py | 6db9f41c3b178cc49530593183e883df9f08deb2 | [] | no_license | mponza/keyphrase-annotation | d10705c2ccf9ae7b2d2e3d8aa1901460de564976 | 14abbd4ebcf449f65f1b1c392235b55eb051005b | refs/heads/master | 2021-06-13T00:21:49.108263 | 2017-01-04T14:09:00 | 2017-01-04T14:09:00 | 77,946,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from duc import DUCDataset
from icsi import ICSIASRDataset, ICSIHumanTranscriptDataset
from inspec import InspectTrainingDataset, InspectValidationDataset, \
InspectTestDataset
from nus import NUSDataset
def make_dataset(dataset_name):
    """
    Factory-style method for getting a dataset from its string name.

    Only the requested dataset is instantiated. (The previous version
    built *every* dataset object on each call just to return one of
    them, which is wasteful if dataset constructors do any real work.)

    Raises KeyError for an unknown dataset_name, as before.
    """
    dataset_classes = {
        'duc': DUCDataset,
        'icsi-asr': ICSIASRDataset,
        'icsi-ht': ICSIHumanTranscriptDataset,
        'inspec-train': InspectTrainingDataset,
        'inspec-val': InspectValidationDataset,
        'inspec-test': InspectTestDataset,
        'nus': NUSDataset,
    }
    return dataset_classes[dataset_name]()
| [
"mponza@gmail.com"
] | mponza@gmail.com |
aea6c3de83c23b1dc7a355b74d2d31cefade985e | dd9f643d5833a3a4707a08eac38e30e03819a5f8 | /scomp/migrations/0014_blogmodel_blogparamodel.py | 8b5a0b8680eb54ca0f8280f9415cd36845bdfcdc | [] | no_license | Ashwary-Jharbade/services | e65e99be5508c9854797124f0392c2d32477ee7a | 2e514117e374fee4feef908e85cf8853f830f390 | refs/heads/master | 2023-03-04T10:10:35.499854 | 2021-02-18T19:12:16 | 2021-02-18T19:12:16 | 304,116,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | # Generated by Django 3.0.4 on 2020-08-27 07:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2020-08-27) for the `scomp` app:
    creates the BlogModel and BlogParaModel tables.

    NOTE(review): generated code -- do not hand-edit field definitions;
    create a follow-up migration instead.
    """

    dependencies = [
        ('scomp', '0013_servicemodel_desc'),
    ]

    operations = [
        migrations.CreateModel(
            name='BlogModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('image', models.FileField(blank=True, null=True, upload_to='blogimages')),
                ('blogauthor', models.CharField(max_length=30)),
                ('aboutauthor', models.CharField(max_length=200)),
                ('intropara', models.CharField(max_length=150)),
                ('content', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='BlogParaModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('image', models.FileField(blank=True, null=True, upload_to='blogimages')),
                ('content', models.CharField(max_length=500)),
                ('date', models.DateTimeField(auto_now_add=True)),
                # Each paragraph belongs to one blog post; deleting the post
                # cascades to its paragraphs.
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scomp.BlogModel')),
            ],
        ),
    ]
| [
"ashwary.jharbade999@gmail.com"
] | ashwary.jharbade999@gmail.com |
49735df185c393c2ec9eacd961fb3a5fade967e1 | b3a9740a5099c490c2f21ca9a9bbf507ad2bd2bf | /blog/apps/aricle/migrations/0002_auto_20190725_1259.py | edfa89f55b029a422eb06d9951b52e75e5453e68 | [] | no_license | JummyWu/drf_blog | c27d39237fa0953650d092f40cfcc73beb238652 | b192485ad460eb1f05322dd09d0e97a63c476d4f | refs/heads/master | 2022-12-08T11:40:41.594820 | 2019-08-24T02:13:26 | 2019-08-24T02:13:26 | 164,213,480 | 1 | 0 | null | 2022-12-08T01:48:34 | 2019-01-05T12:36:00 | Python | UTF-8 | Python | false | false | 1,515 | py | # Generated by Django 2.1.5 on 2019-07-25 12:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-07-25) for the `aricle` app:
    wires up the owner/category/tags relations on Tag, Category and Aricle.

    NOTE(review): this file is named 0002 yet sets `initial = True` while
    depending on aricle.0001_initial -- this is Django's "squashed pair"
    pattern emitted by makemigrations; verify before reordering migrations.
    """

    initial = True

    dependencies = [
        ('aricle', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='category',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='aricle',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='aricle.Category', verbose_name='分类'),
        ),
        migrations.AddField(
            model_name='aricle',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AddField(
            model_name='aricle',
            name='tags',
            field=models.ManyToManyField(related_name='posts', to='aricle.Tag', verbose_name='标签'),
        ),
    ]
| [
"929440925@qq.com"
] | 929440925@qq.com |
f316549e5a2ecc6bd4a40922f52af9d83adf665c | 55e79a84cc8f416ef354c9457f53ba0ddf1dde09 | /tweets/migrations/0003_auto_20200120_1407.py | 107537f578f2235605ad5eb7b08cfa7cced2601d | [] | no_license | montionugera/twitter-api-drf | fdb9935b924ca406a4d472b0d38a13c06988bd7d | e1a6e7d4e88b8946548f7c7a301061871e65206c | refs/heads/master | 2020-12-14T23:50:41.866101 | 2020-01-20T15:01:34 | 2020-01-20T15:01:34 | 234,916,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # Generated by Django 3.0.2 on 2020-01-20 14:07
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (2020-01-20) for the `tweets` app:
    orders Tweet querysets by creation time (oldest first)."""

    dependencies = [
        ('tweets', '0002_remove_tweet_pub_date'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='tweet',
            options={'ordering': ['created_at']},
        ),
    ]
| [
"montionugera@gmail.com"
] | montionugera@gmail.com |
e5b94ab47de93b8731f125af0e33149260abb4fe | 6a515e3eaec9ddc9a0f5d61a8295ef26b250a520 | /run_tests.py | 88d89d01c2be113fec3bb199ac59b13e114a55df | [] | no_license | EventsExpertsMIEM/backend_19288 | 88146d0e1e0140318040e2f9fc68da14e180a76b | f4a98b325366ef7bbdbe9243f9b742a6998d6da3 | refs/heads/dev | 2020-09-14T14:17:51.939454 | 2020-04-23T14:45:49 | 2020-04-23T14:45:49 | 223,153,250 | 1 | 1 | null | 2020-04-10T10:34:15 | 2019-11-21T10:56:04 | Python | UTF-8 | Python | false | false | 172 | py | from app import db
import bcrypt
import testsuite
# Bootstrap the schema with a bcrypt-hashed default password, then run
# the full test suite. (The hash is passed to create_tables as UTF-8 text.)
hashed_pw = bcrypt.hashpw(b'1234', bcrypt.gensalt()).decode('utf-8')
db.create_tables(hashed_pw)
testsuite.run()
| [
"mvalkhimovich@miem.hse.ru"
] | mvalkhimovich@miem.hse.ru |
80bd00d96dd1eb06ab47528bd9e4e22aa23bbe46 | 152eae1f0febe35268c65b80ac218480486f2123 | /py/my_collections/test/test_sorting.py | 44a05787466dec2edb9df712b10793728e81a659 | [] | no_license | Crescent617/code-practice | a02b65516d296db15e72b2c2d1412f5befd7034f | f3dd8a3cf0bc9b1d00ed37793d02f1a89d8d5a96 | refs/heads/master | 2023-06-28T02:30:30.987862 | 2023-06-22T14:41:08 | 2023-06-22T14:41:08 | 218,738,764 | 0 | 0 | null | 2023-06-22T14:41:09 | 2019-10-31T10:17:55 | Python | UTF-8 | Python | false | false | 1,378 | py | from sorting import *
from dllist import DoubleLinkedList
from random import randint
from cProfile import Profile
max_num = 5000
def random_list(count):
    """Build a DoubleLinkedList of `count` random ints in [0, 10000]."""
    lst = DoubleLinkedList()
    for _ in range(count):
        lst.shift(randint(0, 10000))
    return lst
def is_sorted(numbers):
    """Return True if the doubly linked list is in non-decreasing order."""
    node = numbers.begin.next
    while node is not None:
        if node.value < node.prev.value:
            return False
        node = node.next
    return True
def test_bubble():
    """Bubble sort leaves the linked list sorted."""
    lst = random_list(max_num)
    bubble_sort(lst)
    assert is_sorted(lst)
def test_merge():
    """Merge sort leaves the linked list sorted."""
    lst = random_list(max_num)
    merge_sort(lst)
    assert is_sorted(lst)
def test_quick():
    """Quick sort leaves the plain Python list sorted."""
    nums = [randint(0, 10000) for _ in range(max_num)]
    quick_sort(nums, 0, max_num - 1)
    assert all(nums[i - 1] <= nums[i] for i in range(1, max_num))
def test_all():
    """Run all three sorts on identical random data (used for profiling)."""
    nums = [randint(0, 10000) for _ in range(max_num)]
    nums_merge = DoubleLinkedList()
    nums_bubble = DoubleLinkedList()
    for value in nums:
        nums_merge.shift(value)
        nums_bubble.shift(value)
    quick_sort(nums, 0, max_num - 1)
    merge_sort(nums_merge)
    bubble_sort(nums_bubble)
if __name__ == '__main__':
    # Profile one full run of all three sorts and print cumulative times
    # for the functions defined in sorting.py.
    import pstats

    prof = Profile()
    prof.enable()
    test_all()
    prof.create_stats()  # also stops collection
    # BUGFIX: Profile.print_stats() accepts only a sort key, so the old
    # call print_stats('sorting.py', sort="cumulative") raised
    # TypeError ("multiple values for argument 'sort'"). Filtering by
    # file name requires going through pstats.Stats.
    pstats.Stats(prof).sort_stats("cumulative").print_stats('sorting.py')
| [
"lihuaru617@outlook.com"
] | lihuaru617@outlook.com |
dab971b8321388507ca5447b0771da8ff6b6cbe4 | 9ecfdfbe098070079c9d96eb41ddb73f95857f93 | /Problems/Fahrenheit/main.py | a79b42503b60d7e0b1dac928368c284aaae7d333 | [] | no_license | sathishkumar8594ys/Simple_Chatty_Bot | 0e850c616bc6dbd1a970596a3a6105d38960f59a | b07c148fa057bd3171a86e6bb456342fbfd38bfe | refs/heads/master | 2023-03-09T21:13:13.150854 | 2021-02-28T04:07:00 | 2021-02-28T04:07:00 | 343,017,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def fahrenheit_to_celsius(fahrenheit):
cel = ((fahrenheit - 32)*(5/9))
return round(cel, 3)
| [
"sk@kali"
] | sk@kali |
65ece97ccb16002fa54a5cd663cf837dc9ccdf3f | 96bf70c65bbca98f85112e09d51ca749eeeaeb6b | /selftf/tf_job/inception/slim/ops_test.py | fa6b41c009c903fcb6a101d5c69b220604dc8b40 | [] | no_license | MLSysTune/MLTuner | 13f3ad91ce243224bf54e4b1af0a39046c4c45cb | 82fbeadb64a476a6d37afc7f34bd29ca2627740e | refs/heads/master | 2023-08-01T08:17:08.112017 | 2021-07-30T12:19:19 | 2021-07-30T12:19:19 | 407,482,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,284 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from selftf.tf_job.inception.slim import ops
from selftf.tf_job.inception.slim import scopes
from selftf.tf_job.inception.slim import variables
class ConvTest(tf.test.TestCase):
  """Tests ops.conv2d: output shapes/op names, variable creation and reuse,
  weight-decay regularizers, and batch-norm integration via arg_scope."""

  def testCreateConv(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [3, 3])
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])

  def testCreateSquareConv(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, 3)
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])

  def testCreateConvWithTensorShape(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, images.get_shape()[1:3])
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])

  def testCreateFullyConv(self):
    # A VALID conv with kernel == spatial size collapses to 1x1 output.
    height, width = 6, 6
    with self.test_session():
      images = tf.random_uniform((5, height, width, 32), seed=1)
      output = ops.conv2d(images, 64, images.get_shape()[1:3], padding='VALID')
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])

  def testCreateVerticalConv(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [3, 1])
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height, width, 32])

  def testCreateHorizontalConv(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [1, 3])
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height, width, 32])

  def testCreateConvWithStride(self):
    height, width = 6, 6
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [3, 3], stride=2)
      self.assertEquals(output.op.name, 'Conv/Relu')
      self.assertListEqual(output.get_shape().as_list(),
                           [5, height/2, width/2, 32])

  def testCreateConvCreatesWeightsAndBiasesVars(self):
    height, width = 3, 3
    images = tf.random_uniform((5, height, width, 3), seed=1)
    with self.test_session():
      self.assertFalse(variables.get_variables('conv1/weights'))
      self.assertFalse(variables.get_variables('conv1/biases'))
      ops.conv2d(images, 32, [3, 3], scope='conv1')
      self.assertTrue(variables.get_variables('conv1/weights'))
      self.assertTrue(variables.get_variables('conv1/biases'))

  def testCreateConvWithScope(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [3, 3], scope='conv1')
      self.assertEquals(output.op.name, 'conv1/Relu')

  def testCreateConvWithoutActivation(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [3, 3], activation=None)
      # Without an activation the last op is the bias add.
      self.assertEquals(output.op.name, 'Conv/BiasAdd')

  def testCreateConvValid(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.conv2d(images, 32, [3, 3], padding='VALID')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])

  def testCreateConvWithWD(self):
    height, width = 3, 3
    with self.test_session() as sess:
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.conv2d(images, 32, [3, 3], weight_decay=0.01)
      wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEquals(wd.op.name,
                        'Conv/weights/Regularizer/L2Regularizer/value')
      sess.run(tf.global_variables_initializer())
      self.assertTrue(sess.run(wd) <= 0.01)

  def testCreateConvWithoutWD(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.conv2d(images, 32, [3, 3], weight_decay=0)
      self.assertEquals(
          tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])

  def testReuseVars(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.conv2d(images, 32, [3, 3], scope='conv1')
      self.assertEquals(len(variables.get_variables()), 2)
      ops.conv2d(images, 32, [3, 3], scope='conv1', reuse=True)
      self.assertEquals(len(variables.get_variables()), 2)

  def testNonReuseVars(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.conv2d(images, 32, [3, 3])
      self.assertEquals(len(variables.get_variables()), 2)
      ops.conv2d(images, 32, [3, 3])
      self.assertEquals(len(variables.get_variables()), 4)

  def testReuseConvWithWD(self):
    # Reuse must not duplicate the weight-decay regularizer either.
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
      self.assertEquals(len(variables.get_variables()), 2)
      self.assertEquals(
          len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
      ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1',
                 reuse=True)
      self.assertEquals(len(variables.get_variables()), 2)
      self.assertEquals(
          len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)

  def testConvWithBatchNorm(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 32), seed=1)
      with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
        net = ops.conv2d(images, 32, [3, 3])
        net = ops.conv2d(net, 32, [3, 3])
      self.assertEquals(len(variables.get_variables()), 8)
      self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
      self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 3)

  def testReuseConvWithBatchNorm(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 32), seed=1)
      with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
        net = ops.conv2d(images, 32, [3, 3], scope='Conv')
        net = ops.conv2d(net, 32, [3, 3], scope='Conv', reuse=True)
      self.assertEquals(len(variables.get_variables()), 4)
      self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
      self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 0)
class FCTest(tf.test.TestCase):
  """Tests ops.fc (fully-connected layer): output shapes/op names, variable
  creation and reuse, weight decay, and batch-norm integration."""

  def testCreateFC(self):
    height, width = 3, 3
    with self.test_session():
      inputs = tf.random_uniform((5, height * width * 3), seed=1)
      output = ops.fc(inputs, 32)
      self.assertEquals(output.op.name, 'FC/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, 32])

  def testCreateFCWithScope(self):
    height, width = 3, 3
    with self.test_session():
      inputs = tf.random_uniform((5, height * width * 3), seed=1)
      output = ops.fc(inputs, 32, scope='fc1')
      self.assertEquals(output.op.name, 'fc1/Relu')

  def testCreateFcCreatesWeightsAndBiasesVars(self):
    height, width = 3, 3
    inputs = tf.random_uniform((5, height * width * 3), seed=1)
    with self.test_session():
      self.assertFalse(variables.get_variables('fc1/weights'))
      self.assertFalse(variables.get_variables('fc1/biases'))
      ops.fc(inputs, 32, scope='fc1')
      self.assertTrue(variables.get_variables('fc1/weights'))
      self.assertTrue(variables.get_variables('fc1/biases'))

  def testReuseVars(self):
    height, width = 3, 3
    inputs = tf.random_uniform((5, height * width * 3), seed=1)
    with self.test_session():
      ops.fc(inputs, 32, scope='fc1')
      self.assertEquals(len(variables.get_variables('fc1')), 2)
      ops.fc(inputs, 32, scope='fc1', reuse=True)
      self.assertEquals(len(variables.get_variables('fc1')), 2)

  def testNonReuseVars(self):
    height, width = 3, 3
    inputs = tf.random_uniform((5, height * width * 3), seed=1)
    with self.test_session():
      ops.fc(inputs, 32)
      self.assertEquals(len(variables.get_variables('FC')), 2)
      ops.fc(inputs, 32)
      self.assertEquals(len(variables.get_variables('FC')), 4)

  def testCreateFCWithoutActivation(self):
    height, width = 3, 3
    with self.test_session():
      inputs = tf.random_uniform((5, height * width * 3), seed=1)
      output = ops.fc(inputs, 32, activation=None)
      # Without an activation the last op is the affine transform itself.
      self.assertEquals(output.op.name, 'FC/xw_plus_b')

  def testCreateFCWithWD(self):
    height, width = 3, 3
    with self.test_session() as sess:
      inputs = tf.random_uniform((5, height * width * 3), seed=1)
      ops.fc(inputs, 32, weight_decay=0.01)
      wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
      self.assertEquals(wd.op.name,
                        'FC/weights/Regularizer/L2Regularizer/value')
      sess.run(tf.global_variables_initializer())
      self.assertTrue(sess.run(wd) <= 0.01)

  def testCreateFCWithoutWD(self):
    height, width = 3, 3
    with self.test_session():
      inputs = tf.random_uniform((5, height * width * 3), seed=1)
      ops.fc(inputs, 32, weight_decay=0)
      self.assertEquals(
          tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])

  def testReuseFCWithWD(self):
    # Reuse must not duplicate the weight-decay regularizer either.
    height, width = 3, 3
    with self.test_session():
      inputs = tf.random_uniform((5, height * width * 3), seed=1)
      ops.fc(inputs, 32, weight_decay=0.01, scope='fc')
      self.assertEquals(len(variables.get_variables()), 2)
      self.assertEquals(
          len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
      ops.fc(inputs, 32, weight_decay=0.01, scope='fc', reuse=True)
      self.assertEquals(len(variables.get_variables()), 2)
      self.assertEquals(
          len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)

  def testFCWithBatchNorm(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height * width * 3), seed=1)
      with scopes.arg_scope([ops.fc], batch_norm_params={}):
        net = ops.fc(images, 27)
        net = ops.fc(net, 27)
      self.assertEquals(len(variables.get_variables()), 8)
      self.assertEquals(len(variables.get_variables('FC/BatchNorm')), 3)
      self.assertEquals(len(variables.get_variables('FC_1/BatchNorm')), 3)

  def testReuseFCWithBatchNorm(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height * width * 3), seed=1)
      with scopes.arg_scope([ops.fc], batch_norm_params={'decay': 0.9}):
        net = ops.fc(images, 27, scope='fc1')
        net = ops.fc(net, 27, scope='fc1', reuse=True)
      self.assertEquals(len(variables.get_variables()), 4)
      self.assertEquals(len(variables.get_variables('fc1/BatchNorm')), 3)
class MaxPoolTest(tf.test.TestCase):
  """Tests ops.max_pool output shapes under VALID/SAME padding and strides."""

  def testCreateMaxPool(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.max_pool(images, [3, 3])
      self.assertEquals(output.op.name, 'MaxPool/MaxPool')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testCreateSquareMaxPool(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.max_pool(images, 3)
      self.assertEquals(output.op.name, 'MaxPool/MaxPool')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testCreateMaxPoolWithScope(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.max_pool(images, [3, 3], scope='pool1')
      self.assertEquals(output.op.name, 'pool1/MaxPool')

  def testCreateMaxPoolSAME(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.max_pool(images, [3, 3], padding='SAME')
      self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])

  def testCreateMaxPoolStrideSAME(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])

  def testGlobalMaxPool(self):
    # Pooling over the full spatial extent gives a 1x1 feature map.
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.max_pool(images, images.get_shape()[1:3], stride=1)
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class AvgPoolTest(tf.test.TestCase):
  """Tests ops.avg_pool output shapes under VALID/SAME padding and strides
  (mirrors MaxPoolTest)."""

  def testCreateAvgPool(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, [3, 3])
      self.assertEquals(output.op.name, 'AvgPool/AvgPool')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testCreateSquareAvgPool(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, 3)
      self.assertEquals(output.op.name, 'AvgPool/AvgPool')
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])

  def testCreateAvgPoolWithScope(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, [3, 3], scope='pool1')
      self.assertEquals(output.op.name, 'pool1/AvgPool')

  def testCreateAvgPoolSAME(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, [3, 3], padding='SAME')
      self.assertListEqual(output.get_shape().as_list(), [5, 2, 2, 3])

  def testCreateAvgPoolStrideSAME(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])

  def testGlobalAvgPool(self):
    # Pooling over the full spatial extent gives a 1x1 feature map.
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, images.get_shape()[1:3], stride=1)
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
class OneHotEncodingTest(tf.test.TestCase):
  """Tests ops.one_hot_encoding: output op name/shape and encoded values."""

  def testOneHotEncodingCreate(self):
    with self.test_session():
      labels = tf.constant([0, 1, 2])
      output = ops.one_hot_encoding(labels, num_classes=3)
      self.assertEquals(output.op.name, 'OneHotEncoding/SparseToDense')
      self.assertListEqual(output.get_shape().as_list(), [3, 3])

  def testOneHotEncoding(self):
    with self.test_session():
      labels = tf.constant([0, 1, 2])
      one_hot_labels = tf.constant([[1, 0, 0],
                                    [0, 1, 0],
                                    [0, 0, 1]])
      output = ops.one_hot_encoding(labels, num_classes=3)
      self.assertAllClose(output.eval(), one_hot_labels.eval())
class DropoutTest(tf.test.TestCase):
  """Tests ops.dropout: active during training, identity when not."""

  def testCreateDropout(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.dropout(images)
      self.assertEquals(output.op.name, 'Dropout/dropout/mul')
      output.get_shape().assert_is_compatible_with(images.get_shape())

  def testCreateDropoutNoTraining(self):
    # With is_training=False dropout is a no-op and returns its input tensor.
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
      output = ops.dropout(images, is_training=False)
      self.assertEquals(output, images)
class FlattenTest(tf.test.TestCase):
  """Tests ops.flatten: element count and batch dimension are preserved for
  3-D/4-D inputs and for an unknown (None) batch size."""

  def testFlatten4D(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
      output = ops.flatten(images)
      self.assertEquals(output.get_shape().num_elements(),
                        images.get_shape().num_elements())
      self.assertEqual(output.get_shape()[0], images.get_shape()[0])

  def testFlatten3D(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width), seed=1, name='images')
      output = ops.flatten(images)
      self.assertEquals(output.get_shape().num_elements(),
                        images.get_shape().num_elements())
      self.assertEqual(output.get_shape()[0], images.get_shape()[0])

  def testFlattenBatchSize(self):
    # Static shape keeps batch as None; the run-time result has the real size.
    height, width = 3, 3
    with self.test_session() as sess:
      images = tf.random_uniform((5, height, width, 3), seed=1, name='images')
      inputs = tf.placeholder(tf.int32, (None, height, width, 3))
      output = ops.flatten(inputs)
      self.assertEquals(output.get_shape().as_list(),
                        [None, height * width * 3])
      output = sess.run(output, {inputs: images.eval()})
      self.assertEquals(output.size,
                        images.get_shape().num_elements())
      self.assertEqual(output.shape[0], images.get_shape()[0])
class BatchNormTest(tf.test.TestCase):
def testCreateOp(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
output = ops.batch_norm(images)
self.assertTrue(output.op.name.startswith('BatchNorm/batchnorm'))
self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
def testCreateVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images)
beta = variables.get_variables_by_name('beta')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(gamma, [])
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True)
beta = variables.get_variables_by_name('beta')[0]
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEquals(beta.op.name, 'BatchNorm/beta')
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithoutCenterWithScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, center=False, scale=True)
beta = variables.get_variables_by_name('beta')
self.assertEquals(beta, [])
gamma = variables.get_variables_by_name('gamma')[0]
self.assertEquals(gamma.op.name, 'BatchNorm/gamma')
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testCreateVariablesWithoutCenterWithoutScale(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, center=False, scale=False)
beta = variables.get_variables_by_name('beta')
self.assertEquals(beta, [])
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(gamma, [])
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testMovingAverageVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True)
moving_mean = tf.moving_average_variables()[0]
moving_variance = tf.moving_average_variables()[1]
self.assertEquals(moving_mean.op.name, 'BatchNorm/moving_mean')
self.assertEquals(moving_variance.op.name, 'BatchNorm/moving_variance')
def testUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
update_moving_mean = update_ops[0]
update_moving_variance = update_ops[1]
self.assertEquals(update_moving_mean.op.name,
'BatchNorm/AssignMovingAvg')
self.assertEquals(update_moving_variance.op.name,
'BatchNorm/AssignMovingAvg_1')
def testReuseVariables(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scale=True, scope='bn')
ops.batch_norm(images, scale=True, scope='bn', reuse=True)
beta = variables.get_variables_by_name('beta')
gamma = variables.get_variables_by_name('gamma')
self.assertEquals(len(beta), 1)
self.assertEquals(len(gamma), 1)
moving_vars = tf.get_collection('moving_vars')
self.assertEquals(len(moving_vars), 2)
def testReuseUpdateOps(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
ops.batch_norm(images, scope='bn')
self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 2)
ops.batch_norm(images, scope='bn', reuse=True)
self.assertEquals(len(tf.get_collection(ops.UPDATE_OPS_COLLECTION)), 4)
def testCreateMovingVars(self):
height, width = 3, 3
with self.test_session():
images = tf.random_uniform((5, height, width, 3), seed=1)
_ = ops.batch_norm(images, moving_vars='moving_vars')
moving_mean = tf.get_collection('moving_vars',
'BatchNorm/moving_mean')
self.assertEquals(len(moving_mean), 1)
self.assertEquals(moving_mean[0].op.name, 'BatchNorm/moving_mean')
moving_variance = tf.get_collection('moving_vars',
'BatchNorm/moving_variance')
self.assertEquals(len(moving_variance), 1)
self.assertEquals(moving_variance[0].op.name, 'BatchNorm/moving_variance')
def testComputeMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
for _ in range(10):
sess.run([output])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEvalMovingVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assigment from saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testReuseVars(self):
height, width = 3, 3
with self.test_session() as sess:
image_shape = (10, height, width, 3)
image_values = np.random.rand(*image_shape)
expected_mean = np.mean(image_values, axis=(0, 1, 2))
expected_var = np.var(image_values, axis=(0, 1, 2))
images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
output = ops.batch_norm(images, decay=0.1, is_training=False)
update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
with tf.control_dependencies(update_ops):
output = tf.identity(output)
# Initialize all variables
sess.run(tf.global_variables_initializer())
moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 3)
self.assertAllClose(variance, [1] * 3)
# Simulate assigment from saver restore.
init_assigns = [tf.assign(moving_mean, expected_mean),
tf.assign(moving_variance, expected_var)]
sess.run(init_assigns)
for _ in range(10):
sess.run([output], {images: np.random.rand(*image_shape)})
mean = moving_mean.eval()
variance = moving_variance.eval()
# Although we feed different images, the moving_mean and moving_variance
# shouldn't change.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
if __name__ == '__main__':
tf.test.main()
| [
"fafaoc@me.com"
] | fafaoc@me.com |
e063920acaa40258008dba8ae5ed79c9bd2b66b7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ENTERASYS-VLAN-AUTHORIZATION-MIB.py | c846ff1eb1ce291ffe2d355f4fb5cea046a7128a | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 12,071 | py | #
# PySNMP MIB module ENTERASYS-VLAN-AUTHORIZATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-VLAN-AUTHORIZATION-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:04:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
dot1dBasePortEntry, = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBasePortEntry")
etsysModules, = mibBuilder.importSymbols("ENTERASYS-MIB-NAMES", "etsysModules")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibIdentifier, Bits, NotificationType, IpAddress, TimeTicks, Counter64, iso, Integer32, Counter32, ObjectIdentity, Unsigned32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "NotificationType", "IpAddress", "TimeTicks", "Counter64", "iso", "Integer32", "Counter32", "ObjectIdentity", "Unsigned32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
etsysVlanAuthorizationMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48))
etsysVlanAuthorizationMIB.setRevisions(('2004-06-02 19:22',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setRevisionsDescriptions(('The initial version of this MIB module',))
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setLastUpdated('200406021922Z')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setOrganization('Enterasys Networks, Inc')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setContactInfo('Postal: Enterasys Networks, Inc. 50 Minuteman Rd. Andover, MA 01810-1008 USA Phone: +1 978 684 1000 E-mail: support@enterasys.com WWW: http://www.enterasys.com')
if mibBuilder.loadTexts: etsysVlanAuthorizationMIB.setDescription("This MIB module defines a portion of the SNMP MIB under Enterasys Networks' enterprise OID pertaining to proprietary extensions to the IETF Q-BRIDGE-MIB, as specified in RFC2674, pertaining to VLAN authorization, as specified in RFC3580. Specifically, the enabling and disabling of support for the VLAN Tunnel-Type attribute returned from a RADIUS authentication, and how that attribute is applied to the port which initiated the authentication.")
class VlanAuthEgressStatus(TextualConvention, Integer32):
description = 'The possible egress configurations which may be applied in response to a successful authentication. none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. dynamic(4) The authenticating port will use information returned in the authentication response to modify the current egress lists.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("none", 1), ("tagged", 2), ("untagged", 3), ("dynamic", 4))
etsysVlanAuthorizationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1))
etsysVlanAuthorizationSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 1))
etsysVlanAuthorizationPorts = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2))
etsysVlanAuthorizationEnable = MibScalar((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 1, 1), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationEnable.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationEnable.setDescription('The enable/disable state for the VLAN authorization feature. When disabled, no modifications to the VLAN attributes related to packet switching should be enforced.')
etsysVlanAuthorizationTable = MibTable((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1), )
if mibBuilder.loadTexts: etsysVlanAuthorizationTable.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationTable.setDescription('Extensions to the table that contains information about every port that is associated with this transparent bridge.')
etsysVlanAuthorizationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1), )
dot1dBasePortEntry.registerAugmentions(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationEntry"))
etsysVlanAuthorizationEntry.setIndexNames(*dot1dBasePortEntry.getIndexNames())
if mibBuilder.loadTexts: etsysVlanAuthorizationEntry.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationEntry.setDescription('A list of extensions that support the management of proprietary features for each port of a transparent bridge. This is indexed by dot1dBasePort.')
etsysVlanAuthorizationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 1), EnabledStatus().clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationStatus.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationStatus.setDescription('The enabled/disabled status for the application of VLAN authorization on this port, if disabled, the information returned in the VLAN-Tunnel-Type from the authentication will not be applied to the port (although it should be represented in this table). If enabled, those results will be applied to the port.')
etsysVlanAuthorizationAdminEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 2), VlanAuthEgressStatus().clone('untagged')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: etsysVlanAuthorizationAdminEgress.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationAdminEgress.setDescription('Controls the modification of the current vlan egress list (of the vlan returned in the VLAN-Tunnel-Type, and reported by etsysVlanAuthorizationVlanID) upon successful authentication in the following manner: none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. dynamic(4) The authenticating port will use information returned in the authentication response to modify the current egress lists. This value is supported only if the device supports a mechanism through which the egress status may be returned through the RADIUS response. Should etsysVlanAuthorizationEnable become disabled, etsysVlanAuthorizationStatus become disabled for a port, or should etsysVlanAuthorizationVlanID become 0 or 4095, all effect on the port egress MUST be removed.')
etsysVlanAuthorizationOperEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 3), VlanAuthEgressStatus().clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysVlanAuthorizationOperEgress.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationOperEgress.setDescription('Reports the current state of modification to the current vlan egress list (of the vlan returned in the VLAN-Tunnel-Type) upon successful authentication, if etsysVlanAuthorizationStatus is enabled, in the following manner: none(1) No egress manipulation will be made. tagged(2) The authenticating port will be added to the current egress for the VLAN-ID returned. untagged(3) The authenticating port will be added to the current untagged egress for the VLAN-ID returned. The purpose of this leaf is to report, specifically when etsysVlanAuthorizationAdminEgress has been set to dynamic(4), the currently enforced egress modification. If the port is unauthenticated, or no VLAN-ID has been applied, this leaf should return none(1).')
etsysVlanAuthorizationVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 4094), ValueRangeConstraint(4095, 4095), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: etsysVlanAuthorizationVlanID.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationVlanID.setDescription('The 12 bit VLAN identifier for a given port, used to override the PVID of the given port, obtained as a result of an authentication. A value of zero indicates that there is no authenticated VLAN ID for the given port. Should a port become unauthenticated this value MUST be returned to zero. A value of 4095 indicates that a the port has been authenticated, but that the VLAN returned could not be applied to the port (possibly because of resource constraints or misconfiguration). In this instance, the original PVID should still be applied. Should the feature become disabled or the session terminate, all effect on the Port VLAN ID MUST be removed.')
etsysVlanAuthorizationConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2))
etsysVlanAuthorizationGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 1))
etsysVlanAuthorizationCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 2))
etsysVlanAuthorizationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 1, 1)).setObjects(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationEnable"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationStatus"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationAdminEgress"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationOperEgress"), ("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationVlanID"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysVlanAuthorizationGroup = etsysVlanAuthorizationGroup.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationGroup.setDescription('A collection of objects relating to VLAN Authorization.')
etsysVlanAuthorizationCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 5624, 1, 2, 48, 2, 2, 1)).setObjects(("ENTERASYS-VLAN-AUTHORIZATION-MIB", "etsysVlanAuthorizationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
etsysVlanAuthorizationCompliance = etsysVlanAuthorizationCompliance.setStatus('current')
if mibBuilder.loadTexts: etsysVlanAuthorizationCompliance.setDescription('The compliance statement for devices that support the Enterasys VLAN Authorization MIB.')
mibBuilder.exportSymbols("ENTERASYS-VLAN-AUTHORIZATION-MIB", etsysVlanAuthorizationVlanID=etsysVlanAuthorizationVlanID, etsysVlanAuthorizationGroup=etsysVlanAuthorizationGroup, etsysVlanAuthorizationEnable=etsysVlanAuthorizationEnable, etsysVlanAuthorizationOperEgress=etsysVlanAuthorizationOperEgress, etsysVlanAuthorizationAdminEgress=etsysVlanAuthorizationAdminEgress, etsysVlanAuthorizationConformance=etsysVlanAuthorizationConformance, VlanAuthEgressStatus=VlanAuthEgressStatus, etsysVlanAuthorizationPorts=etsysVlanAuthorizationPorts, etsysVlanAuthorizationStatus=etsysVlanAuthorizationStatus, etsysVlanAuthorizationCompliance=etsysVlanAuthorizationCompliance, etsysVlanAuthorizationMIB=etsysVlanAuthorizationMIB, etsysVlanAuthorizationGroups=etsysVlanAuthorizationGroups, etsysVlanAuthorizationObjects=etsysVlanAuthorizationObjects, etsysVlanAuthorizationTable=etsysVlanAuthorizationTable, etsysVlanAuthorizationSystem=etsysVlanAuthorizationSystem, etsysVlanAuthorizationEntry=etsysVlanAuthorizationEntry, etsysVlanAuthorizationCompliances=etsysVlanAuthorizationCompliances, PYSNMP_MODULE_ID=etsysVlanAuthorizationMIB)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
00ff92f5206a95948024ec7a6e5ea5fa74bdedc7 | d5f31dbe958d5e8ddcf0dd042050925b5206c7c7 | /.vscode/寻找空间中的向量投影.py | 23e5f2dd1e0d29100620dc3deb4baaeee142ffec | [] | no_license | dertaek/a-b-test-projects | b88910ffca421b8b5d3c47d84142ebe6fa6f0239 | 5637b833064b8992d9eb7ba115477b7a9723d492 | refs/heads/master | 2023-03-17T00:45:30.337383 | 2020-10-26T05:26:11 | 2020-10-26T05:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | # 利用矩阵来描述投影。
| [
"w664578037@gmial.com"
] | w664578037@gmial.com |
9258811529068e0ef737d4531c5f0d6ea7426561 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/1692.number-of-ways-to-reorder-array-to-get-same-bst/1692.number-of-ways-to-reorder-array-to-get-same-bst.py | 00644cb83012c5b2e15d2232e9f6f7f861427b4f | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | class Solution:
def numOfWays(self, nums: List[int]) -> int:
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
e19cb658c575b3bf49becb8695d95500b966fddc | 0967182e0b2c59448305870aaa193e051dd0eafa | /visualizer.py | 79f1191df0144cfdc55d0ebf0481a7b094b9d15b | [] | no_license | DT021/fake-tradingview | 43dcd483328193fb7d401b783bfa390c02c539d2 | 4c1c2ba1a58263c85545ac11abff555fa747c09a | refs/heads/master | 2023-01-30T22:15:48.790656 | 2020-12-04T15:06:23 | 2020-12-04T15:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | import datetime
import logging
import time
import numpy as np
import pyqtgraph as pg
from pyqtgraph import QtCore, QtGui
from pyqtgraph.dockarea import *
from candlestickItem import CandlestickItem
from utils import Worker, logger
from volumeProfileItem import VolumeProfileItem
class Visualizer(DockArea):
def __init__(self, parent):
super().__init__()
self.db = parent.db
# Candlestick init
self.candlestick = CandlestickItem(self.db)
self.volumeProfile = VolumeProfileItem(self.db)
self.volumeProfile.onUpdate.connect(self.candlestick.update)
self.candlestickWidget = pg.PlotWidget(
self, axisItems={"bottom": pg.DateAxisItem()}
)
self.candlestickWidget.addItem(self.candlestick)
self.candlestickWidget.addItem(self.volumeProfile)
self.candlestickWidget.setMouseEnabled(x=True, y=False)
self.candlestickWidget.enableAutoRange(x=False, y=True)
self.candlestickWidget.setAutoVisible(x=False, y=True)
self.candlestickWidget.showAxis("right")
self.candlestickWidget.hideAxis("left")
self.candlestickWidget.showGrid(True, True, 0.2)
self.candlestickWidget.scene().sigMouseMoved.connect(
self.candlestick.onMouseMoved
)
# Add dock
self.d = Dock("OHLC", widget=self.candlestickWidget)
self.addDock(self.d)
def setIndex(self, index):
worker = Worker(self.candlestick.setIndex, index)
QtCore.QThreadPool.globalInstance().start(worker)
self.volumeProfile.removeAll()
def setInterval(self, interval):
worker = Worker(self.candlestick.setInterval, interval)
QtCore.QThreadPool.globalInstance().start(worker)
def refresh(self):
self.candlestick.refresh()
| [
"minh020698@gmail.com"
] | minh020698@gmail.com |
39b208a82a1ddf4a4bdc0162879768521bb9893d | 30b0b79b5d5258aefbeb4faf129483fae456c9fa | /shoe/models.py | 40a94f5d75503d82fd4a3ab753daf5b3f513b6dc | [] | no_license | arturAdr/easyResource | bcee8fefeffb13bc2de648a2d0220eb3dc1b1d71 | ffff2818c481191a0133b8b44b9b3e53f9de9e94 | refs/heads/master | 2020-06-10T08:44:01.352198 | 2019-06-30T02:19:45 | 2019-06-30T02:19:45 | 193,625,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models
from EasyResource.fields import JSONSchemaField
class Shoe(models.Model):
sku = models.CharField(max_length=100, unique=True)
name = models.CharField(max_length=100)
details = models.CharField(max_length=5000)
informations = JSONField()
tags = ArrayField(models.CharField(max_length=200), blank=True)
price = models.FloatField()
sizes = JSONSchemaField(schema = {
"type": "array",
"items": {
"type": "object",
"required": [
"size",
"available_quantity"
],
"additionalProperties": False,
"properties": {
"size": {
"type": "integer",
},
"available_quantity": {
"type": "integer"
}
}
}
})
class Meta:
verbose_name = u'Shoe'
verbose_name_plural = u'Shoes'
def __str__(self):
return self.name | [
"artur.adr@hotmail.com"
] | artur.adr@hotmail.com |
1e4bc752b2c1a4e95cfc85a70366502bdad4f7cf | dfc4dc5d823dada86216bc7df451127bffab00bb | /authors/apps/products/views.py | f7e50a3347930b55f6672439a7e79b8ee513a0c9 | [
"BSD-3-Clause"
] | permissive | hoslack/jua-kali_Backend | 311250360574495052adab9267dc7c07f48ba0e7 | e0e92aa0287c4a17b303fdde941f457b28c51223 | refs/heads/master | 2022-12-21T02:17:35.808370 | 2019-02-05T12:26:05 | 2019-02-05T12:26:05 | 158,075,591 | 0 | 0 | BSD-3-Clause | 2022-12-08T01:20:29 | 2018-11-18T11:13:48 | Python | UTF-8 | Python | false | false | 374 | py | from .models import Product
from .serializers import ProductSerializer
from rest_framework import generics
class ProductList(generics.ListCreateAPIView):
queryset = Product.objects.all()
serializer_class = ProductSerializer
class ProductDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Product.objects.all()
serializer_class = ProductSerializer
| [
"hoslackochieng@gmail.com"
] | hoslackochieng@gmail.com |
315d23bf96cfe201a6c72c58d0333896da2bea03 | 649eabe3d4bef9c866c2884474f58c997a64f8d5 | /papers.py | 4dec4263add2da77a2e1abc163a3cbb8e88b5b83 | [] | no_license | ValeriaFakhrutd/Memory_model | 091c78a537b5ea4f7391c0d686c976711f43d339 | 62d787eb6e5d02899c727d28d401fb0d169ebede | refs/heads/master | 2022-12-07T12:31:54.710748 | 2020-09-03T21:25:13 | 2020-09-03T21:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,178 | py | """
=== Module Description ===
This module contains a new class, PaperTree, which is used to model data on
publications in a particular area of Computer Science Education research.
This data is adapted from a dataset presented at SIGCSE 2019.
You can find the full dataset here: https://www.brettbecker.com/sigcse2019/
Although this data is very different from filesystem data, it is still
hierarchical. This means we are able to model it using a TMTree subclass,
and we can then run it through our treemap visualisation tool to get a nice
interactive graphical representation of this data.
"""
import csv
from typing import List, Dict
from tm_trees import TMTree
# Filename for the dataset
DATA_FILE = 'cs1_papers.csv'
class PaperTree(TMTree):
"""A tree representation of Computer Science Education research paper data.
=== Private Attributes ===
_authors:
authors of the paper, does not keep any information for categories.
_doi:
link of the paper, '' for the categories
_by_year:
stores in formation if sorting is doing by year or not.
These should store information about this paper's <authors> and <doi>.
=== Inherited Attributes ===
rect:
The pygame rectangle representing this node in the treemap
visualization.
data_size:
The size of the data represented by this tree.
_colour:
The RGB colour value of the root of this tree.
_name:
The root value of this tree, or None if this tree is empty.
_subtrees:
The subtrees of this tree.
_parent_tree:
The parent tree of this tree; i.e., the tree that contains this tree
as a subtree, or None if this tree is not part of a larger tree.
_expanded:
Whether or not this tree is considered expanded for visualization.
=== Representation Invariants ===
- All TMTree RIs are inherited.
"""
_authors: str
_doi: str
_by_year: bool
def __init__(self, name: str, subtrees: List[TMTree], authors: str = '',
doi: str = '', citations: int = 0, by_year: bool = True,
all_papers: bool = False) -> None:
"""Initialize a new PaperTree with the given <name> and <subtrees>,
<authors> and <doi>, and with <citations> as the size of the data.
If <all_papers> is True, then this tree is to be the root of the paper
tree. In that case, load data about papers from DATA_FILE to build the
tree.
If <all_papers> is False, Do NOT load new data.
<by_year> indicates whether or not the first level of subtrees should be
the years, followed by each category, subcategory, and so on. If
<by_year> is False, then the year in the dataset is simply ignored.
"""
if subtrees == []:
TMTree.__init__(self, name, subtrees, citations) # i.e our file is a
self._doi = doi
self._authors = authors
# self._citation = citations ### Data_size
self._by_year = by_year
if not all_papers and subtrees != []:
TMTree.__init__(self, name, subtrees, citations)
self._doi = doi
self._authors = authors
self._by_year = by_year
if all_papers:
x = _get_paper_list(by_year)
subtrees = _build_tree_from_dict(x)
TMTree.__init__(self, name, subtrees, citations)
self._doi = doi
self._authors = authors
self._by_year = by_year
def get_separator(self) -> str:
"""Return the file separator for this OS.
"""
return ": "
def get_suffix(self) -> str:
"""Return the final descriptor of this tree.
"""
if len(self._subtrees) == 0:
return '(paper)'
else:
return '(category)'
def _build_tree_from_dict(nested_dict: Dict) -> List[PaperTree]:
"""Return a list of trees from the nested dictionary <nested_dict>.
"""
lst = []
for items in nested_dict:
if nested_dict[items] == {}:
temp_tree = PaperTree(items[1], [],
items[0], items[2],
items[3],
False, False)
# put here data for authors, size ect
lst.append(temp_tree)
else:
temp_tree = PaperTree(items,
_build_tree_from_dict(nested_dict[items]),
by_year=False, all_papers=False)
temp_tree.update_data_sizes()
lst.append(temp_tree)
return lst
def _get_paper_list(by_year: bool) -> dict:
"""
'hey'
"""
dic = {}
with open(DATA_FILE, newline='') as csv_file:
csv_file.readline()
reader = csv.reader(csv_file)
for line in reader:
author, title, year, categories, url, size = line
size = int(size)
# year = int(year)
categories = categories.split(":")
for i in range(len(categories)):
categories[i] = categories[i].strip()
tup1 = (author, title, url, size)
categories.append(tup1)
if by_year:
categories.insert(0, year)
new = categories
dic = _convert_dict(new, dic)
# print(dic)
csv_file.close()
return dic
def _convert_dict(lst: List, dics: Dict) -> Dict:
if len(lst) == 0:
pass
elif len(lst) == 1:
if lst[0] in dics:
pass
else:
d = {lst[0]: {}}
dics.update(d)
else:
if lst[0] in dics:
dics[lst[0]] = _convert_dict(lst[1:], dics[lst[0]])
else:
dics[lst[0]] = _convert_dict(lst[1:], {})
return dics
if __name__ == '__main__':
import python_ta
python_ta.check_all(config={
'allowed-import-modules': ['python_ta', 'typing', 'csv', 'tm_trees'],
'allowed-io': ['_load_papers_to_dict', '_get_paper_list'],
'max-args': 8
})
# x = _get_paper_list()
# y = _build_tree_from_dict(x)
# print(y)
| [
"valeriia.fakhrutdinova@mail.utoronto.ca"
] | valeriia.fakhrutdinova@mail.utoronto.ca |
fc72058027cff3d6df1073e107bb3a426e164f7b | 85b6e009c45f2dd530d8ae186feb7e6e67d076a8 | /cohesity_management_sdk/models/protection_job_request.py | 3109e3d98f4406b033242dbb266e3567bd18c46e | [
"MIT"
] | permissive | priyambiswas0/management-sdk-python | 4a60153b038d0a04de02f2308362a2531b0ff9cb | 5807c85e003f271ce069b52529b31abfd08ec153 | refs/heads/master | 2021-10-20T05:43:34.626369 | 2018-05-22T06:04:20 | 2019-02-25T23:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,335 | py | # Copyright 2019 Cohesity Inc.
# -*- coding: utf-8 -*-
import cohesity_management_sdk.models.alerting_config
import cohesity_management_sdk.models.cloud_parameters
import cohesity_management_sdk.models.environment_specific_common_job_parameters
import cohesity_management_sdk.models.time_of_day
import cohesity_management_sdk.models.indexing_policy
import cohesity_management_sdk.models.backup_script
import cohesity_management_sdk.models.remote_adapter
import cohesity_management_sdk.models.source_special_parameters
class ProtectionJobRequest(object):
"""Implementation of the 'Protection Job Request.' model.
Specifies information about a Protection Job.
Attributes:
abort_in_blackout_period (bool): If true, the Cohesity Cluster aborts
any currently executing Job Runs of this Protection Job when a
blackout period specified for this Job starts, even if the Job Run
started before the blackout period began. If false, a Job Run
continues to execute, if the Job Run started before the blackout
period starts.
alerting_config (AlertingConfig): Specifies optional settings for
alerting.
alerting_policy (list of AlertingPolicyEnum): Array of Job Events.
During Job Runs, the following Job Events are generated: 1) Job
succeeds 2) Job fails 3) Job violates the SLA These Job Events can
cause Alerts to be generated. 'kSuccess' means the Protection Job
succeeded. 'kFailure' means the Protection Job failed.
'kSlaViolation' means the Protection Job took longer than the time
period specified in the SLA.
cloud_parameters (CloudParameters): Specifies Cloud parameters that
are applicable to all Protection Sources in a Protection Job in
certain scenarios.
continue_on_quiesce_failure (bool): Whether to continue backing up on
quiesce failure.
dedup_disabled_source_ids (list of long|int): List of source ids for
which source side dedup is disabled from the backup job.
description (string): Specifies a text description about the
Protection Job.
end_time_usecs (long|int): Specifies the epoch time (in microseconds)
after which the Protection Job becomes dormant.
environment (Environment10Enum): Specifies the environment type (such
as kVMware or kSQL) of the Protection Source this Job is
protecting. Supported environment types such as 'kView', 'kSQL',
'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote
Adapter. 'kVMware' indicates the VMware Protection Source
environment. 'kHyperV' indicates the HyperV Protection Source
environment. 'kSQL' indicates the SQL Protection Source
environment. 'kView' indicates the View Protection Source
environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter.
'kPhysical' indicates the physical Protection Source environment.
'kPure' indicates the Pure Storage Protection Source environment.
'kAzure' indicates the Microsoft's Azure Protection Source
environment. 'kNetapp' indicates the Netapp Protection Source
environment. 'kAgent' indicates the Agent Protection Source
environment. 'kGenericNas' indicates the Genreric Network Attached
Storage Protection Source environment. 'kAcropolis' indicates the
Acropolis Protection Source environment. 'kPhsicalFiles' indicates
the Physical Files Protection Source environment. 'kIsilon'
indicates the Dell EMC's Isilon Protection Source environment.
'kKVM' indicates the KVM Protection Source environment. 'kAWS'
indicates the AWS Protection Source environment. 'kExchange'
indicates the Exchange Protection Source environment. 'kHyperVVSS'
indicates the HyperV VSS Protection Source environment. 'kOracle'
indicates the Oracle Protection Source environment. 'kGCP'
indicates the Google Cloud Platform Protection Source environment.
'kFlashBlade' indicates the Flash Blade Protection Source
environment. 'kAWSNative' indicates the AWS Native Protection
Source environment. 'kVCD' indicates the VMware's Virtual cloud
Director Protection Source environment. 'kO365' indicates the
Office 365 Protection Source environment. 'kO365Outlook' indicates
Office 365 outlook Protection Source environment. 'kHyperFlex'
indicates the Hyper Flex Protection Source environment.
'kGCPNative' indicates the GCP Native Protection Source
environment. 'kAzureNative' indicates the Azure Native Protection
Source environment.
environment_parameters (EnvironmentSpecificCommonJobParameters):
Specifies additional parameters that are common to all Protection
Sources in a Protection Job created for a particular environment
type.
exclude_source_ids (list of long|int): Array of Excluded Source
Objects. List of Object ids from a Protection Source that should
not be protected and are excluded from being backed up by the
Protection Job. Leaf and non-leaf Objects may be in this list and
an Object in this list must have an ancestor in the sourceId
list.
exclude_vm_tag_ids (list of long|int): Array of Arrays of VM Tag Ids
that Specify VMs to Exclude. Optionally specify a list of VMs to
exclude from protecting by listing Protection Source ids of VM
Tags in this two dimensional array. Using this two dimensional
array of Tag ids, the Cluster generates a list of VMs to exclude
from protecting, which are derived from intersections of the inner
arrays and union of the outer array, as shown by the following
example. For example a Datacenter is selected to be protected but
you want to exclude all the 'Former Employees' VMs in the East and
West but keep all the VMs for 'Former Employees' in the South
which are also stored in this Datacenter, by specifying the
following tag id array: [ [1000, 2221], [1000, 3031] ], where 1000
is the 'Former Employee' VM Tag id, 2221 is the 'East' VM Tag id
and 3031 is the 'West' VM Tag id. The first inner array [1000,
2221] produces a list of VMs that are both tagged with 'Former
Employees' and 'East' (an intersection). The second inner array
[1000, 3031] produces a list of VMs that are both tagged with
'Former Employees' and 'West' (an intersection). The outer array
combines the list of VMs from the two inner arrays. The list of
resulting VMs are excluded from being protected this Job.
full_protection_sla_time_mins (long|int): If specified, this setting
is number of minutes that a Job Run of a Full (no CBT) backup
schedule is expected to complete, which is known as a
Service-Level Agreement (SLA). A SLA violation is reported when
the run time of a Job Run exceeds the SLA time period specified
for this backup schedule.
full_protection_start_time (TimeOfDay): Specifies the time of day to
start the Full Protection Schedule. This is optional and only
applicable if the Protection Policy defines a monthly or a daily
Full (no CBT) Protection Schedule. Default value is 02:00 AM.
deprecated: true
incremental_protection_sla_time_mins (long|int): If specified, this
setting is number of minutes that a Job Run of a CBT-based backup
schedule is expected to complete, which is known as a
Service-Level Agreement (SLA). A SLA violation is reported when
the run time of a Job Run exceeds the SLA time period specified
for this backup schedule.
incremental_protection_start_time (TimeOfDay): Specifies the time of
day to start the CBT-based Protection Schedule. This is optional
and only applicable if the Protection Policy defines a monthly or
a daily CBT-based Protection Schedule. Default value is 02:00 AM.
deprecated: true
indexing_policy (IndexingPolicy): Specifies settings for indexing
files found in an Object (such as a VM) so these files can be
searched and recovered. This also specifies inclusion and
exclusion rules that determine the directories to index.
leverage_storage_snapshots (bool): Specifies whether to leverage the
storage array based snapshots for this backup job. To leverage
storage snapshots, the storage array has to be registered as a
source. If storage based snapshots can not be taken, job will
fallback to the default backup method.
leverage_storage_snapshots_for_hyperflex (bool): Specifies whether to
leverage Hyperflex as the storage snapshot array
name (string): Specifies the name of the Protection Job.
parent_source_id (long|int): Specifies the id of the registered
Protection Source that is the parent of the Objects that may be
protected by this Job. For example when a vCenter Server is
registered on a Cohesity Cluster, the Cohesity Cluster assigns a
unique id to this field that represents the vCenter Server.
perform_source_side_dedup (bool): Specifies whether source side dedupe
should be performed or not.
policy_id (string): Specifies the unique id of the Protection Policy
associated with the Protection Job. The Policy provides retry
settings, Protection Schedules, Priority, SLA, etc. The Job
defines the Storage Domain (View Box), the Objects to Protect (if
applicable), Start Time, Indexing settings, etc.
post_backup_script (BackupScript): Specifies the script associated
with the backup job. This field must be specified for 'kPhysical'
jobs. This script will be executed post backup run.
pre_backup_script (BackupScript): Specifies the script associated with
the backup job. This field must be specified for 'kPhysical' jobs.
This script will be executed pre backup run. The 'remoteScript'
field will be used for remote adapter jobs and 'preBackupScript'
field will be used for 'kPhysical' jobs.
priority (PriorityEnum): Specifies the priority of execution for a
Protection Job. Cohesity supports concurrent backups but if the
number of Jobs exceeds the ability to process Jobs, the specified
priority determines the execution Job priority. This field also
specifies the replication priority. 'kLow' indicates lowest
execution priority for a Protection job. 'kMedium' indicates
medium execution priority for a Protection job. 'kHigh' indicates
highest execution priority for a Protection job.
qos_type (QosTypeEnum): Specifies the QoS policy type to use for this
Protection Job. 'kBackupHDD' indicates the Cohesity Cluster writes
data directly to the HDD tier for this Protection Job. This is the
recommended setting. 'kBackupSSD' indicates the Cohesity Cluster
writes data directly to the SSD tier for this Protection Job. Only
specify this policy if you need fast ingest speed for a small
number of Protection Jobs.
quiesce (bool): Indicates if the App-Consistent option is enabled for
this Job. If the option is enabled, the Cohesity Cluster quiesces
the file system and applications before taking
Application-Consistent Snapshots. VMware Tools must be installed
on the guest Operating System.
remote_script (RemoteAdapter): For a Remote Adapter 'kPuppeteer' Job,
this field specifies the settings about the remote script that
will be executed by this Job. Only specify this field for Remote
Adapter 'kPuppeteer' Jobs.
source_ids (list of long|int): Array of Protected Source Objects.
Specifies the list of Object ids from the Protection Source to
protect (or back up) by the Protection Job. An Object in this list
may be descendant of another Object in this list. For example a
Datacenter could be selected but its child Host excluded. However,
a child VM under the Host could be explicitly selected to be
protected. Both the Datacenter and the VM are listed.
source_special_parameters (list of SourceSpecialParameters): Array of
Special Source Parameters. Specifies additional settings that can
apply to a subset of the Sources listed in the Protection Job. For
example, you can specify a list of files and folders to protect
instead of protecting the entire Physical Server. If this field's
setting conflicts with environmentParameters, then this setting
will be used.
start_time (TimeOfDay): Specifies the time of day to start the
Protection Schedule. This is optional and only applicable if the
Protection Policy defines a monthly or a daily Protection
Schedule. Default value is 02:00 AM.
timezone (string): Specifies the timezone to use when calculating time
for this Protection Job such as the Job start time. Specify the
timezone in the following format: "Area/Location", for example:
"America/New_York".
view_box_id (long|int): Specifies the Storage Domain (View Box) id
where this Job writes data.
view_name (string): For a Remote Adapter 'kPuppeteer' Job or a 'kView'
Job, this field specifies a View name that should be protected.
Specify this field when creating a Protection Job for the first
time for a View. If this field is specified, ParentSourceId,
SourceIds, and ExcludeSourceIds should not be specified.
vm_tag_ids (list of long|int): Array of Arrays of VMs Tags Ids that
Specify VMs to Protect. Optionally specify a list of VMs to
protect by listing Protection Source ids of VM Tags in this two
dimensional array. Using this two dimensional array of Tag ids,
the Cluster generates a list of VMs to protect which are derived
from intersections of the inner arrays and union of the outer
array, as shown by the following example. To protect only 'Eng'
VMs in the East and all the VMs in the West, specify the following
tag id array: [ [1101, 2221], [3031] ], where 1101 is the 'Eng' VM
Tag id, 2221 is the 'East' VM Tag id and 3031 is the 'West' VM Tag
id. The inner array [1101, 2221] produces a list of VMs that are
both tagged with 'Eng' and 'East' (an intersection). The outer
array combines the list from the inner array with list of VMs
tagged with 'West' (a union). The list of resulting VMs are
protected by this Job.
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"policy_id":'policyId',
"view_box_id":'viewBoxId',
"abort_in_blackout_period":'abortInBlackoutPeriod',
"alerting_config":'alertingConfig',
"alerting_policy":'alertingPolicy',
"cloud_parameters":'cloudParameters',
"continue_on_quiesce_failure":'continueOnQuiesceFailure',
"dedup_disabled_source_ids":'dedupDisabledSourceIds',
"description":'description',
"end_time_usecs":'endTimeUsecs',
"environment":'environment',
"environment_parameters":'environmentParameters',
"exclude_source_ids":'excludeSourceIds',
"exclude_vm_tag_ids":'excludeVmTagIds',
"full_protection_sla_time_mins":'fullProtectionSlaTimeMins',
"full_protection_start_time":'fullProtectionStartTime',
"incremental_protection_sla_time_mins":'incrementalProtectionSlaTimeMins',
"incremental_protection_start_time":'incrementalProtectionStartTime',
"indexing_policy":'indexingPolicy',
"leverage_storage_snapshots":'leverageStorageSnapshots',
"leverage_storage_snapshots_for_hyperflex":'leverageStorageSnapshotsForHyperflex',
"parent_source_id":'parentSourceId',
"perform_source_side_dedup":'performSourceSideDedup',
"post_backup_script":'postBackupScript',
"pre_backup_script":'preBackupScript',
"priority":'priority',
"qos_type":'qosType',
"quiesce":'quiesce',
"remote_script":'remoteScript',
"source_ids":'sourceIds',
"source_special_parameters":'sourceSpecialParameters',
"start_time":'startTime',
"timezone":'timezone',
"view_name":'viewName',
"vm_tag_ids":'vmTagIds'
}
    def __init__(self,
                 name=None,
                 policy_id=None,
                 view_box_id=None,
                 abort_in_blackout_period=None,
                 alerting_config=None,
                 alerting_policy=None,
                 cloud_parameters=None,
                 continue_on_quiesce_failure=None,
                 dedup_disabled_source_ids=None,
                 description=None,
                 end_time_usecs=None,
                 environment=None,
                 environment_parameters=None,
                 exclude_source_ids=None,
                 exclude_vm_tag_ids=None,
                 full_protection_sla_time_mins=None,
                 full_protection_start_time=None,
                 incremental_protection_sla_time_mins=None,
                 incremental_protection_start_time=None,
                 indexing_policy=None,
                 leverage_storage_snapshots=None,
                 leverage_storage_snapshots_for_hyperflex=None,
                 parent_source_id=None,
                 perform_source_side_dedup=None,
                 post_backup_script=None,
                 pre_backup_script=None,
                 priority=None,
                 qos_type=None,
                 quiesce=None,
                 remote_script=None,
                 source_ids=None,
                 source_special_parameters=None,
                 start_time=None,
                 timezone=None,
                 view_name=None,
                 vm_tag_ids=None):
        """Constructor for the ProtectionJobRequest class.

        Every argument is stored verbatim on an instance attribute of the
        same name; see the class docstring for the meaning of each field.
        All arguments default to None, i.e. the field is "not set" and is
        left for the server/SDK defaults to resolve. No validation is
        performed here.
        """
        # Initialize members of the class
        # (assignments are kept in alphabetical order, matching the _names
        # mapping above, not the positional parameter order).
        self.abort_in_blackout_period = abort_in_blackout_period
        self.alerting_config = alerting_config
        self.alerting_policy = alerting_policy
        self.cloud_parameters = cloud_parameters
        self.continue_on_quiesce_failure = continue_on_quiesce_failure
        self.dedup_disabled_source_ids = dedup_disabled_source_ids
        self.description = description
        self.end_time_usecs = end_time_usecs
        self.environment = environment
        self.environment_parameters = environment_parameters
        self.exclude_source_ids = exclude_source_ids
        self.exclude_vm_tag_ids = exclude_vm_tag_ids
        self.full_protection_sla_time_mins = full_protection_sla_time_mins
        self.full_protection_start_time = full_protection_start_time
        self.incremental_protection_sla_time_mins = incremental_protection_sla_time_mins
        self.incremental_protection_start_time = incremental_protection_start_time
        self.indexing_policy = indexing_policy
        self.leverage_storage_snapshots = leverage_storage_snapshots
        self.leverage_storage_snapshots_for_hyperflex = leverage_storage_snapshots_for_hyperflex
        self.name = name
        self.parent_source_id = parent_source_id
        self.perform_source_side_dedup = perform_source_side_dedup
        self.policy_id = policy_id
        self.post_backup_script = post_backup_script
        self.pre_backup_script = pre_backup_script
        self.priority = priority
        self.qos_type = qos_type
        self.quiesce = quiesce
        self.remote_script = remote_script
        self.source_ids = source_ids
        self.source_special_parameters = source_special_parameters
        self.start_time = start_time
        self.timezone = timezone
        self.view_box_id = view_box_id
        self.view_name = view_name
        self.vm_tag_ids = vm_tag_ids
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
policy_id = dictionary.get('policyId')
view_box_id = dictionary.get('viewBoxId')
abort_in_blackout_period = dictionary.get('abortInBlackoutPeriod')
alerting_config = cohesity_management_sdk.models.alerting_config.AlertingConfig.from_dictionary(dictionary.get('alertingConfig')) if dictionary.get('alertingConfig') else None
alerting_policy = dictionary.get('alertingPolicy')
cloud_parameters = cohesity_management_sdk.models.cloud_parameters.CloudParameters.from_dictionary(dictionary.get('cloudParameters')) if dictionary.get('cloudParameters') else None
continue_on_quiesce_failure = dictionary.get('continueOnQuiesceFailure')
dedup_disabled_source_ids = dictionary.get('dedupDisabledSourceIds')
description = dictionary.get('description')
end_time_usecs = dictionary.get('endTimeUsecs')
environment = dictionary.get('environment')
environment_parameters = cohesity_management_sdk.models.environment_specific_common_job_parameters.EnvironmentSpecificCommonJobParameters.from_dictionary(dictionary.get('environmentParameters')) if dictionary.get('environmentParameters') else None
exclude_source_ids = dictionary.get('excludeSourceIds')
exclude_vm_tag_ids = dictionary.get('excludeVmTagIds')
full_protection_sla_time_mins = dictionary.get('fullProtectionSlaTimeMins')
full_protection_start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('fullProtectionStartTime')) if dictionary.get('fullProtectionStartTime') else None
incremental_protection_sla_time_mins = dictionary.get('incrementalProtectionSlaTimeMins')
incremental_protection_start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('incrementalProtectionStartTime')) if dictionary.get('incrementalProtectionStartTime') else None
indexing_policy = cohesity_management_sdk.models.indexing_policy.IndexingPolicy.from_dictionary(dictionary.get('indexingPolicy')) if dictionary.get('indexingPolicy') else None
leverage_storage_snapshots = dictionary.get('leverageStorageSnapshots')
leverage_storage_snapshots_for_hyperflex = dictionary.get('leverageStorageSnapshotsForHyperflex')
parent_source_id = dictionary.get('parentSourceId')
perform_source_side_dedup = dictionary.get('performSourceSideDedup')
post_backup_script = cohesity_management_sdk.models.backup_script.BackupScript.from_dictionary(dictionary.get('postBackupScript')) if dictionary.get('postBackupScript') else None
pre_backup_script = cohesity_management_sdk.models.backup_script.BackupScript.from_dictionary(dictionary.get('preBackupScript')) if dictionary.get('preBackupScript') else None
priority = dictionary.get('priority')
qos_type = dictionary.get('qosType')
quiesce = dictionary.get('quiesce')
remote_script = cohesity_management_sdk.models.remote_adapter.RemoteAdapter.from_dictionary(dictionary.get('remoteScript')) if dictionary.get('remoteScript') else None
source_ids = dictionary.get('sourceIds')
source_special_parameters = None
if dictionary.get('sourceSpecialParameters') != None:
source_special_parameters = list()
for structure in dictionary.get('sourceSpecialParameters'):
source_special_parameters.append(cohesity_management_sdk.models.source_special_parameters.SourceSpecialParameters.from_dictionary(structure))
start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('startTime')) if dictionary.get('startTime') else None
timezone = dictionary.get('timezone')
view_name = dictionary.get('viewName')
vm_tag_ids = dictionary.get('vmTagIds')
# Return an object of this model
return cls(name,
policy_id,
view_box_id,
abort_in_blackout_period,
alerting_config,
alerting_policy,
cloud_parameters,
continue_on_quiesce_failure,
dedup_disabled_source_ids,
description,
end_time_usecs,
environment,
environment_parameters,
exclude_source_ids,
exclude_vm_tag_ids,
full_protection_sla_time_mins,
full_protection_start_time,
incremental_protection_sla_time_mins,
incremental_protection_start_time,
indexing_policy,
leverage_storage_snapshots,
leverage_storage_snapshots_for_hyperflex,
parent_source_id,
perform_source_side_dedup,
post_backup_script,
pre_backup_script,
priority,
qos_type,
quiesce,
remote_script,
source_ids,
source_special_parameters,
start_time,
timezone,
view_name,
vm_tag_ids)
| [
"ashish@cohesity.com"
] | ashish@cohesity.com |
7b97ec507375533d56ca683bf1a913138e0a7955 | e52bf115107bc31cd812cb5573bfa85900ecfaff | /eval-parcial-primer-bimestre/Ejercicio_1.py | e12c51a086dfa575494f7ea564cfe9d2509f06cc | [] | no_license | cfjimbo/fp-utpl-18-evaluaciones | cd0cbc793cb11f0d297c9dd2c445991d2b183e9a | 0353656bae322848d7732edb39d7d7f25e1bb275 | refs/heads/master | 2020-03-14T04:43:58.676649 | 2018-05-01T06:36:55 | 2018-05-01T06:36:55 | 131,448,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """Realizar un programa en Java que permita ingresar por teclado la longitud y la anchura de una
habitación, realizar los procesos respectivos que permita obtener la superficie de la misma, además
se debe presentar en pantalla el valor de la superficie, finalmente tomar en consideración que se
debe presentar el valor con 3 decimales
"""
longitud = float(input("Longitud de la habitacion: "))
ancho = float(input("Ancho de la habitacion: "))
superficie = longitud * ancho
print("La superficie de la habitacion es: {}\n".format(round(superficie, 3))) | [
"noreply@github.com"
] | cfjimbo.noreply@github.com |
e30926a419b5d166b02a76f3f5c8ed329de20e60 | ff9fedd28f7436ba9945421e061fd2e1dadbf5c3 | /Alogithms/Dijkstra/dijkstra.py | 3d1510e8e6c59b494d2b934513ca7381f575586b | [] | no_license | ritwikbadola/Empirical-Analysis-Of-Algorithms | 0ed1b9c2c92813d11af33405527a4ecced8b2845 | 7ffb7a03e9d356d5368d2d79a49a8dabf49ed6c7 | refs/heads/master | 2022-08-19T12:39:24.875859 | 2020-05-16T03:53:35 | 2020-05-16T03:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | # Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
# Library for INT_MAX
import sys
class Graph():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
def printSolution(self, dist):
print "Vertex \tDistance from Source"
for node in range(self.V):
print node, "\t", dist[node]
# A utility function to find the vertex with
# minimum distance value, from the set of vertices
# not yet included in shortest path tree
def minDistance(self, dist, sptSet):
# Initilaize minimum distance for next node
min = sys.maxint
# Search not nearest vertex not in the
# shortest path tree
for v in range(self.V):
if dist[v] < min and sptSet[v] == False:
min = dist[v]
min_index = v
return min_index
# Funtion that implements Dijkstra's single source
# shortest path algorithm for a graph represented
# using adjacency matrix representation
def dijkstra(self, src):
dist = [sys.maxint] * self.V
dist[src] = 0
sptSet = [False] * self.V
for cout in range(self.V):
# Pick the minimum distance vertex from
# the set of vertices not yet processed.
# u is always equal to src in first iteration
u = self.minDistance(dist, sptSet)
# Put the minimum distance vertex in the
# shotest path tree
sptSet[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
# the vertex in not in the shotest path tree
for v in range(self.V):
if self.graph[u][v] > 0 and sptSet[v] == False and \
dist[v] > dist[u] + self.graph[u][v]:
dist[v] = dist[u] + self.graph[u][v]
# self.printSolution(dist)
# Driver program
# Build a 25-vertex undirected weighted graph as a symmetric adjacency
# matrix (0 = no edge) and run Dijkstra from vertex 0. Note: as written
# this produces no output -- the printSolution call inside dijkstra is
# commented out, and the result of g.dijkstra(0) is discarded.
g = Graph(25)
# 25x25 symmetric weight matrix; row i / column j holds the weight of the
# edge between vertices i and j (units unspecified -- presumably distances).
g.graph = [ [0, 156, 0, 0, 246, 0, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 171, 0, 157, 0, 363],
[156, 0, 323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 323, 0, 151, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 151, 0, 0, 545, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[246, 0, 0, 0, 0, 174, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 545, 174, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[184, 0, 0, 0, 0, 0, 0, 83, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0],
[0, 0, 0, 0, 100, 0, 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 224, 0, 0, 209, 0, 0, 0, 0, 217, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 209, 0, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 0, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 0, 157, 251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157, 0, 342, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 251, 342, 0, 111, 208, 0, 0, 0, 0, 0, 382, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 217, 0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, 335, 462, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 335, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[462, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 212, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 212, 0, 135, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135, 0, 174, 0, 0, 0, 0],
[171, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 174, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 382, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0, 0],
[363, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ];
g.dijkstra(0);
# This code is contributed by Divyanshu Mehta
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.