blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5278d0ebc39489eb80a4b0a82ecaf609f72027a7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03813/s431513228.py | c4298e11e8a394fafce0121831f9fbfa51e6a6ab | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py |
def read_int():
    """Read a single line from stdin and return it as an int."""
    raw = input()
    return int(raw.strip())
def read_ints():
    """Read one line of space-separated integers from stdin into a list."""
    fields = input().strip().split(' ')
    return [int(field) for field in fields]
def solve():
    """Read a contest rating and return the division it qualifies for.

    Ratings below 1200 map to 'ABC'; everything else maps to 'ARC'.
    """
    rating = read_int()
    return 'ABC' if rating < 1200 else 'ARC'
# Script entry point: read a rating from stdin and print the contest name.
if __name__ == '__main__':
    print(solve())
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a9280167358863b7781ffed296d8c21e28ee3189 | f0a1b45309785a89bf74eccb75e229fd38a0aaa8 | /math_utils.py | 48245f597c0819e491d64268d76f8b6fe85245c1 | [] | no_license | mikaelmello/python-rsa-implementation | f8f8ecb7f79d8808b4d9ed520c14666560315a2b | 7b501253e41fc4c9f07db02d73896c9c189c8ebf | refs/heads/master | 2020-09-21T13:06:19.271833 | 2020-04-07T18:51:35 | 2020-04-07T18:53:23 | 224,797,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | import secrets
import math
def extended_euclid(a, b):
    """Extended Euclidean algorithm.

    Returns a triple (g, x, y) where g == gcd(a, b) and a*x + b*y == g.
    """
    prev_r, cur_r = a, b
    prev_s, cur_s = 1, 0
    prev_t, cur_t = 0, 1
    while cur_r != 0:
        q = prev_r // cur_r
        prev_r, cur_r = cur_r, prev_r - q * cur_r
        prev_s, cur_s = cur_s, prev_s - q * cur_s
        prev_t, cur_t = cur_t, prev_t - q * cur_t
    return prev_r, prev_s, prev_t
def mrand(l, r):
    """Return a cryptographically secure random integer in [l, r], inclusive."""
    width = r - l + 1
    offset = secrets.randbelow(width)
    return l + offset
def bits(a):
    """Return the number of binary digits of a (0 for a <= 0)."""
    count = 0
    while a > 0:
        a >>= 1
        count += 1
    return count
def mulmod(a, b, c):
    """Return (a * b) mod c."""
    product = a * b
    return product % c
def fexp(num, exponent, mod):
    """Fast (binary) exponentiation: return (num ** exponent) % mod."""
    result = 1
    base = num
    while exponent:
        # Multiply the result in whenever the current low bit is set.
        if exponent & 1:
            result = (result * base) % mod
        exponent >>= 1
        base = (base * base) % mod
    return result
def is_prime(n):
    """Miller-Rabin probabilistic primality test (64 rounds).

    Returns False for n <= 1.  A composite can slip through with
    probability at most 4**-64; primes are always accepted.

    Fix: the witness was previously drawn from [2, n] inclusive, so
    a == n (== 0 mod n) could be chosen, which makes every squaring
    step yield 0 and falsely rejects a prime.  Witnesses must lie in
    [2, n - 2].
    """
    if n <= 1:
        return False
    if n <= 3:
        return True
    if n % 2 == 0:
        return False
    # Write n - 1 as d * 2**s with d odd.
    s = 0
    d = n - 1
    while d % 2 == 0:
        d = d // 2
        s = s + 1
    for _ in range(64):
        # Random witness uniformly drawn from [2, n - 2].
        a = 2 + secrets.randbelow(n - 3)
        x = pow(a, d, n)
        if x == 1 or x == n - 1:
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            # No square ever hit n - 1: a witnesses compositeness.
            return False
    return True
def gen_prime(bit_length):
    """Return a probable prime of the requested bit length.

    The candidate is forced odd and has its top two bits set before
    searching upward in steps of 2 for the first probable prime.
    """
    candidate = secrets.randbits(bit_length)
    candidate |= 1
    candidate |= 1 << (bit_length - 1)
    candidate |= 1 << (bit_length - 2)
    while not is_prime(candidate):
        candidate += 2
    return candidate
def get_large_primes():
    """Generate two large primes straddling a random power of two.

    Picks 2**n for a random n in [246, 256] and returns (first prime
    above it, first prime below it).  2**n itself is even, so the two
    results are always distinct.
    """
    exponent = mrand(246, 256)
    pivot = 2 ** exponent
    above = pivot
    while not is_prime(above):
        above = above + 1
    below = pivot
    while not is_prime(below):
        below = below - 1
    return (above, below)
| [
"git@mikaelmello.com"
] | git@mikaelmello.com |
d035e9fd898748e129cda959b3d579e13d600d7b | aae29f1c7c8eabda658a00e39f471377b6da0052 | /pygame_project/main.py | dec238616052d072703a724685d1660c382f7741 | [] | no_license | dreamerror/practice_projects | 52041645ba3c7421b9c7fb1a1f6b376915bd9fd9 | 9d4010e92cf82f49e7938995ee74a8365ef28c80 | refs/heads/master | 2023-06-27T20:33:13.936427 | 2021-07-31T03:48:11 | 2021-07-31T03:48:11 | 389,886,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,446 | py | import pygame
import random
import sys
from pygame.locals import DOUBLEBUF, KEYUP, K_RIGHT, K_LEFT, K_UP, K_DOWN, K_SPACE, QUIT
pygame.init()
# Board geometry: 6 x 12 grid of 50px tiles.
NUM_SHAPES = 5
PUZZLE_COLUMNS = 6
PUZZLE_ROWS = 12
SHAPE_WIDTH = 50
SHAPE_HEIGHT = 50
FPS = 15
WINDOW_WIDTH = PUZZLE_COLUMNS * SHAPE_WIDTH
# Extra 75 px strip below the board for the score text.
WINDOW_HEIGHT = PUZZLE_ROWS * SHAPE_HEIGHT + 75
# Artwork, loaded once at import time.
BACKGROUND = pygame.image.load("images/background.png")
JEWEL_1 = pygame.image.load("images/jewel_1.png")
JEWEL_2 = pygame.image.load("images/jewel_2.png")
JEWEL_3 = pygame.image.load("images/jewel_3.png")
JEWEL_4 = pygame.image.load("images/jewel_4.png")
JEWEL_5 = pygame.image.load("images/jewel_5.png")
SHAPES_LIST = [JEWEL_1, JEWEL_2, JEWEL_3, JEWEL_4, JEWEL_5]
# Sentinel tile used for cleared cells (re-uses the background image).
BLANK = pygame.image.load("images/background.png")
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
FONT_SIZE = 36
TEXT_OFFSET = 5
# Scoring rules: points awarded per player-made vs. cascade match.
MINIMUM_MATCH = 3
SINGLE_POINTS = 1
DOUBLE_POINTS = 3
TRIPLE_POINTS = 9
EXTRA_LENGTH_POINTS = 0
RANDOM_POINTS = 1
FPS_CLOCK = pygame.time.Clock()
DISPLAY_SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), DOUBLEBUF)
pygame.display.set_caption("Puzzle")
def main():
    """Run the match-3 game: set up the board, then loop on input events.

    Arrow keys move the two-cell selector; SPACE swaps the selected pair
    and reverts the swap if it produces no match.  Mutates the module
    globals `score` and `selector`.
    """
    global score
    global selector
    jewels_board = generate_random_board()
    selector = (0, 0)
    score = 0
    last_move_time = pygame.time.get_ticks()
    blit_board(jewels_board)
    draw_selector(selector)
    # Clear any matches the random board started with before play begins.
    remove_matches(jewels_board, selector)
    blit_board(jewels_board)
    blit_score(score)
    draw_selector(selector)
    while True:
        for event in pygame.event.get():
            if event.type == KEYUP:
                # Selector x is capped at COLUMNS - 2: it spans two cells.
                if event.key == K_RIGHT and selector[0] < (PUZZLE_COLUMNS - 2):
                    selector = (selector[0] + 1, selector[1])
                if event.key == K_LEFT and selector[0] > 0:
                    selector = (selector[0] - 1, selector[1])
                if event.key == K_DOWN and selector[1] < (PUZZLE_ROWS - 1):
                    selector = (selector[0], selector[1] + 1)
                if event.key == K_UP and selector[1] > 0:
                    selector = (selector[0], selector[1] - 1)
                if event.key == K_SPACE:
                    last_move_time = pygame.time.get_ticks()
                    swap_pieces(selector, jewels_board)
                    if find_matches(jewels_board):
                        remove_matches(jewels_board, selector)
                    else:
                        # Illegal move: swap back.
                        swap_pieces(selector, jewels_board)
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        blit_board(jewels_board)
        blit_score(score)
        draw_selector(selector)
        pygame.display.update()
        FPS_CLOCK.tick(FPS)
def generate_random_board():
    """Build a PUZZLE_ROWS x PUZZLE_COLUMNS grid of random jewel images."""
    board = []
    for _ in range(PUZZLE_ROWS):
        row = [random.choice(SHAPES_LIST) for _ in range(PUZZLE_COLUMNS)]
        board.append(row)
    return board
def blit_score(score):
    """Render the current score in the strip below the board."""
    font = pygame.font.Font(None, FONT_SIZE)
    rendered = font.render("Score: " + str(score), True, BLACK)
    position = (TEXT_OFFSET, WINDOW_HEIGHT - FONT_SIZE)
    DISPLAY_SURFACE.blit(rendered, position)
def blit_board(board):
    """Draw the background and then every jewel currently on the board."""
    DISPLAY_SURFACE.blit(BACKGROUND, (0, 0))
    for row_num, row in enumerate(board):
        for column_num, shape in enumerate(row):
            dest = (SHAPE_WIDTH * column_num, SHAPE_HEIGHT * row_num)
            DISPLAY_SURFACE.blit(shape, dest)
def draw_selector(position):
    """Outline the two-cell-wide selector whose left cell is at `position`."""
    left = position[0] * SHAPE_WIDTH
    top = position[1] * SHAPE_HEIGHT
    right = left + SHAPE_WIDTH * 2
    bottom = top + SHAPE_HEIGHT
    corners = [(left, top), (right, top), (right, bottom), (left, bottom)]
    pygame.draw.lines(DISPLAY_SURFACE, WHITE, True, corners, 3)
def swap_pieces(position, board):
    """Swap the piece at (x, y) `position` with its right-hand neighbour.

    Mutates `board` in place; `position` is (column, row).
    """
    column, row = position
    left_piece = board[row][column]
    board[row][column] = board[row][column + 1]
    board[row][column + 1] = left_piece
def remove_matches(board, selector):
    """Score, clear and refill matches repeatedly until the board is stable.

    Mutates `board` in place.  `selector` identifies the player's two
    cells so score_matches can credit player-made matches.
    """
    matches = find_matches(board)
    while matches:
        score_matches(board, selector, matches)
        clear_matches(board, matches)
        refill_columns(board)
        matches = find_matches(board)
    # NOTE(review): this rebinds only the local name; the caller's
    # selector is unaffected. Looks like dead code -- confirm intent.
    selector = (0, 0)
def score_matches(board, selector, matches):
    """Add points to the global `score` for the given list of matches.

    Matches containing either of the selector's two cells count as
    player-made (SINGLE/DOUBLE/TRIPLE points plus an extra-length bonus);
    all other matches are cascade matches worth RANDOM_POINTS each.
    """
    global score
    player_matches = []
    # Matches store (row, column) positions, but the selector is
    # (column, row) -- swap it into (row, column) order first.
    selector = (selector[1], selector[0])
    for match in matches:
        for position in match:
            # (selector[0], selector[1] + 1) is the selector's right cell.
            if (position == selector or position == (selector[0], selector[1] + 1)) and (not match in player_matches):
                player_matches.append(match)
    if len(player_matches) == 1:
        score += SINGLE_POINTS
    elif len(player_matches) == 2:
        score += DOUBLE_POINTS
    elif len(player_matches) == 3:
        score += TRIPLE_POINTS
    # Bonus for runs longer than the minimum (EXTRA_LENGTH_POINTS is
    # currently 0, so this is a no-op unless that constant changes).
    for match in player_matches:
        score += int((len(match) - MINIMUM_MATCH) * EXTRA_LENGTH_POINTS)
    for match in matches:
        if match not in player_matches:
            score += RANDOM_POINTS
def find_matches(board):
    """Find all vertical and horizontal runs of MINIMUM_MATCH+ equal tiles.

    Returns a list of matches, each match a list of (row, column)
    positions.  The board itself is not modified.
    """
    clear_list = []
    # Vertical scan: walk each column counting the current run length.
    for column in range(PUZZLE_COLUMNS):
        length = 1
        for row in range(1, PUZZLE_ROWS):
            if board[row][column] == board[row - 1][column]:
                length += 1
            if not board[row][column] == board[row - 1][column]:
                # Run ended just above `row`; record it if long enough.
                if length >= MINIMUM_MATCH:
                    match = []
                    for clearRow in range(row - length, row):
                        match.append((clearRow, column))
                    clear_list.append(match)
                length = 1
            if row == PUZZLE_ROWS - 1:
                # Special-case a run that extends to the bottom edge.
                if length >= MINIMUM_MATCH:
                    match = []
                    for clearRow in range(row - (length - 1), row + 1):
                        match.append((clearRow, column))
                    clear_list.append(match)
    # Horizontal scan: same logic along each row.
    for row in range(PUZZLE_ROWS):
        length = 1
        for column in range(1, PUZZLE_COLUMNS):
            if board[row][column] == board[row][column - 1]:
                length += 1
            if not board[row][column] == board[row][column - 1]:
                if length >= MINIMUM_MATCH:
                    match = []
                    for clear_column in range(column - length, column):
                        match.append((row, clear_column))
                    clear_list.append(match)
                length = 1
            if column == PUZZLE_COLUMNS - 1:
                # Special-case a run that reaches the right edge.
                if length >= MINIMUM_MATCH:
                    match = []
                    for clear_column in range(column - (length - 1), column + 1):
                        match.append((row, clear_column))
                    clear_list.append(match)
    return clear_list
def clear_matches(board, matches):
    """Replace every matched position on the board with the BLANK tile."""
    for match in matches:
        for row, column in match:
            board[row][column] = BLANK
def refill_columns(board):
    """Drop tiles down over BLANK gaps and fill the top with new jewels.

    For each blank run found in a column, every cell from the run's top
    downward is pulled from `length` cells below it; positions past the
    bottom raise IndexError, which is used as the signal to insert a
    fresh random jewel instead.  Mutates `board` in place.
    """
    for column in range(PUZZLE_COLUMNS):
        for row in range(PUZZLE_ROWS):
            if board[row][column] == BLANK:
                # Measure the length of the contiguous blank run here.
                test = 0
                length = 0
                while row + test < PUZZLE_ROWS and board[row + test][column] == BLANK:
                    length += 1
                    test += 1
                for blank_row in range(row, PUZZLE_ROWS):
                    try:
                        board[blank_row][column] = board[blank_row + length][column]
                    except IndexError:
                        # Ran off the bottom: spawn a new random jewel.
                        board[blank_row][column] = SHAPES_LIST[random.randrange(0, len(SHAPES_LIST))]
# Launch the game loop when run as a script.
if __name__ == '__main__':
    main()
| [
"66074257+dreamerror@users.noreply.github.com"
] | 66074257+dreamerror@users.noreply.github.com |
9e2010ab225e7382d1bcaaae37d358d2bdd39500 | 042962d67c8742d01f66f254375b1ceb9136d182 | /sensibleServer/__init__.py | b4047c4be8d5508123a5789445b9f26a212307ed | [] | no_license | Sensibility/sensibleServer | d8b0afefd846e31d454912f579067fc90526d046 | a4125fa36e9a47e3ea05287ab6b54ca22110eb0d | refs/heads/master | 2021-05-26T07:46:00.499527 | 2019-11-07T17:00:31 | 2019-11-07T17:00:31 | 127,950,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | """
This package provides a simple http(s) server that serves content with optional cgi scripting available.
Usage: sensibleServer DOCUMENT_ROOT [ -c --enable-cgi ]
"""
__author__ = "ocket8888"
__version__ = "0.0.1"
import argparse
ROOT, CGI = None, None
def main() -> int:
"""
Runs the program.
"""
global ROOT, CGI, __version__
parser = argparse.ArgumentParser(description="A simple http(s) server that serves content with optional cgi scripting available.")
parser.add_argument("DOCUMENT_ROOT", type=str, help="The root of the web content to serve.")
parser.add_argument("-c", "--enable-cgi", action="store_const", const=True, default=False, help="Enable the running of python scripts as Common Gateway Interface scripts.")
parser.add_argument("-v", "--version", action="store_const", const=True, default=False, help="Print out the version number and exit.")
args = parser.parse_args()
if args.version:
print(__version__)
return 0
import os
from sys import stderr
ROOT = os.path.abspath(args.DOCUMENT_ROOT)
if not os.path.isdir(ROOT):
print("No such directory: '%s'" % ROOT, file=stderr)
return 1
os.chdir(ROOT)
CGI = args.enable_cgi
if CGI:
from . import server
os.environ['DOCUMENT_ROOT'] = ROOT
handler = server.CGIServer
with Server.HTTPServer(("", 8000), handler) as server:
print("Serving at port 8000")
try:
server.serve_forever()
except KeyboardInterrupt:
pass
else:
from .server import Server, RequestHandler
with Server(("", 8080), RequestHandler) as server:
print("Serving at port 8080", file=stderr)
if __debug__:
from time import time
print("II:", time(), "Directory Contents:", ' '.join(i for i in os.listdir(ROOT)), sep='\t', file=stderr)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
return 0
if __name__ == '__main__':
exit(main())
| [
"ocket8888@gmail.com"
] | ocket8888@gmail.com |
10ba62e4e2abefa1a7435eb6853c7606548f54aa | 92d5555325b328549b25d3c2af9b8c577d1511cd | /log/config.py | e612e8fe669bfae1ce5e9f6fbdc77cedebcd275c | [] | no_license | zhang29845987/tpshop_login_api_1 | a5d213ead57376fb1e2b91d0554b2bc6541f06f7 | 58a78a60e6378d960ad03c9de4a4f3a006078f89 | refs/heads/master | 2020-09-29T18:12:52.191624 | 2019-12-10T10:32:26 | 2019-12-10T10:32:26 | 227,091,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import logging
import os
# Get the project root directory
from logging.handlers import TimedRotatingFileHandler
# Directory containing this config module; used as the log-path root below.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): debug print at import time -- consider removing.
print(os.path.dirname(os.path.abspath(__file__)))
# Initialize the logging configuration
def init_log_config():
    """Configure the root logger: INFO level, console + hourly rotating file output."""
    # Get the root logger (no name given).
    logger=logging.getLogger()
    logger.setLevel(logging.INFO)
    # Handler that writes to the console.
    shl=logging.StreamHandler()
    # Handler that writes to BASE_DIR/log/mylog.log, rotating hourly
    # (when="h", interval=1); backupCount=0 keeps all rotated files;
    # files are written with utf8 encoding.
    trfhl=TimedRotatingFileHandler(filename=BASE_DIR+"/log/mylog.log",when="h",interval=1,backupCount=0,encoding="utf8")
    # Formatter shared by both handlers.
    fmter=logging.Formatter(fmt="%(asctime)s %(levelname)s [%(name)s] [%(filename)s(%(funcName)s:%(lineno)d)] - %(message)s")
    # Attach the formatter to the console handler.
    shl.setFormatter(fmter)
    # Attach the formatter to the rotating-file handler.
    trfhl.setFormatter(fmter)
    # Register the console handler on the root logger.
    logger.addHandler(shl)
    # Register the rotating-file handler on the root logger.
    logger.addHandler(trfhl)
"zpb0990@163.com"
] | zpb0990@163.com |
b9aae4c3e88a7792acd442dd2d9d158dd3d47ae4 | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/IPython/core/inputtransformer.py | 44ec5a1aae19b8708a617982258ffa6ec5f11bd0 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,242 | py | """Input transformer classes to support IPython special syntax.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
"""
import abc
import functools
import re
from io import StringIO
from IPython.core.splitinput import LineInfo
from IPython.utils import tokenize2
from IPython.utils.tokenize2 import TokenError, generate_tokens, untokenize
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?' # Find information about object
ESC_HELP2 = '??' # Find extra-detailed information about object
ESC_MAGIC = '%' # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';' # Quote all args as a single string, call
ESC_PAREN = '/' # Call first argument with rest of line as arguments
# All escape prefixes the transformers below recognise, in one list.
ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
                 ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
                 ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
class InputTransformer(metaclass=abc.ABCMeta):
    """Abstract base class for line-based input transformers."""
    @abc.abstractmethod
    def push(self, line):
        """Send a line of input to the transformer, returning the transformed
        input or None if the transformer is waiting for more input.
        Must be overridden by subclasses.
        Implementations may raise ``SyntaxError`` if the input is invalid. No
        other exceptions may be raised.
        """
        pass
    @abc.abstractmethod
    def reset(self):
        """Return, transformed any lines that the transformer has accumulated,
        and reset its internal state.
        Must be overridden by subclasses.
        """
        pass
    @classmethod
    def wrap(cls, func):
        """Can be used by subclasses as a decorator, to return a factory that
        will allow instantiation with the decorated object.
        """
        # The factory defers construction so each use gets a fresh,
        # independently-stateful transformer instance.
        @functools.wraps(func)
        def transformer_factory(**kwargs):
            return cls(func, **kwargs)
        return transformer_factory
class StatelessInputTransformer(InputTransformer):
    """Adapts a plain function to the InputTransformer interface."""
    def __init__(self, func):
        self.func = func
    def __repr__(self):
        return "StatelessInputTransformer(func=%r)" % (self.func,)
    def push(self, line):
        """Run `line` through the wrapped function and return the result
        immediately; nothing is ever buffered."""
        return self.func(line)
    def reset(self):
        """No-op - kept only for interface compatibility."""
        pass
class CoroutineInputTransformer(InputTransformer):
    """Wrapper for an input transformer implemented as a coroutine."""
    def __init__(self, coro, **kwargs):
        # Instantiate and prime the coroutine so it is parked at its
        # first yield, ready to accept lines via send().
        self.coro = coro(**kwargs)
        next(self.coro)
    def __repr__(self):
        return "CoroutineInputTransformer(coro=%r)" % (self.coro,)
    def push(self, line):
        """Feed `line` to the coroutine; the yielded value is the
        transformed input, or None while it waits for more lines."""
        return self.coro.send(line)
    def reset(self):
        """Send the reset sentinel (None), flushing any accumulated lines."""
        return self.coro.send(None)
class TokenInputTransformer(InputTransformer):
    """Wrapper for a token-based input transformer.
    func should accept a list of tokens (5-tuples, see tokenize docs), and
    return an iterable which can be passed to tokenize.untokenize().
    """
    def __init__(self, func):
        self.func = func
        # Lines accumulated so far (each with a trailing newline).
        self.buf = []
        self.reset_tokenizer()
    def reset_tokenizer(self):
        # Rebuild the tokenizer over the current buffer contents.
        it = iter(self.buf)
        self.tokenizer = generate_tokens(it.__next__)
    def push(self, line):
        """Buffer `line`; return the transformed statement once tokenization
        succeeds, or None while more lines are needed."""
        self.buf.append(line + '\n')
        if all(l.isspace() for l in self.buf):
            return self.reset()
        tokens = []
        stop_at_NL = False
        try:
            for intok in self.tokenizer:
                tokens.append(intok)
                t = intok[0]
                if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
                    # Stop before we try to pull a line we don't have yet
                    break
                elif t == tokenize2.ERRORTOKEN:
                    stop_at_NL = True
        except TokenError:
            # Multi-line statement - stop and try again with the next line
            self.reset_tokenizer()
            return None
        return self.output(tokens)
    def output(self, tokens):
        # Apply func to the token list and reassemble it into source text.
        self.buf.clear()
        self.reset_tokenizer()
        return untokenize(self.func(tokens)).rstrip('\n')
    def reset(self):
        """Return any buffered input untransformed and clear all state."""
        l = ''.join(self.buf)
        self.buf.clear()
        self.reset_tokenizer()
        if l:
            return l.rstrip('\n')
class assemble_python_lines(TokenInputTransformer):
    """Accumulates lines until they tokenize as a complete Python statement,
    then emits them unchanged (func is None; output() bypasses it)."""
    def __init__(self):
        super(assemble_python_lines, self).__init__(None)
    def output(self, tokens):
        # Ignore the tokens: just flush the raw buffered text.
        return self.reset()
@CoroutineInputTransformer.wrap
def assemble_logical_lines():
    """Join lines following explicit line continuations (\)"""
    line = ''
    while True:
        line = (yield line)
        if not line or line.isspace():
            continue
        parts = []
        while line is not None:
            # A trailing backslash outside a comment continues the line.
            if line.endswith('\\') and (not has_comment(line)):
                parts.append(line[:-1])
                line = (yield None) # Get another line
            else:
                parts.append(line)
                break
        # Output: emit the joined logical line on the next push.
        line = ''.join(parts)
# Utilities
def _make_help_call(target, esc, lspace, next_input=None):
    """Prepares a pinfo(2)/psearch call from a target name and the escape
    (i.e. ? or ??)"""
    if esc == '??':
        method = 'pinfo2'
    elif '*' in target:
        method = 'psearch'
    else:
        method = 'pinfo'
    arg = " ".join([method, target])
    # Split into the magic's name and its argument string.
    t_magic_name, _, t_magic_arg_s = arg.partition(' ')
    t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
    if next_input is None:
        return '%sget_ipython().run_line_magic(%r, %r)' % (lspace, t_magic_name, t_magic_arg_s)
    return '%sget_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
        (lspace, next_input, t_magic_name, t_magic_arg_s)
# These define the transformations for the different escape characters.
def _tr_system(line_info):
    "Translate lines escaped with: !"
    stripped = line_info.line.lstrip()
    cmd = stripped.lstrip(ESC_SHELL)
    return '{0}get_ipython().system({1!r})'.format(line_info.pre, cmd)
def _tr_system2(line_info):
    "Translate lines escaped with: !!"
    # Drop the two-character '!!' escape from the stripped line.
    cmd = line_info.line.lstrip()[2:]
    return '{0}get_ipython().getoutput({1!r})'.format(line_info.pre, cmd)
def _tr_help(line_info):
    "Translate lines escaped with: ?/??"
    if not line_info.line[1:]:
        # A naked help line should just fire the intro help screen
        return 'get_ipython().show_usage()'
    return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
def _tr_magic(line_info):
    "Translate lines escaped with: %"
    if line_info.line.startswith(ESC_MAGIC2):
        # Cell magics (%%) are handled elsewhere; pass them through.
        return line_info.line
    cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
    # Split the magic into its name and argument string.
    name, _, args = cmd.partition(' ')
    name = name.lstrip(ESC_MAGIC)
    return '%sget_ipython().run_line_magic(%r, %r)' % (line_info.pre, name, args)
def _tr_quote(line_info):
"Translate lines escaped with: ,"
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
'", "'.join(line_info.the_rest.split()) )
def _tr_quote2(line_info):
"Translate lines escaped with: ;"
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
line_info.the_rest)
def _tr_paren(line_info):
"Translate lines escaped with: /"
return '%s%s(%s)' % (line_info.pre, line_info.ifun,
", ".join(line_info.the_rest.split()))
# Dispatch table mapping each escape prefix to its translator function.
tr = { ESC_SHELL : _tr_system,
       ESC_SH_CAP : _tr_system2,
       ESC_HELP : _tr_help,
       ESC_HELP2 : _tr_help,
       ESC_MAGIC : _tr_magic,
       ESC_QUOTE : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN : _tr_paren }
@StatelessInputTransformer.wrap
def escaped_commands(line):
    """Transform escaped commands - %magic, !system, ?help + various autocalls.
    """
    if not line or line.isspace():
        return line
    info = LineInfo(line)
    handler = tr.get(info.esc)
    if handler is None:
        return line
    return handler(info)
_initial_space_re = re.compile(r'\s*')
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
# Extra pseudotokens for multiline strings and data structures
_MULTILINE_STRING = object()
_MULTILINE_STRUCTURE = object()
def _line_tokens(line):
    """Helper for has_comment and ends_in_comment_or_string."""
    readline = StringIO(line).readline
    toktypes = set()
    try:
        for tok in generate_tokens(readline):
            toktypes.add(tok[0])
    except TokenError as e:
        # TokenError arises in exactly two situations: an unclosed
        # multi-line string, or an unclosed multi-line data structure.
        if 'multi-line string' in e.args[0]:
            toktypes.add(_MULTILINE_STRING)
        else:
            toktypes.add(_MULTILINE_STRUCTURE)
    return toktypes
def has_comment(src):
    """Indicate whether an input line has (i.e. ends in, or is) a comment.
    This uses tokenize, so it can distinguish comments from # inside strings.
    Parameters
    ----------
    src : string
      A single line input string.
    Returns
    -------
    comment : bool
        True if source has a comment.
    """
    toktypes = _line_tokens(src)
    return tokenize2.COMMENT in toktypes
def ends_in_comment_or_string(src):
    """Indicates whether or not an input line ends in a comment or within
    a multiline string.
    Parameters
    ----------
    src : string
      A single line input string.
    Returns
    -------
    comment : bool
        True if source ends in a comment or multiline string.
    """
    toktypes = _line_tokens(src)
    if tokenize2.COMMENT in toktypes:
        return True
    return _MULTILINE_STRING in toktypes
@StatelessInputTransformer.wrap
def help_end(line):
    """Translate lines with ?/?? at the end"""
    m = _help_end_re.search(line)
    if m is None or ends_in_comment_or_string(line):
        return line
    target = m.group(1)
    esc = m.group(3)
    lspace = _initial_space_re.match(line).group(0)
    # If we're mid-command, put it back on the next prompt for the user.
    next_input = None
    if line.strip() != m.group(0):
        next_input = line.rstrip('?')
    return _make_help_call(target, esc, lspace, next_input)
@CoroutineInputTransformer.wrap
def cellmagic(end_on_blank_line=False):
    """Captures & transforms cell magics.
    After a cell magic is started, this stores up any lines it gets until it is
    reset (sent None).
    """
    tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
    cellmagic_help_re = re.compile('%%\w+\?')
    line = ''
    while True:
        line = (yield line)
        # consume leading empty lines
        while not line:
            line = (yield line)
        if not line.startswith(ESC_MAGIC2):
            # This isn't a cell magic, idle waiting for reset then start over
            while line is not None:
                line = (yield line)
            continue
        if cellmagic_help_re.match(line):
            # This case will be handled by help_end
            continue
        # Capture the %%magic line, then buffer the body (yielding None
        # to signal "still collecting") until reset or a blank line.
        first = line
        body = []
        line = (yield None)
        while (line is not None) and \
              ((line.strip() != '') or not end_on_blank_line):
            body.append(line)
            line = (yield None)
        # Output: emit the run_cell_magic call on the next interaction.
        magic_name, _, first = first.partition(' ')
        magic_name = magic_name.lstrip(ESC_MAGIC2)
        line = tpl % (magic_name, first, u'\n'.join(body))
def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
    """Remove matching input prompts from a block of input.
    Parameters
    ----------
    prompt_re : regular expression
        A regular expression matching any input prompt (including continuation)
    initial_re : regular expression, optional
        A regular expression matching only the initial prompt, but not continuation.
        If no initial expression is given, prompt_re will be used everywhere.
        Used mainly for plain Python prompts, where the continuation prompt
        ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
    If initial_re and prompt_re differ,
    only initial_re will be tested against the first line.
    If any prompt is found on the first two lines,
    prompts will be stripped from the rest of the block.
    """
    if initial_re is None:
        initial_re = prompt_re
    line = ''
    while True:
        line = (yield line)
        # First line of cell
        if line is None:
            continue
        out, n1 = initial_re.subn('', line, count=1)
        if turnoff_re and not n1:
            if turnoff_re.match(line):
                # We're in e.g. a cell magic; disable this transformer for
                # the rest of the cell.
                while line is not None:
                    line = (yield line)
                continue
        line = (yield out)
        if line is None:
            continue
        # check for any prompt on the second line of the cell,
        # because people often copy from just after the first prompt,
        # so we might not see it in the first line.
        out, n2 = prompt_re.subn('', line, count=1)
        line = (yield out)
        if n1 or n2:
            # Found a prompt in the first two lines - check for it in
            # the rest of the cell as well.
            while line is not None:
                line = (yield prompt_re.sub('', line, count=1))
        else:
            # Prompts not in input - wait for reset
            while line is not None:
                line = (yield line)
@CoroutineInputTransformer.wrap
def classic_prompt():
    """Strip the >>>/... prompts of the Python interactive shell."""
    # FIXME: non-capturing version (?:...) usable?
    prompt = re.compile(r'^(>>>|\.\.\.)( |$)')
    initial = re.compile(r'^>>>( |$)')
    # %magic/!system lines are IPython syntax, so classic prompts cannot
    # precede them; disable stripping for the rest of such a cell.
    turnoff = re.compile(r'^[%!]')
    return _strip_prompts(prompt, initial, turnoff)
@CoroutineInputTransformer.wrap
def ipy_prompt():
    """Strip IPython's In [1]:/...: prompts."""
    # FIXME: non-capturing version (?:...) usable?
    prompt = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
    # Disable prompt stripping inside cell magics
    turnoff = re.compile(r'^%%')
    return _strip_prompts(prompt, turnoff_re=turnoff)
@CoroutineInputTransformer.wrap
def leading_indent():
    """Remove leading indentation.
    If the first line starts with a spaces or tabs, the same whitespace will be
    removed from each following line until it is reset.
    """
    space_re = re.compile(r'^[ \t]+')
    line = ''
    while True:
        line = (yield line)
        if line is None:
            continue
        m = space_re.match(line)
        if m:
            # Remember the first line's indent and strip exactly that
            # prefix from every subsequent line until reset.
            space = m.group(0)
            while line is not None:
                if line.startswith(space):
                    line = line[len(space):]
                line = (yield line)
        else:
            # No leading spaces - wait for reset
            while line is not None:
                line = (yield line)
_assign_pat = \
r'''(?P<lhs>(\s*)
([\w\.]+) # Initial identifier
(\s*,\s*
\*?[\w\.]+)* # Further identifiers for unpacking
\s*?,? # Trailing comma
)
\s*=\s*
'''
assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_system_template = '%s = get_ipython().getoutput(%r)'
@StatelessInputTransformer.wrap
def assign_from_system(line):
    """Transform assignment from system commands (e.g. files = !ls)"""
    match = assign_system_re.match(line)
    if match is None:
        return line
    return assign_system_template % match.group('lhs', 'cmd')
# Pattern and template for assignments from line magics (a = %who_ls).
assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
@StatelessInputTransformer.wrap
def assign_from_magic(line):
    """Transform assignment from magic commands (e.g. a = %who_ls)"""
    match = assign_magic_re.match(line)
    if match is None:
        return line
    lhs, cmd = match.group('lhs', 'cmd')
    # Split the magic into its name and argument string for run_line_magic.
    name, _, args = cmd.partition(' ')
    name = name.lstrip(ESC_MAGIC)
    return assign_magic_template % (lhs, name, args)
| [
"jornadaciti@ug4c08.windows.cin.ufpe.br"
] | jornadaciti@ug4c08.windows.cin.ufpe.br |
0e10cbdfa5b1cd030ad2bdd01ff695d9fdb60938 | d88868b88864e4d10009c58b27323034715b0277 | /projects/barter/deployments/docker/barter/test.py | 1ff68bc01c1f1c3d50a8992e741a015cb27fa14d | [
"Apache-2.0"
] | permissive | shamal112mn/packer-1 | 795ebd9e0fee444f0cbb01897a50e199b73f1307 | be2720e9cb61bf1110a880e94e32a6767a341588 | refs/heads/master | 2023-07-10T09:33:10.516559 | 2021-08-17T02:25:50 | 2021-08-17T02:25:50 | 327,763,823 | 0 | 0 | null | 2021-01-08T01:08:49 | 2021-01-08T01:08:48 | null | UTF-8 | Python | false | false | 388 | py | import requests
import json
# IEX "effective spread" endpoint for ticker TSLA, proxied through RapidAPI.
url = "https://investors-exchange-iex-trading.p.rapidapi.com/stock/tsla/effective-spread"
# NOTE(review): a live RapidAPI key is committed in plain text below; it
# should be revoked and loaded from an environment variable instead.
headers = {
    'x-rapidapi-key': "158cd4f9cdmsh0d92f8b92b1d427p1947b6jsn857aa1252e0b",
    'x-rapidapi-host': "investors-exchange-iex-trading.p.rapidapi.com"
    }
response = requests.request("GET", url, headers=headers)
print(json.dumps(response.json(), indent=2)) | [
"you@example.com"
] | you@example.com |
5a7644e93929e110f2439ee13de2a41444e4af81 | bd749eca9a6aa414745bf13106c948fb2794d72b | /blog_app/migrations/0002_auto_20190910_1519.py | eb89b3692ac1da5756d047aa055b459d93bc40d4 | [] | no_license | DreamInit/my_blog | 115b07b40a32fc1d50fb642281a501dcce1ef46c | 1309664c9504a93f36cbf7bbf30cc87cb096608d | refs/heads/master | 2022-11-03T09:28:26.098193 | 2019-09-11T11:54:56 | 2019-09-11T11:54:56 | 207,740,133 | 0 | 1 | null | 2022-10-09T12:20:34 | 2019-09-11T06:33:17 | Python | UTF-8 | Python | false | false | 340 | py | # Generated by Django 2.0 on 2019-09-10 07:19
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set Blog's default ordering to newest-first
    (descending created_time)."""
    dependencies = [
        ('blog_app', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='blog',
            options={'ordering': ['-created_time']},
        ),
    ]
| [
"chuan.huang@scaleflux.com"
] | chuan.huang@scaleflux.com |
633d089ce731df7f5e583fa5d1a666bf329b9e9a | 446d500d0c7ca5b46c21c70db820d1b2dac11a38 | /comp11 - intro/proj1/helper.py | 0859f8f24e5f4e69c0e1b3f1565de9c0b3ee4771 | [] | no_license | vladhugec/Course-Work | 3397d52bb4a649196e336c48d0cab340126b2d8b | 27356f32d33aae497ede109fa9e66a19516f79fb | refs/heads/master | 2020-12-18T12:59:25.542999 | 2020-01-21T21:58:34 | 2020-01-21T21:58:34 | 235,391,035 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | # helper.py
# Written by: Kalina Allen, kallen07
# 02/23/2018
#
# Purpose: provide a helpful function for comp11 project 1
from string import ascii_letters
# arguments:
# mystr: string to clean
# returns: s but with all non-alphabet characters removed
def clean_word(mystr):
return (''.join(char for char in mystr if char in ascii_letters)).lower()
| [
"vladhugec@me.com"
] | vladhugec@me.com |
e1136957d862e88999704359ee05863d30e39f15 | 4c99cd9846d6c9f196c7cbfa06f0576fb221a104 | /virtuellenv ihkb/lib/python2.7/site-packages/whoosh/fields.py | 0c6789cffdaa2519cb35c068b74592287435b576 | [] | no_license | Gitlena/ichhabkeinblog | 4622637773d5c4f3647455659058aab90d5bd050 | 1cfd7a030f9d41279a120724d29e1f48aa511562 | refs/heads/master | 2016-09-08T01:56:39.943817 | 2014-03-22T19:52:57 | 2014-03-22T19:52:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,659 | py | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
""" Contains functions and classes related to fields.
"""
import datetime, fnmatch, re, struct, sys
from array import array
from decimal import Decimal
from whoosh import analysis, columns, formats
from whoosh.compat import u, b, PY3
from whoosh.compat import with_metaclass
from whoosh.compat import itervalues, xrange
from whoosh.compat import bytes_type, string_type, integer_types, text_type
from whoosh.system import emptybytes
from whoosh.system import pack_byte, unpack_byte
from whoosh.util.numeric import to_sortable, from_sortable
from whoosh.util.numeric import typecode_max, NaN
from whoosh.util.text import utf8encode, utf8decode
from whoosh.util.times import datetime_to_long, long_to_datetime
# Exceptions
class FieldConfigurationError(Exception):
    """Raised when a field is configured with invalid or incompatible
    options.
    """
    pass
class UnknownFieldError(Exception):
    """Raised when a field name that is not in the schema is referenced."""
    pass
# Field Types
class FieldType(object):
    """Represents a field configuration.

    The FieldType object supports the following attributes:

    * format (formats.Format): the storage format for the field's contents.

    * analyzer (analysis.Analyzer): the analyzer to use to turn text into
      terms.

    * vector (formats.Format): the storage format for the field's vectors
      (forward index), or None if the field should not store vectors.

    * scorable (boolean): whether searches against this field may be scored.
      This controls whether the index stores per-document field lengths for
      this field.

    * stored (boolean): whether the content of this field is stored for each
      document. For example, in addition to indexing the title of a document,
      you usually want to store the title so it can be presented as part of
      the search results.

    * unique (boolean): whether this field's value is unique to each document.
      For example, 'path' or 'ID'. IndexWriter.update_document() will use
      fields marked as 'unique' to find the previous version of a document
      being updated.

    * multitoken_query is a string indicating what kind of query to use when
      a "word" in a user query parses into multiple tokens. The string is
      interpreted by the query parser. The strings understood by the default
      query parser are "first" (use first token only), "and" (join the tokens
      with an AND query), "or" (join the tokens with OR), "phrase" (join
      the tokens with a phrase query), and "default" (use the query parser's
      default join type).

    The constructor for the base field type simply lets you supply your own
    configured field format, vector format, and scorable and stored values.
    Subclasses may configure some or all of this for you.
    """

    # Class-level defaults; most subclasses override these in __init__
    # instead of calling this base constructor.
    analyzer = format = vector = scorable = stored = unique = None
    indexed = True
    multitoken_query = "default"
    sortable_typecode = None
    spelling = False
    column_type = None

    def __init__(self, format, analyzer, vector=None, scorable=False,
                 stored=False, unique=False, multitoken_query="default",
                 sortable=False):
        assert isinstance(format, formats.Format)

        self.format = format
        self.analyzer = analyzer
        self.vector = vector
        self.scorable = scorable
        self.stored = stored
        self.unique = unique
        self.multitoken_query = multitoken_query
        self.set_sortable(sortable)

    def __repr__(self):
        temp = "%s(format=%r, vector=%r, scorable=%s, stored=%s, unique=%s)"
        return temp % (self.__class__.__name__, self.format, self.vector,
                       self.scorable, self.stored, self.unique)

    def __eq__(self, other):
        # NOTE(review): the analyzer is deliberately not part of the
        # comparison here -- only storage-related configuration is compared.
        return all((isinstance(other, FieldType),
                    (self.format == other.format),
                    (self.vector == other.vector),
                    (self.scorable == other.scorable),
                    (self.stored == other.stored),
                    (self.unique == other.unique),
                    (self.column_type == other.column_type)))

    def __ne__(self, other):
        return not(self.__eq__(other))

    # Column methods

    def set_sortable(self, sortable):
        # A columns.Column instance is used as-is; any other truthy value
        # selects this field type's default column; falsy disables sorting.
        if sortable:
            if isinstance(sortable, columns.Column):
                self.column_type = sortable
            else:
                self.column_type = self.default_column()
        else:
            self.column_type = None

    def default_column(self):
        # Subclasses (e.g. NUMERIC) override this to pick a more
        # appropriate column type.
        return columns.VarBytesColumn()

    # Methods for converting input into indexing information

    def index(self, value, **kwargs):
        """Returns an iterator of (btext, frequency, weight, encoded_value)
        tuples for each unique word in the input value.

        The default implementation uses the ``analyzer`` attribute to tokenize
        the value into strings, then encodes them into bytes using UTF-8.
        """

        if not self.format:
            raise Exception("%s field %r cannot index without a format"
                            % (self.__class__.__name__, self))
        if not isinstance(value, (text_type, list, tuple)):
            raise ValueError("%r is not unicode or sequence" % value)
        assert isinstance(self.format, formats.Format)

        if "mode" not in kwargs:
            kwargs["mode"] = "index"

        word_values = self.format.word_values
        ana = self.analyzer
        for tstring, freq, wt, vbytes in word_values(value, ana, **kwargs):
            yield (utf8encode(tstring)[0], freq, wt, vbytes)

    def process_text(self, qstring, mode='', **kwargs):
        """Analyzes the given string and returns an iterator of token texts.

        >>> field = fields.TEXT()
        >>> list(field.process_text("The ides of March"))
        ["ides", "march"]
        """

        if not self.format:
            raise Exception("%s field has no format" % self)
        return (t.text for t in self.tokenize(qstring, mode=mode, **kwargs))

    def tokenize(self, value, **kwargs):
        """Analyzes the given string and returns an iterator of Token objects
        (note: for performance reasons, actually the same token yielded over
        and over with different attributes).
        """

        if not self.analyzer:
            raise Exception("%s field has no analyzer" % self.__class__)
        return self.analyzer(value, **kwargs)

    def to_bytes(self, value):
        """Returns a bytes representation of the given value, appropriate to be
        written to disk. The default implementation assumes a unicode value and
        encodes it using UTF-8.
        """

        # If a list or tuple was given, only the first item is encoded.
        if isinstance(value, (list, tuple)):
            value = value[0]
        if not isinstance(value, bytes_type):
            value = utf8encode(value)[0]
        return value

    def to_column_value(self, value):
        """Returns an object suitable to be inserted into the document values
        column for this field. The default implementation simply calls
        ``self.to_bytes(value)``.
        """

        return self.to_bytes(value)

    def from_column_value(self, value):
        return self.from_bytes(value)

    def from_bytes(self, bs):
        return utf8decode(bs)[0]

    # Methods related to query parsing

    def self_parsing(self):
        """Subclasses should override this method to return True if they want
        the query parser to call the field's ``parse_query()`` method instead
        of running the analyzer on text in this field. This is useful where
        the field needs full control over how queries are interpreted, such
        as in the numeric field type.
        """

        return False

    def parse_query(self, fieldname, qstring, boost=1.0):
        """When ``self_parsing()`` returns True, the query parser will call
        this method to parse basic query text.
        """

        raise NotImplementedError(self.__class__.__name__)

    def parse_range(self, fieldname, start, end, startexcl, endexcl,
                    boost=1.0):
        """When ``self_parsing()`` returns True, the query parser will call
        this method to parse range query text. If this method returns None
        instead of a query object, the parser will fall back to parsing the
        start and end terms using process_text().
        """

        return None

    # Methods related to sorting

    def sortable_terms(self, ixreader, fieldname):
        """Returns an iterator of the "sortable" tokens in the given reader and
        field. These values can be used for sorting. The default implementation
        simply returns all tokens in the field.

        This can be overridden by field types such as NUMERIC where some values
        in a field are not useful for sorting.
        """

        return ixreader.lexicon(fieldname)

    # Methods related to spelling

    def separate_spelling(self):
        """Returns True if this field requires special handling of the words
        that go into the field's word graph.

        The default behavior is to return True only when spelling is enabled
        for this field AND the analyzer performs morphological transformations
        (e.g. stemming), in which case the unstemmed forms must be recorded
        separately from the indexed terms.

        This method returns False if the field does not support spelling
        (i.e. the ``spelling`` attribute is False).
        """

        return self.spelling and self.analyzer.has_morph()

    def spellable_words(self, value):
        """Returns an iterator of each unique word (in sorted order) in the
        input value, suitable for inclusion in the field's word graph.

        The default behavior is to call the field analyzer with the keyword
        argument ``no_morph=True``, which should make the analyzer skip any
        morphological transformation filters (e.g. stemming) to preserve the
        original form of the words. Exotic field types may need to override
        this behavior.
        """

        if isinstance(value, (list, tuple)):
            words = value
        else:
            words = [token.text for token
                     in self.analyzer(value, no_morph=True)]

        return iter(sorted(set(words)))

    def has_morph(self):
        """Returns True if this field by default performs morphological
        transformations on its terms, e.g. stemming.
        """

        if self.analyzer:
            return self.analyzer.has_morph()
        else:
            return False

    # Methods related to the posting/vector formats

    def supports(self, name):
        """Returns True if the underlying format supports the given posting
        value type.

        >>> field = TEXT()
        >>> field.supports("positions")
        True
        >>> field.supports("characters")
        False
        """

        return self.format.supports(name)

    def clean(self):
        """Clears any cached information in the field and any child objects.
        """

        if self.format and hasattr(self.format, "clean"):
            self.format.clean()
        if self.vector and hasattr(self.vector, "clean"):
            self.vector.clean()

    # Event methods

    def on_add(self, schema, fieldname):
        # Hook called when this field is added to a schema; no-op by default.
        pass

    def on_remove(self, schema, fieldname):
        # Hook called when this field is removed from a schema; no-op by
        # default.
        pass
class ID(FieldType):
    """Configured field type that indexes the entire value of the field as one
    token. This is useful for data you don't want to tokenize, such as the path
    of a file.
    """

    __inittypes__ = dict(stored=bool, unique=bool, field_boost=float)

    def __init__(self, stored=False, unique=False, field_boost=1.0,
                 spelling=False, sortable=False, analyzer=None):
        """
        :param stored: Whether the value of this field is stored with the
            document.
        :param unique: Whether the value of this field is unique
            per-document.
        :param spelling: Whether to generate word graphs for this field to
            make spelling suggestions much faster.
        :param sortable: If True, make this field sortable using the default
            column type; a :class:`whoosh.columns.Column` instance may be
            passed instead to choose the column type.
        :param analyzer: An analyzer to use instead of the default
            ``IDAnalyzer`` (which indexes the whole value as a single token).
        """

        self.format = formats.Existence(field_boost=field_boost)
        self.analyzer = analyzer or analysis.IDAnalyzer()
        self.stored = stored
        self.unique = unique
        self.spelling = spelling
        self.set_sortable(sortable)
class IDLIST(FieldType):
    """Configured field type for fields containing IDs separated by whitespace
    and/or punctuation (or anything else, using the expression param).
    """

    __inittypes__ = dict(stored=bool, unique=bool, expression=bool,
                         field_boost=float)

    def __init__(self, stored=False, unique=False, expression=None,
                 field_boost=1.0, spelling=False):
        """
        :param stored: Whether the value of this field is stored with the
            document.
        :param unique: Whether the value of this field is unique per-document.
        :param expression: The regular expression object to use to extract
            tokens. The default expression breaks tokens on CRs, LFs, tabs,
            spaces, commas, and semicolons.
        :param spelling: Whether to generate word graphs for this field to
            make spelling suggestions much faster.
        """

        if not expression:
            # Default: split on whitespace and common punctuation.
            expression = re.compile(r"[^\r\n\t ,;]+")
        self.analyzer = analysis.RegexAnalyzer(expression=expression)

        self.format = formats.Existence(field_boost=field_boost)
        self.spelling = spelling
        self.stored = stored
        self.unique = unique
class NUMERIC(FieldType):
    """Special field type that lets you index integer or floating point
    numbers in relatively short fixed-width terms. The field converts numbers
    to sortable bytes for you before indexing.

    You specify the numeric type of the field (``int`` or ``float``) when you
    create the ``NUMERIC`` object. The default is ``int``. For ``int``, you can
    specify a size in bits (``32`` or ``64``). For both ``int`` and ``float``
    you can specify a ``signed`` keyword argument (default is ``True``).

    >>> schema = Schema(path=STORED, position=NUMERIC(int, 64, signed=False))
    >>> ix = storage.create_index(schema)
    >>> with ix.writer() as w:
    ...     w.add_document(path="/a", position=5820402204)
    ...

    You can also use the NUMERIC field to store Decimal instances by specifying
    a type of ``int`` or ``long`` and the ``decimal_places`` keyword argument.
    This simply multiplies each number by ``(10 ** decimal_places)`` before
    storing it as an integer. Of course this may throw away decimal precision
    (by truncating, not rounding) and imposes the same maximum value limits as
    ``int``/``long``, but these may be acceptable for certain applications.

    >>> from decimal import Decimal
    >>> schema = Schema(path=STORED, position=NUMERIC(int, decimal_places=4))
    >>> ix = storage.create_index(schema)
    >>> with ix.writer() as w:
    ...     w.add_document(path="/a", position=Decimal("123.45")
    ...

    """

    def __init__(self, numtype=int, bits=32, stored=False, unique=False,
                 field_boost=1.0, decimal_places=0, shift_step=4, signed=True,
                 sortable=False, default=None):
        """
        :param numtype: the type of numbers that can be stored in this field,
            either ``int``, ``float``. If you use ``Decimal``,
            use the ``decimal_places`` argument to control how many decimal
            places the field will store.
        :param bits: When ``numtype`` is ``int``, the number of bits to use to
            store the number: 8, 16, 32, or 64.
        :param stored: Whether the value of this field is stored with the
            document.
        :param unique: Whether the value of this field is unique per-document.
        :param decimal_places: specifies the number of decimal places to save
            when storing Decimal instances. If you set this, you will always
            get Decimal instances back from the field.
        :param shift_step: The number of bits of precision to shift away at
            each tiered indexing level. Values should generally be 1-8. Lower
            values yield faster searches but take up more space. A value
            of `0` means no tiered indexing.
        :param signed: Whether the numbers stored in this field may be
            negative.
        :param sortable: If True, make this field sortable using a numeric
            column; a :class:`whoosh.columns.Column` instance may be passed
            instead to choose the column type.
        :param default: The column value to use for documents that don't have
            a value in this field. Defaults to the maximum value for ints, or
            NaN for floats.
        """

        # Allow users to specify strings instead of Python types in case
        # docstring isn't clear
        if numtype == "int":
            numtype = int
        if numtype == "float":
            numtype = float
        # Raise an error if the user tries to use a type other than int or
        # float
        if numtype is Decimal:
            numtype = int
            if not decimal_places:
                raise TypeError("To store Decimal instances, you must set the "
                                "decimal_places argument")
        elif numtype not in (int, float):
            raise TypeError("Can't use %r as a type, use int or float"
                            % numtype)
        # Sanity check
        if numtype is float and decimal_places:
            raise Exception("A float type and decimal_places argument %r are "
                            "incompatible" % decimal_places)

        intsizes = [8, 16, 32, 64]
        intcodes = ["B", "H", "I", "Q"]
        # Set up field configuration based on type and size
        if numtype is float:
            bits = 64  # Floats are converted to 64 bit ints
        else:
            if bits not in intsizes:
                raise Exception("Invalid bits %r, use 8, 16, 32, or 64"
                                % bits)
        # Type code for the *sortable* representation
        self.sortable_typecode = intcodes[intsizes.index(bits)]
        self._struct = struct.Struct(">" + self.sortable_typecode)

        self.numtype = numtype
        self.bits = bits
        self.stored = stored
        self.unique = unique
        self.decimal_places = decimal_places
        self.shift_step = shift_step
        self.signed = signed
        self.analyzer = analysis.IDAnalyzer()
        self.format = formats.Existence(field_boost=field_boost)
        self.min_value, self.max_value = self._min_max()

        # Column configuration
        if default is None:
            if numtype is int:
                default = typecode_max[self.sortable_typecode]
            else:
                default = NaN
        elif not self.is_valid(default):
            raise Exception("The default %r is not a valid number for this "
                            "field" % default)

        self.default = default
        self.set_sortable(sortable)

    def __getstate__(self):
        # struct.Struct objects can't be pickled; drop the cached instance
        # and rebuild it in __setstate__.
        d = self.__dict__.copy()
        if "_struct" in d:
            del d["_struct"]
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)
        self._struct = struct.Struct(">" + self.sortable_typecode)
        if "min_value" not in d:
            # Backfill attributes that are missing from pickles created by
            # older versions. BUGFIX: previously this assigned into the local
            # state dict *after* it had already been copied into
            # self.__dict__, so the attributes were never actually set on
            # the unpickled instance.
            self.min_value, self.max_value = self._min_max()

    def _min_max(self):
        """Returns the (min, max) values representable by this field's
        type/bits/signed configuration, used for range checking.
        """
        numtype = self.numtype
        bits = self.bits
        signed = self.signed

        # Calculate the minimum and maximum possible values for error checking
        min_value = from_sortable(numtype, bits, signed, 0)
        max_value = from_sortable(numtype, bits, signed, 2 ** bits - 1)
        return min_value, max_value

    def default_column(self):
        return columns.NumericColumn(self.sortable_typecode,
                                     default=self.default)

    def is_valid(self, x):
        """Returns True if ``x`` can be converted to this field's byte
        representation without a range or conversion error.
        """
        try:
            x = self.to_bytes(x)
        except ValueError:
            return False
        except OverflowError:
            return False

        return True

    def index(self, num, **kwargs):
        # If the user gave us a list of numbers, recurse on the list
        if isinstance(num, (list, tuple)):
            for n in num:
                for item in self.index(n):
                    yield item
            return

        # word, freq, weight, valuestring
        if self.shift_step:
            # Tiered indexing: index the number at several precisions so
            # range queries can match large spans with few terms.
            for shift in xrange(0, self.bits, self.shift_step):
                yield (self.to_bytes(num, shift), 1, 1.0, emptybytes)
        else:
            yield (self.to_bytes(num), 1, 1.0, emptybytes)

    def prepare_number(self, x):
        """Converts a user-supplied value to this field's numeric type,
        applying ``decimal_places`` scaling and range checking. Passes
        through ``emptybytes`` and ``None`` unchanged.
        """
        if x == emptybytes or x is None:
            return x

        dc = self.decimal_places
        if dc and isinstance(x, (string_type, Decimal)):
            x = Decimal(x) * (10 ** dc)
        elif isinstance(x, Decimal):
            raise TypeError("Can't index a Decimal object unless you specified "
                            "decimal_places on the field")

        try:
            x = self.numtype(x)
        except OverflowError:
            raise ValueError("Value %r overflowed number type %r"
                             % (x, self.numtype))

        if x < self.min_value or x > self.max_value:
            raise ValueError("Numeric field value %s out of range [%s, %s]"
                             % (x, self.min_value, self.max_value))
        return x

    def unprepare_number(self, x):
        """Reverses the ``decimal_places`` scaling applied by
        ``prepare_number()``, returning a Decimal when appropriate.
        """
        dc = self.decimal_places
        if dc:
            s = str(x)
            x = Decimal(s[:-dc] + "." + s[-dc:])
        return x

    def to_column_value(self, x):
        if isinstance(x, (list, tuple, array)):
            x = x[0]
        x = self.prepare_number(x)
        return to_sortable(self.numtype, self.bits, self.signed, x)

    def from_column_value(self, x):
        x = from_sortable(self.numtype, self.bits, self.signed, x)
        return self.unprepare_number(x)

    def to_bytes(self, x, shift=0):
        # Try to avoid re-encoding; this sucks because on Python 2 we can't
        # tell the difference between a string and encoded bytes, so we have
        # to require the user use unicode when they mean string
        if isinstance(x, bytes_type):
            return x

        if x == emptybytes or x is None:
            return self.sortable_to_bytes(0)

        x = self.prepare_number(x)
        x = to_sortable(self.numtype, self.bits, self.signed, x)
        return self.sortable_to_bytes(x, shift)

    def sortable_to_bytes(self, x, shift=0):
        # The first byte encodes the shift (precision tier) so terms from
        # different tiers sort apart from each other.
        if shift:
            x >>= shift
        return pack_byte(shift) + self._struct.pack(x)

    def from_bytes(self, bs):
        x = self._struct.unpack(bs[1:])[0]
        x = from_sortable(self.numtype, self.bits, self.signed, x)
        x = self.unprepare_number(x)
        return x

    def process_text(self, text, **kwargs):
        return (self.to_bytes(text),)

    def self_parsing(self):
        return True

    def parse_query(self, fieldname, qstring, boost=1.0):
        from whoosh import query
        from whoosh.qparser.common import QueryParserError

        if qstring == "*":
            return query.Every(fieldname, boost=boost)

        if not self.is_valid(qstring):
            raise QueryParserError("%r is not a valid number" % qstring)

        token = self.to_bytes(qstring)
        return query.Term(fieldname, token, boost=boost)

    def parse_range(self, fieldname, start, end, startexcl, endexcl,
                    boost=1.0):
        from whoosh import query
        from whoosh.qparser.common import QueryParserError

        if start is not None:
            if not self.is_valid(start):
                raise QueryParserError("Range start %r is not a valid number"
                                       % start)
            start = self.prepare_number(start)
        if end is not None:
            if not self.is_valid(end):
                raise QueryParserError("Range end %r is not a valid number"
                                       % end)
            end = self.prepare_number(end)

        return query.NumericRange(fieldname, start, end, startexcl, endexcl,
                                  boost=boost)

    def sortable_terms(self, ixreader, fieldname):
        # Terms are stored with a leading shift byte; only shift-0 (full
        # precision) terms are useful for sorting.
        zero = b("\x00")
        for token in ixreader.lexicon(fieldname):
            if token[0:1] != zero:
                # Only yield the full-precision values
                break
            yield token
class DATETIME(NUMERIC):
    """Special field type that lets you index datetime objects. The field
    converts the datetime objects to sortable text for you before indexing.

    Since this field is based on Python's datetime module it shares all the
    limitations of that module, such as the inability to represent dates before
    year 1 in the proleptic Gregorian calendar. However, since this field
    stores datetimes as an integer number of microseconds, it could easily
    represent a much wider range of dates if the Python datetime implementation
    ever supports them.

    >>> schema = Schema(path=STORED, date=DATETIME)
    >>> ix = storage.create_index(schema)
    >>> w = ix.writer()
    >>> w.add_document(path="/a", date=datetime.now())
    >>> w.commit()
    """

    __inittypes__ = dict(stored=bool, unique=bool)

    def __init__(self, stored=False, unique=False, sortable=False):
        """
        :param stored: Whether the value of this field is stored with the
            document.
        :param unique: Whether the value of this field is unique per-document.
        :param sortable: If True, make this field sortable using the default
            column type.
        """
        # Datetimes are stored as 64-bit integers (see datetime_to_long), so
        # delegate to NUMERIC with a fixed type/size configuration.
        super(DATETIME, self).__init__(int, 64, stored=stored,
                                       unique=unique, shift_step=8,
                                       sortable=sortable)

    def prepare_datetime(self, x):
        """Converts a datetime object (or a parseable date string) into the
        long-integer representation used for indexing. Already-encoded bytes
        pass through unchanged.
        """
        from whoosh.util.times import floor

        if isinstance(x, text_type):
            # For indexing, support same strings as for query parsing --
            # convert unicode to datetime object
            x = self._parse_datestring(x)
            x = floor(x)  # this makes most sense (unspecified = lowest)

        if isinstance(x, datetime.datetime):
            return datetime_to_long(x)
        elif isinstance(x, bytes_type):
            return x
        else:
            raise Exception("%r is not a datetime" % (x,))

    def to_column_value(self, x):
        if isinstance(x, bytes_type):
            raise Exception("%r is not a datetime" % (x,))
        if isinstance(x, (list, tuple)):
            x = x[0]
        return self.prepare_datetime(x)

    def from_column_value(self, x):
        return long_to_datetime(x)

    def to_bytes(self, x, shift=0):
        x = self.prepare_datetime(x)
        return NUMERIC.to_bytes(self, x, shift=shift)

    def from_bytes(self, bs):
        x = NUMERIC.from_bytes(self, bs)
        return long_to_datetime(x)

    def _parse_datestring(self, qstring):
        # This method parses a very simple datetime representation of the form
        # YYYY[MM[DD[hh[mm[ss[uuuuuu]]]]]]
        from whoosh.util.times import adatetime, fix, is_void

        qstring = qstring.replace(" ", "").replace("-", "").replace(".", "")
        year = month = day = hour = minute = second = microsecond = None
        if len(qstring) >= 4:
            year = int(qstring[:4])
        if len(qstring) >= 6:
            month = int(qstring[4:6])
        if len(qstring) >= 8:
            day = int(qstring[6:8])
        if len(qstring) >= 10:
            hour = int(qstring[8:10])
        if len(qstring) >= 12:
            minute = int(qstring[10:12])
        if len(qstring) >= 14:
            second = int(qstring[12:14])
        if len(qstring) == 20:
            microsecond = int(qstring[14:])

        at = fix(adatetime(year, month, day, hour, minute, second,
                           microsecond))
        if is_void(at):
            raise Exception("%r is not a parseable date" % qstring)
        return at

    def parse_query(self, fieldname, qstring, boost=1.0):
        from whoosh import query
        from whoosh.util.times import is_ambiguous

        try:
            at = self._parse_datestring(qstring)
        except Exception:
            # BUGFIX: narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit. sys.exc_info() (rather than
            # "except ... as e") is kept for old-Python compatibility,
            # matching the idiom used elsewhere in this codebase.
            e = sys.exc_info()[1]
            return query.error_query(e)

        if is_ambiguous(at):
            # A partial date (e.g. just a year) matches the entire span it
            # covers, so turn it into a range query.
            startnum = datetime_to_long(at.floor())
            endnum = datetime_to_long(at.ceil())
            return query.NumericRange(fieldname, startnum, endnum)
        else:
            return query.Term(fieldname, at, boost=boost)

    def parse_range(self, fieldname, start, end, startexcl, endexcl,
                    boost=1.0):
        from whoosh import query

        if start is None and end is None:
            return query.Every(fieldname, boost=boost)

        if start is not None:
            startdt = self._parse_datestring(start).floor()
            start = datetime_to_long(startdt)

        if end is not None:
            enddt = self._parse_datestring(end).ceil()
            end = datetime_to_long(enddt)

        return query.NumericRange(fieldname, start, end, boost=boost)
class BOOLEAN(FieldType):
    """Special field type that lets you index boolean values (True and False).
    The field converts the boolean values to text for you before indexing.

    >>> schema = Schema(path=STORED, done=BOOLEAN)
    >>> ix = storage.create_index(schema)
    >>> w = ix.writer()
    >>> w.add_document(path="/a", done=False)
    >>> w.commit()
    """

    # Index terms for False and True respectively.
    bytestrings = (b("f"), b("t"))
    # Strings recognized as explicit true/false values (lowercased).
    trues = frozenset(u("t true yes 1").split())
    falses = frozenset(u("f false no 0").split())

    __inittypes__ = dict(stored=bool, field_boost=float)

    def __init__(self, stored=False, field_boost=1.0):
        """
        :param stored: Whether the value of this field is stored with the
            document.
        """

        self.stored = stored
        self.field_boost = field_boost
        self.format = formats.Existence(field_boost=field_boost)

    def _obj_to_bool(self, x):
        # We special case strings such as "true", "false", "yes", "no", but
        # otherwise call bool() on the query value. This lets you pass objects
        # as query values and do the right thing.
        if isinstance(x, string_type):
            lowered = x.lower()
            if lowered in self.trues:
                return True
            if lowered in self.falses:
                return False
        return bool(x)

    def to_bytes(self, x):
        # Already-encoded values pass straight through. NOTE: on the indexing
        # side a string counts as True only if it appears in ``trues`` --
        # unlike _obj_to_bool (query side), which also consults ``falses``.
        if isinstance(x, bytes_type):
            return x
        if isinstance(x, string_type):
            flag = x.lower() in self.trues
        else:
            flag = bool(x)
        return self.bytestrings[int(flag)]

    def index(self, bit, **kwargs):
        if isinstance(bit, string_type):
            flag = bit.lower() in self.trues
        else:
            flag = bool(bit)
        # word, freq, weight, valuestring
        return [(self.bytestrings[int(flag)], 1, 1.0, emptybytes)]

    def self_parsing(self):
        return True

    def parse_query(self, fieldname, qstring, boost=1.0):
        from whoosh import query

        if qstring == "*":
            return query.Every(fieldname, boost=boost)

        return query.Term(fieldname, self._obj_to_bool(qstring), boost=boost)
class STORED(FieldType):
    """Configured field type for fields you want to store but not index.
    """

    # Overrides the FieldType defaults: never indexed, always stored.
    indexed = False
    stored = True

    def __init__(self):
        # No configuration needed; the class attributes above say it all.
        pass
class COLUMN(FieldType):
    """Configured field type for fields you want to store as a per-document
    value column but not index.
    """

    indexed = False
    stored = False

    def __init__(self, columnobj=None):
        """
        :param columnobj: A :class:`whoosh.columns.Column` instance to use to
            store the per-document values; defaults to ``VarBytesColumn``.
        """

        column = columns.VarBytesColumn() if columnobj is None else columnobj
        if not isinstance(column, columns.Column):
            raise TypeError("%r is not a column object" % (column,))
        self.column_type = column

    def to_bytes(self, v):
        # Values are stored in the column exactly as given.
        return v

    def from_bytes(self, b):
        # Inverse of to_bytes(): the identity function.
        return b
class KEYWORD(FieldType):
    """Configured field type for fields containing space-separated or
    comma-separated keyword-like data (such as tags). The default is to not
    store positional information (so phrase searching is not allowed in this
    field) and to not make the field scorable.
    """

    __inittypes__ = dict(stored=bool, lowercase=bool, commas=bool,
                         scorable=bool, unique=bool, field_boost=float)

    def __init__(self, stored=False, lowercase=False, commas=False,
                 vector=None, scorable=False, unique=False, field_boost=1.0,
                 spelling=False, sortable=False):
        """
        :param stored: Whether to store the value of the field with the
            document.
        :param lowercase: Whether to lowercase the tokens before indexing.
        :param commas: Whether this is a comma-separated field. If this is
            False (the default), it is treated as a space-separated field.
        :param vector: A :class:`whoosh.formats.Format` object to use to
            store term vectors, or a format class or any other truthy value
            to store vectors using the same format as the inverted index, or
            ``None``/``False`` to not store vectors.
        :param scorable: Whether this field is scorable.
        :param unique: Whether the value of this field is unique
            per-document.
        :param spelling: Whether to generate word graphs for this field to
            make spelling suggestions much faster.
        :param sortable: If True, make this field sortable using the default
            column type; a :class:`whoosh.columns.Column` instance may be
            passed instead to choose the column type.
        """

        self.analyzer = analysis.KeywordAnalyzer(lowercase=lowercase,
                                                 commas=commas)
        self.format = formats.Frequency(field_boost=field_boost)
        self.scorable = scorable
        self.stored = stored
        self.unique = unique
        self.spelling = spelling

        # Normalize the vector argument: a format class is instantiated, a
        # Format instance is used as-is, any other truthy value means "same
        # format as the inverted index".
        if vector:
            if type(vector) is type:
                vector = vector()
            elif isinstance(vector, formats.Format):
                pass
            else:
                vector = self.format
        else:
            vector = None
        self.vector = vector

        # Use the base-class helper (like TEXT does) so that a
        # columns.Column instance passed as ``sortable`` is honored instead
        # of being silently replaced with the default column type.
        self.set_sortable(sortable)
class TEXT(FieldType):
    """Configured field type for text fields (for example, the body text of an
    article). The default is to store positional information to allow phrase
    searching. This field type is always scorable.
    """

    __inittypes__ = dict(analyzer=analysis.Analyzer, phrase=bool,
                         vector=object, stored=bool, field_boost=float)

    def __init__(self, analyzer=None, phrase=True, chars=False, vector=None,
                 stored=False, field_boost=1.0, multitoken_query="default",
                 spelling=False, sortable=False, lang=None):
        """
        :param analyzer: The analysis.Analyzer to use to index the field
            contents. See the analysis module for more information. If you omit
            this argument, the field uses analysis.StandardAnalyzer.
        :param phrase: Whether the store positional information to allow phrase
            searching.
        :param chars: Whether to store character ranges along with positions.
            If this is True, "phrase" is also implied.
        :param vector: A :class:`whoosh.formats.Format` object to use to store
            term vectors, or ``True`` to store vectors using the same format as
            the inverted index, or ``None`` or ``False`` to not store vectors.
            By default, fields do not store term vectors.
        :param stored: Whether to store the value of this field with the
            document. Since this field type generally contains a lot of text,
            you should avoid storing it with the document unless you need to,
            for example to allow fast excerpts in the search results.
        :param multitoken_query: What kind of query to use when a "word" in a
            user query parses into multiple tokens (see the class docs for
            :class:`FieldType`).
        :param spelling: Whether to generate word graphs for this field to make
            spelling suggestions much faster.
        :param sortable: If True, make this field sortable using the default
            column type. If you pass a :class:`whoosh.columns.Column` instance
            instead of True, the field will use the given column type.
        :param lang: automaticaly configure a
            :class:`whoosh.analysis.LanguageAnalyzer` for the given language.
            This is ignored if you also specify an ``analyzer``.
        """

        # An explicit analyzer wins over a language; fall back to the
        # standard analyzer.
        if analyzer:
            self.analyzer = analyzer
        elif lang:
            self.analyzer = analysis.LanguageAnalyzer(lang)
        else:
            self.analyzer = analysis.StandardAnalyzer()

        # chars implies phrase; pick the richest requested posting format.
        if chars:
            formatclass = formats.Characters
        elif phrase:
            formatclass = formats.Positions
        else:
            formatclass = formats.Frequency
        self.format = formatclass(field_boost=field_boost)

        # Normalize the vector argument: a format class is instantiated, a
        # Format instance is used as-is, any other truthy value means "same
        # format as the inverted index".
        if vector:
            if type(vector) is type:
                vector = vector()
            elif isinstance(vector, formats.Format):
                pass
            else:
                vector = formatclass()
        else:
            vector = None
        self.vector = vector

        # Delegate to the base-class helper, which implements exactly the
        # logic this constructor previously inlined (a columns.Column
        # instance is used as-is, any other truthy value selects the
        # default VarBytesColumn, falsy disables sorting).
        self.set_sortable(sortable)

        self.multitoken_query = multitoken_query
        self.scorable = True
        self.stored = stored
        self.spelling = spelling
class NGRAM(FieldType):
    """Configured field that indexes text as N-grams. For example, with a field
    type NGRAM(3,4), the value "hello" will be indexed as tokens
    "hel", "hell", "ell", "ello", "llo". This field type chops the entire text
    into N-grams, including whitespace and punctuation. See :class:`NGRAMWORDS`
    for a field type that breaks the text into words first before chopping the
    words into N-grams.
    """

    __inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
                         field_boost=float, queryor=bool, phrase=bool)
    scorable = True

    def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
                 queryor=False, phrase=False, sortable=False):
        """
        :param minsize: The minimum length of the N-grams.
        :param maxsize: The maximum length of the N-grams.
        :param stored: Whether to store the value of this field with the
            document. Since this field type generally contains a lot of text,
            you should avoid storing it with the document unless you need to,
            for example to allow fast excerpts in the search results.
        :param queryor: if True, combine the N-grams with an Or query. The
            default is to combine N-grams with an And query.
        :param phrase: store positions on the N-grams to allow exact phrase
            searching. The default is off.
        :param sortable: If True, make this field sortable using the default
            column type.
        """

        # Positions are only stored when phrase searching is requested.
        fmtclass = formats.Positions if phrase else formats.Frequency
        self.format = fmtclass(field_boost=field_boost)
        self.analyzer = analysis.NgramAnalyzer(minsize, maxsize)

        self.stored = stored
        self.queryor = queryor
        self.set_sortable(sortable)

    def self_parsing(self):
        return True

    def parse_query(self, fieldname, qstring, boost=1.0):
        from whoosh import query

        # Each N-gram of the query string becomes a Term, combined with
        # either Or or And depending on the field configuration.
        combiner = query.Or if self.queryor else query.And
        subqueries = [query.Term(fieldname, gram)
                      for gram in self.process_text(qstring, mode='query')]
        return combiner(subqueries, boost=boost)
class NGRAMWORDS(NGRAM):
"""Configured field that chops text into words using a tokenizer,
lowercases the words, and then chops the words into N-grams.
"""
__inittypes__ = dict(minsize=int, maxsize=int, stored=bool,
field_boost=float, tokenizer=analysis.Tokenizer,
at=str, queryor=bool)
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
tokenizer=None, at=None, queryor=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param tokenizer: an instance of :class:`whoosh.analysis.Tokenizer`
used to break the text into words.
:param at: if 'start', only takes N-grams from the start of the word.
If 'end', only takes N-grams from the end. Otherwise the default
is to take all N-grams from each word.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
"""
self.analyzer = analysis.NgramWordAnalyzer(minsize, maxsize, tokenizer,
at=at)
self.format = formats.Frequency(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
# Schema class
class MetaSchema(type):
def __new__(cls, name, bases, attrs):
super_new = super(MetaSchema, cls).__new__
if not any(b for b in bases if isinstance(b, MetaSchema)):
# If this isn't a subclass of MetaSchema, don't do anything special
return super_new(cls, name, bases, attrs)
# Create the class
special_attrs = {}
for key in list(attrs.keys()):
if key.startswith("__"):
special_attrs[key] = attrs.pop(key)
new_class = super_new(cls, name, bases, special_attrs)
fields = {}
for b in bases:
if hasattr(b, "_clsfields"):
fields.update(b._clsfields)
fields.update(attrs)
new_class._clsfields = fields
return new_class
def schema(self):
return Schema(**self._clsfields)
class Schema(object):
"""Represents the collection of fields in an index. Maps field names to
FieldType objects which define the behavior of each field.
Low-level parts of the index use field numbers instead of field names for
compactness. This class has several methods for converting between the
field name, field number, and field object itself.
"""
def __init__(self, **fields):
""" All keyword arguments to the constructor are treated as fieldname =
fieldtype pairs. The fieldtype can be an instantiated FieldType object,
or a FieldType sub-class (in which case the Schema will instantiate it
with the default constructor before adding it).
For example::
s = Schema(content = TEXT,
title = TEXT(stored = True),
tags = KEYWORD(stored = True))
"""
self._fields = {}
self._dyn_fields = {}
for name in sorted(fields.keys()):
self.add(name, fields[name])
def copy(self):
"""Returns a shallow copy of the schema. The field instances are not
deep copied, so they are shared between schema copies.
"""
return self.__class__(**self._fields)
def __eq__(self, other):
return (other.__class__ is self.__class__
and list(self.items()) == list(other.items()))
def __ne__(self, other):
return not(self.__eq__(other))
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.names())
def __iter__(self):
"""Returns the field objects in this schema.
"""
return iter(self._fields.values())
def __getitem__(self, name):
"""Returns the field associated with the given field name.
"""
if name in self._fields:
return self._fields[name]
for expr, fieldtype in itervalues(self._dyn_fields):
if expr.match(name):
return fieldtype
raise KeyError("No field named %r" % (name,))
def __len__(self):
"""Returns the number of fields in this schema.
"""
return len(self._fields)
def __contains__(self, fieldname):
"""Returns True if a field by the given name is in this schema.
"""
# Defined in terms of __getitem__ so that there's only one method to
# override to provide dynamic fields
try:
field = self[fieldname]
return field is not None
except KeyError:
return False
def items(self):
"""Returns a list of ("fieldname", field_object) pairs for the fields
in this schema.
"""
return sorted(self._fields.items())
def names(self, check_names=None):
"""Returns a list of the names of the fields in this schema.
:param check_names: (optional) sequence of field names to check
whether the schema accepts them as (dynamic) field names -
acceptable names will also be in the result list.
Note: You may also have static field names in check_names, that
won't create duplicates in the result list. Unsupported names
will not be in the result list.
"""
fieldnames = set(self._fields.keys())
if check_names is not None:
check_names = set(check_names) - fieldnames
fieldnames.update(fieldname for fieldname in check_names
if fieldname in self)
return sorted(fieldnames)
def clean(self):
for field in self:
field.clean()
def add(self, name, fieldtype, glob=False):
"""Adds a field to this schema.
:param name: The name of the field.
:param fieldtype: An instantiated fields.FieldType object, or a
FieldType subclass. If you pass an instantiated object, the schema
will use that as the field configuration for this field. If you
pass a FieldType subclass, the schema will automatically
instantiate it with the default constructor.
"""
# Check field name
if name.startswith("_"):
raise FieldConfigurationError("Field names cannot start with an "
"underscore")
if " " in name:
raise FieldConfigurationError("Field names cannot contain spaces")
if name in self._fields or (glob and name in self._dyn_fields):
raise FieldConfigurationError("Schema already has a field %r"
% name)
# If the user passed a type rather than an instantiated field object,
# instantiate it automatically
if type(fieldtype) is type:
try:
fieldtype = fieldtype()
except:
e = sys.exc_info()[1]
raise FieldConfigurationError("Error: %s instantiating field "
"%r: %r" % (e, name, fieldtype))
if not isinstance(fieldtype, FieldType):
raise FieldConfigurationError("%r is not a FieldType object"
% fieldtype)
if glob:
expr = re.compile(fnmatch.translate(name))
self._dyn_fields[name] = (expr, fieldtype)
else:
fieldtype.on_add(self, name)
self._fields[name] = fieldtype
def remove(self, fieldname):
if fieldname in self._fields:
self._fields[fieldname].on_remove(self, fieldname)
del self._fields[fieldname]
elif fieldname in self._dyn_fields:
del self._dyn_fields[fieldname]
else:
raise KeyError("No field named %r" % fieldname)
def has_vectored_fields(self):
"""Returns True if any of the fields in this schema store term vectors.
"""
return any(ftype.vector for ftype in self)
def has_scorable_fields(self):
return any(ftype.scorable for ftype in self)
def stored_names(self):
"""Returns a list of the names of fields that are stored.
"""
return [name for name, field in self.items() if field.stored]
def scorable_names(self):
"""Returns a list of the names of fields that store field
lengths.
"""
return [name for name, field in self.items() if field.scorable]
def vector_names(self):
"""Returns a list of the names of fields that store vectors.
"""
return [name for name, field in self.items() if field.vector]
def separate_spelling_names(self):
"""Returns a list of the names of fields that require special handling
for generating spelling graphs... either because they store graphs but
aren't indexed, or because the analyzer is stemmed.
"""
return [name for name, field in self.items()
if field.spelling and field.separate_spelling()]
class SchemaClass(with_metaclass(MetaSchema, Schema)):
"""Allows you to define a schema using declarative syntax, similar to
Django models.py::
class MySchema(SchemaClass):
path = ID
date = DATETIME
content = TEXT
You can use inheritance to share common fields between schemas::
class Parent(SchemaClass):
path = ID(stored=True)
date = DATETIME
class Child1(Parent):
content = TEXT(positions=False)
class Child2(Parent):
tags = KEYWORD
This class overrides ``__new__`` so instantiating your sub-class always
results in an instance of ``Schema``.
>>> class MySchema(SchemaClass):
... title = TEXT(stored=True)
... content = TEXT
...
>>> s = MySchema()
>>> type(s)
<class 'whoosh.fields.Schema'>
"""
def __new__(cls, *args, **kwargs):
obj = super(Schema, cls).__new__(Schema)
kw = getattr(cls, "_clsfields", {})
kw.update(kwargs)
obj.__init__(*args, **kw)
return obj
def ensure_schema(schema):
if isinstance(schema, type) and issubclass(schema, Schema):
schema = schema.schema()
if not isinstance(schema, Schema):
raise FieldConfigurationError("%r is not a Schema" % schema)
return schema
def merge_fielddict(d1, d2):
keyset = set(d1.keys()) | set(d2.keys())
out = {}
for name in keyset:
field1 = d1.get(name)
field2 = d2.get(name)
if field1 and field2 and field1 != field2:
raise Exception("Inconsistent field %r: %r != %r"
% (name, field1, field2))
out[name] = field1 or field2
return out
def merge_schema(s1, s2):
schema = Schema()
schema._fields = merge_fielddict(s1._fields, s2._fields)
schema._dyn_fields = merge_fielddict(s1._dyn_fields, s2._dyn_fields)
return schema
def merge_schemas(schemas):
schema = schemas[0]
for i in xrange(1, len(schemas)):
schema = merge_schema(schema, schemas[i])
return schema
| [
"ubunicorn@googlemail.com"
] | ubunicorn@googlemail.com |
61a68ab16ef1c92be2bcf040f1cde0f7934ad563 | db26b08ad35594a8a2aba36a68bfe37f85728727 | /DjangoAngularAssignment/conversation/views.py | 4f355cbd6325591ee242efd26e5640a770b7f29c | [] | no_license | rafikkamal/django | 83f3217f3e31fd13a5e1c4e81ed583ccfd4971fe | 7d593707f0f33b074e22f75f9ed0a6e0ef0b39d1 | refs/heads/master | 2021-01-20T06:25:30.872254 | 2017-04-30T21:02:12 | 2017-04-30T21:02:12 | 89,877,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,687 | py | from django.db.models import Q
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.contrib.auth import authenticate, login
from .models import Conversation
from django.views.generic import View
from .forms import UserForm
from django.contrib.auth.models import User
from .models import Conversation,Conversations
#from django.core.urlsolvers import reverse_lazy
# Create your views here.
def index(request):
if request.user.is_authenticated():
users = User.objects.all().exclude(pk=request.user.id)
#return HttpResponse(users)
return render(request, 'conversation/index.html', {'users': users})
else:
return render(request, 'conversation/login.html', {'message': 'HEllo !'})
def login_user(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return render(request, 'conversation/index.html', {'message': 'Logged in Successfully !'})
else:
return render(request, 'conversation/login.html', {'error_message': 'Your account has been disabled'})
else:
return render(request, 'conversation/login.html', {'error_message': 'Invalid login'})
return render(request, 'conversation/login.html')
class DetailView(generic.DetailView):
model = Conversation
template_name = 'conversation/detail.html'
class UserFormView(View):
form_class = UserForm
template_name = 'conversation/registration_form.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('conversation:index')
return render(request, self.template_name, {'form': form})
def VarifyUser(pk):
try:
user = User.objects.get(pk=pk)
except User.DoesNotExist:
users = User.objects.all().exclude(pk=request.user.id)
return render(request, 'conversation/index.html', {'users': users, 'message': 'No Such User Found'})
return user
class ConversationListView(View):
def get(self, request, pk, *args, **kwargs):
user = VarifyUser(pk)
conversations = Conversations.objects.get(Q(created_by=request.user.id, created_for=pk) |
Q(created_by=pk, created_for=request.user.id))
#return HttpResponse(conversations)
if conversations:
conversation = Conversation.objects.filter(conversation=conversations)
return render(request, 'conversation/conversation_template.html',
{'user': user, 'conversation': conversation, 'id': conversations.id})
return render(request, 'conversation/connect.html', {'user': user, 'conversations': conversations})
class ConversationProcessor(View):
def post(self, request, pk, *args, **kwargs):
user = VarifyUser(pk)
if request.method == "POST":
message = request.POST['message']
conversations = Conversations.objects.get(Q(created_by=request.user.id, created_for=pk) |
Q(created_by=pk, created_for=request.user.id))
if str(conversations.id) == request.POST['conversation']:
conversation = Conversation()
conversation.message = message
conversation.user = request.user
conversation.conversation = conversations
conversation.save()
return redirect('conversation:conversation_connect', pk=int(pk))
else:
users = User.objects.all().exclude(pk=request.user.id)
return render(request, 'conversation/index.html', {'users': users, 'message': 'You Dont have access!!!'})
class ConversationProcessorUpdate(View):
def post(self, request, pk, *args, **kwargs):
user = VarifyUser(pk)
if request.method == "POST":
message = request.POST['message']
conversations = Conversations.objects.get(Q(created_by=request.user.id, created_for=pk) |
Q(created_by=pk, created_for=request.user.id))
if str(conversations.id) == request.POST['conversation']:
conversation = Conversation.objects.get(pk=request.POST['conversation_id'])
conversation.message = message
conversation.save()
return redirect('conversation:conversation_connect', pk=int(pk))
class ConversationProcessorDelete(View):
def post(self, request, pk, *args, **kwargs):
user = VarifyUser(pk)
if request.method == "POST":
conversations = Conversations.objects.get(Q(created_by=request.user.id, created_for=pk) |
Q(created_by=pk, created_for=request.user.id))
if str(conversations.id) == request.POST['conversation']:
conversation = Conversation.objects.get(pk=request.POST['conversation_id'])
conversation.delete()
return redirect('conversation:conversation_connect', pk=int(pk))
class ConversationProcessorArchive(View):
def post(self, request, pk, *args, **kwargs):
user = VarifyUser(pk)
if request.method == "POST":
conversations = Conversations.objects.get(Q(created_by=request.user.id, created_for=pk) |
Q(created_by=pk, created_for=request.user.id))
if str(conversations.id) == request.POST['conversation']:
conversation = Conversation.objects.get(pk=request.POST['conversation_id'])
conversation.archive = True
conversation.save()
return redirect('conversation:conversation_connect', pk=int(pk))
| [
"rafik.rkn@gmail.com"
] | rafik.rkn@gmail.com |
23eee071d0d6157fda786b1c2e638cb649ec482e | 10261f7a8864a8db3970d9e8ecc26645cf7f901c | /custom_bandits/custom_bandits/envs/custom_twoArm_env.py | f2b8aeadf05c39267ec261d52a8c36356451f023 | [] | no_license | abhikmr778/CS698R-ABHINAV-KUMAR-16907018-ASSIGN-1 | 6219bf94e86b37a55082e761ec291a788ad50351 | ff722b7d9d27fbe6571063a26e04e48efb8f10f3 | refs/heads/main | 2023-07-29T06:47:26.097700 | 2021-09-11T11:13:32 | 2021-09-11T11:13:32 | 405,250,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,723 | py | ###########################################################################
# #
# Environment Class template followed from #
# https://stable-baselines.readthedocs.io/en/master/guide/custom_env.html #
# #
###########################################################################
import gym
from gym import spaces
import numpy as np
class TwoArmBandit(gym.Env):
# metadata = {'render.modes':['human']}
def __init__(self, alpha, beta, seed):
super(TwoArmBandit, self).__init__()
N_DISCRETE_ACTIONS = 2
LEFT = 0
RIGHT = 1
N_DISCRETE_STATES = 3
self.alpha = alpha
self.beta = beta
self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
self.observation_space = spaces.Discrete(N_DISCRETE_STATES)
# Data structure to store MDP of 2-arm Bernoulli Bandit
self.P = {}
self.P[0] = {
LEFT: [[self.alpha, 1, 0, True], [1-self.alpha, 2, 1, True]],
RIGHT: [[self.beta, 2, 1, True], [1-self.beta, 1, 0, True]]
}
self.P[1] = {
LEFT: [[1,1,0,True]],
RIGHT: [[1,1,0,True]]
}
self.P[2] = {
LEFT: [[1,2,0,True]],
RIGHT: [[1,2,0,True]]
}
self.q_value = np.array([1-self.alpha,self.beta])
self.agent_position = self.reset()
self.seed(seed)
def step(self, action):
# get transition from MDP dynamics
probabilities = []
next_states = []
rewards = []
dones = []
# sample transition according to the probabilities of transition function
for dynamic in self.P[self.agent_position][action]:
probabilities.append(dynamic[0])
idx = [i for i in range(len(self.P[self.agent_position][action]))]
j = int(np.random.choice(a=idx,size=1,p=probabilities))
_, observation, reward, done = self.P[self.agent_position][action][j]
# update agent's position
self.agent_position = observation
info = {}
return observation, reward, done, info
def reset(self):
self.agent_position = 0
return self.agent_position # reward, done, info can't be included
def seed(self, seed=0):
np.random.seed(seed)
def render(self, mode='human'):
raise NotImplementedError
def close (self):
raise NotImplementedError
| [
"abhi.kumar778@gmail.com"
] | abhi.kumar778@gmail.com |
46276e90877480976304b67321686d3487c1d5fe | a4fd0e2674264327b50cc913312d959043610ef9 | /My_Social_Project/My_Social_Project/urls.py | 867f51eecae02f6db439256148b143bc629e1ddb | [] | no_license | ztarikul/InstaShohor-Social-Media-Site | 7dedc88ecdffd26501063935ac448482613e3dfd | 08d56d1c185435997930a4a7d8b768442acd4be9 | refs/heads/master | 2023-02-13T12:28:25.359635 | 2021-01-09T23:32:03 | 2021-01-09T23:32:03 | 328,261,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
from App_Posts import views
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('App_Login.urls')),
path('post/', include('App_Posts.urls')),
path('', views.home, name='home'),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"ztarikulislam@gmail.com"
] | ztarikulislam@gmail.com |
cbf504b8cd17c0a67f5038d1dce21a3000e0629c | 6366498f7fcaf6c73c34fb0428bbbb899c8a8703 | /Math/ProjectEuler005.py | 1378b169df4cb2a308f4608ae22d4592a5586cc2 | [] | no_license | bibhuty-did-this/MySolutions | 2a3da0369cec5ced00c167d80008b2c11e8edbd5 | 4c6cad6853ac5626ada4302dd4770ec0cbcc99f3 | refs/heads/master | 2023-06-14T08:38:29.220996 | 2023-06-01T04:49:05 | 2023-06-01T04:49:05 | 102,737,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | # Algorithm:
# You have to find the lcm of all the numbers combinedly
# Generate a lookup table
from fractions import gcd
lcm=[]
lcm.append(1)
for i in range(1,41):
lcm.append(((lcm[i-1]*i)/gcd(lcm[i-1],i)))
# Display the result
for _ in range(int(raw_input())):
print lcm[int(raw_input())] | [
"emailofpanda@yahoo.com"
] | emailofpanda@yahoo.com |
7824b29dc495b887680fcb309cd946bcb8051fb5 | 34229ca69dda0acf53daba3a9696f93ae341d226 | /lab/lab01/lab01.py | 82ec65a20af4baf650c6642bf906bf0d4aaea5b9 | [] | no_license | Unc1eWu/CS61A | 60687f0ac3722997af9d3aef4c1190611942197c | 18a379a30b2d16629f649e091623892dc2c15df7 | refs/heads/master | 2020-04-29T16:59:55.469735 | 2019-09-22T09:08:29 | 2019-09-22T09:08:29 | 176,282,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | """Lab 1: Expressions and Control Structures"""
# Q3
def sum_digits(n):
"""Sum all the digits of n.
>>> sum_digits(10) # 1 + 0 = 1
1
>>> sum_digits(4224) # 4 + 2 + 2 + 4 = 12
12
>>> sum_digits(1234567890)
45
"""
"*** YOUR CODE HERE ***"
sum = 0
while n > 0:
sum += n % 10
n = n // 10
return sum
| [
"Weijie.Wu17@student.xjtlu.edu.cn"
] | Weijie.Wu17@student.xjtlu.edu.cn |
d0e79b2550921ec82c01575be4ab9c242dc8b522 | 0313d27c61d83f4cc783611d23cd11dd9af20d12 | /flyback.py | 4ec898bebf9c00c54e9346439ae546d11eceda39 | [
"MIT"
] | permissive | di2mot/Flyback_registrator | fe45e3fa5701e8550f21a274276a157e2d38c709 | 1cd887b16bf31b60f0cbb8166fc813dd96c812da | refs/heads/main | 2023-03-01T20:46:15.724438 | 2021-02-08T19:20:32 | 2021-02-08T19:20:32 | 337,182,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,310 | py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
import time
'''
СУПЕРТ ТУПАЯ ПРОГА для регистрации на форуме flyback.org.ru
Да, решить просто, но на тот момент я не знал методов расчёта, но умел в Python.
Ниже решение за 1 минуту.
Для условия типа: Даны резисторыR1=40ом R2=310ом R3=300ом.
все элементы схемы идеальны, падение напряжение на диоде равно нулю
Входное напряжение: синусоидальное 50гц, с действующим значением 54В.
Найти до какого значения зарядится конденсатор.
Решение: 54В * (R2 / (R1 + R2)) = 47.83
ДА, всё максимально просто. Хотите сложнее? Запустите скрипт)))
'''
text = {
'username': 'you_login',
'email': 'you_email',
'bday_day': '1',
'bday_year': '1970',
'new_password': 'strongPassword',
'password_confirm': 'strongPassword',
}
# если нужно найти вольтаж, то начните с близкого к этому значению, если энергию, то с 0.01
volt = 40.00
start = time.time()
link = 'http://flyback.org.ru/user.php?mode=register&agreed=true'
# Мне было лень думать над первым селектором
selector_0 : str = 'body > table:nth-child(6) > tbody > tr > td > form > table.forumline > tbody > tr:nth-child(7) > td:nth-child(2)'
selector_1 : str = 'username'
selector_2 : str = 'email'
selector_3 : str = 'bday_day'
selector_4 : str = 'bday_month'
selector_5 : str = '6'
selector_6 : str = 'bday_year'
selector_7 : str = 'new_password'
selector_8 : str = 'password_confirm'
selector_9 : str = 'answer'
selector_10 : str = 'apply'
selector_11: str = 'submit'
# Ну так калечно
run = True
try:
# Это с отключённым графическим режимом
chrome_options = Options()
chrome_options.add_argument("--headless")
#стандартная инициализация
browser = webdriver.Chrome(options=chrome_options)
# Это для графичес кого режима
# browser = webdriver.Chrome()
browser.get(link)
# тут получаю текст задания что бы было
task = browser.find_element_by_css_selector(selector_0)
print(task.text)
# вставляю текст в форму
username = browser.find_element_by_name(selector_1)
username.send_keys(text['username'])
# вставляю текст в форму
email = browser.find_element_by_name(selector_2)
email.send_keys(text['email'])
# вставляю текст в форму
bday_day = browser.find_element_by_name(selector_3)
bday_day.clear()
bday_day.send_keys(text['bday_day'])
# выбираем месяц
bday_day = Select(browser.find_element_by_name(selector_4))
bday_day.select_by_value(selector_5)
# вставляю текст в форму
bday_year = browser.find_element_by_name(selector_6)
bday_year.clear()
bday_year.send_keys(text['bday_year'])
# выбираю параметр
apply = browser.find_element_by_name(selector_10)
apply.click()
while run:
# вставляю текст в форму
new_password = browser.find_element_by_name(selector_7)
new_password.send_keys(text['new_password'])
# вставляю текст в форму
password_confirm = browser.find_element_by_name(selector_8)
password_confirm.send_keys(text['password_confirm'])
# вставляю текст в форму
answer = browser.find_element_by_name(selector_9)
answer.clear()
# что бы адекватно округляло
imp_answer = "{0:0.2f}".format(volt)
answer.send_keys(imp_answer)
# клик по кнопке
button = browser.find_element_by_name(selector_11)
button.click()
try:
findText = browser.find_element_by_tag_name('h1')
if findText.text == 'Неверный ответ':
volt += 0.01
browser.back()
else:
print(volt, imp_answer)
run = False
except NoSuchElementException:
print(volt, imp_answer)
except NoSuchElementException:
print('Finish!')
finally:
# А тут чисто что бы глянуть результат
# получаем исходный код
html = browser.page_source
# Пишим в файл. Не надо? Закомитить
with open('page.html', 'w') as file:
file.write(html)
# закрываем браузер после всех манипуляций
browser.quit()
fin = time.time()
rez_time = fin - start
# Просто оценить потраченное время
print(f'Finish!\n It took:\n second: {rez_time}\n minutes: {rez_time/60}\n hour: {rez_time/3600}') | [
"motsar.dm@gmail.com"
] | motsar.dm@gmail.com |
ff1fd7cc8123d0a5423c973f1719f3e816799522 | 2fe08f181c53b7d94c5e97f08016b86705488594 | /app.py | c2170f933f0edeab3758e05f01956f0539cd387b | [] | no_license | KidoThunder/ExportXiamiList | 521871639e67bd2cc235de46ce4078610bb44e65 | b769740e4613f58ff29381f1e8865adbf6368ac2 | refs/heads/master | 2021-06-16T20:00:49.713299 | 2017-04-24T06:40:18 | 2017-04-24T06:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,606 | py | # python3
# author: fyl00
# source: https://github.com/fyl00/ExportXiamiList
import logging
import re
import sys
from PyQt5.QtCore import QObject, QThread, pyqtSignal
from PyQt5.QtGui import QTextCursor, QIcon
from PyQt5.QtWidgets import QMainWindow, QApplication, QMessageBox, QFileDialog
from lxml import etree
from XiamiList.tips import *
from XiamiList.xiami import XiamiHandle, XiamiLink
from ui import Ui_MainWindow
import images_qr
# 打印输出到 logTextEdit
class QtLogHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
def emit(self, record):
record = self.format(record)
if record:
EmittingStream.stdout().write('%s\n' % record)
class EmittingStream(QObject):
_stdout = None
_stderr = None
textWritten = pyqtSignal(str)
def write(self, text):
if not self.signalsBlocked():
self.textWritten.emit(str(text))
def flush(self):
pass
def fileno(self):
return -1
@staticmethod
def stdout():
if not EmittingStream._stdout:
EmittingStream._stdout = EmittingStream()
sys.stdout = EmittingStream._stdout
return EmittingStream._stdout
@staticmethod
def stderr():
if not EmittingStream._stderr:
EmittingStream._stderr = EmittingStream()
sys.stderr = EmittingStream._stderr
return EmittingStream._stderr
# 后台抓取,防止界面未响应
class XiamiThread(QThread):
finished = pyqtSignal(str)
def __init__(self, url):
QThread.__init__(self)
self.url = url
def run(self):
xmlstr = XiamiHandle().get_list(self.url)
self.finished.emit(xmlstr)
# 界面窗口
class AppWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self._enbale_source_link()
self.ui.startButton.clicked.connect(self.click_start_button)
EmittingStream.stdout().textWritten.connect(self._logout)
EmittingStream.stderr().textWritten.connect(self._logout)
self.ui.linkLineEdit.setPlaceholderText("请输入歌单链接")
self.ui.linkLineEdit.setFocus()
self._logout(GET_LINK)
# Storing a reference to the thread after it's been created
# http://stackoverflow.com/questions/15702782/qthread-destroyed-while-thread-is-still-running
self.threads = []
def click_start_button(self):
url = self.ui.linkLineEdit.text()
if not self._check_url(url):
return
thread = XiamiThread(url)
self.threads.append(thread)
thread.finished.connect(self._task_finished)
thread.start()
self.ui.startButton.setDisabled(True)
def _check_url(self, url):
link = XiamiLink(url)
if link.is_collect is None:
title = "链接格式错误"
QMessageBox.critical(self, title, LINK_ERROR_TIPS)
return False
return True
def _task_finished(self, value):
self._save_xml(value)
self.ui.startButton.setDisabled(False)
def _enbale_source_link(self):
link_text = "源码:<a href='https://github.com/fyl00/ExportXiamiList'>GitHub</a>"
self.ui.sourceLabel.setText(link_text)
self.ui.sourceLabel.setOpenExternalLinks(True)
def _logout(self, outstr):
cursor = self.ui.logTextEdit.textCursor()
cursor.insertText(outstr)
self.ui.logTextEdit.moveCursor(QTextCursor.End)
def _save_xml(self, xmlstr):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
filename, _ = QFileDialog.getSaveFileName(self, "QFileDialog.getSaveFileName()",
"songs.kgl", "Kugou/Netease Files (*.kgl)",
options=options)
if filename:
r = re.search("\.kgl$", filename)
if not r:
filename = "%s.kgl" % filename
print("** 导出文件位置:%s" % filename)
root = etree.fromstring(xmlstr)
etree.ElementTree(root).write(filename,
xml_declaration=True,
encoding="utf8",
pretty_print=True)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = AppWindow()
app.setWindowIcon(QIcon(':/static/favicon.ico'))
window.show()
sys.exit(app.exec_())
| [
"fyl00lee@gmail.com"
] | fyl00lee@gmail.com |
75f74b2bca9e9f3323f53b9941cda59e24b9064e | 7f7898939786f7dae5693d66d35149b177b06a34 | /mgw7510/forms.py | 4390a9bff113b0c16f4699d6b0a82354e60ef680 | [
"Apache-2.0"
] | permissive | gter1216/mgw7510-web | f8a6ef2cd3793fdafadf28ba6ec4d4493db69a7c | 758e9053b1071655b5e23f714a82f8698cb562e6 | refs/heads/master | 2021-01-12T07:45:40.679559 | 2017-02-03T13:45:04 | 2017-02-03T13:45:04 | 77,008,390 | 2 | 5 | null | 2017-03-01T04:55:55 | 2016-12-21T02:25:14 | HTML | UTF-8 | Python | false | false | 2,143 | py | from django.forms import ModelForm
from mgw7510.models import WebUser
from django import forms
# inherit from class ModelForm
class WebUserForm(ModelForm):
    """ModelForm for WebUser collecting login, password-change and
    deployment-related settings used by the web UI.

    Every extra field is declared with required=False so a partially
    filled form (e.g. only the password-change section) still validates;
    the view logic decides which fields are actually needed.
    """
    # username = forms.EmailField(error_messages="user name should not be empty")
    # password = forms.CharField(error_messages="password should not be empty")
    # password is an optional field
    password = forms.CharField(required=False)
    confirmPassword = forms.CharField(required=False)
    newPassword = forms.CharField(required=False)
    confirmNewPassword = forms.CharField(required=False)
    userWorkDir = forms.CharField(required=False)
    # PAK (package) server connection settings
    pakServerIp = forms.GenericIPAddressField(required=False)
    pakServerUsername = forms.CharField(required=False)
    pakServerPasswd = forms.CharField(required=False)
    pakServerFp = forms.CharField(required=False)
    # Seed VM connection settings (OpenStack access)
    seedVMIp = forms.GenericIPAddressField(required=False)
    seedVMUsername = forms.CharField(required=False)
    seedVMPasswd = forms.CharField(required=False)
    seedVMOpenrcAbsPath = forms.CharField(required=False)
    seedVMKeypairAbsPath = forms.CharField(required=False)
    # YACT server connection settings
    yactServerIp = forms.GenericIPAddressField(required=False)
    yactServerUsername = forms.CharField(required=False)
    yactServerPasswd = forms.CharField(required=False)
    yactServerDIFAbsPath = forms.CharField(required=False)
    yactServerYactAbsPath = forms.CharField(required=False)
    # Uploaded user-input file plus bookkeeping/state fields for the UI
    userInputFile = forms.FileField(label='Select a file',
                                    required=False)
    tmpPath = forms.CharField(required=False)
    userInputFileName = forms.CharField(required=False)
    progressBarData = forms.CharField(required=False)
    userInputUploadedFlag = forms.CharField(required=False)
    ceDeployState = forms.CharField(required=False)
    ceSelectRel = forms.CharField(required=False)
    ceSelectPak = forms.CharField(required=False)
    ceDeployProcess = forms.CharField(required=False)
    swImageName = forms.CharField(required=False)
    class Meta:
        model = WebUser  # inherit form WebUser
        fields = '__all__'
| [
"xuxiao1216@126.com"
] | xuxiao1216@126.com |
64764f6b4a24d45418ef96845068613a38b12ef6 | 921e479e9fe4f72c0621207b262f7464b6812d9b | /ants.py | c2ba7b96cbf5bdf1c537ec32ec1f333bdb5b6ce0 | [] | no_license | ajai-sharma-backup/ants | af60fc671ffe7c9eede2002189906e6095defc96 | 9df7ebc8907132415d088302e9a9fe2f0bca2116 | refs/heads/master | 2021-06-13T17:52:18.431694 | 2015-06-02T21:17:37 | 2015-06-02T21:17:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,763 | py | """The ants module implements game logic for Ants Vs. SomeBees."""
# Name: Ajai Sharma
# Email: ajai.sharma@gmail.com
import random
import sys
from ucb import main, interact, trace
from collections import OrderedDict
################
# Core Classes #
################
class Place:
    """A Place holds insects and has an exit to another Place."""
    def __init__(self, name, exit=None):
        """Create a Place with the given exit.

        name -- A string; the name of this Place.
        exit -- The Place reached by exiting this Place (may be None).
        """
        self.name = name
        self.exit = exit
        self.bees = []        # A list of Bees
        self.ant = None       # An Ant
        self.entrance = None  # A Place
        # Phase 1: Add an entrance to the exit
        if self.exit is not None:
            self.exit.entrance = self
    def add_insect(self, insect):
        """Add an Insect to this Place.

        There can be at most one Ant in a Place, unless exactly one of them is
        a BodyguardAnt (Phase 2), in which case there can be two. If add_insect
        tries to add more Ants than is allowed, an assertion error is raised.

        There can be any number of Bees in a Place.
        """
        if insect.is_ant():
            # Phase 2: Special handling for BodyguardAnt
            if self.ant:
                # Either resident or newcomer may be the container.
                if self.ant.can_contain(insect):
                    self.ant.contain_ant(insect)
                elif insect.can_contain(self.ant):
                    insect.contain_ant(self.ant)
                    self.ant = insect
                else:
                    # self.ant is truthy here, so this always fails:
                    # it is how "two plain ants" is rejected.
                    assert self.ant is None, 'Two ants in {0}'.format(self)
            else: self.ant = insect
        else:
            self.bees.append(insect)
        insect.place = self
    def remove_insect(self, insect):
        """Remove an Insect from this Place."""
        if not insect.is_ant():
            self.bees.remove(insect)
        elif type(insect) == QueenAnt and insect.armor > 0:
            # A live (non-impostor) QueenAnt can never be removed.
            return
        else:
            assert self.ant == insect, '{0} is not in {1}'.format(insect, self)
            # Removing a bodyguard frees the ant it contained.
            if self.ant.container and self.ant.ant:
                self.ant = self.ant.ant
            else: self.ant = None
        insect.place = None
    def __str__(self):
        return self.name
class Insect:
    """Base class for Ant and Bee: an armored occupant of a Place."""
    watersafe = False  # subclasses override this to survive Water places
    def __init__(self, armor, place=None):
        """Create an Insect with an armor amount and a starting Place."""
        self.armor = armor
        self.place = place  # maintained by Place.add_insect / Place.remove_insect
    def reduce_armor(self, amount):
        """Subtract amount from armor; expire (leave the Place) at zero or below.

        >>> test_insect = Insect(5)
        >>> test_insect.reduce_armor(2)
        >>> test_insect.armor
        3
        """
        self.armor = self.armor - amount
        if not self.armor > 0:
            game_print('{0} ran out of armor and expired'.format(self))
            self.place.remove_insect(self)
    def action(self, colony):
        """Take this Insect's per-turn action (no-op by default).

        colony -- The AntColony, used to access game state information.
        """
    def is_ant(self):
        """Return whether this Insect is an Ant (False for the base class)."""
        return False
    def __repr__(self):
        return '{0}({1}, {2})'.format(type(self).__name__, self.armor, self.place)
class Bee(Insect):
    """A Bee advances through Places toward the queen, stinging blocking Ants."""
    name = 'Bee'
    watersafe = True  # Bees fly over Water
    def sting(self, ant):
        """Attack an Ant, reducing the Ant's armor by 1."""
        ant.reduce_armor(1)
    def move_to(self, place):
        """Relocate this Bee from its current Place into place."""
        self.place.remove_insect(self)
        place.add_insect(self)
    def blocked(self):
        """Return a truthy value when an Ant in this Place blocks this Bee."""
        # NinjaAnt sets blocks_path to False, letting Bees fly past it.
        resident = self.place.ant
        return resident and resident.blocks_path
    def action(self, colony):
        """Sting the blocking Ant if any; otherwise advance toward the exit.

        colony -- The AntColony, used to access game state information.
        """
        if self.blocked():
            self.sting(self.place.ant)
            return
        if self.armor > 0 and self.place is not colony.hive:
            self.move_to(self.place.exit)
class Ant(Insect):
    """An Ant occupies a place and does work for the colony."""
    implemented = False  # Only implemented Ant classes should be instantiated
    damage = 0           # damage dealt per attack (0 = non-attacking)
    food_cost = 0        # colony food required to deploy this Ant
    container = False    # True for ants (BodyguardAnt) that can hold another Ant
    blocks_path = True   # False for ants (NinjaAnt) that Bees can fly past
    def __init__(self, armor=1):
        """Create an Ant with an armor quantity."""
        Insect.__init__(self, armor)
    def is_ant(self):
        return True
    def can_contain(self, other):
        # A container with an empty slot can hold any non-container ant.
        return self.container and self.ant is None and not other.container
class HarvesterAnt(Ant):
    """HarvesterAnt produces 1 additional food per turn for the colony."""
    name = 'Harvester'
    food_cost = 2
    implemented = True
    def action(self, colony):
        """Produce 1 additional food for the colony.

        colony -- The AntColony, used to access game state information.
        """
        colony.food += 1
def random_or_none(l):
    """Return a random element of list l, or return None if l is empty."""
    if not l:
        return None
    return random.choice(l)
class ThrowerAnt(Ant):
    """ThrowerAnt throws a leaf each turn at the nearest Bee in its range."""
    name = 'Thrower'
    implemented = True
    damage = 1
    food_cost = 4
    min_range = 0   # closest distance (in Places) this ant may target
    max_range = 10  # farthest distance this ant may target
    def nearest_bee(self, hive):
        """Return the nearest Bee in a Place that is not the Hive, connected to
        the ThrowerAnt's Place by following entrances.

        This method returns None if there is no such Bee.

        Problem B5: This method returns None if there is no Bee in range.
        """
        current_place = self.place
        distance = 0
        # Walk toward the hive via entrances, giving up past max_range.
        while current_place != hive and distance <= self.max_range:
            if current_place.bees and distance >= self.min_range:
                return random_or_none(current_place.bees)
            current_place = current_place.entrance
            distance += 1
    def throw_at(self, target):
        """Throw a leaf at the target Bee, reducing its armor."""
        if target is not None:
            target.reduce_armor(self.damage)
    def action(self, colony):
        """Throw a leaf at the nearest Bee in range."""
        self.throw_at(self.nearest_bee(colony.hive))
class Hive(Place):
    """The Place from which the Bees launch their assault.

    assault_plan -- An AssaultPlan; when & where bees enter the colony.
    """
    def __init__(self, assault_plan):
        self.name = 'Hive'
        self.assault_plan = assault_plan
        self.bees = []
        for bee in assault_plan.all_bees:
            self.add_insect(bee)
        # The following attributes are always None for a Hive
        self.entrance = None
        self.ant = None
        self.exit = None
    def strategy(self, colony):
        # Release this turn's wave into random tunnel entrances.
        exits = [p for p in colony.places.values() if p.entrance is self]
        for bee in self.assault_plan.get(colony.time, []):
            bee.move_to(random.choice(exits))
class AntColony:
    """An ant collective that manages global game state and simulates time.

    Attributes:
    time -- elapsed time
    food -- the colony's available food total
    queen -- the place where the queen resides
    places -- A list of all places in the colony (including a Hive)
    bee_entrances -- A list of places that bees can enter
    """
    def __init__(self, strategy, hive, ant_types, create_places, food=2):
        """Create an AntColony for simulating a game.

        Arguments:
        strategy -- a function to deploy ants to places
        hive -- a Hive full of bees
        ant_types -- a list of ant constructors
        create_places -- a function that creates the set of places
        """
        self.time = 0
        self.food = food
        self.strategy = strategy
        self.hive = hive
        # Index ant constructors by their display name for deploy_ant.
        self.ant_types = OrderedDict((a.name, a) for a in ant_types)
        self.configure(hive, create_places)
    def configure(self, hive, create_places):
        """Configure the places in the colony."""
        self.queen = Place('AntQueen')
        self.places = OrderedDict()
        self.bee_entrances = []
        def register_place(place, is_bee_entrance):
            self.places[place.name] = place
            if is_bee_entrance:
                place.entrance = hive
                self.bee_entrances.append(place)
        register_place(self.hive, False)
        create_places(self.queen, register_place)
    def simulate(self):
        """Simulate an attack on the ant colony (i.e., play the game)."""
        # Play until a bee reaches the queen (loss) or no bees remain (win).
        while len(self.queen.bees) == 0 and len(self.bees) > 0:
            self.hive.strategy(self)    # Bees invade
            self.strategy(self)         # Ants deploy
            for ant in self.ants:       # Ants take actions
                if ant.armor > 0:
                    ant.action(self)
            for bee in self.bees:       # Bees take actions
                if bee.armor > 0:
                    bee.action(self)
            self.time += 1
        if len(self.queen.bees) > 0:
            game_print('The ant queen has perished. Please try again.')
        else:
            game_print('All bees are vanquished. You win!')
    def deploy_ant(self, place_name, ant_type_name):
        """Place an ant if enough food is available.

        This method is called by the current strategy to deploy ants.
        """
        constructor = self.ant_types[ant_type_name]
        if self.food < constructor.food_cost:
            game_print('Not enough food remains to place ' + ant_type_name)
        else:
            self.places[place_name].add_insect(constructor())
            self.food -= constructor.food_cost
    def remove_ant(self, place_name):
        """Remove an Ant from the Colony."""
        place = self.places[place_name]
        if place.ant is not None:
            place.remove_insect(place.ant)
    @property
    def ants(self):
        # All deployed ants, one per occupied place.
        return [p.ant for p in self.places.values() if p.ant is not None]
    @property
    def bees(self):
        # All bees anywhere in the colony (including the hive).
        return [b for p in self.places.values() for b in p.bees]
    @property
    def insects(self):
        return self.ants + self.bees
    def __str__(self):
        status = ' (Food: {0}, Time: {1})'.format(self.food, self.time)
        return str([str(i) for i in self.ants + self.bees]) + status
def ant_types():
    """Return a list of all implemented Ant classes."""
    # Breadth-first walk of the Ant subclass tree, level by level.
    implemented_types = []
    frontier = [Ant]
    while frontier:
        frontier = [sub for cls in frontier for sub in cls.__subclasses__()]
        implemented_types.extend(t for t in frontier if t.implemented)
    return implemented_types
def interactive_strategy(colony):
    """A strategy that starts an interactive session and lets the user make
    changes to the colony.

    For example, one might deploy a ThrowerAnt to the first tunnel by invoking:
    colony.deploy_ant('tunnel_0_0', 'Thrower')
    """
    game_print('colony: ' + str(colony))
    msg = '<Control>-D (<Control>-Z <Enter> on Windows) completes a turn.\n'
    interact(msg)
def start_with_strategy(args, strategy):
    """Reads command-line arguments and starts Ants vs. SomeBees with those
    options.

    NOTE(review): the incoming `args` parameter is immediately shadowed by
    the argparse result below, so the caller's value is never used.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Play Ants vs. SomeBees")
    parser.add_argument('-t', '--ten', action='store_true',
                        help='start with ten food')
    parser.add_argument('-f', '--full', action='store_true',
                        help='loads a full layout and assault plan')
    parser.add_argument('-w', '--water', action='store_true',
                        help='loads a full layout with water')
    parser.add_argument('-i', '--insane', action='store_true',
                        help='loads a difficult assault plan')
    args = parser.parse_args()
    # Defaults: small test layout, small assault, 2 food.
    assault_plan = make_test_assault_plan()
    layout = test_layout
    food = 2
    if args.ten:
        food = 10
    if args.full:
        assault_plan = make_full_assault_plan()
        layout = dry_layout
    if args.water:
        layout = mixed_layout
    if args.insane:
        assault_plan = make_insane_assault_plan()
    hive = Hive(assault_plan)
    AntColony(strategy, hive, ant_types(), layout, food).simulate()
###########
# Layouts #
###########
def mixed_layout(queen, register_place, length=8, tunnels=3, moat_frequency=3):
    """Register Places with the colony.

    Builds `tunnels` parallel tunnels of `length` Places each, ending at the
    queen.  Every `moat_frequency`-th Place is Water (0 disables moats).
    """
    for tunnel in range(tunnels):
        exit = queen
        for step in range(length):
            if moat_frequency != 0 and (step + 1) % moat_frequency == 0:
                exit = Water('water_{0}_{1}'.format(tunnel, step), exit)
            else:
                exit = Place('tunnel_{0}_{1}'.format(tunnel, step), exit)
            # The last Place in each tunnel is a bee entrance.
            register_place(exit, step == length - 1)
def test_layout(queen, register_place, length=8, tunnels=1):
    # Single dry tunnel, used by the small test game.
    mixed_layout(queen, register_place, length, tunnels, 0)
def dry_layout(queen, register_place, length=8, tunnels=3):
    # Three dry tunnels (no Water).
    mixed_layout(queen, register_place, length, tunnels, 0)
#################
# Assault Plans #
#################
class AssaultPlan(dict):
    """The Bees' plan of attack for the Colony.  Attacks come in timed waves.

    An AssaultPlan is a dictionary from times (int) to waves (list of Bees).

    >>> AssaultPlan().add_wave(4, 2)
    {4: [Bee(3, None), Bee(3, None)]}
    """
    def __init__(self, bee_armor=3):
        self.bee_armor = bee_armor  # armor for every Bee this plan creates
    def add_wave(self, time, count):
        """Add a wave at time with count Bees that have the specified armor."""
        bees = [Bee(self.bee_armor) for _ in range(count)]
        self.setdefault(time, []).extend(bees)
        return self  # returns self to allow chained add_wave calls
    @property
    def all_bees(self):
        """Return the list of all Bees across every wave."""
        return [bee for wave in self.values() for bee in wave]
def make_test_assault_plan():
    # Two single-bee waves: the small default plan.
    return AssaultPlan().add_wave(2, 1).add_wave(3, 1)
def make_full_assault_plan():
    # Regular single-bee waves plus an 8-bee rush at time 15.
    plan = AssaultPlan().add_wave(2, 1)
    for time in range(3, 15, 2):
        plan.add_wave(time, 1)
    return plan.add_wave(15, 8)
def make_insane_assault_plan():
    # Tougher bees (armor 4), a wave every turn, and a 20-bee rush.
    plan = AssaultPlan(4).add_wave(1, 2)
    for time in range(3, 15):
        plan.add_wave(time, 1)
    return plan.add_wave(15, 20)
##############
# Extensions #
##############
class Water(Place):
    """Water is a place that can only hold 'watersafe' insects."""
    def add_insect(self, insect):
        """Add insect if it is watersafe, otherwise reduce its armor to 0.

        The insect is placed first so that drowning (the armor reduction)
        happens while the insect is in this Place and can be removed from it.
        """
        # Fixed: removed a leftover debug game_print() that spammed game
        # output on every insertion.
        super().add_insect(insect)
        if not insect.watersafe:
            insect.reduce_armor(insect.armor)
class FireAnt(Ant):
    """FireAnt cooks any Bee in its Place when it expires."""
    name = 'Fire'
    damage = 3
    food_cost = 4
    implemented = True
    def reduce_armor(self, amount):
        # When the hit is fatal, splash self.damage onto every Bee sharing
        # this Place before expiring.  Iterate over a copy of the bee list
        # because reduce_armor may remove expired Bees from the original.
        if amount >= self.armor:
            for bee in self.place.bees[:]:
                bee.reduce_armor(self.damage)
        super().reduce_armor(amount)
class LongThrower(ThrowerAnt):
    """A ThrowerAnt that only throws leaves at Bees at least 4 places away."""
    name = 'Long'
    food_cost = 3
    min_range = 4  # overrides ThrowerAnt.min_range
    implemented = True
class ShortThrower(ThrowerAnt):
    """A ThrowerAnt that only throws leaves at Bees within 3 places."""
    name = 'Short'
    food_cost = 3
    max_range = 2  # overrides ThrowerAnt.max_range (targets distances 0-2)
    implemented = True
class WallAnt(Ant):
    """WallAnt takes no action but has a large amount of armor."""
    name = 'Wall'
    food_cost = 4
    implemented = True
    def __init__(self):
        """Create a WallAnt with 4 armor."""
        # Pass the armor straight through Ant.__init__ instead of
        # overwriting the default afterwards.
        Ant.__init__(self, 4)
class NinjaAnt(Ant):
    """NinjaAnt is an Ant which does not block the path and does 1 damage to
    all Bees in the exact same Place."""
    name = 'Ninja'
    food_cost = 6
    blocks_path = False  # Bees fly straight past a NinjaAnt
    damage = 1
    implemented = True
    def action(self, colony):
        """Damage every Bee currently sharing this NinjaAnt's Place.

        Iterates over a copy of the bee list because reduce_armor may
        remove expired Bees from the underlying list.
        """
        # Fixed: removed a leftover debug print(self.place.bees) and a
        # commented-out map/getattr experiment.
        for bee in self.place.bees[:]:
            bee.reduce_armor(self.damage)
class ScubaThrower(ThrowerAnt):
    """ScubaThrower is a ThrowerAnt which is watersafe."""
    name = 'Scuba'
    food_cost = 5
    watersafe = True  # survives being placed on Water
    implemented = True
class HungryAnt(Ant):
    """HungryAnt will take three "turns" to eat a Bee in the same space as it.
    While eating, the HungryAnt can't eat another Bee.
    """
    name = 'Hungry'
    food_cost = 4
    time_to_digest = 3  # turns spent digesting after each meal
    implemented = True
    def __init__(self):
        Ant.__init__(self)
        self.digesting = 0  # turns of digestion left; 0 means ready to eat
    def eat_bee(self, bee):
        # Instantly kill the bee, then start digesting.
        bee.reduce_armor(bee.armor)
        self.digesting = self.time_to_digest
    def action(self, colony):
        # Digest if busy; otherwise eat a random Bee in this Place, if any.
        if self.digesting > 0:
            self.digesting -= 1
        elif self.place.bees:
            self.eat_bee(random_or_none(self.place.bees))
class BodyguardAnt(Ant):
    """BodyguardAnt provides protection to other Ants."""
    name = 'Bodyguard'
    food_cost = 4
    armor = 2  # NOTE(review): redundant; __init__ sets instance armor to 2
    container = True  # may hold one non-container Ant (see Ant.can_contain)
    implemented = True
    def __init__(self):
        Ant.__init__(self, 2)
        self.ant = None  # The Ant hidden in this bodyguard
    def contain_ant(self, ant):
        self.ant = ant
    def action(self, colony):
        # The protected Ant still acts while guarded.
        if self.ant: self.ant.action(colony)
class QueenPlace:
    """Aggregates the two Places in which the bees can find the queen:

    (1) the colony's original queen location at the end of all tunnels, and
    (2) the Place in which the QueenAnt resides.
    """
    def __init__(self, colony_queen, ant_queen):
        self.colony_queen = colony_queen
        self.ant_queen = ant_queen
    @property
    def bees(self):
        """All Bees currently in either queen location."""
        combined = list(self.colony_queen.bees)
        combined.extend(self.ant_queen.bees)
        return combined
class QueenAnt(ScubaThrower):
    """The Queen of the colony. The game is over if a bee enters her place."""
    name = 'Queen'
    implemented = True
    food_cost = 6
    imposter = False  # class-level flag: True once the first queen exists
    def __init__(self):
        ScubaThrower.__init__(self)
        # Ants whose damage has already been doubled (never double twice).
        self.doubled_ants = [self]
        # Any queen created after the first is an impostor with 0 armor.
        if QueenAnt.imposter:
            self.armor = 0
        QueenAnt.imposter = True
    def buff_ant(self, place):
        # Double the damage of the ant in `place`, and of a contained ant
        # if the resident is a container (e.g. a BodyguardAnt).
        ant_list = [place.ant, place.ant.ant if place.ant and place.ant.container else None]
        for ant in ant_list:
            if ant and ant not in self.doubled_ants:
                self.doubled_ants.append(ant)
                ant.damage *= 2
    def action(self, colony):
        """A queen ant throws a leaf, but also doubles the damage of ants
        in her tunnel. Impostor queens do only one thing: die."""
        def iterate_over_tunnel(start, action, direction='both'):
            '''Performs an action at every place in the tunnel of the
            starting place'''
            action(start)
            if direction == 'both':
                # From the starting place, walk both ways along the tunnel.
                for direction in ('entrance', 'exit'):
                    next_place = getattr(start, direction)
                    if next_place: iterate_over_tunnel(next_place, action, direction)
            else:
                next_place = getattr(start, direction)
                if next_place: iterate_over_tunnel(next_place, action, direction)
        # First action: make the queen's own place count as a loss location.
        if type(colony.queen) != QueenPlace:
            colony.queen = QueenPlace(colony.queen, self.place)
        if self.armor > 0:
            iterate_over_tunnel(self.place, self.buff_ant)
            super().action(colony)
class AntRemover(Ant):
    """Allows the player to remove ants from the board in the GUI."""
    name = 'Remover'
    implemented = True
    def __init__(self):
        Ant.__init__(self, 0)
##################
# Status Effects #
##################
def make_slow(action):
    """Return a new action method that calls action every other turn.

    action -- An action method of some Bee
    """
    def slow_action(colony):
        # Act only on even time steps; skip odd ones entirely.
        if colony.time % 2 == 0:
            action(colony)
    return slow_action
def make_stun(action):
    """Return a new action method that does nothing.

    action -- An action method of some Bee
    """
    def do_nothing(colony):
        # Stunned: the original action is never invoked.
        pass
    return do_nothing
def apply_effect(effect, bee, duration):
    """Apply a status effect to a Bee that lasts for duration turns."""
    normal_action = bee.action
    effected_action = effect(normal_action)
    remaining = duration
    def do_action(colony):
        nonlocal remaining
        if remaining == 0:
            # Effect expired: restore and perform the normal action.
            bee.action = normal_action
            normal_action(colony)
        else:
            effected_action(colony)
            remaining -= 1
    bee.action = do_action
class SlowThrower(ThrowerAnt):
    """ThrowerAnt that causes Slow on Bees."""
    name = 'Slow'
    food_cost = 4
    implemented = True
    def throw_at(self, target):
        # A hit slows the target for 3 turns instead of dealing damage.
        if target:
            apply_effect(make_slow, target, 3)
class StunThrower(ThrowerAnt):
    """ThrowerAnt that causes Stun on Bees."""
    name = 'Stun'
    food_cost = 6
    implemented = True
    def throw_at(self, target):
        # A hit stuns the target for 1 turn instead of dealing damage.
        if target:
            apply_effect(make_stun, target, 1)
def game_print(*args, **kargs):
    """Print wrapper so game output can be redirected or silenced centrally."""
    print(*args, **kargs)
@main
def run(*args):
    """Entry point: start an interactive game (ucb's @main runs this when
    the module is executed as a script)."""
    start_with_strategy(args, interactive_strategy)
| [
"ajai.sharma@gmail.com"
] | ajai.sharma@gmail.com |
a6f5732bacf0acd92492666f0a0fa0795679cf6c | e19ddbc01a0a8f6295a106ff969da9f4a9ffd408 | /MIT_OCW/Textbook Finger Exercises/2-4-2_Largeset_Odd_Out_of_10.py | cfb1449cf4720b493d02eb99650255d70ff9f6f2 | [] | no_license | Nmeece/MIT_OpenCourseware_Python | 8860049c48090646fc9e98e469047cc5a0b171f1 | 69aecd3e9b448bc5ce2b98e46d72d65b35e212b5 | refs/heads/master | 2023-04-24T07:48:31.216771 | 2021-05-02T17:38:10 | 2021-05-02T17:38:10 | 318,314,220 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 11:44:55 2020
Second Finger Exercise of section 2.4 of the Introduction to Computation and Programming Using Python textbook.
Prompt:
Write a program that asks the user to input 10 integers, and then prints the largest odd number that was entered.
If no odd number was entered, it should print a message to that effect.
@author: Nygel
"""
# Read 10 integers, then report the largest odd one (or "All Even").
numInts = 0
List = []
Big = 0          # largest odd value seen so far
evenCount = 0    # how many of the 10 inputs were even
while numInts != 10:
    numIn = int(input("Enter value number: "))
    List.append(numIn)
    numInts += 1
# Fixed: the original loop had an `else: break` that aborted the scan on
# the first odd number not larger than the current maximum, so later,
# larger odd numbers were never examined.
for n in List:
    if n % 2 != 0:
        if n > Big:
            Big = n
    else:
        evenCount += 1
if evenCount == 10:
    print("All Even")
else:
    print("The largest odd number is:", Big)
"nygelmeece8@gmail.com"
] | nygelmeece8@gmail.com |
c8611799a1e24a658b75c368489cc107c804783e | d8ac9944ff402ec9f0bea82213a8418982c16f39 | /python/Tushare_API_Scripts/DataFrame写入PG数据库.py | 97b330f3c46b13c7ee8b6c6d9117eb63e282fef7 | [] | no_license | zzgbird/PyAndShell_Learning | cdc50d23ac0d05dcb9293171bb3a9946485766d3 | 9f0f7b3a39d2ad8a6a5211dd612d805dac5de64c | refs/heads/master | 2023-03-27T11:30:35.245342 | 2021-03-21T07:02:59 | 2021-03-21T07:02:59 | 300,139,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py | # DataFrame写入PG数据库
# Snippet collection: three ways to write a pandas DataFrame into PostgreSQL.
# NOTE(review): this file is a notebook-style scratchpad, not runnable as-is
# (the pip line, the `***` placeholders and the trailing bare text are not
# valid Python).
# Create the df dataset beforehand and install the psycopg2 module
pip install psycopg2
# 1. Import via pandas' to_sql
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql://postgres:ican00@127.0.0.1:5432/stockam')
start = datetime.datetime.now()
pd.io.sql.to_sql(df, 'stock_basic', engine,index= False, schema='ods',if_exists='append')
# fail -- if the table exists, do nothing
# replace -- if the table exists, drop it, create a new table and insert the data
# append -- if the table exists, insert the data; if it does not exist, create one!!
end = datetime.datetime.now()
print('time cost:',(end - start))
# 2. Import via copy_from -- uses psycopg2's copy_from directly; fastest write path.
# Convert the dataframe to a str inside an IO buffer
# The target table must exist beforehand and the insert columns must be specified
import psycopg2
from io import StringIO
df = pro.query('namechange',)
output = StringIO()
df.to_csv(output, sep='\t', index=False, header=False)
output1 = output.getvalue()
#conn = pgconnection()
conn = psycopg2.connect(database="stockam", user="postgres", password="ican00", host="127.0.0.1", port="5432")
cur = conn.cursor()
# columns=['label1', 'label2']
cur.copy_from(StringIO(output1),'ods.namechange',)
conn.commit()
cur.close()
conn.close()
# 3. Batch insert into the database with executemany()
import psycopg2
conn = psycopg2.connect(host=***, port=***, database=***, user=***, password=***)
cur = conn.cursor()
sql = "insert into " + table_name + " values(%s, %s, %s)"
cur.executemany(sql, data)
conn.commit()
conn.close()
# The powerful copy_from() is a PostgreSQL built-in
import psycopg2
from sqlalchemy import create_engine
import pandas as pd
import StringIO
from io import BytesIO
data1 = pd.DataFrame(data)
# Convert the dataframe to a str inside an IO buffer
output = BytesIO()
data1.to_csv(output, sep='\t', index=False, header=False)
output1 = output.getvalue()
conn = psycopg2.connect(host=***, user=***, password=***, database=***)
cur = conn.cursor()
cur.copy_from(StringIO.StringIO(output1), table_name1, null='',columns=columns)
conn.commit()
cur.close()
conn.close()
print('done')
这儿需要特别记录下,copy_from
"zzg_30th@outlook.com"
] | zzg_30th@outlook.com |
6f5a178c8d1ba0fb6bb65c7f38002457ca8ef23a | e3565e1ce607f60745f2a045aae8026661a6b99b | /resources/Onyx-1.0.511/py/onyx/util/rocutils.py | 48dd8b474c58afe853ee45475fc9479842d375ed | [
"Apache-2.0"
] | permissive | eternity668/speechAD | 4c08d953b2ed06b3357b1c39d8709dd088a2471c | f270a1be86372b7044615e4fd82032029e123bc1 | refs/heads/master | 2021-01-12T22:10:33.358500 | 2014-02-03T16:03:28 | 2014-02-03T16:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,676 | py | ###########################################################################
#
# File: rocutils.py (directory: ./py/onyx/util)
# Date: Mon 10 Mar 2008 18:34
# Author: Ken Basye
# Description: Utility code for generating ROC and DET curves
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2008, 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
Utilities for generating ROC and DET curves
"""
import StringIO
def _uniquify_preserving_first(iterable, eq_pred):
item = iterable.next()
while 1:
try:
next_item = iterable.next()
except:
yield item
break
if not eq_pred(item, next_item):
yield item
item = next_item
def _uniquify_preserving_last(iterable, eq_pred):
item = iterable.next()
while 1:
try:
next_item = iterable.next()
except:
yield item
break
if not eq_pred(item, next_item):
yield item
item = next_item
else:
item = next_item
def make_ROC_data(reference, ratios):
    """
    reference is a list of 0/1 values which are the correct classifications
    values is a parallel list of numeric values, with higher values intending to
    map toward classifications of 1.

    Returns data for a ROC curve in the form of a list of triples, where each triple
    contains an interesting threshold value, the fraction of correct identifications (true positives)
    as a percent, and the fraction of false positives, at that threshold.  The triples are
    ordered by threshold from lowest (fewest false positives) to highest (most true positives)

    Note that a typical ROC curve would plot false_pos on the X axis and true_pos on the Y axis
    using a linear scale.

    >>> ref = [0,0,0,0,0,1,1,1,1,1]
    >>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
    >>> res = make_ROC_data(ref, values)
    >>> res
    [(0.0, 0.0, 9), (20.0, 80.0, 4), (80.0, 100.0, 2)]
    """
    # ROC is the DET data with the miss rate converted to a true-positive rate.
    det_data = make_DET_data(reference, ratios)
    roc_data = [(fp, 100-miss, t) for (fp, miss, t) in det_data]
    return roc_data
def make_DET_data(reference, ratios):
    """
    reference is a list of 0/1 values which are the correct
    classifications values is a parallel list of numeric values, with
    higher values intending to map toward classifications of 1.

    Returns data for a DET curve in the form of a list of triples,
    where each triple contains the fraction of false positives as a
    percent, the fraction of false negatives, and the threshold value
    that generated those rates.  The triples are ordered by threshold
    from lowest (fewest false positives) to highest (fewest misses)

    Note that a typical DET curve would plot false_pos on the X axis
    and false_neg on the Y axis, oftentimes with a normal deviate
    scale.

    >>> ref = [0,0,0,0,0,1,1,1,1,1]
    >>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
    >>> res = make_DET_data(ref, values)
    >>> res
    [(0.0, 100.0, 9), (20.0, 19.999999999999996, 4), (80.0, 0.0, 2)]
    """
    assert( len(reference) == len(ratios) )
    num_pos = reference.count(1)
    num_neg = reference.count(0)
    assert( num_pos + num_neg == len(reference))
    full_result = []
    # Find the list of interesting threshholds, which is any value in
    # the list of ratios
    # Seems like there should be an easier way to uniquify a list
    all_threshes = set(ratios)
    all_threshes = list(all_threshes)
    all_threshes.sort()
    def count_values_over_thresh(value, ref, ratios, t):
        # Count positions classified `value` whose score exceeds threshold t.
        result = 0
        for (i, r) in enumerate(ratios):
            if ref[i] == value and r > t:
                result += 1
        return result
    # Now find precision and recall at each threshold
    for thresh in all_threshes:
        num_neg_accepted = count_values_over_thresh(0, reference, ratios, thresh)
        num_pos_accepted = count_values_over_thresh(1, reference, ratios, thresh)
        full_result.append((100 * float(num_neg_accepted) / num_neg, # false positives
                            100 * (1 - float(num_pos_accepted) / num_pos), # misses
                            thresh))
    # Drop redundant points: keep one triple per distinct false-positive
    # rate (first) and per distinct miss rate (last), then order by
    # threshold from lowest to highest.
    def eq0(x,y): return x[0] == y[0]
    def eq1(x,y): return x[1] == y[1]
    iter1 = _uniquify_preserving_first(iter(full_result), eq0)
    ret = list(_uniquify_preserving_last(iter1, eq1))
    ret.reverse()
    return ret
def write_data_as_csv(data, stream, header_type="DET"):
    """Write ROC or DET triples as comma-separated text, suitable for
    import into a spreadsheet or other tool.

    data -- list of (rate1, rate2, threshold) triples
    stream -- any object with a write(str) method
    header_type -- "DET" (default) or "ROC" selects the header line;
        any other value (e.g. None) writes no header.
    """
    headers = {
        "DET": "False Alarm Rate, Miss Rate, Threshold",
        "ROC": "False Pos Rate, True Pos Rate, Threshold",
    }
    if header_type in headers:
        stream.write(headers[header_type])
    for rate1, rate2, thresh in data:
        stream.write("\n%s, %s, %s" % (rate1, rate2, thresh))
    stream.write("\n")
def _test0():
    # Manual smoke test: writes DET data for a tiny example to foo_csv.txt.
    ref = [0,0,0,0,0,1,1,1,1,1]
    values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
    res = make_DET_data(ref, values)
    s = open("foo_csv.txt", "w")
    write_data_as_csv(res, s)
    s.close()
if __name__ == '__main__':
from onyx import onyx_mainstartup
onyx_mainstartup()
# _test0()
| [
"nassos@n12mavra.cs.ntua.gr"
] | nassos@n12mavra.cs.ntua.gr |
9443edd0436310cbd47d8b6a75e21c106482fb9c | 1eee09ce02d848a8e2653f199b7171f74cb7d1c9 | /service/api.py | 6496e828b7afe578a9d14aa5a0c97a8185b15a63 | [
"MIT"
] | permissive | etalab/csv_detective_api | 7416cde112fcd4ed70e703231569bce5901c2161 | 7c96f497374d842226a95a26cb6627ac22cd799b | refs/heads/master | 2023-01-12T09:41:37.519013 | 2020-06-15T14:09:34 | 2020-06-15T14:09:34 | 193,232,263 | 3 | 3 | MIT | 2023-01-04T08:20:45 | 2019-06-22T12:35:26 | Jupyter Notebook | UTF-8 | Python | false | false | 7,487 | py | #!flask/bin/python
import os
import sys
from collections import defaultdict
sys.path.append("./csv_detective_ml") # horrible hack to load my features class to load my ML pipeline :/
from flask import Flask
from flask import request
from flask import jsonify
from flask_restplus import Api, Resource, fields
from flask_cors import CORS
from tempfile import NamedTemporaryFile
import logging
import json
from joblib import load
from utils.reference_matcher import link_reference_datasets
from utils.parsers import file_upload
from csv_detective_ml.analyze_csv_cli import analyze_csv
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
app = Flask(__name__)
CORS(app)
api = Api(app=app,
version="0.1",
title="CSV Detective API",
description="Get info about the data contained in a DGF CSV file.")
ns_csv_detective = api.namespace('csv_detective', description='Get data from DGF CSVs')
resource_model = api.model('Analysis parameters',
{'resource_id': fields.String(required=True,
description="DGF Resource ID or CSV path",
help="Resource ID cannot be blank")
})
type_model = api.model('Type analysis parameters',
{'target_type': fields.String(required=True,
description="Target type to find among resources/datasets",
help="Resource ID cannot be blank")
})
DATASET_CSV_INFO = {}
TYPE_CSV_INFO = defaultdict(lambda: defaultdict(dict))
ML_PIPELINE = None
def load_ml_model():
    """Load the trained csv_detective ML pipeline into the module-global
    ML_PIPELINE and return it."""
    global ML_PIPELINE
    logger.info("Loading ML model...")
    ML_PIPELINE = load('./csv_detective_ml/models/model.joblib')
    return ML_PIPELINE
load_ml_model()
@ns_csv_detective.route("/dataset_id")
class CSVDetectiveDataset(Resource):
@api.expect(resource_model)
def get(self):
global DATASET_CSV_INFO
try:
resource_id = request.args.get('resource_id')
if resource_id in DATASET_CSV_INFO:
response = DATASET_CSV_INFO[resource_id]
response = reformat_response(response)
response = link_reference_datasets(response)
return jsonify(response)
else:
logger.info("Resource id not found in 'database'.")
return jsonify({"error": "ID {} not found".format(resource_id)})
except Exception as e:
return jsonify({"error": str(e)})
@ns_csv_detective.route("/resource_id")
class CSVDetectiveResource(Resource):
    """Look up a cached analysis (GET) or analyze an uploaded CSV (POST)."""
    @api.expect(resource_model)
    def get(self):
        """Return the cached csv_detective result for ``resource_id``."""
        global DATASET_CSV_INFO
        try:
            resource_id = request.args.get('resource_id')
            if resource_id in DATASET_CSV_INFO:
                response = DATASET_CSV_INFO[resource_id]
                # Flatten the raw analysis, then enrich with reference links.
                response = reformat_response(response)
                response = link_reference_datasets(response)
                return jsonify(response)
            else:
                logger.info("Resource id not found in 'database'.")
                return jsonify({"error": "ID {} not found".format(resource_id)})
        except Exception as e:
            return jsonify({"error": str(e)})
    # ``file_upload`` is a request parser defined elsewhere in this file
    # (not visible here); it must expose a "resource_csv" file field.
    @ns_csv_detective.expect(file_upload)
    def post(self):
        """Run csv_detective on an uploaded CSV and return the analysis."""
        args = file_upload.parse_args()
        if "resource_csv" in args and args["resource_csv"].mimetype != "text/csv":
            return jsonify({"error": "No uploaded file or the file seems to not be a CSV."})
        # Fall back to rule-based analysis when the ML model failed to load.
        if ML_PIPELINE is None:
            analysis_type = "rule"
        else:
            analysis_type = "both"
        uploaded_csv = args["resource_csv"]
        # Spool the upload to a named temp file (analyze_csv needs a path);
        # delete=False so it can be reopened by name, removed in finally.
        tmp = NamedTemporaryFile(delete=False)
        try:
            tmp.write(uploaded_csv.read())
            tmp.close()
            _, response = analyze_csv(tmp.name, analysis_type=analysis_type, pipeline=ML_PIPELINE, num_rows=500)
        finally:
            os.remove(tmp.name)
        response = reformat_response(response)
        response = link_reference_datasets(response)
        return jsonify(response)
@ns_csv_detective.route("/type")
class CSVDetectiveType(Resource):
    """GET: list every dataset/resource where ``target_type`` was detected."""
    @api.expect(type_model)
    def get(self):
        global TYPE_CSV_INFO
        try:
            target_type = request.args.get('target_type')
            if target_type in TYPE_CSV_INFO:
                # TYPE_CSV_INFO maps type -> dataset -> resource -> result
                # (built at startup by crate_type_index).
                response = TYPE_CSV_INFO[target_type]
                return jsonify(response)
            else:
                logger.info("Type not found in 'database'.")
                return jsonify({"error": "Type {} not found".format(target_type)})
        except Exception as e:
            return jsonify({"error": str(e)})
@ns_csv_detective.route("/isAlive")
class IsAlive(Resource):
    """Health-check endpoint: always answers the string "True"."""
    def get(self):
        return "True"
def reformat_response(response):
    """Flatten a csv_detective result for the API.

    For each column-analysis section ("columns_rb", "columns_ml") only the
    first detected type per column is kept; every remaining key of the
    input is grouped under a "metadata" key.

    :param response: mapping produced by csv_detective for one resource
    :return: dict with optional "columns_rb"/"columns_ml" keys plus "metadata"
    """
    response = dict(response)  # shallow copy: never mutate the caller's dict
    new_response = {}
    # Both sections share the same shape; the original duplicated this
    # handling per section -- one loop keeps them consistent.
    for section in ("columns_rb", "columns_ml"):
        if section in response:
            # v is a list of candidate types; keep only the first (best) one.
            new_response[section] = {k: v[0] for k, v in response.pop(section).items()}
    new_response["metadata"] = dict(response)
    return new_response
def load_result_dict():
    """Load the precomputed csv_detective results into DATASET_CSV_INFO.

    Reads a fixed JSON dump produced offline; on any failure the process
    exits with status 1, since the API is useless without its data.
    """
    global DATASET_CSV_INFO
    try:
        with open("./data/interim/2019-10-25-11_59_dgf_friendly.json", "r") as filo:
            logger.info("Loading JSON file with csv info...")
            DATASET_CSV_INFO = json.load(filo)
    except Exception as e:
        logger.error("Error reading JSON data file: {0}".format(str(e)))
        exit(1)
    return DATASET_CSV_INFO
# NOTE(review): "crate" is a typo for "create"; kept because callers use it.
def crate_type_index(dataset_csv_info):
    """
    Invert the results dict to have a mapping of types --> dataset (and resource). Something like this:
    {
    type1: {
         datasetID1: { resourceID1 : {csv_detective results}, {...} }
        }
    Fills the module-global TYPE_CSV_INFO (a defaultdict of defaultdicts)
    and returns it.
    :return:
    """
    results_keynames = ["columns_rb", "columns_ml"]
    def extract_types_detected(csv_detective_results):
        # Collect the best (first) detected type of every column, from both
        # the rule-based and the ML analysis sections, without duplicates.
        detected_types = set([])
        for res in results_keynames:
            if res not in csv_detective_results:
                continue
            detected_types.update([f[0] for f in csv_detective_results[res].values()])
        return detected_types
    for dataset_id, resources in dataset_csv_info.items():
        for resource_id, csv_detective_result in resources.items():
            # Skip resources that carry no column analysis at all.
            if not any([f in csv_detective_result for f in results_keynames]):
                continue
            for type_detected in extract_types_detected(csv_detective_result):
                TYPE_CSV_INFO[type_detected][dataset_id][resource_id] = csv_detective_result
    return TYPE_CSV_INFO
if __name__ == '__main__':
    # load csv_detective info json
    DATASET_CSV_INFO = load_result_dict()
    # Build the inverted type -> dataset -> resource index from it.
    TYPE_CSV_INFO = crate_type_index(DATASET_CSV_INFO)
    # Port selection: 80 in production, 5000 otherwise.
    if 'ENVIRONMENT' in os.environ:
        if os.environ['ENVIRONMENT'] == 'production':
            app.run(port=80, host='0.0.0.0')
        if os.environ['ENVIRONMENT'] == 'local':
            app.run(port=5000, host='0.0.0.0')
    else:
        app.run(port=5000, host='0.0.0.0')
| [
"pavel.soriano@data.gouv.fr"
] | pavel.soriano@data.gouv.fr |
d1ecf38656a166455efc2f5666bd6b3ebcedb4e8 | c124e35f3d8dbbdb0423b4b7c30d84baaa04a208 | /python/decorators.py | 4d8bcc0067b39bc9310c7754fa2809d787c3332c | [] | no_license | Tettipappus/harvard-cs50 | 29914c929743c27011c5205b6cbaca997f1e6295 | c8fc42c874cc5989fd99ccf3ce59fd60d3c809b8 | refs/heads/main | 2023-06-25T08:54:55.932761 | 2021-07-19T13:58:49 | 2021-07-19T13:58:49 | 376,718,943 | 0 | 0 | null | 2021-06-14T09:20:41 | 2021-06-14T06:02:01 | HTML | UTF-8 | Python | false | false | 209 | py | def announce(f):
def wrapper():
print("About to run a funtion...")
f()
print("Done with the function.")
return wrapper
@announce
def hello():
    """Print a greeting; wrapped by @announce, so runs are bracketed by log lines."""
    print("Hello, world!")
# Demonstration: calling the decorated function triggers the announcements.
hello()
"sig.d.skjerve@gmail.com"
] | sig.d.skjerve@gmail.com |
4438a410880898850073b4bc83f77e73ca792121 | eadd15064aa74811e7a3718b617636627ef4fd47 | /web/migrations/0020_rename_index_indexpage.py | 8efb4605a936d00a29a46a2c95ef6c4263e63c65 | [] | no_license | topsai/plasrefine_backstage | 262f7bb032daa4d018aac1519e1139cb060c3f91 | 1eb34dd0b13ebdc2a42dd6ed1aaa2d08c18ab5fb | refs/heads/master | 2023-04-12T13:24:22.710108 | 2021-05-08T14:16:41 | 2021-05-08T14:16:41 | 361,993,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # Generated by Django 3.2 on 2021-05-02 08:03
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration 0020: rename model ``Index`` to ``IndexPage``."""
    # Must run after the previous migration of the ``web`` app.
    dependencies = [
        ('web', '0019_auto_20210502_1558'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Index',
            new_name='IndexPage',
        ),
    ]
| [
"hurte@foxmail.com"
] | hurte@foxmail.com |
061a39f3108aeed656ad2dd3e0779406a17ee0f6 | 414daef846cbb984cb97c1183e7ea557ce466575 | /alpha_sigma-master/record_node_backup/new_MCTS.py | 45046ddcd18f5acf49821148d967979c791aa016 | [] | no_license | yyh1999070901/GoAI | c0e0cf0d471bb65a56f83796707f9c58219554c5 | d0551c8faa4c9a37a7a50d140a45727354a54fd2 | refs/heads/master | 2023-02-03T00:30:38.628257 | 2020-12-22T11:17:30 | 2020-12-22T11:17:30 | 323,603,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,440 | py | import numpy as np
import random
import sys
import utils
from five_stone_game import main_process as five_stone_game
import time
num2char = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h", 8: "i", 9: "j", 10: "k", 11: "l", 12: "m",
13: "n", 14: "o", 15: "p", 16: "q", 17: "r", 18: "s", 19: "t", 20: "u"}
class edge:
    """A (state, action) edge of the MCTS tree.

    Holds the visit count, the accumulated action value and the prior
    probability from the policy network; the child node is created lazily
    the first time the edge is traversed.
    """
    def __init__(self, action, parent_node, priorP, MCTS_pointer):
        self.action = action
        self.counter = 1.0  # visit count N (starts at 1)
        self.parent_node = parent_node
        self.priorP = priorP  # prior move probability P from the network
        self.MCTS_ptr = MCTS_pointer  # back-reference to the owning MCTS (node cache)
        self.child_node = None  # self.search_and_get_child_node()
        self.action_value = 0.0  # accumulated backed-up value W
    def backup(self, v):
        """Add a simulation result and propagate it upward with flipped sign
        (the parent node belongs to the opposing player)."""
        self.action_value += v
        self.counter += 1
        self.parent_node.backup(-v)
    def get_child(self):
        """Return (child_node, expanded) -- expanded is True when the child
        had to be created on this traversal."""
        if self.child_node is None:
            self.counter += 1
            self.child_node = self.search_and_get_child_node()
            return self.child_node, True
        else:
            self.counter += 1
            return self.child_node, False
    def search_and_get_child_node(self):
        """Build the successor state's name and fetch it from the shared MCTS
        node cache, creating and registering a fresh node when absent."""
        new_state_name = utils.generate_new_state(self.parent_node.get_state(), self.action, self.parent_node.node_player)
        search_result = self.MCTS_ptr.search_node(new_state_name)
        if search_result:
            return search_result
        else:
            # The child is played by the opposite player (-node_player).
            new_node = node(new_state_name, self, -self.parent_node.node_player, self.MCTS_ptr)
            self.MCTS_ptr.add_node(new_node)
            return new_node
    def UCB_value(self):
        """PUCT score: mean action value Q plus an exploration bonus scaled by
        the prior and the parent's visit count."""
        if self.action_value:
            Q = self.action_value / self.counter
        else:
            Q = 0
        return Q + utils.Cpuct * self.priorP * np.sqrt(self.parent_node.counter) / (1 + self.counter)
class node:
    """A board-state node of the MCTS tree.

    ``child`` maps the string form of each legal move to its outgoing edge;
    ``node_player`` is the player to move in this state (+1 / -1).
    """
    def __init__(self, state, parent, player, MCTS_pointer):
        self.state_name = state  # canonical string identifying the board state
        self.parent = parent  # incoming edge (None for the root)
        self.value = 0.0  # accumulated backed-up value
        self.counter = 0.0  # visit count
        self.child = {}  # move string -> edge
        self.MCTS_pointer = MCTS_pointer
        self.node_player = player
    def get_state(self):
        return self.state_name
    def add_child(self, action, priorP):
        """Attach an outgoing edge for ``action`` with prior ``priorP``."""
        action_name = utils.move_to_str(action)
        self.child[action_name] = edge(action=action, parent_node=self, priorP=priorP, MCTS_pointer=self.MCTS_pointer)
    def get_child(self, action):
        return self.child[action].child_node
    def eval_or_not(self):
        # True while the node is still a leaf (needs network evaluation).
        return len(self.child)==0
    def backup(self, v):
        """Accumulate a simulation value and forward it up the incoming edge."""
        self.value += v
        self.counter += 1
        if self.parent:
            self.parent.backup(v)
    def get_distribution(self): ## used to get the step distribution of current
        # Feed each child's visit count into the shared distribution helper,
        # which normalizes them into a move-probability distribution.
        for key in self.child.keys():
            self.MCTS_pointer.distribution_calculater.push(key, self.child[key].counter)
        return self.MCTS_pointer.distribution_calculater.get()
    def UCB_sim(self):
        """Pick the child edge with the highest PUCT score and descend it.

        Returns (child_node, expanded, action) as produced by edge.get_child.
        """
        UCB_max = -sys.maxsize
        UCB_max_key = None
        for key in self.child.keys():
            if self.child[key].UCB_value() > UCB_max:
                UCB_max_key = key
                UCB_max = self.child[key].UCB_value()
        this_node, expand = self.child[UCB_max_key].get_child()
        return this_node, expand, self.child[UCB_max_key].action
class MCTS:
    """Monte-Carlo tree search driver for self-play five-in-a-row games.

    Keeps a cache (``database``) of all nodes seen so far, keyed first by
    state-name length and then by state name, so transpositions reuse the
    same node object.
    """
    def __init__(self, board_size=11, simulation_per_step=400, neural_network=None, init_state="", init_node=None):
        self.board_size = board_size
        self.s_per_step = simulation_per_step  # simulations run before each real move
        self.database = {0: {"":node(init_state, init_node, 1, self)}} # here we haven't complete a whole database that can be
        self.current_node = self.database[0][""]                       # used to search the exist node
        self.NN = neural_network  # policy/value network exposing eval(state)
        self.game_process = five_stone_game(board_size=board_size)   # the real game
        self.simulate_game = five_stone_game(board_size=board_size)  # scratch copy for rollouts
        self.distribution_calculater = utils.distribution_calculater(self.board_size)
    def renew(self):
        """Reset tree, cache and game state for a fresh self-play game."""
        self.database = {0: {"": node("", None, 1, self)}}
        self.current_node = self.database[0][""]
        self.game_process.renew()
    def search_node(self, node_name):
        """Return the cached node for ``node_name``, or None when unseen."""
        if len(node_name) in self.database.keys():
            if node_name in self.database[len(node_name)].keys():
                return self.database[len(node_name)][node_name]
        return None
    def add_node(self, node):
        """Register a freshly created node in the length-bucketed cache."""
        if len(node.state_name) in self.database.keys():
            self.database[len(node.state_name)][node.state_name] = node
        else:
            self.database[len(node.state_name)] = {node.state_name:node}
    def MCTS_step(self, action):
        """Advance the tree root along ``action`` after a real move."""
        next_node = self.current_node.get_child(action)
        return next_node
    def simulation(self):
        """Run ``s_per_step`` guided rollouts from the current position.

        Each rollout descends by PUCT until it either expands a new node
        (backed up with the network's value estimate) or the game ends
        (backed up with the terminal result).
        """
        for _ in range(self.s_per_step):
            expand, game_continue = False, True
            this_node = self.current_node
            # Re-seed the scratch game with the real board position.
            self.simulate_game.simulate_reset(self.game_process.current_board_state(True))
            state = self.simulate_game.current_board_state()
            while game_continue and not expand:
                if this_node.eval_or_not():
                    # Leaf: expand with one edge per legal move, using the
                    # network's prior over the flattened board.
                    state_prob, _ = self.NN.eval(state)
                    valid_move = utils.valid_move(state)
                    for move in valid_move:
                        this_node.add_child(action=move, priorP=state_prob[0, move[0]*self.board_size + move[1]])
                this_node, expand, action = this_node.UCB_sim()
                game_continue, state = self.simulate_game.step(action)
            if not game_continue:
                # Terminal state: the player who just moved wins (value 1).
                this_node.backup(1)
            elif expand:
                # New leaf: back up the network's value estimate.
                _, state_v = self.NN.eval(state)
                this_node.backup(state_v)
    def game(self):
        """Play one full self-play game and return its move/distribution record."""
        game_continue = True
        game_record = []
        begin_time = int(time.time())
        while game_continue:
            self.simulation()
            # Move chosen from the visit-count distribution at the root.
            action, distribution = self.current_node.get_distribution()
            game_continue, state = self.game_process.step(utils.str_to_move(action))
            self.current_node = self.MCTS_step(action)
            game_record.append({"distribution": distribution, "action":action})
        self.renew()
        end_time = int(time.time())
        min = int((end_time - begin_time)/60)
        second = (end_time - begin_time) % 60
        print("In last game, we cost {}:{}".format(min, second))
        return game_record
"18710113218@163.com"
] | 18710113218@163.com |
bd0f712c9aea151cb5ebee92c726e449ed9bc413 | 5cefde592dcc5c40b73734d9488199f084622912 | /MNIST/attacks_base.py | 63dba65da2e82fb07601154582ce167dbff86b16 | [] | no_license | RemiBERNHARD/Membership_Inference_Attacks | d41f2e1f1966c76cc94f0f6f65f858c30ecb7f5a | a4b68a5bd1d0993ca60a2bacb2d5e445b51cdb10 | refs/heads/main | 2023-03-01T13:14:04.257446 | 2021-02-12T13:00:39 | 2021-02-12T13:00:39 | 338,311,636 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | import numpy as np
from keras.utils import np_utils
from keras.models import Model
from keras.layers import Dense, Input
from keras.optimizers import Adam
#########################
def attack_label(model, X_vec, y_vec):
    """Membership oracle based on prediction correctness.

    A sample is labelled "member" exactly when the model classifies it
    correctly, i.e. when the argmax of its confidence row equals the
    ground-truth label.

    :param model: fitted classifier exposing ``predict`` (confidence rows)
    :param X_vec: input samples
    :param y_vec: integer ground-truth labels, one per sample
    :return: boolean array, True where the prediction matches the label
    """
    confidences = model.predict(X_vec)
    predicted_labels = confidences.argmax(axis=1)
    return predicted_labels == y_vec
#########################
def c_ent(vec):
    """Shannon entropy (in nats) of a probability vector.

    Probabilities are clamped to [1e-12, 1 - 1e-12] and a small epsilon is
    added inside the logarithm, exactly as in the original, so log(0) can
    never occur.

    :param vec: 1-D array of (soft) probabilities
    :return: scalar entropy value
    """
    clamped = np.clip(vec, 1e-12, 1. - 1e-12)
    return -(clamped * np.log(clamped + 1e-9)).sum()
def attack_ce(model, X_vec, threshold):
    """Entropy-threshold membership attack.

    Samples whose prediction entropy is at most ``threshold`` are flagged
    as training members (members tend to get confident, low-entropy
    outputs).

    :param model: fitted classifier exposing ``predict``
    :param X_vec: input samples
    :param threshold: entropy cut-off
    :return: boolean array, True where entropy <= threshold
    """
    confidences = model.predict(X_vec)
    # Row-wise entropy via c_ent (defined above in this module).
    entropies = np.array([c_ent(row) for row in confidences])
    return entropies <= threshold
#########################
def c_ent_loss(vec_pred, vec_true):
    """Cross-entropy loss (nats) between one prediction and its one-hot label.

    Predicted probabilities are clamped to [1e-12, 1 - 1e-12] before the
    logarithm (plus a small epsilon, as in the original) for numerical
    safety.

    :param vec_pred: predicted probability vector
    :param vec_true: one-hot ground-truth vector
    :return: scalar cross-entropy value
    """
    safe_pred = np.clip(vec_pred, 1e-12, 1. - 1e-12)
    return -np.dot(vec_true, np.log(safe_pred + 1e-9))
def attack_lossce(model, X_vec, y_vec, threshold):
    """Loss-threshold membership attack.

    Flags a sample as a training member when its cross-entropy loss
    (against the one-hot true label, 10 classes assumed) is at most
    ``threshold`` -- members tend to have lower loss.
    """
    Y_vec = np_utils.to_categorical(y_vec,10)
    conf_pred = model.predict(X_vec)
    lossce_pred = np.zeros(conf_pred.shape[0])
    # Per-sample cross entropy against the one-hot labels.
    for i in range(0, conf_pred.shape[0]):
        lossce_pred[i] = c_ent_loss(conf_pred[i], Y_vec[i])
    memb_pred = np.less_equal(lossce_pred, threshold)
    return(memb_pred)
#########################
def attack_conf(model, X_vec, y_vec, threshold):
    """Confidence-threshold membership attack.

    A sample counts as a member when the confidence the model assigns to
    its TRUE class is at least ``threshold`` (10 classes assumed).

    :param model: fitted classifier exposing ``predict``
    :param X_vec: input samples
    :param y_vec: integer ground-truth labels
    :param threshold: confidence cut-off
    :return: boolean array, True where true-class confidence >= threshold
    """
    one_hot = np_utils.to_categorical(y_vec, 10)
    confidences = model.predict(X_vec)
    # Select, row by row, the confidence assigned to the true class.
    true_class_conf = confidences[np.where(one_hot == 1)]
    return true_class_conf >= threshold
#########################
def mentr(conf_vec, Y_vec):
    """Modified-entropy score of one prediction.

    Combines the confidence assigned to the true class with the entropy of
    the remaining (wrong-class) confidences; lower values are taken as
    evidence of training membership by the caller.

    :param conf_vec: predicted probability vector for one sample
    :param Y_vec: one-hot ground-truth vector for the same sample
    :return: scalar modified-entropy value
    """
    true_conf = conf_vec[np.where(Y_vec == 1)][0]
    other_conf = conf_vec[np.where(Y_vec != 1)]
    return -(1 - true_conf) * np.log(true_conf) - c_ent(other_conf)
def attack_mentr(model, X_vec, y_vec, threshold):
    """Modified-entropy membership attack.

    Computes mentr() per sample (10 classes assumed) and flags samples
    whose score is at most ``threshold`` as training members.
    """
    Y_vec = np_utils.to_categorical(y_vec,10)
    conf_pred = model.predict(X_vec)
    mentr_pred = np.array([mentr(conf_pred[i], Y_vec[i]) for i in range(0, conf_pred.shape[0])])
    memb_pred = np.less_equal(mentr_pred, threshold)
    return(memb_pred)
#########################
def attack_shokri(model, X_vec, memb_true):
    """Shokri-style learned membership attack.

    Trains a small softmax MLP to map the target model's confidence
    vectors to member/non-member labels, then predicts membership with it.

    NOTE(review): the attack model is trained and evaluated on the SAME
    data (X_data), so the returned predictions measure training fit, not
    generalization -- confirm this is intended.

    :param model: target classifier exposing ``predict``
    :param X_vec: samples, ordered so that members come before non-members
    :param memb_true: 0/1 membership labels aligned with X_vec
    :return: (membership predictions, fitted attack model)
    """
    conf_pred = model.predict(X_vec)
    Y_member = np_utils.to_categorical(memb_true[memb_true==1],2)
    Y_nonmember = np_utils.to_categorical(memb_true[memb_true==0],2)
    X_data = conf_pred
    Y_data = np.concatenate((Y_member, Y_nonmember))
    # Attack network: 10 confidences -> 64 tanh units -> 2-way softmax.
    input_shape = (10,)
    inputs = Input(shape=input_shape)
    l = Dense(64, activation="tanh")(inputs)
    outputs = Dense(2, activation="softmax")(l)
    model_f = Model(inputs, outputs)
    model_f.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['accuracy'])
    model_f.fit(X_data, Y_data, epochs=50, batch_size=10, verbose=0)
    memb_pred = np.argmax(model_f.predict(X_data), axis=1)
    return(memb_pred, model_f)
| [
"noreply@github.com"
] | RemiBERNHARD.noreply@github.com |
75747144a3735564364a1962ef1c25bd322f6760 | ed2234df3d7e1eecd1b198df66f4fd7263145d18 | /MergeSort.py | 5e0ed44e686d16540544f188564254a64609b9b5 | [] | no_license | Manohar-Gunturu/AlgoPython | bb887f1508c24798c5ae4b1d55fc75d41d674254 | 5800336886b1cde4deb577f4dc671ce993a0e7f4 | refs/heads/master | 2021-07-06T10:31:44.984945 | 2020-07-18T15:45:03 | 2020-07-18T15:45:27 | 132,955,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | arr = [0, 3, 1, 5, 2, 4, 6, 1, 3, 0]
# arr = [5, 2, 4, 6, 1, 3]
def merge(array, begin, mid, end):
    """Merge the adjacent sorted runs array[begin:mid+1] and
    array[mid+1:end+1] in place (stable: ties favour the left run).

    :param array: list being sorted
    :param begin: first index of the left run (inclusive)
    :param mid: last index of the left run (inclusive)
    :param end: last index of the right run (inclusive)
    """
    # Slicing replaces the original's element-by-element copy loops and its
    # dead, commented-out sentinel assignments.
    left = array[begin:mid + 1]
    right = array[mid + 1:end + 1]
    i = j = 0
    k = begin
    # Repeatedly take the smaller head element; <= keeps the merge stable.
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
        k += 1
    # At most one of the runs has leftovers; copy them back in bulk.
    array[k:k + len(left) - i] = left[i:]
    k += len(left) - i
    array[k:k + len(right) - j] = right[j:]
def mergesort(array, l, r):
    """Sort array[l:r+1] in place with top-down merge sort.

    :param array: list to sort
    :param l: first index of the slice (inclusive)
    :param r: last index of the slice (inclusive)
    """
    if l >= r:  # zero or one element: already sorted
        return
    middle = (l + r) // 2
    # Sort each half, then merge them back together.
    mergesort(array, l, middle)
    mergesort(array, middle + 1, r)
    merge(array, l, middle, r)
# Demo: sort the module-level sample array and print one element per line.
mergesort(arr, 0, len(arr) - 1)
for i in range(0, len(arr)):
    print(arr[i])
| [
"gunturumanohar2@gmail.com"
] | gunturumanohar2@gmail.com |
e3ea3a268a625b1c2669d93b5c29caa0a984cda4 | f63e53c9d7a23c03ecbf488e9ea156c5c98f3f33 | /bookstore_oop.py | a6eeea598ec9a34570aa17f2e95f7d001407c3bb | [] | no_license | matramir/bookstore | 957841664becc9bf00113e51eaed97373d8c8920 | c0a19c767a784d9c1c2fafb6267dbdfaf3ff9b03 | refs/heads/master | 2020-03-27T16:19:10.401750 | 2018-08-30T17:44:12 | 2018-08-30T17:44:12 | 146,773,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,386 | py | from tkinter import *
from backend_oop import Database
"""
Program that stores Title, Author, Year, ISBN
User can View, Search, Add, Update, Delete, close
"""
database = Database("books.db")
def get_selected_row(event):
    """Listbox selection callback: copy the clicked row into the entry boxes.

    Stores the full DB row in the module-global ``selected_tuple`` so the
    update/delete buttons know which record is active.  The bare except
    silently ignores clicks when nothing is selected (curselection empty).
    """
    try:
        global selected_tuple
        index=list1.curselection()[0]
        selected_tuple = list1.get(index)
        # Row layout: (id, title, author, year, isbn); id stays hidden.
        e1.delete(0,END)
        e1.insert(END,selected_tuple[1])
        e2.delete(0,END)
        e2.insert(END,selected_tuple[2])
        e3.delete(0,END)
        e3.insert(END,selected_tuple[3])
        e4.delete(0,END)
        e4.insert(END,selected_tuple[4])
    except:
        pass
def view_all():
    """Show every book from the database in the listbox.

    Bug fix: the original cleared the listbox INSIDE the loop, so each new
    row wiped the previous one and only the last book ever stayed visible.
    The listbox is now cleared once, before inserting the rows.
    """
    list1.delete(0, END)
    for row in database.view():
        list1.insert(END, row)
def search_entry():
    """Search for books matching the (partially) filled entry boxes.

    Bug fix: the error branch called ``list.insert`` on the *builtin*
    ``list`` type instead of the ``list1`` Listbox widget, which raised a
    TypeError and hid the intended "Search Error" message.
    """
    try:
        list1.delete(0, END)
        for row in database.search(title_text.get(), author_text.get(), year_text.get(), isbn_text.get()):
            list1.insert(END, row)
    except Exception:
        list1.delete(0, END)
        list1.insert(END, ("Search Error",))
def add_entry():
    """Insert a new book from the entry boxes and echo it in the listbox.

    Bug fix: both branches called ``insert`` on the builtin ``list`` type
    instead of the ``list1`` Listbox widget (TypeError at runtime).
    """
    try:
        database.insert(title_text.get(), author_text.get(), year_text.get(), isbn_text.get())
        list1.delete(0, END)
        list1.insert(END, (title_text.get(), author_text.get(), year_text.get(), isbn_text.get()))
    except Exception:
        list1.delete(0, END)
        list1.insert(END, "Add Error")
def update_entry():
    """Overwrite the currently selected row with the entry-box values.

    Requires a prior listbox selection (module-global ``selected_tuple``).
    Bug fix: the error branch used the builtin ``list`` instead of the
    ``list1`` Listbox widget, so "Update Error" was never shown.
    """
    try:
        database.update(selected_tuple[0], title_text.get(), author_text.get(), year_text.get(), isbn_text.get())
    except Exception:
        list1.delete(0, END)
        list1.insert(END, "Update Error")
def delete_data():
    """Delete the currently selected book (by its hidden row id).

    Bug fix: the error branch used the builtin ``list`` instead of the
    ``list1`` Listbox widget, so "Delete Error" was never shown.
    """
    try:
        database.delete(selected_tuple[0])
    except Exception:
        list1.delete(0, END)
        list1.insert(END, "Delete Error")
#def close_app():
# ---- Main window and widget layout (2x2 label/entry grid, button column,
# ---- listbox with scrollbar). Runs at import; mainloop() blocks at the end.
window = Tk()
window.title("Bookstore")
# labels
lb1 = Label(window,text="Title")
lb2 = Label(window,text="Year")
lb3 = Label(window,text="Author")
lb4 = Label(window,text="ISBN")
# entry values (shared with the button callbacks above)
title_text = StringVar()
year_text = StringVar()
author_text = StringVar()
isbn_text = StringVar()
# entries
e1 = Entry(window, textvariable=title_text)
e2 = Entry(window, textvariable=year_text)
e3 = Entry(window, textvariable=author_text)
e4 = Entry(window, textvariable=isbn_text)
# buttons wired to the CRUD callbacks
b1 = Button(window, width=12, text="View All", command=view_all)
b2 = Button(window, width=12, text="Search Entry", command=search_entry)
b3 = Button(window, width=12, text="Add Entry", command=add_entry)
b4 = Button(window, width=12, text="Update Selected", command=update_entry)
b5 = Button(window, width=12, text="Delete Selected", command=delete_data)
b6 = Button(window, width=12, text="Close",command=window.destroy)
# first entry column (Title / Year)
lb1.grid(row=0, column=0)
e1.grid(row=0, column=1)
lb2.grid(row=1, column=0)
e2.grid(row=1, column=1)
# second entry column (Author / ISBN)
lb3.grid(row=0, column=2)
e3.grid(row=0, column=3)
lb4.grid(row=1, column=2)
e4.grid(row=1, column=3)
# button column
b1.grid(row=2, column=3)
b2.grid(row=3, column=3)
b3.grid(row=4, column=3)
b4.grid(row=5, column=3)
b5.grid(row=6, column=3)
b6.grid(row=7, column=3)
# listbox showing query results
list1=Listbox(window, height=6,width=35)
list1.grid(row=2,column=0,rowspan=6,columnspan=2)
# scrollbar linked both ways to the listbox
sb1=Scrollbar(window)
sb1.grid(row=2,column=2,rowspan=6)
list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=list1.yview)
# clicking a row loads it into the entry boxes
list1.bind('<<ListboxSelect>>',get_selected_row)
window.mainloop()
| [
"matias.ramirez.parisi@gmail.com"
] | matias.ramirez.parisi@gmail.com |
241ac7b70c2142fba7ff196677ed61d5e0910d2f | 587290dbd33c5fb60a154eacd2155e681a3f9ecf | /js/gatsby/index.cgi | db257841537410c023fb93f34bb0b3e0a10dcd00 | [] | no_license | jaredly/prog | b6408db52c16e9d3c322933f0624c23663d33ce0 | e8fe82ccd1abe42371adbb3f317576facac546ca | refs/heads/master | 2021-01-20T09:12:48.931999 | 2013-08-29T04:24:36 | 2013-08-29T04:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | cgi | #!/usr/bin/python
print 'Content-type:text/html\n'
import cgi,cgitb,os,sys,re
cgitb.enable()
def load_chapter(chap):
    """Return the raw text of chapter *chap* read from chapter<N>.txt.

    NOTE(review): the file handle is never closed explicitly; the GC takes
    care of it in this short-lived CGI script.
    """
    return open('chapter%s.txt'%chap).read()
def load_chapters():
    """Return [[chapter_number, chapter_text], ...] for chapters 1-9.

    The manual append loop of the original is replaced by the equivalent
    list comprehension.
    """
    return [[i, load_chapter(i)] for i in range(1, 10)]
def print_entry(at, chap, pageat, item):
    """Format one matching paragraph as an HTML search-result fragment.

    :param at: paragraph index within the chapter
    :param chap: chapter number
    :param pageat: approximate page number (may be "")
    :param item: paragraph HTML (search term already highlighted)
    :return: HTML fragment describing the hit

    Bug fix: the original picked the ordinal suffix by comparing the whole
    number to 1/2/3, so e.g. 21 rendered as "21th".  The suffix now follows
    the standard English rule (11-13 -> "th", otherwise by last digit).
    """
    if at % 100 in (11, 12, 13):
        nd = "<sup>th</sup>"
    elif at % 10 == 1:
        nd = "<sup>st</sup>"
    elif at % 10 == 2:
        nd = "<sup>nd</sup>"
    elif at % 10 == 3:
        nd = "<sup>rd</sup>"
    else:
        nd = "<sup>th</sup>"
    return "<br><br><br><b>%s%s</b> paragraph in chapter <b>%s</b> (around page %s)<br><br>\n" % (at, nd, chap, pageat) + item
form = cgi.FieldStorage()
print """
<html><head><title>Great Gatsby Search</title></head><body>
<style>
span {
font-weight: bold;
font-size: 1.1em;
color: black;
background-color: #ccc;
}
h2 {
text-align:center;
}
div.searchform {
background-color:#BBFFAA;
border:2px solid green;
padding:15px;
position:absolute;
right:0px;
top:0px;
}
form {
margin: 0px;
}
</style>
<h1>Search the Great Gatsby</h1>
<div class="searchform">
<form method="GET">
Search For: <input name="s" value="%s"> <input type="checkbox" name="whole" value="1"> Whole word
<input type="submit" value="Search">
</form>
</div>
<br>"""%(form.has_key("s") and form["s"].value or "")
# Approximate first-page number of each chapter (index 0 = chapter 1);
# used to interpolate a page estimate for every paragraph hit.
pages = [1, 23, 39, 61, 81, 97, 113, 147, 163, 180 ] ## None ## [3, 16, 26, 39, 52, 62, 93, 103]
retr = ""  # accumulated HTML of all result fragments
num = 0    # number of matching paragraphs
if form.has_key('s'):
    term = form['s'].value.strip()
    iterm=term  # keep the raw term for display; `term` may get regex wrapping
    if form.has_key('whole'):
        # Whole-word search: require non-word characters around the term.
        term='(?<=\W)'+term+'(?=\W)'
    for chapter,text in load_chapters():
        for i,body in enumerate(text.split('\n')):
            all = re.search(term,body,re.I|re.S)
            if pages:
                # Linear interpolation of the page from the paragraph's
                # relative position inside the chapter text.
                pchap = pages[chapter-1]
                #print text.find(body),len(text)
                pat = int(round(float(pages[chapter]-pchap)* (text.find(body)/float(len(text)))+pchap))
            else:
                pat = ""
            # Highlight every occurrence of the term in the paragraph.
            rgx = re.compile(term,re.I)
            bdy = rgx.sub(lambda x:'<span>'+x.group()+'</span>', body)+'<br><br>'
##            bdy = re.sub(term, lambda x:'<span>'+x.group()+'</span>', body)+'<br><br>'
            if all:
##                print (text.find(body)/float(len(text))),float(pages[chapter]-pchap)
##                print float(pages[chapter]-pchap)*(text.find(body)/float(len(text)))+pchap
                retr += print_entry(i,chapter,pat,bdy)
                num+=1
# NOTE(review): when no 's' parameter was submitted, ``iterm`` is undefined
# here and this print raises NameError -- confirm intended usage.
print "<h3>Found %d results for %s</h3>"%(num,iterm)
print retr
"jared@jaredforsyth.com"
] | jared@jaredforsyth.com |
21bc2d0fbd981fbefdd919c846357da41182c5ac | e48eac671ea8335f696ec5fd0511b12800b0fca0 | /accounts/models.py | 6625a130b79945d6d4613bcc9a0b047c32339541 | [] | no_license | linusidom/django-basic-reservation-system | 8fba10708cebd42d5ad308c0ef838a0fe1ac8778 | 5dd5db4832fe83e862424af18aae3aad6cf3f1ed | refs/heads/master | 2020-03-26T08:37:15.052362 | 2018-08-14T11:34:19 | 2018-08-14T11:34:19 | 144,711,607 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
from django.shortcuts import reverse
class Profile(AbstractUser):
    """Custom user model carrying a target body weight; displayed by email."""

    # Target weight (units not stated anywhere in this app -- confirm).
    ideal_weight = models.IntegerField(default=185)

    def __str__(self):
        return self.email

    def get_absolute_url(self):
        # Bug fix: the original referenced a bare, undefined ``pk`` and
        # raised NameError whenever this method was called.
        return reverse('accounts:profile_detail', kwargs={'pk': self.pk})
| [
"linusidom@gmail.com"
] | linusidom@gmail.com |
9e2fd5f4161aa84c62285366896ec09d1372f331 | 522e5c63bd53dccf824c17e882f564e8423331ea | /gbce_trading_unittests.py | 497ea315ef6066e57c1da334e7e5a3c796870a50 | [] | no_license | pankaj-purohit/Assignment | df20a65bb2460cbb28e19b6156003a2611a36f82 | 9ce95b020673ef621e3696b16997c7ff2f989daa | refs/heads/master | 2020-05-31T16:23:37.125624 | 2019-06-05T11:17:14 | 2019-06-05T11:17:14 | 190,380,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,925 | py | import datetime
import unittest
import gbce_trading_config
from gbce_trading import GBCETrading, Stock, Trade
class GBCEUnitTests(unittest.TestCase):
    """Unit tests for the GBCETrading facade (dividend yield, P/E ratio,
    volume-weighted price and the all-share index)."""
    def test_getDividendYield(self):
        """Yield for common and preferred stock; invalid price/symbol raise."""
        stockComm = {'POP': {'Type': 'Common', 'Last Dividend': 8, 'Fixed Dividend': None, 'Par Value': 100, 'Trades': []}}
        testObj = GBCETrading(stockComm)
        self.assertEqual(testObj.getDividendYield(symbol='POP', price=160.0), 0.05)
        self.assertEqual(testObj.getDividendYield(symbol='POP', price=0), None)
        self.assertRaises(ValueError, testObj.getDividendYield, symbol='POP', price=-10.7)
        self.assertRaises(ValueError, testObj.getDividendYield, symbol = 'ABC', price=24.7)
        stockPref = {'JOE': {'Type': 'Preferred', 'Last Dividend': 18, 'Fixed Dividend': 4, 'Par Value': 200, 'Trades': []}}
        testObj2 = GBCETrading(stockPref)
        self.assertAlmostEqual(testObj2.getDividendYield(symbol='JOE', price=254.5), 3.143, 2)
    def test_getPERatio(self):
        """P/E for common and preferred stock; invalid price/symbol raise."""
        stockComm = {'ALE': {'Type': 'Common', 'Last Dividend': 23, 'Fixed Dividend': None, 'Par Value': 60, 'Trades': []}}
        testObj = GBCETrading(stockComm)
        self.assertAlmostEqual(testObj.getPERatio(symbol='ALE', price=88.0), 336.695, 2)
        self.assertEqual(testObj.getPERatio(symbol='ALE', price=0), None)
        self.assertRaises(ValueError, testObj.getPERatio, symbol='POP', price=-75.7)
        self.assertRaises(ValueError, testObj.getPERatio, symbol='XYZ', price=190.2)
        stockPref = {'GIN': {'Type': 'Preferred', 'Last Dividend': 24, 'Fixed Dividend': 14, 'Par Value': 250, 'Trades': []}}
        testObj2 = GBCETrading(stockPref)
        self.assertEqual(testObj2.getPERatio(symbol='GIN', price=350), 35)
    def test_getVolumeWeightedStockPrice(self):
        """VWAP over recent trades; unknown symbol raises, no trades -> None."""
        trade1 = Trade(symbol='TEA', quantity=100, tradeType=gbce_trading_config.TradeType.BUY, price=120.5)
        trade2 = Trade(symbol='TEA', quantity=50, tradeType=gbce_trading_config.TradeType.SELL, price=125.4)
        # Trade older than the VWAP window (7 minutes ago) must be excluded.
        trade3 = Trade(symbol='TEA', quantity=200, tradeType=gbce_trading_config.TradeType.BUY, price=110.5, timestamp=datetime.datetime.now() - datetime.timedelta(minutes=7))
        stockGin = Stock(symbol='GIN')
        stockGin.addStockDetails(type=gbce_trading_config.StockType.COMMON, lastDividend=20, parValue=200)
        stockTea = Stock(symbol='TEA')
        stockTea.addStockDetails(type=gbce_trading_config.StockType.COMMON, lastDividend=0, parValue=100)
        stockTea.recordTrade(trade1)
        stockTea.recordTrade(trade2)
        stockTea.recordTrade(trade3)
        testObj = GBCETrading()
        self.assertAlmostEqual(testObj.getVolumeWeightedStockPrice(symbol='TEA'), 122.133, 2)
        self.assertRaises(ValueError, testObj.getVolumeWeightedStockPrice, symbol='PQR')
        self.assertEqual(testObj.getVolumeWeightedStockPrice(symbol='GIN'), None)
    def test_getGBCEAllShareIndex(self):
        """Index is None until every stock has traded, then the geometric mean."""
        trade1 = Trade(symbol='TEA', quantity=100, tradeType=gbce_trading_config.TradeType.BUY, price=120.5)
        trade2 = Trade(symbol='TEA', quantity=50, tradeType=gbce_trading_config.TradeType.SELL, price=125.4)
        stockGin = Stock(symbol='GIN')
        stockGin.addStockDetails(type=gbce_trading_config.StockType.COMMON, lastDividend=20, parValue=200)
        stockTea = Stock(symbol='TEA')
        stockTea.addStockDetails(type=gbce_trading_config.StockType.COMMON, lastDividend=0, parValue=100)
        stockTea.recordTrade(trade1)
        stockTea.recordTrade(trade2)
        testObj = GBCETrading()
        self.assertEqual(testObj.getGBCEAllShareIndex(), None)
        trade3 = Trade(symbol='GIN', quantity=150, tradeType=gbce_trading_config.TradeType.BUY, price=225.4)
        stockGin.recordTrade(trade3)
        self.assertAlmostEqual(testObj.getGBCEAllShareIndex(), 165.918, 2)
if __name__ == '__main__':
    unittest.main()  # discover and run the tests above when executed directly
"noreply@github.com"
] | pankaj-purohit.noreply@github.com |
c67e75c1252e6c67b7433b5b12e21fc96a3a4abf | 59a4a1409fabdd29c6db85775a549c00b5a7b3b7 | /小型FTP/服务端/server.py | 9d417fab71d1189dbf72fcb676df0e9a7689d76d | [] | no_license | laijin/mini_FTP | 5be28ebe457dac57eef05d48c88e81c01fd0d1da | 01c5777135a019efacddce0566dca1df757b9568 | refs/heads/master | 2020-04-01T17:08:10.404816 | 2018-10-17T07:37:31 | 2018-10-17T07:37:31 | 153,414,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | import socket
import os
import subprocess
import struct
import json
share_dir=r'D:\python3.7\chengxu\luffy\ch-5\小型FTP\客户端\share'
def get(cmds, conn):
    """Serve a ``get <filename>`` request over *conn*.

    Wire format: a 4-byte struct('i') length prefix, then a UTF-8 JSON
    header (filename, md5 placeholder, file_size), then the raw file
    contents streamed from ``share_dir``.

    :param cmds: parsed client command, ``['get', filename]``
    :param conn: connected socket to the client

    Bug fix: the original never closed the file handle it opened for
    streaming; it is now opened with a context manager.
    """
    filename = cmds[1]
    # 1. Build the header describing the payload.
    header_dic = {
        'filename': filename,
        # NOTE(review): placeholder, not a real checksum -- compute
        # hashlib.md5 over the file if integrity checking matters.
        'md5': 'graga',
        'file_size': os.path.getsize(r'%s/%s' % (share_dir, filename))
    }
    header_bytes = json.dumps(header_dic).encode('utf-8')
    # 2. Send the fixed-size length prefix, then the header itself.
    conn.send(struct.pack('i', len(header_bytes)))
    conn.send(header_bytes)
    # 3. Stream the file; ``with`` guarantees the handle is closed.
    with open('%s/%s' % (share_dir, filename), 'rb') as f:
        for line in f:
            conn.send(line)
def server():
    """Accept one client at a time on 127.0.0.1:8081 and dispatch commands.

    Each received line is split into words; only the ``get <filename>``
    command is understood.  A client reset or an empty recv ends that
    client's session and the server waits for the next connection.
    """
    phone=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # Allow quick restarts without "address already in use".
    phone.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
    phone.bind(('127.0.0.1',8081))
    phone.listen(5)
    while True:
        conn,client_addr=phone.accept()
        while True:
            try:
                data=conn.recv(1024)
                if not data:break
                print('客户端的数据为',data)  # "the client's data is ..."
                # conn.send(data.upper())  (commented-out echo experiment)
                # Parse the command line and extract its arguments.
                cmds=data.decode('utf-8').split()
                if cmds[0] == 'get':
                    get(cmds,conn)
            except ConnectionResetError:
                break
        conn.close()
    phone.close()
if __name__ == '__main__':
    server()  # start serving (blocks forever) when run as a script
| [
"noreply@github.com"
] | laijin.noreply@github.com |
f8fdac6b1a2846a9f74f7db1f038fed9022ab0a4 | 5dd03f9bd8886f02315c254eb2569e4b6d368849 | /3rdparty/python/GitPython-0.3.1-py2.6.egg/git/__init__.py | 500d053f7729d7172f300870e30b00ae7a523f09 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | adamsxu/commons | 9e1bff8be131f5b802d3aadc9916d5f3a760166c | 9fd5a4ab142295692994b012a2a2ef3935d35c0b | refs/heads/master | 2021-01-17T23:13:51.478337 | 2012-03-11T17:30:24 | 2012-03-11T17:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | # __init__.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import sys
import inspect
__version__ = '0.3.1'
#{ Initialization
def _init_externals():
    """Initialize external projects by putting them into the path.

    Appends the vendored ``ext/gitdb`` directory to sys.path, then verifies
    that ``gitdb`` is importable, re-raising with a clearer message if not.
    """
    sys.path.append(os.path.join(os.path.dirname(__file__), 'ext', 'gitdb'))
    try:
        import gitdb
    except ImportError:
        raise ImportError("'gitdb' could not be found in your PYTHONPATH")
    #END verify import
#} END initialization
#################
_init_externals()
#################
#{ Imports
from git.config import GitConfigParser
from git.objects import *
from git.refs import *
from git.diff import *
from git.exc import *
from git.db import *
from git.cmd import Git
from git.repo import Repo
from git.remote import *
from git.index import *
from git.util import (
LockFile,
BlockingLockFile,
Stats,
Actor
)
#} END imports
__all__ = [ name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)) ]
| [
"jsirois@twitter.com"
] | jsirois@twitter.com |
d69078e714be2c5e36cd35abb4d251b6fb236980 | 16582b82a7cb119f218b6d4db025c338263852fc | /download.py | 6246ed7d49a2d7362dc394241198fcef86f9194f | [] | no_license | a1004123217/music | 849921ffbeea264e258ea53d9b4c773b21a68a9d | 9ef35653c0872c70463c9802ea6fe318f726cfcf | refs/heads/master | 2022-12-12T11:50:49.233881 | 2020-09-14T10:42:29 | 2020-09-14T10:42:29 | 292,523,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from urllib.request import urlretrieve
import os
# 解决
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
'''
通过txt网址文件,现在图片到本地
'''
def download():
    """Download every file listed in uploadfile.txt into the local data folder.

    Each line of the list file is a URL whose basename looks like
    ``a_b_c_d.ext``; the saved filename joins the third and fourth
    underscore-separated pieces (``cd.ext``), matching the original
    naming scheme.

    Bug fixes: the inner filename scan reused ``i`` and clobbered the
    progress counter, and it indexed ``tmp2[3]`` unguarded, crashing on
    basenames with fewer than four '_' parts.
    """
    categories = ['music']
    for category in categories:
        # Create the per-category target folder (kept from the original,
        # even though files are written to the fixed .../data/json below).
        os.makedirs('/Users/wzy/PycharmProjects/pythonProject/music/data/%s' % category, exist_ok=True)
        # Read the newline-separated URL list.
        with open('/Users/wzy/PycharmProjects/pythonProject/music/beatanddownbeat/uploadfile.txt', 'r') as file:
            urls = file.readlines()
        print(urls)
        # Number of URLs, for the progress output.
        n_urls = len(urls)
        # Walk the URLs and download each one.
        for idx, url in enumerate(urls):
            # Derive the target filename from the URL basename.
            parts = url.strip().split('/')[-1].split('_')
            str1 = parts[2] + parts[3] if len(parts) >= 4 else ''
            print("str1", str1)
            print(str1)
            try:
                # Fetch the file and save it under the derived name.
                urlretrieve(url.strip(), '/Users/wzy/PycharmProjects/pythonProject/music/data/json/%s' % str1)
                print('%s %i/%i' % (category, idx, n_urls))
            except Exception:
                print('%s %i/%i' % (category, idx, n_urls), 'no image')
if __name__ == '__main__':
download() | [
"wzy@wangzhenyudeMacBook-Pro.local"
] | wzy@wangzhenyudeMacBook-Pro.local |
e81f2da8791b183158fe5fb31295415705244e0d | 15fbec197e9e8508bce3189b0d6d6f0b2fc317ee | /core/migrations/0001_initial.py | 74785cf38e45b6aa551bf1a24a0c5aecd7f27a42 | [] | no_license | douglasbastos/minhatabela | d698071b23ecd2c880af790bd1d85be3a04c7a46 | df8499e46b00d45b9cf7ffaf236b1635fa6e2b85 | refs/heads/master | 2021-01-09T20:29:02.844595 | 2016-07-04T04:30:45 | 2016-07-04T04:30:45 | 62,531,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-04 04:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration for the `core` app.

    Creates the Competition, CompetitionHasTeam, Game, Rounds and Team
    models and then wires up their foreign-key / many-to-many relations.
    Auto-generated by Django 1.9.7 -- do not edit once applied, since
    migration history must stay stable.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Competition: a tournament created by a user, with name and logo.
        migrations.CreateModel(
            name='Competition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name=b'Nome')),
                ('logo', models.ImageField(upload_to=b'', verbose_name=b'Logo')),
                ('creator_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Competi\xe7\xe3o',
                'verbose_name_plural': 'Competi\xe7\xf5es',
            },
        ),
        # Through model linking teams to competitions (M2M join table).
        migrations.CreateModel(
            name='CompetitionHasTeam',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
            ],
            options={
                'verbose_name': 'Time/competi\xe7\xe3o',
                'verbose_name_plural': 'Times/competi\xe7\xf5es',
            },
        ),
        # Game: a single match; scores, venue and date are optional until played.
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('result_home', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name=b'Resultado Mandante')),
                ('result_visitor', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name=b'Resultado Visitante')),
                ('local', models.CharField(blank=True, max_length=255, null=True, verbose_name=b'Local')),
                ('date', models.DateTimeField(blank=True, null=True, verbose_name=b'Data e Hora')),
            ],
            options={
                'verbose_name': 'Jogo',
                'verbose_name_plural': 'Jogos',
            },
        ),
        # Rounds: a numbered round within a competition.
        migrations.CreateModel(
            name='Rounds',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.PositiveSmallIntegerField()),
                ('competition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Competition')),
            ],
            options={
                'verbose_name': 'Rodada',
                'verbose_name_plural': 'Rodadas',
            },
        ),
        # Team: a club with a name and a badge image.
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name=b'Nome')),
                ('flag', models.ImageField(upload_to=b'', verbose_name=b'Escudo')),
            ],
            options={
                'verbose_name': 'Time',
                'verbose_name_plural': 'Times',
            },
        ),
        # Relations added after both endpoints exist, to avoid ordering issues.
        migrations.AddField(
            model_name='game',
            name='rounds',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Rounds'),
        ),
        migrations.AddField(
            model_name='game',
            name='team_home',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Mandante', to='core.Team'),
        ),
        migrations.AddField(
            model_name='game',
            name='team_visitor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Visitante', to='core.Team'),
        ),
        migrations.AddField(
            model_name='competitionhasteam',
            name='team',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Team'),
        ),
        # Competition.teams uses CompetitionHasTeam as the explicit through table.
        migrations.AddField(
            model_name='competition',
            name='teams',
            field=models.ManyToManyField(through='core.CompetitionHasTeam', to='core.Team'),
        ),
    ]
| [
"douglashsb@gmail.com"
] | douglashsb@gmail.com |
ea69fdc5fcb293b108694f4a74dd68b98284def9 | c744b461256a699d2f039edee0d09344523e86ae | /src/bloch_plot_state.py | a772c653ab022806f6e410d7ee7455d904acab47 | [] | no_license | rajeshn76/Blochsphere_Demo | 672e348991d8918d858464ac2ee7c2827e38b837 | 46371dca1b390a639da25d60bce2fce4174c8874 | refs/heads/master | 2020-04-01T15:54:21.442509 | 2018-10-26T14:59:45 | 2018-10-26T14:59:45 | 153,356,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer
from qiskit.tools.visualization import plot_state, plot_histogram
# Build a single-qubit circuit with one classical bit.
q = QuantumRegister(1)
c = ClassicalRegister(1)
qc = QuantumCircuit(q, c)
# Apply a sequence of single-qubit rotations to prepare a state.
# NOTE(review): .inverse() returns a new instruction rather than modifying
# the circuit in place, so the gate appended here is S, not S-dagger --
# confirm which was intended.
qc.s(q).inverse()
qc.h(q)
qc.rz(np.pi/4, q)
qc.rx(-np.pi/4, q)
qc.barrier()
# Simulate the circuit and extract the final statevector.
backend = Aer.get_backend('statevector_simulator')
job = execute(qc, backend, shots=1024)
result = job.result()
data = np.round(result.get_data(qc)['statevector'], 5)
print(data)
# Visualize the same state in four different representations.
plot_state(data, 'city')
plot_state(data, 'bloch')
plot_state(data, 'qsphere')
plot_state(data, 'paulivec')
"rajeshn76@gmail.com"
] | rajeshn76@gmail.com |
2cdcc3f42550aff81547194ab8337407a6b72240 | 0d6c0df740fad947cd489bcd81c067c2a50e6508 | /projects/hog/tests/05.py | 29eebe503fa8fa044c6c7cefc608ea2cdc7710da | [
"MIT"
] | permissive | louieyan/CS61A | 39f317db01c25219ef813dd8b0af788441ba700f | efa7e7d1becbea776ccfa545197a50c20b85d4fb | refs/heads/master | 2022-03-08T03:41:57.052682 | 2022-02-28T02:03:30 | 2022-02-28T02:03:30 | 148,614,155 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,687 | py | test = {
'name': 'Question 5',
'points': 2,
'suites': [
{
'cases': [
{
'answer': 'While score0 and score1 are both less than goal',
'choices': [
'While score0 and score1 are both less than goal',
'While at least one of score0 or score1 is less than goal',
'While score0 is less than goal',
'While score1 is less than goal'
],
'hidden': False,
'locked': False,
'question': r"""
The variables score0 and score1 are the scores for Player 0
and Player 1, respectively. Under what conditions should the
game continue?
"""
},
{
'answer': 'A function that returns the number of dice a player will roll',
'choices': [
'The number of dice a player will roll',
'A function that returns the number of dice a player will roll',
"A player's desired turn outcome"
],
'hidden': False,
'locked': False,
'question': 'What is a strategy in the context of this game?'
},
{
'answer': 'strategy1(score1, score0)',
'choices': [
'strategy1(score1, score0)',
'strategy1(score0, score1)',
'strategy1(score1)',
'strategy1(score0)'
],
'hidden': False,
'locked': False,
'question': r"""
If strategy1 is Player 1's strategy function, score0 is
Player 0's current score, and score1 is Player 1's current
score, then which of the following demonstrates correct
usage of strategy1?
"""
}
],
'scored': False,
'type': 'concept'
},
{
'cases': [
{
'code': r"""
>>> #
>>> # Play function stops at goal
>>> s0, s1 = hog.play(always(5), always(3), score0=91, score1=10, dice=always_three)
>>> s0
106
>>> s1
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> #
>>> # Goal score is not hardwired
>>> s0, s1 = hog.play(always(5), always(5), goal=10, dice=always_three)
>>> s0
15
>>> s1
0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import hog
>>> always_three = hog.make_test_dice(3)
>>> always = hog.always_roll
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> #
>>> # Use strategies
>>> # We recommend working this out turn-by-turn on a piece of paper.
>>> strat0 = lambda score, opponent: opponent % 10
>>> strat1 = lambda score, opponent: score // 10
>>> s0, s1 = hog.play(strat0, strat1, score0=41, score1=80, dice=always_three)
>>> s0
51
>>> s1
104
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import hog
>>> always_three = hog.make_test_dice(3)
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> #
>>> # Goal edge case
>>> s0, s1 = hog.play(always(4), always(3), score0=88, score1=20, dice=always_three)
>>> s0
20
>>> s1
100
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> #
>>> # Player 1 win
>>> s0, s1 = hog.play(always(4), always(4), score0=87, score1=88, dice=always_three)
>>> s0
99
>>> s1
100
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Check strategies are actually used correctly
>>> strat0 = lambda score, opponent: opponent % 10
>>> strat1 = lambda score, opponent: opponent // 10
>>> s0, s1 = hog.play(strat0, strat1, score0=40, score1=92, dice=always_three)
>>> s0
101
>>> s1
73
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> #
>>> # Swine swap applies during Player 1 turn
>>> s0, s1 = hog.play(always(4), always(4), score0=42, score1=96, dice=always_three)
>>> s0
108
>>> s1
54
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> #
>>> # Free bacon refers to correct opponent score
>>> s0, s1 = hog.play(always(0), always(0), score0=11, score1=99, dice=always_three)
>>> s0
13
>>> s1
103
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> #
>>> # Handle multiple turns with many swaps
>>> s0, s1 = hog.play(always(0), always(2), goal=15, dice=always_three)
>>> s0
16
>>> s1
2
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import hog
>>> always_three = hog.make_test_dice(3)
>>> always = hog.always_roll
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> tests.play_utils.check_play_function(hog)
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> # Fuzz Testing
>>> # Plays a lot of random games, and calculates a secret value.
>>> # Failing this test means something is wrong, but you should
>>> # look at other tests to see where the problem might be.
>>> # Hint: make sure you're only calling take_turn once per turn!
>>> #
>>> import hog, importlib
>>> importlib.reload(hog)
>>> import tests.play_utils
""",
'teardown': r"""
""",
'type': 'doctest'
}
]
}
| [
"noreply@github.com"
] | louieyan.noreply@github.com |
433233f2557ef53958b800aa05a1f64706518639 | 7170b86508060488b07b07738ee6742ac9a3bc3c | /zeusci/zeus/tests/test_api_builds.py | 6b2e3ea7484adedd9b2c399a3b2cfaf30bcd9cf7 | [] | no_license | lukaszb/zeusci | 3bb98ca8c35633613c40dc8ea1af010487b8e155 | 8a0d2e99ef4f2d12c9d90a9d3aad82cd4ade7d8c | refs/heads/master | 2020-05-18T16:36:19.349260 | 2013-10-20T08:29:29 | 2013-10-20T08:29:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,784 | py | from .test_api_base import BaseApiTestCase
from django.core.urlresolvers import reverse
from django.core.cache import cache
from zeus.models import Build
from zeus.models import Buildset
from zeus.models import Command
from zeus.models import Output
from zeus.models import Project
from zeus.models import Status
import datetime
class TestBuildApi(BaseApiTestCase):
    """API tests for the build detail endpoint (`zeus_api_build_detail`).

    setUp creates one project with one buildset containing two builds:
    build1 has two commands (one passed, one still running), build2 has
    no commands and fixed timestamps.
    """
    # Show full diffs on dict-comparison failures.
    maxDiff = None
    def setUp(self):
        """Create the project/buildset/build/command fixtures and clear the cache."""
        zeus = Project.objects.create(
            name='zeus',
            url='https://github.com/lukaszb/zeus',
            repo_url='git://github.com/lukaszb/zeus.git',
        )
        self.buildset = Buildset.objects.create(
            project=zeus,
            number=1,
        )
        self.build1 = Build.objects.create(
            buildset=self.buildset,
            number=1,
        )
        # Command 1: finished successfully (PASSED, returncode 0).
        self.build1_cmd1 = Command.objects.create(
            number=1,
            build=self.build1,
            title='Step 1 -- Configuration',
            cmd=['./configure'],
        )
        output = Output.objects.create(output='Configured')
        self.build1_cmd1.command_output = output
        delta = datetime.timedelta(seconds=2)
        self.build1_cmd1.started_at = self.build1_cmd1.created_at + delta
        self.build1_cmd1.finished_at = self.build1_cmd1.started_at + delta
        self.build1_cmd1.returncode = 0
        self.build1_cmd1.status = Status.PASSED
        self.build1_cmd1.save()
        # Command 2: still RUNNING (no finished_at / returncode yet).
        self.build1_cmd2 = Command.objects.create(
            number=2,
            build=self.build1,
            title='Step 2 -- Build',
            cmd=['make', 'all'],
        )
        output = Output.objects.create(output='Build in progress ...')
        self.build1_cmd2.command_output = output
        self.build1_cmd2.started_at = self.build1_cmd2.created_at
        self.build1_cmd2.status = Status.RUNNING
        self.build1_cmd2.save()
        # Second build: no commands, fixed creation/finish timestamps.
        dt = datetime.datetime(2013, 7, 2, 22, 8)
        self.build2 = Build(
            buildset=self.buildset,
            number=2,
            created_at=dt,
            finished_at=(dt + datetime.timedelta(seconds=3)),
        )
        self.build2.save()
        # Ensure no cached API responses leak between tests.
        cache.clear()
    def test_build_detail(self):
        """GET returns the full serialized build, including its commands."""
        url_params = {'name': 'zeus', 'buildset_no': 1, 'build_no': 1}
        url = reverse('zeus_api_build_detail', kwargs=url_params)
        response = self.client.get(url)
        expected = {
            'uri': self.make_api_build_detail_url('zeus', 1, 1),
            'url': self.build1.get_absolute_url(),
            'number': 1,
            'created_at': self.build1.created_at,
            'finished_at': self.build1.finished_at,
            'status': 'running',
            'commands': [
                {
                    'number': 1,
                    'title': 'Step 1 -- Configuration',
                    'cmd': './configure',
                    'output': 'Configured',
                    'started_at': self.build1_cmd1.started_at,
                    'finished_at': self.build1_cmd1.finished_at,
                    'status': 'passed',
                    'returncode': 0,
                },
                {
                    'number': 2,
                    'title': 'Step 2 -- Build',
                    'cmd': 'make all',
                    'output': 'Build in progress ...',
                    'started_at': self.build1_cmd2.started_at,
                    'finished_at': None,
                    'status': 'running',
                    'returncode': None,
                },
            ],
        }
        self.assertDictEqual(response.data, expected)
        # A build with no commands serializes with an empty command list.
        url_params = {'name': 'zeus', 'buildset_no': 1, 'build_no': 2}
        url = reverse('zeus_api_build_detail', kwargs=url_params)
        response = self.client.get(url)
        expected = {
            'uri': self.make_api_build_detail_url('zeus', 1, 2),
            'url': self.build2.get_absolute_url(),
            'number': 2,
            'created_at': self.build2.created_at,
            'finished_at': self.build2.finished_at,
            'status': 'pending',
            'commands': [],
        }
        self.assertDictEqual(response.data, expected)
    def test_build_restart_fails_if_build_is_still_running(self):
        """PUT (restart) on a running build is rejected with 409 Conflict."""
        url_params = {'name': 'zeus', 'buildset_no': 1, 'build_no': 1}
        url = reverse('zeus_api_build_detail', kwargs=url_params)
        response = self.client.put(url)
        self.assertEqual(response.status_code, 409)
    def test_build_restart(self):
        """PUT on a finished build resets it to PENDING and drops its commands."""
        url_params = {'name': 'zeus', 'buildset_no': 1, 'build_no': 1}
        url = reverse('zeus_api_build_detail', kwargs=url_params)
        # Mark the build finished (failed) so the restart is allowed.
        self.build1.commands.update(status=Status.FAILED)
        response = self.client.put(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['status'], Status.PENDING)
        self.assertIsNone(response.data['finished_at'])
        self.assertEqual(self.build1.commands.count(), 0)
    def test_build_status_is_changing_correctly(self):
        """Build status is derived from the statuses of its commands."""
        self.assertEqual(self.build1.status, Status.RUNNING)
        self.build1_cmd1.status = Status.FAILED
        self.build1_cmd1.save()
        self.assertEqual(self.build1.status, Status.FAILED)
        self.build1.commands.update(status=Status.PASSED)
        self.assertEqual(self.build1.status, Status.PASSED)
        self.build1.commands.all().delete()
        self.assertEqual(self.build1.status, Status.PENDING)
        # Create a command and make sure status is RUNNING once again
        Command.objects.create(
            number=1,
            build=self.build1,
            title='Step 1 -- Configuration',
            cmd=['./configure'],
            status=Status.RUNNING,
        )
        self.assertEqual(self.build1.status, Status.RUNNING)
| [
"lukaszbalcerzak@gmail.com"
] | lukaszbalcerzak@gmail.com |
39ecdda194fb35100d42e0cdad25360099fde4b9 | 665089f28faa90d31a3ba4d62246c8323e4de9f1 | /communityid/algo.py | 366748f6056a748a8ccb618adde26f5c7098b04f | [
"BSD-2-Clause"
] | permissive | corelight/pycommunityid | 64b7385609bdd5b329d95b99a1389ddb5257033a | 64a6fa18ed2dc9ea1f6fed04e670660a9854a632 | refs/heads/master | 2022-04-29T07:09:53.287643 | 2022-03-05T00:45:39 | 2022-03-05T00:45:53 | 196,440,073 | 22 | 5 | BSD-3-Clause | 2022-03-05T00:43:57 | 2019-07-11T17:40:14 | Python | UTF-8 | Python | false | false | 17,482 | py | """
This module implements Community ID network flow hashing.
"""
import abc
import base64
import collections
import hashlib
import logging
import socket
import string
import struct
from communityid import error
from communityid import compat
from communityid import icmp
from communityid import icmp6
from . import LOG
# Proper enums here would be nice, but this aims to support Python
# 2.7+ and while there are ways to get "proper" enums pre-3.0, it just
# seems overkill. --cpk
PROTO_ICMP = 1
PROTO_TCP = 6
PROTO_UDP = 17
PROTO_ICMP6 = 58
PROTO_SCTP = 132
# The set of protocols we explicitly support as port-enabled:
# Community ID computations on those protocols should be based on a
# five-tuple.
PORT_PROTOS = set([PROTO_ICMP, PROTO_TCP, PROTO_UDP, PROTO_ICMP6, PROTO_SCTP])
class FlowTuple:
    """
    Tuples of network flow endpoints, used as input for the Community
    ID computation. These tuple objects are flexible regarding the
    input data types -- for the addresses you can use NBO byte-strings
    or ASCII, for example. They usually are 5-tuples of address & port
    pairs, plus IP protocol number, but port-less tuples are supported
    for less common IP payloads.
    """
    # Lightweight record type returned by get_data().
    Data = collections.namedtuple(
        'Data', ['proto', 'saddr', 'daddr', 'sport', 'dport'])
    def __init__(self, proto, saddr, daddr, sport=None, dport=None,
                 is_one_way=False):
        """Tuple initializer.
        The proto argument is a non-negative integer and represents an
        IP protocol number, e.g. 6 for TCP. You can use the PROTO_*
        constants if convenient, and communityid.get_proto() to help
        convert to integer.
        The saddr and daddr arguments are source & destination IP
        addresses, either IPv4 or IPv6. Multiple data types are
        supported, including bytes (as str in older Pythons, or the
        explicit bytes type), IPv4Address, IPv6Address, and string
        representations.
        The sport and dport arguments are numeric port numbers, either
        provided as ints or in packed 16-bit network byte order, of
        type "bytes". When the protocol number is one of PORT_PROTOS
        (TCP, UDP, etc), they are required. For other IP protocols
        they are optional.
        The optional Boolean is_one_way argument indicates whether the
        tuple captures a bidirectional flow (the default) or
        not. Setting this to true means that the computation will
        consider the tuple directional and not try to pair up with
        flipped-endpoint tuples. Normally you don't need to pass this.
        This can raise FlowTupleErrors when the input is inconsistent.
        """
        self.proto = proto
        self.saddr, self.daddr = saddr, daddr
        self.sport, self.dport = sport, dport
        # Validate all inputs up front so downstream code can assume a
        # well-formed tuple.
        if proto is None or type(proto) != int:
            raise error.FlowTupleError('Need numeric protocol number')
        if saddr is None or daddr is None:
            raise error.FlowTupleError('Need source and destination address')
        if not self.is_ipaddr(saddr):
            raise error.FlowTupleError('Unsupported format for source IP address "%s"' % saddr)
        if not self.is_ipaddr(daddr):
            raise error.FlowTupleError('Unsupported format for destination IP address "%s"' % daddr)
        if ((sport is None and dport is not None) or
            (dport is None and sport is not None)):
            raise error.FlowTupleError('Need either both or no port numbers')
        if sport is not None and not self.is_port(sport):
            raise error.FlowTupleError('Source port "%s" invalid' % sport)
        if dport is not None and not self.is_port(dport):
            raise error.FlowTupleError('Destination port "%s" invalid' % dport)
        if proto in PORT_PROTOS and sport is None:
            raise error.FlowTupleError('Need port numbers for port-enabled protocol %s' % proto)
        # Our ICMP handling directly mirrors that of Zeek, since it
        # tries hardest to map ICMP into traditional 5-tuples. For
        # this, it evaluates the message type & code to identify
        # whether the notion of two-way communication applies. If not,
        # tuple-flipping isn't an option either. The following flag
        # stores this result, assuming by default we're bidirectional.
        self.is_one_way = is_one_way
        # The rest of the constructor requires ports.
        if sport is None or dport is None:
            return
        # If we're explicitly told this is a one-way flow-tuple, we
        # don't need to consider directionality further. And, testing
        # directionality only makes sense when the ports are integers,
        # not lower-level NBO representations. Throughout we need to
        # keep track of the types of the ports, since the ICMP logic
        # works only with regular ints.
        if self.is_one_way is False:
            if self.proto == PROTO_ICMP:
                sport, dport, self.is_one_way = icmp.get_port_equivalents(
                    self._port_to_int(sport), self._port_to_int(dport))
                self.sport = self._port_to_same(sport, self.sport)
                self.dport = self._port_to_same(dport, self.dport)
            elif self.proto == PROTO_ICMP6:
                sport, dport, self.is_one_way = icmp6.get_port_equivalents(
                    self._port_to_int(sport), self._port_to_int(dport))
                self.sport = self._port_to_same(sport, self.sport)
                self.dport = self._port_to_same(dport, self.dport)
    def __repr__(self):
        """Human-readable endpoint summary, e.g. '1.2.3.4 10 -> 5.6.7.8 20, proto 6, ordered'."""
        data = self.get_data()
        ordered = 'ordered' if self.is_ordered() else 'flipped'
        if data.sport is None or data.dport is None:
            return '%s -> %s, proto %s, %s' % (
                data.saddr, data.daddr, data.proto, ordered)
        return '%s %s -> %s %s, proto %s, %s' % (
            data.saddr, data.sport, data.daddr, data.dport, data.proto, ordered)
    def get_data(self):
        """
        Returns a FlowTuple.Data namedtuple with this flow tuple's
        data. The protocol is an integer number (e.g. 6 for TCP),
        saddr and daddr are ASCII-rendered/unpacked, and the ports
        are integers or None, if absent.
        """
        # Absent good types, make it best-effort to get these
        # renderable. If all characters are printable, we assume this
        # is not NBO.
        saddr, daddr, sport, dport = self.saddr, self.daddr, self.sport, self.dport
        if compat.have_real_bytes_type() and isinstance(saddr, bytes):
            saddr = self._addr_to_ascii(saddr)
        elif compat.is_ipaddress_type(saddr):
            saddr = saddr.exploded
        elif not all(c in string.printable for c in saddr):
            saddr = self._addr_to_ascii(saddr)
        if compat.have_real_bytes_type() and isinstance(daddr, bytes):
            daddr = self._addr_to_ascii(daddr)
        elif compat.is_ipaddress_type(daddr):
            daddr = daddr.exploded
        elif not all(c in string.printable for c in daddr):
            daddr = self._addr_to_ascii(daddr)
        # Ports: unpack 2-byte NBO representations into plain ints.
        if sport is not None and not isinstance(sport, int):
            sport = struct.unpack('!H', sport)[0]
        if dport is not None and not isinstance(dport, int):
            dport = struct.unpack('!H', dport)[0]
        return FlowTuple.Data(self.proto, saddr, daddr, sport, dport)
    def is_ordered(self):
        """
        Predicate, returns True when this flow tuple is ordered.
        A flow tuple is ordered when any of the following are true:
        - It's marked as a one-way flow.
        - Its source IP address is smaller than its dest IP address, both in
          network byte order (NBO).
        - The IP addresses are equal and the source port is smaller than the
          dest port, in NBO.
        """
        nbo = self.in_nbo()
        return (nbo.is_one_way or nbo.saddr < nbo.daddr or
                (nbo.saddr == nbo.daddr and
                 nbo.sport is not None and nbo.dport is not None and
                 nbo.sport < nbo.dport))
    def has_ports(self):
        """
        Predicate, returns True when this tuple features port numbers.
        """
        return self.sport is not None and self.dport is not None
    def in_order(self):
        """
        Returns a copy of this tuple that is ordered canonically. Ie, regardless
        of the src/dest IP addresses and ports, the returned tuple will be be
        the same: the source side will contain the smaller endpoint (see
        FlowTuple.is_ordered() for details).
        """
        if self.is_ordered():
            return FlowTuple(self.proto, self.saddr, self.daddr,
                             self.sport, self.dport, self.is_one_way)
        return FlowTuple(self.proto, self.daddr, self.saddr,
                         self.dport, self.sport, self.is_one_way)
    def in_nbo(self):
        """
        Returns a copy of this tuple where the addresses and port are
        rendered into NBO byte strings.
        """
        saddr = self._addr_to_nbo(self.saddr)
        daddr = self._addr_to_nbo(self.daddr)
        if isinstance(self.sport, int):
            sport = struct.pack('!H', self.sport)
        else:
            sport = self.sport
        if isinstance(self.dport, int):
            dport = struct.pack('!H', self.dport)
        else:
            dport = self.dport
        return FlowTuple(self.proto, saddr, daddr, sport, dport, self.is_one_way)
    @staticmethod
    def is_ipaddr(val):
        """True when val is an IP address in any supported representation."""
        return (FlowTuple.addr_is_text(val) or
                FlowTuple.addr_is_packed(val) or
                FlowTuple.addr_is_ipaddress_type(val))
    @staticmethod
    def addr_is_text(addr):
        """True when addr parses as an ASCII IPv4/IPv6 address string."""
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_pton(family, addr)
                return True
            except (socket.error, TypeError):
                pass
        return False
    @staticmethod
    def addr_is_packed(addr):
        """True when addr is a packed (NBO bytes) IPv4/IPv6 address."""
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_ntop(family, addr)
                return True
            except (socket.error, ValueError, TypeError):
                pass
        return False
    @staticmethod
    def addr_is_ipaddress_type(addr):
        """True when addr is an ipaddress.IPv4Address/IPv6Address instance."""
        return compat.is_ipaddress_type(addr)
    @staticmethod
    def is_port(val):
        """True when val is a valid port number, as an int or 2-byte NBO."""
        if isinstance(val, bytes):
            try:
                port = struct.unpack('!H', val)[0]
                return 0 <= port <= 65535
            except (struct.error, IndexError, TypeError):
                pass
        if isinstance(val, int):
            return 0 <= val <= 65535
        return False
    @staticmethod
    def _port_to_int(port):
        """Convert a port number to regular integer."""
        if isinstance(port, int):
            return port
        # Assume it's two bytes in NBO:
        return struct.unpack('!H', port)[0]
    @staticmethod
    def _port_to_nbo(port):
        """Convert a port number to 2-byte NBO."""
        if isinstance(port, int):
            return struct.pack('!H', port)
        # Assume it's two bytes in NBO
        return port
    @staticmethod
    def _port_to_same(port, sample):
        """Convert a port number to the same type as that of another instance."""
        if isinstance(sample, int):
            return FlowTuple._port_to_int(port)
        return FlowTuple._port_to_nbo(port)
    @staticmethod
    def _addr_to_ascii(addr):
        """Best-effort conversion of an address to its ASCII text form."""
        if compat.is_ipaddress_type(addr):
            return addr.exploded
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                return socket.inet_ntop(family, addr)
            except (socket.error, ValueError, TypeError):
                pass
        # Fall through: return the input unchanged when no conversion applies.
        return addr
    @staticmethod
    def _addr_to_nbo(addr):
        """Best-effort conversion of an address to packed NBO bytes."""
        if compat.is_ipaddress_type(addr):
            return addr.packed
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                return socket.inet_pton(family, addr)
            except (socket.error, TypeError):
                pass
        # Fall through: return the input unchanged when no conversion applies.
        return addr
    # Convenience wrappers for making protocol-specific tuple instances.
    @classmethod
    def make_tcp(cls, saddr, daddr, sport, dport):
        return cls(PROTO_TCP, saddr, daddr, int(sport), int(dport))
    @classmethod
    def make_udp(cls, saddr, daddr, sport, dport):
        return cls(PROTO_UDP, saddr, daddr, int(sport), int(dport))
    @classmethod
    def make_sctp(cls, saddr, daddr, sport, dport):
        return cls(PROTO_SCTP, saddr, daddr, int(sport), int(dport))
    @classmethod
    def make_icmp(cls, saddr, daddr, mtype, mcode):
        # ICMP message type/code stand in for the port pair.
        return cls(PROTO_ICMP, saddr, daddr, int(mtype), int(mcode))
    @classmethod
    def make_icmp6(cls, saddr, daddr, mtype, mcode):
        # ICMPv6 message type/code stand in for the port pair.
        return cls(PROTO_ICMP6, saddr, daddr, int(mtype), int(mcode))
    @classmethod
    def make_ip(cls, saddr, daddr, proto):
        # Port-less tuple for other IP payloads.
        return cls(proto, saddr, daddr)
class CommunityIDBase:
    """Abstract interface for Community ID computations over FlowTuples.
    NOTE(review): `__metaclass__` is the Python 2 idiom; on Python 3 it has
    no effect, so abstract-method enforcement is not active there -- confirm
    whether `class CommunityIDBase(abc.ABC)` is intended once Py2 support
    is dropped.
    """
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def get_error(self):
        """
        Error handler. After something fails during the ID computation,
        this method should return an explanation why.
        """
        return None
    @abc.abstractmethod
    def calc(self, tpl):
        """
        Entrypoint to the ID computation, given a FlowTuple instance.
        Returns a string containing the Community ID value, or None on
        error.
        """
        return None
    @abc.abstractmethod
    def hash(self, tpl):
        """
        The tuple-hashing part of the computation. Returns hashlib
        algorithm instance ready for digesting, or None on error.
        """
        return None
    @abc.abstractmethod
    def render(self, hashstate):
        """
        The rendering part of the computation. Receives a hashlib
        algorithm instance and returns a string containing the
        community ID value according to this instance's configuration,
        or None on error.
        """
        return None
class CommunityID(CommunityIDBase):
    """
    An algorithm object that computes Community IDs on FlowTuple instances.
    """
    def __init__(self, seed=0, use_base64=True):
        """Initializer.
        The seed argument is a 16-bit integer mixed into the hash so that
        different deployments can produce distinct IDs; use_base64 selects
        between base64 (the spec default) and hex digest rendering.
        """
        self._version = 1
        self._seed = seed
        self._use_base64 = use_base64
        # Holds the most recent error message; see get_error().
        self._err = None
    def __repr__(self):
        return 'CommunityID(v=%s,seed=%s,base64=%s)' \
            % (self._version, self._seed, self._use_base64)
    def get_error(self):
        """
        Returns an error string when problems came up during the
        computation. This is only valid directly after calc() returned
        None, i.e., something went wrong during the calculation.
        """
        return self._err
    def calc(self, tpl):
        """
        The biggie: given a FlowTuple instance, returns a string
        containing the Community ID. In case of problems, returns
        None. In that case consider get_error() to learn more about
        what happened.
        """
        LOG.info('CommunityID for %s:' % tpl)
        # Canonicalize: network byte order first, then endpoint ordering,
        # so that both flow directions hash to the same ID.
        tpl = tpl.in_nbo().in_order()
        return self.render(self.hash(tpl))
    def hash(self, tpl):
        """SHA-1 over the seed plus the canonicalized tuple's byte layout."""
        hashstate = hashlib.sha1()
        def hash_update(data, context):
            # Handy for troubleshooting: show the hashed byte sequence
            # when verbosity level is at least INFO (-vv):
            if LOG.isEnabledFor(logging.INFO):
                # On Python 2.7 the data comes as a string, later it's bytes.
                # The difference matters for rendering the byte hex values.
                if isinstance(data, str):
                    hexbytes = ':'.join('%02x' % ord(b) for b in data)
                else:
                    hexbytes = ':'.join('%02x' % b for b in data)
                LOG.info('| %-7s %s' % (context, hexbytes))
            hashstate.update(data)
            return len(data)
        try:
            dlen = hash_update(struct.pack('!H', self._seed), 'seed') # 2-byte seed
            dlen += hash_update(tpl.saddr, 'ipaddr') # 4 bytes (v4 addr) or 16 bytes (v6 addr)
            dlen += hash_update(tpl.daddr, 'ipaddr') # 4 bytes (v4 addr) or 16 bytes (v6 addr)
            dlen += hash_update(struct.pack('B', tpl.proto), 'proto') # 1 byte for transport proto
            dlen += hash_update(struct.pack('B', 0), 'padding') # 1 byte padding
            if tpl.has_ports():
                dlen += hash_update(tpl.sport, 'port') # 2 bytes
                dlen += hash_update(tpl.dport, 'port') # 2 bytes
        except struct.error as err:
            self._err = 'Could not pack flow tuple: %s' % err
            LOG.warning(self._err)
            return None
        # The data structure we hash should always align on 32-bit
        # boundaries.
        if dlen % 4 != 0:
            self._err = 'Unexpected hash input length: %s' % dlen
            LOG.warning(self._err)
            return None
        return hashstate
    def render(self, hashstate):
        """Render the digest as '<version>:<base64-or-hex>'; None passes through."""
        if hashstate is None:
            return None
        # Unless the user disabled the feature, base64-encode the
        # (binary) hash digest. Otherwise, print the ASCII digest.
        if self._use_base64:
            return str(self._version) + ':' + base64.b64encode(hashstate.digest()).decode('ascii')
        return str(self._version) + ':' + hashstate.hexdigest()
| [
"christian@corelight.com"
] | christian@corelight.com |
996aac45cc3fff5b7b5a9eb0567f864fdb8f7981 | 8c4af05e0257661195c95b0b9e0873eeb6391dab | /packages/python-packages/apiview-gpt/src/_models.py | 85d435a1aa1dd9ca713a5cccc403df55f16ebc0d | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Azure/azure-sdk-tools | 6d171054800807fcbe7b8b878c5903a202d31faa | 2dce521dedc3f5169007d4c481ae8ec077be4450 | refs/heads/main | 2023-09-01T00:00:32.662190 | 2023-08-31T22:21:44 | 2023-08-31T22:21:44 | 170,592,186 | 113 | 174 | MIT | 2023-09-14T21:53:41 | 2019-02-13T22:55:46 | C# | UTF-8 | Python | false | false | 744 | py | from pydantic import BaseModel, Field
from typing import List, Optional
class Violation(BaseModel):
    """One guideline violation reported for a reviewed API surface."""
    rule_ids: List[str] = Field(description="unique rule ID or IDs that were violated.")
    line_no: Optional[int] = Field(description="the line number of the violation.")
    bad_code: str = Field(description="the original code that was bad, cited verbatim.")
    suggestion: str = Field(description="the suggested fix for the bad code.")
    comment: str = Field(description="a comment about the violation.")
class GuidelinesResult(BaseModel):
    """Overall result of a guidelines check: a status plus any violations."""
    status: str = Field(description="Succeeded if the request has no violations. Error if there are violations.")
    violations: List[Violation] = Field(description="list of violations if any")
| [
"noreply@github.com"
] | Azure.noreply@github.com |
ad74fb1924b3b8c48c2d09b0bbc03cd105920500 | af97a81a69d100e684b8e9f02bc1b117069b9895 | /Testcases_Copilot/CWE-730/codeql_2.py | 1c59968d10ace977410229a13e67c6c9188734a4 | [] | no_license | s2e-lab/SecurityEval | 8de3e2f78fffa7058d79cce84fbcd8ff29c8468d | f637f67b8e8d52d641ab15903e46a41ead7e30a5 | refs/heads/main | 2023-06-17T22:55:08.047769 | 2023-06-16T16:54:54 | 2023-06-16T16:54:54 | 509,664,226 | 28 | 7 | null | 2023-05-27T04:55:05 | 2022-07-02T05:10:17 | Python | UTF-8 | Python | false | false | 294 | py | from flask import request
import re
@app.route("/direct")
def direct():
'''
get the regular expression pattern from the get request, comipile it and then use it for search
'''
pattern = request.args.get('pattern')
regex = re.compile(pattern)
return regex.search(text)
| [
"lsiddiqsunny@gmail.com"
] | lsiddiqsunny@gmail.com |
05e6de66f7d42a5e5cc0ec5d44e586790873c9b5 | 46cbc45de7cd3ad8c565603918dcec8f52eeab82 | /11.12 Selenium单元测试.py | fdbf1a76c6ecbf251ecacdab7aa61d0b2dc057ea | [] | no_license | Patrickctyyx/WebScrapers | 8bb946a2b60e21426e1b3048bb4752d71c79b401 | 02ebd9bef8242223b1e73ade0a4290235bebf668 | refs/heads/master | 2020-05-29T08:52:18.017387 | 2016-11-16T14:05:52 | 2016-11-16T14:05:52 | 69,369,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from selenium import webdriver
# If the assertion passes there is no output;
# if it fails, an AssertionError is raised.
driver = webdriver.PhantomJS(executable_path='/home/patrick/phantomjs-2.1.1-linux-x86_64/bin/phantomjs')
driver.get('http://en.wikipedia.org/wiki/Kevin_Durant')
# This is just Python's built-in assert statement...
assert 'Kevin Durant' in driver.title
driver.close()
| [
"873948000@qq.com"
] | 873948000@qq.com |
6278c357cec08deee02c0d6bb03990e43931a5d9 | 1f5ff597e967a63de3e005b876b644e25cd08dfc | /stage3.py | 873501d6cef32b2731fb6e2a05ffb818309dec3e | [] | no_license | codefloww/BBA | 713f7e1a96c8f291d917d5a44f8ae7f5e2d5e09e | 2b0c4c1a408dd41960f736a2f20336db71995b82 | refs/heads/main | 2023-08-24T23:08:16.814240 | 2021-10-31T13:35:45 | 2021-10-31T13:35:45 | 422,621,197 | 0 | 0 | null | 2021-10-31T13:35:46 | 2021-10-29T15:14:34 | Python | UTF-8 | Python | false | false | 1,753 | py | import main
import os
import pygame
import physics
# Initialise the pygame audio mixer up front so Window.play_audio can be used.
pygame.mixer.init()
def stage():
    """Run stage 3 of the game: build the level, then run the main loop until
    the window is closed or the player dies.

    Returns:
        player.alive -- False when the player died during the stage.
    """
    # Window(width, height, music file, background image) -- project class.
    stage3 = main.Window(1920, 1080, "Enter.mp3", "background3.jpg")
    stage3.play_audio("start")
    running = True
    objects = []
    moveable = [] # Each moveable object, basically mobs and player
    # Wolf(x, y, ...) -- the player entity; argument meanings live in main.Wolf.
    player = main.Wolf(100, 100, 100, 10, "Wolf")
    moveable.append(player)
    objects.append(player)
    # Two static platforms: Road(x, y, kind, width, height).
    soil1 = main.Road(100, 500, "ground", 900, 50)
    soil2 = main.Road(300, 400, "ground", 100, 100)
    objects.append(soil1)
    objects.append(soil2)
    # NOTE(review): objects_unmoveable is built but never used below.
    objects_unmoveable = objects.copy()
    objects_unmoveable.remove(player)
    clock = pygame.time.Clock()
    while running:
        clock.tick(120)  # cap the loop at 120 frames per second
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.KEYDOWN:
                # S stops and P resumes the stage soundtrack.
                if event.key == pygame.K_s:
                    stage3.play_audio("stop")
                if event.key == pygame.K_p:
                    stage3.play_audio("play")
        for entity in moveable:
            stage3.update_screen(objects)
            # physics.gravity(entity, objects)
            stand = 0
            wall_collision = physics.stutter(entity, objects)
            # Let the entity fall until physics.gravity reports a non-zero
            # "standing" value or the entity dies -- presumably by leaving the
            # screen; confirm against physics.gravity.
            while stand == 0 and entity.alive:
                stand = physics.gravity(entity, objects, stage3)
            entity.direction = None
            stage3.update_screen(objects)
            keys_pressed = pygame.key.get_pressed()
            player.movement_handle(keys_pressed, objects, stand, stage3, wall_collision)
            stage3.update_screen(objects)
        if not player.alive:
            running = False
    return player.alive
if __name__ == "__main__":
    stage()
| [
"kryvpaul@gmail.com"
] | kryvpaul@gmail.com |
df79b390cc42bfa1f4ab91ea70a0d961c8fa1131 | c4f0e34324d5a44895212c9c24c6ed57e62cf9f9 | /MinorProject/settings.py | 5d5b9498599232e03e6175b4075b4e1f0e074a09 | [] | no_license | Shinde-Hmn/MINDSHUFFLER | 7fce564d567200dea01319cebce15f3f9d405bbc | 0bf22df015d9e62e9a5622cea40f6915edf1b500 | refs/heads/master | 2022-04-26T20:10:31.581050 | 2020-04-30T16:57:09 | 2020-04-30T16:57:09 | 260,267,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,900 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable (like EMAIL_USER/EMAIL_PASS below) before
# any production deployment.
SECRET_KEY = '@w!k6l-+(6s^j8-gxsqyo(5^s5cns=(_ubf#xht=q31ts^f_(#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost','127.0.0.1' ]
# Application definition
INSTALLED_APPS = [
    'rest_framework',
    'blog.apps.BlogConfig',
    'users.apps.UsersConfig',
    #'django_leaderboard.apps.DjangoLeaderboardConfig',
    'crispy_forms',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # allauth
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    #providers
    'allauth.socialaccount.providers.facebook',
    'allauth.socialaccount.providers.google',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MinorProject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'MinorProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
# Site id for django.contrib.sites -- presumably the site registered for the
# allauth social providers above; verify against the admin's Sites table.
SITE_ID =2
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Mail credentials come from the environment, so they are not committed.
EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
| [
"himanshushinde1997812@gmail.com"
] | himanshushinde1997812@gmail.com |
aeff8452dda485b474d6f78dec9db2f3f258e6ff | 80d1f1f50b9f7f6be17302df7695154f54aa7a76 | /pdp8/core.py | 3691cb7deeb3ab2b53a7ab5d3ba6877bf6d1f838 | [
"MIT"
] | permissive | romilly/pdp8 | af7f7df0136b31df03a4f53b709869a59f25ca2c | 68e1025c5e4c6b6fa92a62cc81117d841c214137 | refs/heads/master | 2020-03-13T15:24:12.955602 | 2019-06-19T05:58:50 | 2019-06-19T05:58:50 | 131,176,107 | 4 | 1 | null | 2018-05-16T07:13:14 | 2018-04-26T15:31:01 | HTML | UTF-8 | Python | false | false | 7,983 | py | from io import StringIO
from pdp8.tracing import NullTracer
def octal(string):
return int(string, 8)
OPR_GROUP1 = octal('0400')
OPR_GROUP2 = octal('0001')
CLA1 = octal('0200')
CLL = octal('0100')
CMA = octal('0040')
CML = octal('0020')
RAR = octal('0010')
RAL = octal('0004')
RTR = octal('0012')
RTL = octal('0006')
IAC = octal('0001')
HALT = octal('0002')
BIT8 = octal('0010')
Z_BIT = octal('0200')
I_BIT = octal('0400')
class PDP8:
# TODO simplify these, use constants rather than calculating?
W_BITS = 12 # number of bits in a word
W_MASK = 2 ** W_BITS - 1 # word mask
OP_BITS = 3 # 3 bits in the opcode
V_BITS = 7 # 7 bits for the value part of an instruction
OP_MASK = (2 ** OP_BITS - 1) << W_BITS - OP_BITS
V_MASK = 2 ** V_BITS - 1 # mask for instruction data
MAX = 2 ** (V_BITS - 1)
def __init__(self):
self.memory = 2 ** self.W_BITS * [0]
self.pc = 0
self.accumulator = 0
self.link = 0
self.running = False
self.debugging = False
self.stepping = False
self.ia = None
self.instruction = None
self.tape = StringIO('')
self.READER1 = 0o03
self.PUNCH1 = 0o04
self.punchflag = 0
self.output = ''
self.tracer = None
self.ops = [self.andi,
self.tad,
self.isz,
self.dca,
self.jms,
self.jmp,
self.iot,
self.opr]
def __getitem__(self, address):
return self.memory[address] & self.W_MASK # only 12 bits retrieved
def is_group1(self):
return 0 == self.i_mask(OPR_GROUP1)
def i_mask(self, mask):
return self.instruction & mask
def is_iac(self):
return 0 != self.i_mask(IAC)
def is_group2(self):
return (not self.is_group1()) and 0 == self.i_mask(OPR_GROUP2)
# Group 2
def is_halt(self):
return self.i_mask(HALT)
def __setitem__(self, address, contents):
self.memory[address] = contents & self.W_MASK # only 12 bits stored
if self.debugging:
self.tracer.setting(address, contents)
def run(self, debugging=False, start=None, tape='', stepping=None, tracer=None):
self.running = True
if tracer is not None:
self.tracer = tracer
else:
if self.tracer is None:
self.tracer = NullTracer()
if start:
self.pc = start
# TODO: smarter tape creation to cope with text and binary tapes.
self.tape = StringIO(tape)
if stepping is not None:
self.stepping = stepping
self.debugging = debugging
while self.running:
self.execute()
if self.stepping:
self.running = False
def execute(self):
old_pc = self.pc # for debugging
self.instruction = self[self.pc]
self.ia = self.instruction_address()
op = self.opcode()
self.pc += 1
self.ops[op]()
if self.debugging:
self.tracer.instruction(old_pc, self.instruction, self.accumulator, self.link, self.pc)
def opcode(self):
bits = self.i_mask(self.OP_MASK)
code = bits >> self.W_BITS - self.OP_BITS
return code
def andi(self):
self.accumulator &= self[self.ia]
def tad(self):
self.add_12_bits(self[self.ia])
def add_12_bits(self, increment):
self.accumulator += increment
total = self.accumulator
self.accumulator &= octal('7777')
if self.accumulator == total:
self.link = 0
else:
self.link = 1
def isz(self):
contents = self[self.ia]
contents += 1
self[self.ia] = contents # forces 12-bit value
if self[self.ia] == 0:
self.pc += 1 # skip
def dca(self):
self[self.ia] = self.accumulator
self.accumulator = 0
def jmp(self):
self.pc = self.ia
def jms(self):
self[self.ia] = self.pc
self.pc = self.ia + 1
def iot(self):
device = (self.instruction & 0o0770) >> 3
io_op = self.instruction & 0o0007
if device == self.READER1:
self.reader(io_op)
elif device == self.PUNCH1:
self.punch(io_op)
else:
raise ValueError('uknown device')
def opr(self):
if self.is_group1():
self.group1()
return
if self.is_group2():
self.group2()
return
raise ValueError('Unknown opcode in instruction 0o%o at %d(%o)' % (self.instruction, self.pc-1, self.pc-1) )
def instruction_address(self):
o = self.i_mask(self.V_MASK)
if not self.i_mask(Z_BIT):
o += self.pc & 0o7600
if self.i_mask(I_BIT):
o = self[o]
return o
def cla(self):
self.accumulator = 0
def cll(self):
self.link = 0
def cma(self):
self.accumulator ^= 0o7777
def cml(self):
self.link = 1-self.link
def rr(self):
self.rar(0 < self.i_mask(2))
def rar(self, flag):
count = 2 if flag else 1
for i in range(count):
new_link = self.accumulator & 0o0001
self.accumulator = self.accumulator >> 1
if self.link:
self.accumulator |= 0o4000
self.link = new_link
def rl(self):
self.ral(self.i_mask(2))
def ral(self, flag):
count = 2 if flag else 1
for i in range(count):
new_link = 1 if self.accumulator & 0o4000 else 0
self.accumulator = 0o7777 & self.accumulator << 1
if self.link:
self.accumulator |= 0o0001
self.link = new_link
def iac(self):
self.add_12_bits(1)
def halt(self):
if self.debugging:
print('Halted')
self.tracer.halt(self.pc)
self.running = False
def group1(self):
for (mask, ins) in zip([ CLA1, CLL, CMA, CML, IAC, RAR, RAL],
[self.cla, self.cll, self.cma, self.cml, self.iac,self.rr, self.rl]):
if self.i_mask(mask):
ins()
def is_or_group(self):
return not self.i_mask(BIT8)
def is_and_group(self):
return self.i_mask(BIT8)
def group2(self):
if self.is_or_group() and (self.sma() or self.sza() or self.snl()):
self.pc += 1
if self.is_and_group() and self.spa() and self.sna() and self.szl():
self.pc += 1
if self.is_cla2():
self.cla()
if self.is_halt():
self.halt()
def sma(self):
return self.accumulator_is_negative() and (self.i_mask(octal('0100')))
def accumulator_is_negative(self):
return self.accumulator & octal('4000')
def sza(self):
return self.accumulator == 0 and (self.i_mask(octal('0040')))
def snl(self):
return self.link == 1 and (self.i_mask(octal('0020')))
def spa(self):
return self.accumulator_is_positive() or not (self.i_mask(octal('0100')))
def accumulator_is_positive(self):
return not self.accumulator_is_negative()
def sna(self):
return self.accumulator != 0 or not (self.i_mask(octal('0040')))
def szl(self):
return self.link == 0 or not (self.i_mask(octal('0020')))
def reader(self, io_op):
pass
def punch(self, io_op):
if (io_op & 1) and self.punchflag:
self.pc += 1
if io_op & 2:
self.punchflag = 0
if io_op & 4:
if self.accumulator != 0:
self.output += str(chr(self.accumulator))
self.punchflag = 1
def is_cla2(self):
return self.instruction & octal('0200')
| [
"romilly.cocking@gmail.com"
] | romilly.cocking@gmail.com |
7d471f712e0a1b1cc63bcd73298eac954a746430 | 2a3bd5db7f3db99b6b098ba569655a591b673aac | /10_Sony_Select_Service/Documents/02_Design/05_Module_Specification_Design/V5.6/05_BackupRestore/03_BackupRestoreAppListManager/source/conf.py | 8fbb8ae9311a6706e70f7a0820eeb30175fbcbdd | [] | no_license | imace/service-dev | 3c6ce201b51bbc481484ee83aa338ee2af31e67f | a2ee6b8bf844cde34b7f4b5915e2465d84e6258f | refs/heads/master | 2021-01-17T06:01:05.093541 | 2014-12-08T06:37:22 | 2014-12-08T06:37:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,323 | py | # -*- coding: utf-8 -*-
#
# Pro_Name documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 18 22:33:41 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PRD_DOC'
copyright = u'2012, Service_Dev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'zh_CN'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "Service_Dev"
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'basic'
html_style = 'adctheme.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
#"stickysidebar":"true",
#"sidebarbgcolor":"#eceae7",
#"sidebartextcolor":"#000000",
#"relbartextcolor":"black",
#"textcolor":"black",
#"headtextcolor":"black",
#"footertextcolor":"black",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u'SonySelect Service'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "Service Design"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "./images/select_icon.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "./images/select_icon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = u'Service_Dev'
# -- Options for LaTeX output --------------------------------------------------
# LaTeX output options. NOTE: the preamble is backslash-heavy LaTeX, so it is
# written as a *raw* string -- in a normal string literal the "\u" of
# \usepackage is parsed as a Unicode escape and is a SyntaxError on Python 3.
# (On Python 2 the unrecognised escapes happened to be preserved verbatim, so
# the raw string yields the exact same value.)
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Extra LaTeX placed in the preamble of every generated .tex file:
    # CJK font setup so Chinese text renders, plus first-line paragraph
    # indentation.
    'preamble': r'''
\usepackage{xeCJK}
\usepackage{indentfirst}
\setlength{\parindent}{2em}
\setCJKmainfont[BoldFont=SimHei, ItalicFont=KaiTi_GB2312]{SimSun}
\setCJKmonofont[Scale=0.9]{Droid Sans Mono}
\setCJKfamilyfont{song}[BoldFont=SimSun]{SimSun}
\setCJKfamilyfont{sf}[BoldFont=SimSun]{SimSun}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pro_Name.tex', u'Pro\\_Name Documentation',
u'ServiceDev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pro_name', u'Pro_Name Documentation',
[u'Robert Guo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pro_Name', u'Pro_Name Documentation',
u'ServiceDev', 'Pro_Name', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"devin.wei@sonymobile.com"
] | devin.wei@sonymobile.com |
def4e0c9060cbb6946a984f723129a8064a91715 | 16679038c7a0b75097ffdd2d5b6be28ae8dae68f | /test/utilities/test_catch_exceptions.py | 74b112680efb4b09be050e92f0ab2d6cc4bcdc2b | [
"MIT"
] | permissive | elifesciences/profiles | d98e5c2391630f9877e0585e07143d7904f1e777 | 9cd2e523f9dfa864891511e6525381f191951b24 | refs/heads/develop | 2023-08-31T03:09:08.723797 | 2023-08-25T06:54:55 | 2023-08-25T06:54:55 | 94,993,646 | 2 | 0 | MIT | 2023-06-21T01:15:37 | 2017-06-21T10:43:52 | Python | UTF-8 | Python | false | false | 959 | py | import logging
from logging import Handler, Logger, Manager
from logging.handlers import BufferingHandler
from pytest import fixture
from profiles.utilities import catch_exceptions
@fixture
def logger(handler: Handler) -> Logger:
    """A stand-alone DEBUG logger wired to the buffering handler.

    Giving it its own Manager detaches it from the global logging tree so the
    tests cannot interfere with (or be polluted by) other configured loggers.
    """
    test_logger = Logger('logger', logging.DEBUG)
    test_logger.addHandler(handler)
    test_logger.manager = Manager('root')
    return test_logger
@fixture
def handler() -> Handler:
    # BufferingHandler keeps up to 100 records in memory so tests can inspect
    # what was logged without touching stdout or files.
    return BufferingHandler(100)
def test_it_catches_and_logs_exceptions(logger: Logger, handler: BufferingHandler):
    """The decorator must swallow the exception, return None, and log once."""
    @catch_exceptions(logger)
    def my_function():
        raise Exception('My exception')
    outcome = my_function()
    assert outcome is None
    assert len(handler.buffer) == 1
def test_it_does_nothing_when_no_exception(logger: Logger, handler: BufferingHandler):
    """Without an exception the decorator is transparent and logs nothing."""
    @catch_exceptions(logger)
    def my_function():
        return True
    outcome = my_function()
    assert outcome is True
    assert len(handler.buffer) == 0
| [
"noreply@github.com"
] | elifesciences.noreply@github.com |
e47bd5ee0609619cf412769fd1666431740a27f0 | 4929b707e07efedcff122241f33ff05e25004d10 | /orders/migrations/0007_auto_20160411_1257.py | 2f2327ec6d0a1e617b5cd122315c69e90c712d4f | [
"MIT"
] | permissive | loafbaker/django_ecommerce2 | be06f00ae637dc9228ba3b5830f8ecef043922d0 | 7f1e95cf4082960861c3090ed47480eefb9593b9 | refs/heads/master | 2021-01-21T04:44:46.496191 | 2016-07-29T05:40:53 | 2016-07-29T05:41:44 | 53,693,350 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-11 12:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Order.transaction_id and extend the Order.status choice set
    (adds 'completed'); auto-generated by Django, do not reorder operations."""
    dependencies = [
        ('orders', '0006_usercheckout_braintree_id'),
    ]
    operations = [
        # New optional field to record the payment gateway transaction id.
        migrations.AddField(
            model_name='order',
            name='transaction_id',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(choices=[('created', 'Created'), ('paid', 'Paid'), ('shipped', 'Shipped'), ('refunded', 'Refunded'), ('completed', 'Completed')], default='created', max_length=120),
        ),
    ]
| [
"loafbaker@hotmail.com"
] | loafbaker@hotmail.com |
f9881fe5f317247e0325b463ffcc17864d748ccf | 882e97d3392f11692d8cae815439cd2609c3b96b | /StacksAndQueues/Stack.py | 9bf9a1a0179c83472be858b75092b24b8ce7ba3c | [] | no_license | springrolldippedinsoysauce/pythonshit | af0a48ee99838df35da4cdb9d1990ae41ccb07c4 | 340f9d9aab0f75914d3306694f73b821808702e2 | refs/heads/master | 2020-07-18T17:28:42.122560 | 2019-09-04T09:42:24 | 2019-09-04T09:42:24 | 206,284,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from StacksAndQueues import Exceptions as EXP
class Stack:
def __init__(self, capacity):
self._capacity = capacity
self._stack = []
self._count = 0
def get_count(self):
return self._count
def is_empty(self):
empty = (self._count == 0)
return empty
def is_full(self):
full = (self._count == self._capacity)
return full
def push(self, value):
if self.is_full() is True:
raise EXP.StackOverflowError("Stack is full!")
else:
self._stack.append(value)
self._count += 1
def pop(self):
top_val = self.top()
self._count -= 1
return top_val
def top(self):
if self.is_empty() is True:
raise EXP.StackUnderflowError("Stack is already empty!")
else:
top_val = self._stack[self._count - 1]
return top_val
| [
"noreply@github.com"
] | springrolldippedinsoysauce.noreply@github.com |
d193d711f2be24fe4204a34d2b1a3b14eda09afd | d40ab8694389d1a0d80013a2b0ecd8c426e6e8f8 | /graphs/scc.py | 5847ec8cbff74c175a28bd22a6d879601af33ceb | [] | no_license | lukebiggerstaff/Stanford-Algorithms-MOOC | b5b34c8d8ff7725461fd03bb3aac505c87a1012e | 382a30f27dff6ca9d30c071a4d3418ff6333f4c3 | refs/heads/master | 2021-01-01T17:02:26.883373 | 2017-11-14T19:06:14 | 2017-11-14T19:06:14 | 97,980,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import sys
import re
import resource
from collections import defaultdict
# Deep recursive DFS over a large graph needs both a bigger Python recursion
# limit and a bigger OS stack (resource.setrlimit is Unix-only).
sys.setrecursionlimit(10 ** 6)
resource.setrlimit(resource.RLIMIT_STACK, (2 ** 29, 2 ** 30))
def dfsfirstpass(graph):
    """Kosaraju pass 1: DFS from every vertex and return the vertices ordered
    by finishing time (earliest finisher at index 0, latest on top)."""
    visited = set()
    stack = list()
    for i in graph.keys():
        # Keys are already strings (see the edge parsing in __main__);
        # str() is a harmless normalisation.
        start = str(i)
        if start in graph:
            dfsfirstpassrecursive(graph, start, stack, visited)
    return stack
def dfsfirstpassrecursive(graph, start, stack, visited):
    """Recursive DFS helper for pass 1: explore start's subtree, then append
    `start` to `stack` post-order (i.e. in finishing-time order)."""
    if start not in visited:
        visited.add(start)
        if start in graph:
            for edge in graph[start]:
                if edge not in visited:
                    dfsfirstpassrecursive(graph, edge, stack, visited)
        stack.append(start)
def dfssecondpass(rgraph, stack):
    """Kosaraju pass 2: pop vertices in reverse finishing order and DFS each
    unvisited one; every DFS tree found this way is one SCC.

    Despite the parameter name, kosaraju() passes the *forward* graph here
    (pass 1 already ran on the reverse graph). Expects a defaultdict(list):
    `rgraph[start]` on a plain dict would raise KeyError for vertices with no
    outgoing edges. Returns {leader vertex: [members of its SCC]}.
    """
    visited = set()
    leaderlist = defaultdict(list)
    while stack:
        start = stack.pop()
        if start not in visited:
            visited.add(start)
            leader = start
            leaderlist[leader] += [start]
            for edge in set(rgraph[start]) - visited:
                dfsrecursive(rgraph, edge, visited, leaderlist, leader)
    return leaderlist
def dfsrecursive(graph, start, visited, leaderlist, leader):
    """Depth-first search from `start`, marking every reached vertex visited
    and recording it under `leader` in `leaderlist`."""
    visited.add(start)
    leaderlist[leader].append(start)
    unvisited_neighbours = set(graph[start]) - visited
    for neighbour in unvisited_neighbours:
        dfsrecursive(graph, neighbour, visited, leaderlist, leader)
def return_top_five_scc(leaderlist):
    """Return the sizes of the five largest SCCs, in ascending order.

    If fewer than five components exist, all sizes are returned (the slice
    handles the short case, including an empty mapping).
    """
    # Idiomatic rewrite of the original append-loop + in-place sort.
    return sorted(len(members) for members in leaderlist.values())[-5:]
def kosaraju(graph, rgraph):
    """Kosaraju's two-pass SCC algorithm.

    graph/rgraph are the forward and reversed edge maps; returns the sizes of
    the five largest strongly connected components (ascending).
    """
    # Pass 1: DFS on the reversed graph to order vertices by finishing time.
    stack = dfsfirstpass(rgraph)
    #print(f'stack is {stack}')
    # Pass 2: DFS on the forward graph in reverse finishing order; each DFS
    # tree is one SCC, keyed by its leader vertex.
    leaderdict = dfssecondpass(graph, stack)
    #print(f'graph is {graph}\n'
          #f'leader is {leaderdict}\n')
    top5 = return_top_five_scc(leaderdict)
    return top5
if __name__ == '__main__':
    graph = defaultdict(list)
    rgraph = defaultdict(list)
    # Each input line holds one edge "tail head"; build the forward graph and
    # its reverse in a single pass. The edge-list file path comes from argv.
    with open(sys.argv[1]) as f:
        for line in f:
            line_lst = re.findall(r'(\d+|\w+)',line)
            graph[line_lst[0]] += [line_lst[1]]
            rgraph[line_lst[1]] += [line_lst[0]]
    print(kosaraju(graph,rgraph))
| [
"luke.biggerstaff@gmail.com"
] | luke.biggerstaff@gmail.com |
9cf3d784947858edaf604e6c180fff1007ff9612 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/request/AlipayExscUserFirstsignGetRequest.py | e639edfe18817ae9d75d814b5ee4329e27910ec7 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 3,664 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayExscUserFirstsignGetRequest(object):
    """Request object for the `alipay.exsc.user.firstsign.get` OpenAPI method.

    Auto-generated SDK boilerplate: plain property holders for the common
    gateway parameters plus get_params(), which flattens them into the dict
    sent to the gateway.
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._alipay_id = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        # Flag presumably consumed by the API client when encrypting the
        # request body -- confirm against the client implementation.
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def alipay_id(self):
        return self._alipay_id
    @alipay_id.setter
    def alipay_id(self, value):
        self._alipay_id = value
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignores non-dict values (generated behaviour, kept as-is).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        # Lazily create the user-defined parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Flatten the configured fields into the gateway parameter dict.

        Objects exposing to_alipay_dict() are serialised to compact JSON;
        falsy fields are omitted entirely.
        """
        params = dict()
        params[P_METHOD] = 'alipay.exsc.user.firstsign.get'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.alipay_id:
            if hasattr(self.alipay_id, 'to_alipay_dict'):
                params['alipay_id'] = json.dumps(obj=self.alipay_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['alipay_id'] = self.alipay_id
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        # This API has no file-upload parameters; always empty.
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
e0ec0a0e6219b4c6d9bf344fbe2feece9e5e101f | b034c20e2cc4bb480060c2a2a3db78ad8c760818 | /3.7-predicting-house-prices.py | a7d57877dd4f4e3feb808ac138229e876cdf5987 | [] | no_license | practice-more/try-keras | e84c61d396d34f73743cbb40b8a105d1dbb72986 | 4921c0c38e04dada327d994d3a1b531a7d032cd8 | refs/heads/master | 2020-09-05T03:25:14.372434 | 2019-11-15T10:14:34 | 2019-11-15T10:14:34 | 219,967,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | from keras.datasets import boston_housing
from keras import models
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
print(train_data.shape)
print(train_data[0,:])
# train data is value of the following classes
#1. Per capita crime rate.
#2. Proportion of residential land zoned for lots over 25,000 square feet.
#3. Proportion of non-retail business acres per town.
#4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
#5. Nitric oxides concentration (parts per 10 million).
#6. Average number of rooms per dwelling.
#7. Proportion of owner-occupied units built prior to 1940.
#8. Weighted distances to five Boston employment centres.
#9. Index of accessibility to radial highways.
#10. Full-value property-tax rate per $10,000.
#11. Pupil-teacher ratio by town.
#12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town.
#13. % lower status of the population.
# train target is the price
# Standardize each feature to zero mean / unit variance using statistics
# computed on the TRAINING set only; the test set is normalized with the
# same mean/std to avoid information leakage.
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
def build_model():
    """Create and compile a fresh regression network.

    Two 64-unit ReLU hidden layers feed a single linear output unit
    (scalar house-price prediction).  Compiled with RMSprop on MSE,
    tracking MAE as a metric.  A new model is built per k-fold split.
    """
    network = models.Sequential()
    network.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1], )))
    network.add(layers.Dense(64, activation='relu'))
    network.add(layers.Dense(1))
    network.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return network
# use k-fold validation
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
all_mae_history = []
for i in range(k):
    print("process folder #", i)
    # Fold i becomes the validation slice; everything else is training data.
    val_data = train_data[i * num_val_samples: (i+1) * num_val_samples]
    val_target = train_targets[i * num_val_samples: (i+1) * num_val_samples]
    partial_train_data = np.concatenate(
        [
            train_data[:i * num_val_samples],
            train_data[(i+1) * num_val_samples:]
        ],
        axis=0
    )
    partial_train_targets = np.concatenate(
        [
            train_targets[:i * num_val_samples],
            train_targets[(i+1) * num_val_samples:]
        ],
        axis=0
    )
    # Fresh (untrained) model for every fold so folds are independent.
    model = build_model()
    history = model.fit(
        partial_train_data, partial_train_targets,
        validation_data=(val_data, val_target),
        epochs=num_epochs, batch_size=1, verbose=0)
    # Per-epoch validation MAE for this fold.
    mae_history = history.history['val_mae']
    all_mae_history.append(mae_history)
    #val_mes, val_mae = model.evaluate(val_data, val_target, verbose=0)
    #all_scores.append(val_mae)
# Average the per-epoch validation MAE across all k folds.
average_mae_history = [
    np.mean([x[i] for x in all_mae_history]) for i in range(num_epochs)
]
def smooth_curve(points, factor=0.9):
    """Exponentially smooth a sequence of values.

    The first value is kept as-is; each later value is blended with the
    previous *smoothed* value: ``prev * factor + value * (1 - factor)``.
    Returns a new list; the input is not modified.
    """
    smoothed = []
    prev = None
    for value in points:
        if prev is None:
            prev = value
        else:
            prev = prev * factor + value * (1 - factor)
        smoothed.append(prev)
    return smoothed
# Drop the first 10 noisy epochs, smooth the curve, then plot
# validation MAE versus epoch number.
average_mae_history = smooth_curve(average_mae_history[10:])
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel("Epochs")
plt.ylabel("Validation MAE")
plt.show()
| [
"btan@esri.com"
] | btan@esri.com |
ff0f2048594d475b598b860002a8ba4d6b8f0047 | e1e59ba5f60886be8b1f587f274d4fe177982e35 | /familiarize_project/apps/users_app/urls.py | c0a27ca2522a0c48a2afd59e929931634f7d45cf | [] | no_license | betalantz/CodingDojo_LearningProjects | 0b2d82a7e32e8e22a07de79d50a46a61bb8fbeca | 4188a0c1df42da3ce5d29f2bdeb92865ea2c6cd1 | refs/heads/master | 2021-01-01T19:26:16.879191 | 2019-04-23T00:48:12 | 2019-04-23T00:48:12 | 98,582,797 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | """familiarize_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^users/$', views.index),  # list all users
    url(r'^users/new/$', views.register),  # registration form
    # NOTE(review): /users/new/ and /register/ both map to the same view;
    # confirm that having two routes is intentional.
    url(r'^register/$', views.register),
    url(r'^login/$', views.login),
] | [
"noreply@github.com"
] | betalantz.noreply@github.com |
ca7db0a3b82cc1c33311d1481cc649686e98ba8e | b8c8af33c8a1a7130c62b42cf22f2eeaf3d81427 | /譌ァ/PUBLIC.py | ea18217cffe68043903b90b91aad46320da0bc0b | [] | no_license | natsukaze4545/nnnn | 57e7a020373569d3ccf26e2e86e210c7aa4d3182 | d0dc58d1fb8981c625aed204aa865992114e0144 | refs/heads/master | 2020-03-22T08:44:38.124591 | 2018-07-05T03:41:02 | 2018-07-05T03:41:02 | 139,787,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,402 | py | #SRSU LINE SELF BOT v7.0.3
# -*- coding: utf-8 -*-
import LAG
from LAG.lib.Gen.ttypes import *
from datetime import datetime
from LAG.lib.Gen.ttypes import *
from datetime import datetime
from LAPI.main import qr
from threading import Thread
import time,random,sys,re,os,json,subprocess,codecs,threading,glob,requests,string
##############################################
cl = LAG.LINE()
# SECURITY: a live session token is hard-coded and committed to source
# control.  It should be revoked and loaded from an environment variable
# or a config file that is excluded from version control.
cl.login(token="Eq0xkbFlQkZuAuJZ9j70.3So3iW8wB5sEssSoxCD4ea.MRWAvyDMfS7ZWlOAMr9je7YP48464T4J80Xc/OOwWLg=")
cl.loginResult()
##############################################
reload(sys)
sys.setdefaultencoding('utf-8')
##############################################
helpMessage ="""PUBLIC BOT
help ...コマンド一覧を送信します
test ...動いているか確認します
speed ...bot speed
mid ...midを送信します
csend: ...midから連絡先を送信します
ginfo ...グループ情報を送信します
g: ...googleで検索します
search: ...検索
srsu.weebly.com"""
##############################################
mid = cl.getProfile().mid
profile = cl.getProfile()
##############################################
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = {}
setTime = wait2['setTime']
##############################################
def Cmd(string, commands):
    """Return True when *string* matches a prefixed command.

    Each entry of *commands* is tried with every recognised prefix
    prepended; only the empty prefix is currently enabled (extend
    ``prefixes`` to support forms such as ``/XXX`` or ``>XXX``).
    """
    prefixes = [""]
    return any(string == prefix + command
               for prefix in prefixes
               for command in commands)
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # Build a LINE Message addressed to *to* and count it in messageReq.
    # NOTE(review): the constructed message is never actually sent here,
    # and ``messageReq`` is not defined anywhere in this module, so calling
    # this function as-is raises NameError.  Verify against the original
    # project before relying on it.
    # NOTE(review): the mutable default ``contentMetadata={}`` is shared
    # across calls; callers should pass their own dict.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def summon(to, nama):
    # Mention every mid in *nama* in a single message sent to chat *to*.
    # ``aa`` accumulates the JSON MENTIONEES entries (start/end offsets of
    # each "@x" placeholder in the text); ``bb`` accumulates one
    # box-drawing line per member.  Offsets advance by 6/4 per entry to
    # track the fixed-width "@x" rows built below.
    aa = ""
    bb = ""
    strt = int(14)
    akh = int(14)
    nm = nama
    for mm in nm:
        akh = akh + 2
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        strt = strt + 6
        akh = akh + 4
        bb += "\xe2\x95\xa0 @x \n"
    # Drop the trailing comma from the JSON fragment list.
    aa = (aa[:int(len(aa)-1)])
    x = Message()
    x.to = to
    x.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
    x.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    try:
        cl.sendMessage(x)
    except Exception as error:
        print error
##############################################
def bot(op):
  """Handle one fetched LINE operation.

  op.type 26 is an incoming message: plain-text commands (.help, .test,
  .speed, .mid, .csend:, .ginfo, search helpers, read-point tracking in
  Japanese) are dispatched below.  op.type 55 records who has read past
  the stored read point.  All errors are swallowed and printed.
  """
  try:
    if op.type == 0:
      return
    if op.type == 26:
      x = op.message
      if x.contentType == 13:
        return
      if x.contentType == 0:
        if x.text == None:
          return
### command ##################################
        elif x.text.lower() == '.help':
          cl.sendText(x.to,helpMessage)
        elif x.text in [".test"]:
          cl.sendText(x.to,"ok")
        elif x.text in [".speed"]:
          # Round-trip time of one sendText call, reported back to the chat.
          start = time.time()
          cl.sendText(x.to, "...")
          elapsed_time = time.time() - start
          cl.sendText(x.to, "%s" % (elapsed_time))
        elif x.text in [".version"]:
          cl.sendText(x.to,"PUBLIC")
##############################################
        elif x.text in [".mid"]:
          cl.sendText(x.to,mid)
        elif ".csend:" in x.text:
          # Re-send the incoming message as a contact card for the given mid.
          mmid = x.text.replace("csend:","")
          x.contentType = 13
          x.contentMetadata = {"mid":mmid}
          cl.sendMessage(x)
##############################################
        elif ".ginfo" == x.text:
          if x.toType == 2:
            g = cl.getGroup(x.to)
            try:
              gCreator = g.creator.displayName
            except:
              gCreator = "不明"
            if g.invitee is None:
              sinvitee = "0"
            else:
              sinvitee = str(len(g.invitee))
            if g.preventJoinByTicket == True:
              QR = "拒否"
            else:
              QR = "許可"
            cl.sendText(x.to,"グループ名 : " + str(g.name) + "\ngid : " + x.to + "\n作成者 : " + gCreator + "\n\n招待URL :" + QR + "\nグループ画像 : http://dl.profile.line.naver.jp/" + g.pictureStatus + "\nメンバー数 : " + str(len(g.members)) + "\n招待数 : " + sinvitee)
        # Mention all members (fallback for when the above is unusable) /allmention
        elif x.text in ["/all mention"]:
          # Split the member list into chunks of <=100 mids because one
          # message can only mention a limited number of members.
          group = cl.getGroup(x.to)
          nama = [contact.mid for contact in group.members]
          nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
          if jml <= 100:
            summon(x.to, nama)
          if jml > 100 and jml < 200:
            for i in range(0, 99):
              nm1 += [nama[i]]
            summon(x.to, nm1)
            for j in range(100, len(nama)-1):
              nm2 += [nama[j]]
            summon(x.to, nm2)
          if jml > 200 and jml < 500:
            for i in range(0, 99):
              nm1 += [nama[i]]
            summon(x.to, nm1)
            for j in range(100, 199):
              nm2 += [nama[j]]
            summon(x.to, nm2)
            for k in range(200, 299):
              nm3 += [nama[k]]
            summon(x.to, nm3)
            for l in range(300, 399):
              nm4 += [nama[l]]
            summon(x.to, nm4)
            for m in range(400, len(nama)-1):
              nm5 += [nama[m]]
            summon(x.to, nm5)
          if jml > 500:
            print "Terlalu Banyak Men 500+"
          cnt = Message()
          cnt.text = "メンションした人数:\n" + str(jml) + " 人"
          cnt.to = x.to
          cl.sendMessage(cnt)
##############################################
        elif ".g:" in x.text:
          s = x.text.replace("g:","")
          cl.sendText(x.to,"google.com/search?q=" + s.replace("g:",""))
        elif ".search:" in x.text:
          # Build one reply containing search URLs for many Japanese engines.
          s = x.text.replace("search:","")
          cl.sendText(x.to,"\ngoogle : " + "google.com/search?q=" + s.replace("search:","") + "\ngoo : " + "search.goo.ne.jp/web.jsp?MT=" + s.replace("search:","") + "&mode=0&sbd=goo001&IE=UTF-8&OE=UTF-8&from=gootop&PT=TOP" + "\nbing : " + "bing.com/search?scope=web&q=" + s.replace("search:","") + "\nexcite : " + "websearch.excite.co.jp/?q=" + s.replace("search:","") + "\nnever : " + "matome.naver.jp/search?q=" + s.replace("search:","") + "\n楽天 : " + "websearch.rakuten.co.jp/WebIS?col=OW&svx=100610&nc=1&lg=all&svp=SEEK&enc=UTF-8&qt=" + s.replace("search:","") + "\nBIGLOBE : " + "cgi.search.biglobe.ne.jp/cgi-bin/search2-b?search=検索&q=" + s.replace("search:","") + "\nlivedoor : " + "search.livedoor.com/search?ie=utf-8&q=%E6%A4%9C%E7%B4%A2&search_btn=1" + s.replace("search:","") + "\nnifty : " + "search.nifty.com/websearch/search?select=2&ss=nifty_top_tp&cflg=検索&q=" + s.replace("search:","") + "\nyahoo : " + "search.yahoo.co.jp/search;_ylt=A2RA5ONbn7NaBjEAmGCJBtF7?p=" + s.replace("search:",""))
#-------------------------------------------------------------------------------#lastseen
        elif x.text == "ぽいんと":
          # Set a read point: remember this message id and start tracking
          # who reads past it (state kept in the module-level wait2 dict).
          cl.sendText(x.to, "既読ポイントを設定しました。確認したい場合は「ちぇっく」と送信してください。")
          try:
            del wait2['readPoint'][x.to]
            del wait2['readMember'][x.to]
          except:
            pass
          now2 = datetime.now()
          wait2['readPoint'][x.to] = x.id
          wait2['readMember'][x.to] = ""
          wait2['setTime'][x.to] = datetime.strftime(now2,"%H:%M")
          wait2['ROM'][x.to] = {}
          print wait2
        elif x.text == "ちぇっく":
          # Report who has / has not read since the stored read point.
          if x.to in wait2['readPoint']:
            if wait2["ROM"][x.to].items() == []:
              chiya = ""
            else:
              chiya = ""
              for rom in wait2["ROM"][x.to].items():
                print rom
                chiya += rom[1] + "\n"
            cl.sendText(x.to,"きどく %s\nきどくむし\n%s\nせっていじこく:\n[%s]" % (wait2['readMember'][x.to],chiya,setTime[x.to]))
          else:
            cl.sendText(x.to,"既読ポイントが設定されていません。「ぽいんと」と送信して既読ポイントを設定してください")
#-------------------------------------------------------#search
    if op.type == 55:
      # Read-receipt notification: record the reader's display name once.
      if op.param1 in wait2['readPoint']:
        Name = cl.getContact(op.param2).displayName
        if Name in wait2['readMember'][op.param1]:
          pass
        else:
          wait2['readMember'][op.param1] += "\n・" + Name
          wait2['ROM'][op.param1][op.param2] = "・" + Name
      else:
        cl.sendText
    if op.type == 59:
      print op
  except Exception as error:
    print error
# Main polling loop: fetch up to 5 pending operations at a time, advance
# the local revision counter, and dispatch each operation to bot().
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Keep the highest revision seen so already-handled operations
            # are not fetched again on the next poll.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
"114514kusai@usako.net"
] | 114514kusai@usako.net |
5a6ea935188b2c941b1c2a18e79fce894cd1916c | ab77b9ee5bed093016ec44ab2b47e055f7f1e0d1 | /Python/mysql/full_friends/bin/pip2.7 | d99268cba2cf8698d7f0b6eceef55b75de7ba898 | [] | no_license | minokuchi/coding_dojo | f07737642228ca069e82894496ff2c0aed1f735f | 0f6d8262c093dc9c6f3825667df5e67b3810c80d | refs/heads/master | 2021-09-07T04:59:10.116505 | 2018-02-17T17:41:51 | 2018-02-17T17:41:51 | 109,316,639 | 0 | 0 | null | 2018-02-17T17:41:52 | 2017-11-02T20:41:00 | Python | UTF-8 | Python | false | false | 275 | 7 | #!/Users/marki/Desktop/Coding_Dojo/DojoAssignments/Python/mysql/full_friends/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix from argv[0] so pip
    # reports its own name correctly, then delegate to pip's main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"minokuchi@gmail.com"
] | minokuchi@gmail.com |
a655da1503c403cc2cfe9a6e9f8fb672253bd71f | a7efab7b8555d75349917f9a23368a87e9d98726 | /venv/Scripts/pip-script.py | 1d65201c8efab4a9201514b89c1303150ae235b0 | [] | no_license | text007/python | 0779d43c9e2a235939117a8d86901a1d9878bf0d | 9c39f8876953ad7d2065e06c96ceb28b1bb43085 | refs/heads/master | 2022-11-24T13:32:48.217181 | 2020-07-24T02:45:06 | 2020-07-24T02:45:06 | 265,194,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #!F:\python2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" wrapper suffix from argv[0], then run
    # the console-script entry point registered for pip 19.0.3.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"604573679@qq.com"
] | 604573679@qq.com |
e08c3ff7f96aee19729d67accd05c2b3f7ffbdf7 | 49fb950bafeeac754107a6c962f8ea1a1b5212d2 | /app/database.py | 98872f91a5824945704474bf2c4430fc9e34d0fc | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | VOC-Electronics/WiFiDomo_Manager | 5a50fcaf59e88172dc15b478b06deca3e35feee3 | b7f319fa0d8f5c7328948a40962cbed6abffcf25 | refs/heads/master | 2023-08-10T09:02:17.785967 | 2016-08-30T09:40:30 | 2016-08-30T09:40:30 | 60,178,446 | 0 | 1 | null | 2016-08-30T09:40:30 | 2016-06-01T13:17:25 | Python | UTF-8 | Python | false | false | 7,259 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Martijn van Leeuwen'
__email__ = 'info@voc-electronics.com'
'''
# =[ DISCLAIMER ]===============================================================
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ==============================================================================
#
# Todo:
#
# Get data from input parameters.
# Check connections
# Login
# Update
# ==============================================================================
# Imports
# ==============================================================================
'''
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String, DateTime, Boolean, \
ForeignKey, event
from sqlalchemy.orm import scoped_session, sessionmaker, backref, relation
from sqlalchemy.ext.declarative import declarative_base
from werkzeug import cached_property, http_date
from flask import url_for, Markup
from app.wifidomo_manager import app, db
from modules.config import config
# ===========================================================================
# Global settings
# ===========================================================================
engine = create_engine(app.config[ 'DATABASE_URI' ],
convert_unicode=True,
**app.config[ 'DATABASE_CONNECT_OPTIONS' ])
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
def init_db():
    # Create all tables declared on the shared declarative Base (defined
    # just below at module level; it exists by the time this is called).
    Base.metadata.create_all(bind=engine)
Base = declarative_base(name='Base')
Base.query = db_session.query_property()
class Person(Base):
    """A registered user, mapped to the ``person`` table.

    Provides the Flask-Login style helpers (``is_active``, ``get_id``,
    ``is_authenticated``, ``is_anonymous``) so instances can be used
    directly as login users.
    """
    __tablename__ = 'person'
    # Here we define columns for the table person
    # Notice that each column is also a normal Python instance attribute.
    id = Column(Integer, primary_key=True)
    surname = Column(String, nullable=False)
    lastname = Column(String, nullable=False)
    fullname = Column(String, nullable=False)
    loginid = Column(String, nullable=True)
    password = Column(String, nullable=False)
    email = Column(String, nullable=True)
    created = Column(DateTime,
                     default=datetime.utcnow,
                     onupdate=datetime.utcnow)
    updated_on = Column(DateTime,
                        default=datetime.utcnow,
                        onupdate=datetime.utcnow)
    # authenticated = Column(db.Boolean, default=False)

    def __init__(self, surname, lastname, fullname, password):
        self.fullname = fullname
        self.surname = surname
        self.lastname = lastname
        self.created = datetime.utcnow()
        self.password = password

    def __repr__(self):
        # BUG FIX: Person has no ``name`` attribute (only surname/lastname/
        # fullname); the original raised AttributeError here.
        return '<User %r>' % (self.fullname)

    def is_active(self):
        """True, as all users are active."""
        return True

    def get_id(self):
        """Return the email address to satisfy Flask-Login's requirements."""
        return self.email

    def get_email(self):
        return self.email

    def get_lastname(self):
        return self.lastname

    def get_surname(self):
        return self.surname

    def is_authenticated(self):
        """Return True if the user is authenticated.

        NOTE(review): ``self.authenticated`` is never assigned anywhere
        (the column above is commented out), so calling this raises
        AttributeError -- confirm the intended behaviour before use.
        """
        return self.authenticated

    def is_anonymous(self):
        """False, as anonymous users aren't supported."""
        return False
class WiFiDomo(Base):
    """A WiFiDomo RGB device, mapped to the ``wifidomo`` table.

    Stores network identity (MAC, IPv4/IPv6, port, FQDN), a location
    reference, on/off state and the last RGB values / preset used.
    """
    __tablename__ = 'wifidomo'
    id = Column(Integer, primary_key=True)
    name = Column(String, index=True, nullable=True)
    MAC = Column(String, nullable=True)
    locationid = Column(Integer, nullable=True)
    ip4 = Column(String(16), nullable=True)
    ip6 = Column(String, nullable=True)
    port = Column(Integer, nullable=True, default=80)
    fqdn = Column(String, nullable=True)
    status = Column(Boolean, default=False)
    powerstatus = Column(Boolean, default=False)
    last_used_r = Column(Integer)
    last_used_g = Column(Integer)
    last_used_b = Column(Integer)
    last_used_preset = Column(Integer)
    created = Column(DateTime,
                     default=datetime.utcnow,
                     onupdate=datetime.utcnow)
    updated_on = Column(DateTime,
                        default=datetime.utcnow,
                        onupdate=datetime.utcnow)

    def __init__(self, name, MAC, location_id, fqdn, status, ip4, ip6=0, port=80):
        self.name = name
        self.MAC = MAC
        self.locationid = location_id
        self.fqdn = fqdn
        self.ip4 = ip4
        self.ip6 = ip6
        self.port = port
        self.status = status
        self.powerstatus = False
        self.created = datetime.utcnow()
        self.updated_on = datetime.utcnow()

    def to_json(self):
        """Return a JSON-serialisable dict view of this device."""
        # BUG FIX: the original read ``self.last_used_rgb``, an attribute
        # that does not exist (the columns are last_used_r/g/b), so every
        # call raised AttributeError.  Keep the key but build the value
        # from the three real columns.
        return dict(name=self.name, MAC=self.MAC, status=self.status,
                    powerstatus=self.powerstatus, fqdn=self.fqdn,
                    ip4=self.ip4, ip6=self.ip6, port=self.port,
                    last_used_rgb=(self.last_used_r, self.last_used_g, self.last_used_b))

    @cached_property
    def count(self):
        # NOTE(review): ``self.wifidomo`` is not defined anywhere on this
        # model, so accessing this property raises AttributeError -- it
        # looks like a leftover from a relationship; verify before use.
        return self.wifidomo.count()
class WiFiNetworks(Base):
    """A known Wi-Fi network: SSID plus a human-readable location."""
    __tablename__ = 'wifinetworks'
    id = Column(Integer, primary_key=True)
    wifi_sid = Column(String(128), nullable=True)
    wifi_loc = Column(String(128), nullable=True)
    created = Column(DateTime)
    def __init__(self, wifi_sid, wifi_loc):
        self.wifi_sid = wifi_sid
        self.wifi_loc = wifi_loc
        self.created = datetime.utcnow()
    def to_json(self):
        # JSON-serialisable view (id and created are omitted).
        return dict(wifi_sid=self.wifi_sid, wifi_loc=self.wifi_loc)
class Locations(Base):
    """A named physical location where WiFiDomo devices can be placed."""
    __tablename__ = 'locations'
    id = Column(Integer, primary_key=True)
    location_name = Column(String, unique=True, nullable=False)
    location_code = Column(Integer, default=0)
    location_description = Column(String, nullable=True)
    created = Column(DateTime)
    def __init__(self, location_name, location_code, location_description):
        self.location_name = location_name
        self.location_code = location_code
        self.location_description = location_description
        self.created = datetime.utcnow()
    def to_json(self):
        # JSON-serialisable view; note the keys are shortened
        # (name/code/description) relative to the column names.
        return dict(name=self.location_name, code=self.location_code, description=self.location_description)
class Loginlog(Base):
    """Audit record of a login event (who and when)."""
    __tablename__ = 'loginlog'
    id = Column(Integer, primary_key=True)
    loginby = Column(String, nullable=True)
    logindate = Column(DateTime)
class Preset(Base):
    """A named RGB colour preset (one integer per channel)."""
    __tablename__ = 'preset'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    r_code = Column(Integer, nullable=False)
    g_code = Column(Integer, nullable=False)
    b_code = Column(Integer, nullable=False)
    created = Column(DateTime)
    def __init__(self, name, r_code, g_code, b_code):
        self.name = name
        self.r_code = r_code
        self.g_code = g_code
        self.b_code = b_code
        self.created = datetime.utcnow()
    def to_json(self):
        # JSON-serialisable view (id and created are omitted).
        return dict(name=self.name, r_code=self.r_code, g_code=self.g_code, b_code=self.b_code)
class Pattern(Base):
    """A named light pattern (name only; no parameters stored yet)."""
    __tablename__ = 'pattern'
    id = Column(Integer, primary_key=True)
    name = Column(String(200), nullable=False)
    def __init__(self, name):
        self.name = name
| [
"info@voc-electronics.com"
] | info@voc-electronics.com |
91c8298a8f35841bf72996c47795505cf4afd03a | 65c001b5f572a6b0ca09dd9821016d628b745009 | /frappe-bench/env/lib/python2.7/site-packages/watchdog/observers/polling.py | 3039ceb3678ce611aeccc6d88d0586c3f632a5e2 | [
"MIT"
] | permissive | ibrahmm22/library-management | 666dffebdef1333db122c2a4a99286e7c174c518 | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | refs/heads/master | 2022-10-30T17:53:01.238240 | 2020-06-11T18:36:41 | 2020-06-11T18:36:41 | 271,620,992 | 0 | 1 | MIT | 2022-10-23T05:04:57 | 2020-06-11T18:36:21 | CSS | UTF-8 | Python | false | false | 4,687 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
from __future__ import with_statement
import os
import threading
from functools import partial
from watchdog.utils import stat as default_stat
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent
)
class PollingEmitter(EventEmitter):
    """
    Platform-independent emitter that polls a directory to detect file
    system changes.
    """
    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
                 stat=default_stat, listdir=os.listdir):
        EventEmitter.__init__(self, event_queue, watch, timeout)
        # Previous DirectorySnapshot; None until the first queue_events call.
        self._snapshot = None
        self._lock = threading.Lock()
        # Factory bound to the injected stat/listdir so tests or virtual
        # filesystems can substitute their own implementations.
        self._take_snapshot = lambda: DirectorySnapshot(
            self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
    def queue_events(self, timeout):
        # Lazily take the baseline snapshot on the first call.
        if not self._snapshot:
            self._snapshot = self._take_snapshot()
        # We don't want to hit the disk continuously.
        # timeout behaves like an interval for polling emitters.
        if self.stopped_event.wait(timeout):
            return
        with self._lock:
            if not self.should_keep_running():
                return
            # Get event diff between fresh snapshot and previous snapshot.
            # Update snapshot.
            new_snapshot = self._take_snapshot()
            events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
            self._snapshot = new_snapshot
            # Files.
            for src_path in events.files_deleted:
                self.queue_event(FileDeletedEvent(src_path))
            for src_path in events.files_modified:
                self.queue_event(FileModifiedEvent(src_path))
            for src_path in events.files_created:
                self.queue_event(FileCreatedEvent(src_path))
            for src_path, dest_path in events.files_moved:
                self.queue_event(FileMovedEvent(src_path, dest_path))
            # Directories.
            for src_path in events.dirs_deleted:
                self.queue_event(DirDeletedEvent(src_path))
            for src_path in events.dirs_modified:
                self.queue_event(DirModifiedEvent(src_path))
            for src_path in events.dirs_created:
                self.queue_event(DirCreatedEvent(src_path))
            for src_path, dest_path in events.dirs_moved:
                self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
    """
    Platform-independent observer that polls a directory to detect file
    system changes.
    """
    def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
        # timeout doubles as the polling interval of the PollingEmitter.
        BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
    """
    File system independent observer that polls a directory to detect changes.
    """
    def __init__(self, stat, listdir, polling_interval=1):
        """
        :param stat: stat function. See ``os.stat`` for details.
        :param listdir: listdir function. See ``os.listdir`` for details.
        :type polling_interval: float
        :param polling_interval: interval in seconds between polling the file system.
        """
        # Bind the caller-supplied stat/listdir into the emitter factory so
        # every emitter created by this observer uses the virtual FS hooks.
        emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
        BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
| [
"iabouelftouh@trudoc24x7.com"
] | iabouelftouh@trudoc24x7.com |
a7ca9c17124ca0830796ae2a129f45c0f8d02b13 | 458495651f84b6ef033d084fa6caad15c2154077 | /book/views.py | 8b57fc286b811c4742788b028b40bfdc25d4fead | [] | no_license | waldemarantypov/sprint17_django_Forms | 85c5333d4f39ed998c844b0056193ad0fc20f84a | 15061b558a4b5e385d6a538ff89d664f6255223c | refs/heads/main | 2023-04-04T21:53:17.876812 | 2021-04-04T14:52:44 | 2021-04-04T14:52:44 | 353,795,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from django.shortcuts import render
from django.http import HttpResponse
from book.models import Book
# Create your views here.
def show_books(request):
    """Render the landing page with three randomly selected books."""
    picked = Book.objects.all().order_by('?')[:3]
    return render(request, 'index.html', {"allBooks": picked})
| [
"waldemar.antypov@gmail.com"
] | waldemar.antypov@gmail.com |
8180cf64858edff78040580e57e99bca34304e13 | bd649f51496a24a55a2327e658f31d6e03e2f602 | /InvTL/lm_py/py/bin/py.which | f2b30cfff3ced4c92ec7655ae03a05a1f0b7818b | [
"MIT"
] | permissive | mickg10/DARLAB | 6507530231f749e8fc1647f3a9bec22a20bebe46 | 0cd8d094fcaf60a48a3b32f15e836fcb48d93e74 | refs/heads/master | 2020-04-15T20:39:23.403215 | 2019-01-10T06:54:50 | 2019-01-10T06:54:50 | 16,510,433 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 65 | which | #!/usr/bin/env python
from _findpy import py
# Thin wrapper: delegate straight to the py library's "pywhich" CLI.
py.cmdline.pywhich()
"root@darlab1.mickg.net"
] | root@darlab1.mickg.net |
13e0e56617747cebd4d1b90a7eb5a82697e973ae | b32224af384173f41732539dd216d9312ffb521e | /rabbitmq/routing/emit_log_direct.py | 1d732cbd9bff728a0821c952e9d5dfd296bf7fc0 | [] | no_license | 4179e1/demo | 2140e63cdecb0f5df4d5d435f482c3da49d63cfa | 8cc84a64c34e2b822fe4114db1ca6531e647affa | refs/heads/master | 2023-08-17T12:16:06.002630 | 2021-09-18T03:27:27 | 2021-09-18T03:27:27 | 407,744,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #!/usr/bin/env python
import pika
import sys
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# Direct exchange: messages are routed to queues bound with a binding key
# equal to the message's routing key (the severity below).
channel.exchange_declare(exchange='direct_logs', exchange_type='direct')
# If message cannot be routed to a queue, we need confirm_delivery() to get noticed.
# see https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish
# https://pika.readthedocs.io/en/latest/examples/blocking_publish_mandatory.html
channel.confirm_delivery()
# argv[1] = severity (routing key), remaining args = message body.
severity = sys.argv[1] if len(sys.argv) > 1 else 'info'
message = ' '.join(sys.argv[2:]) or 'Hello World!'
try:
    # NOTE(review): ``res`` is assigned but never used -- the outcome is
    # signalled via the UnroutableError exception instead.
    res = channel.basic_publish(
        exchange='direct_logs',
        routing_key=severity,
        mandatory=True,
        body=message
    )
    print(" [x] Sent %r:%r" % (severity, message))
except pika.exceptions.UnroutableError:
    print("Message was returned")
connection.close() | [
"lyre@poetpalace.org"
] | lyre@poetpalace.org |
64bef1b8d66e25515d68a737b143f8d15d5675ce | 7790e3a3f2de068fef343585ec856983591997a2 | /bank/migrations/0021_followlawtype.py | 67c1e5c0f8ed434aeb042dbf4b3e27f516602279 | [] | no_license | mehdi1361/tadbir | ce702a9a02672826f0bf06e8d5cf0644efe31949 | c0a67710099f713cf96930e25df708625de89a6f | refs/heads/master | 2021-06-04T07:35:37.624372 | 2018-07-23T05:25:04 | 2018-07-23T05:25:04 | 148,870,028 | 0 | 0 | null | 2019-10-22T21:40:28 | 2018-09-15T04:40:26 | HTML | UTF-8 | Python | false | false | 1,147 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-11 16:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the FollowLawType model backed by
    # the ``follow_low_type`` table (Persian verbose names for the admin).
    # Avoid hand-editing beyond comments; Django tracks this file's state.
    dependencies = [
        ('bank', '0020_auto_20180510_1351'),
    ]
    operations = [
        migrations.CreateModel(
            name='FollowLawType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ایجاد')),
                ('update_at', models.DateTimeField(auto_now=True, verbose_name='تاریخ بروزرسانی')),
                ('type', models.CharField(max_length=100, verbose_name='نوع پیگیری')),
                ('enable', models.BooleanField(default=False, verbose_name='فعال')),
            ],
            options={
                'verbose_name': 'پیگیری حقوقی',
                'verbose_name_plural': 'پیگیری های حقوقی',
                'db_table': 'follow_low_type',
            },
        ),
    ]
| [
"mhd.mosavi@gmail.com"
] | mhd.mosavi@gmail.com |
eb78acba4bf3287ebe00fb95622aa68a63eb3407 | 5e5dfefcd80df20efa07bf101d202902921ba40d | /scripts/bam2fna | 0c7af193443d72b98092cab452115f4a0076e792 | [] | no_license | veg/BioExt | ba8539361dffd6a366cf174b938c00e28c6649c7 | 5d0e283553dcc0016567182dae59e4b4994b505f | refs/heads/master | 2023-08-31T11:29:56.342017 | 2023-06-13T20:56:40 | 2023-06-13T20:56:40 | 20,779,973 | 1 | 11 | null | 2023-08-11T21:55:30 | 2014-06-12T19:20:46 | Python | UTF-8 | Python | false | false | 1,120 | #!/usr/bin/env python3
import signal
from Bio import SeqIO
from BioExt.io import BamIO
def main(bam_file, out_handle):
    """Convert *bam_file* to FASTA, writing records to *out_handle*.

    Returns 0 on success (used as the process exit code).
    """
    try:
        # Restore default SIGPIPE handling so that piping the output into
        # e.g. `head` terminates quietly instead of raising IOError.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except ValueError:
        # signal.signal raises ValueError when not running in the main
        # thread; treated as best-effort and ignored.
        pass
    SeqIO.write(BamIO.parse(bam_file), out_handle, 'fasta')
    return 0
if __name__ == '__main__':
    import sys
    import argparse
    parser = argparse.ArgumentParser(
        description='convert a BAM file to a FASTA file'
    )
    parser.add_argument(
        'input',
        metavar='INPUT',
        type=argparse.FileType('rb'),
        help='input BAM file'
    )
    parser.add_argument(
        'output',
        default=sys.stdout,
        metavar='OUTPUT',
        type=argparse.FileType('w'),
        help='output FASTA file'
    )
    args = None
    retcode = -1
    try:
        args = parser.parse_args()
        # BamIO.parse wants a path, not a handle: keep the name that
        # argparse validated, then close the handle it opened.
        bam_file = args.input.name
        args.input.close()
        retcode = main(bam_file, args.output)
    finally:
        # Close the output file unless it is stdout.
        if args is not None:
            if args.output != sys.stdout:
                args.output.close()
    sys.exit(retcode)
| [
"nlhepler@gmail.com"
] | nlhepler@gmail.com | |
4d797ba65df470752fb7054a0f9213d8436220cc | ccfd41c1f6f90608f4c2596963a28303ab0bdee3 | /test_standalone/test_rpc_client.py | ca8e4062e9ba10e622c152fed40fb4932da79473 | [] | no_license | jsmoyam/zserver | 4e8ffb4543f663a70ca96f1988958dba66b79d49 | d838937cf92262176faec6a6a833a602dbcd868e | refs/heads/master | 2022-12-23T01:21:29.200727 | 2019-04-30T11:05:53 | 2019-04-30T11:05:53 | 184,247,118 | 0 | 0 | null | 2022-12-12T10:22:38 | 2019-04-30T11:05:41 | Python | UTF-8 | Python | false | false | 165 | py | import Pyro4
# obj = Pyro4.Proxy("PYRONAME:test_module")
# obj.example_method()
# obj.initialize()
obj = Pyro4.Proxy("PYRONAME:mytestserver")
print(obj.m1('hola')) | [
"jsmoya@one-esecurity.com"
] | jsmoya@one-esecurity.com |
0593e05afd2470b385d6c0f363d7d4ef5f18a772 | b770761d348794b86ac0d87b16750625eadc176d | /est_imp_from_file.py | 84100f6ca75cc3a32dc41421fcdfe72d6e81bb47 | [
"MIT"
] | permissive | fujiyuer-fujiyu/imp_tsp | 51855f722cea1864ca32adf66dbec42c3d4bcc8e | 4d19a28fc0dea4d81c38dcfb0ddd2800513257cb | refs/heads/master | 2020-06-20T19:40:05.935343 | 2018-03-02T03:12:41 | 2018-03-02T03:12:41 | 74,744,644 | 0 | 0 | null | 2016-11-25T09:34:19 | 2016-11-25T09:34:18 | null | UTF-8 | Python | false | false | 1,413 | py | from scipy.io import wavfile
from scipy import signal as sig
from scipy.io import wavfile
import numpy as np
import matplotlib.pyplot as plt

# Estimate an impulse response from a recorded TSP (swept-sine) measurement:
#   1. read the observed signal and the inverse-TSP reference,
#   2. synchronously average the repeated sweeps in the observation,
#   3. deconvolve in the frequency domain (multiply by the inverse-TSP
#      spectrum),
#   4. write the normalised impulse response to a wav file and plot it.

# input file (observed signal)
(rate, data) = wavfile.read("tsp_out.16.wav")
# (rate,data)=wavfile.read("./20180228/ref/ec_tsp.wav")
i_length = data.shape[0]
print("lenght of Observed signal and number of channels is {0}".format(str(data.shape)))

# inverse of TSP signal
(rate, data2) = wavfile.read("./itsp_out.wav")
print("length of Original TSP signal is {}".format(str(data2.shape)))
length = data2.shape[0]

# number of channels of the observed signal (1 for mono files)
if len(data.shape) > 1:
    nc = data.shape[1]
else:
    nc = 1
print("Number of channel of Observed signal is {}".format(nc))

## synchronous addition of the repeated sweeps
if nc > 1:
    data_s = np.zeros((length, nc))
else:
    data_s = np.zeros(length)
print(data_s.shape)
# BUG FIX: the original inner loop indexed data_s[i][nc] -- one past the
# last channel -- which raised IndexError for any multi-channel input.
# Accumulating whole length-sized slices adds every channel correctly
# (and avoids the per-sample Python loop).
for s in range(0, int(i_length / length)):
    print(str(s) + "th addition.. ")
    data_s += data[s * length:(s + 1) * length]

# calc inpulse response: frequency-domain deconvolution by multiplying
# with the spectrum of the inverse TSP signal.
# NOTE(review): for multi-channel input np.fft.fft operates on the last
# axis (channels) here; confirm the intended axis before relying on the
# multi-channel path.
data4_f = np.fft.fft(data_s, length) * np.fft.fft(data2, length)
data4 = np.fft.ifft(data4_f)

# normalize into a 16-bit-safe range and convert to integer samples
data4_i = 9000 * data4.real / np.max(data4.real)
data4_i = data4_i.astype(np.int16)
wavfile.write("./IR-from-FFT.wav", rate, data4_i)

plt.figure()
plt.title("IR: estimated from FFT")
plt.plot(data4_i)
plt.show()
| [
"yuyfujit@mbp-15uas-012.yjoffice.local"
] | yuyfujit@mbp-15uas-012.yjoffice.local |
c33d59f6b5de8274dc27c8aaca734bc82148e904 | 6a6ca78209adc4e342529d9d3d748f664b2fb511 | /run.py | a7a9504931696e6ad18b1b4a377b020d113d6ccd | [] | no_license | feibl/geometa_search | 88b641a38699bf9ab3af4ef1153c2ee57356ef79 | a4afca92e6639617bcb905ebe3351f0fc3fb440f | refs/heads/master | 2021-01-02T08:57:26.283601 | 2015-01-30T10:58:29 | 2015-01-30T10:58:29 | 26,922,245 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | from geometa_search.factory import create_app as create_frontend
# Development entry point: build the recommender backend and the frontend
# Flask apps, mount the backend under /recommender via a WSGI dispatcher,
# and serve the combined application on localhost:5000.
from search_rex.factory import create_app as create_backend
from search_rex.recommendations import create_recommender_system
# NOTE(review): DispatcherMiddleware moved to werkzeug.middleware.dispatcher
# in Werkzeug >= 0.15 -- this import path assumes an older Werkzeug.
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
# The recommender system is attached to the backend app before serving.
backend = create_backend('config.DevelopmentConfig')
create_recommender_system(backend)
frontend = create_frontend('config.DevelopmentConfig')
# Requests to /recommender/* go to the backend; everything else to the frontend.
app = DispatcherMiddleware(
    frontend, {
        '/recommender': backend
    }
)
if __name__ == '__main__':
    # Development server only: auto-reloader and in-browser debugger enabled.
    run_simple(
        'localhost', 5000, app, use_reloader=True,
        use_debugger=True, threaded=True)
| [
"fabian.senn@gmail.com"
] | fabian.senn@gmail.com |
bd6a9831ec6c286db6ed408f8d0f498356771f0c | 24c8065f5508126675029cdb41cfa22ec1d70539 | /venv/lib/python3.6/site-packages/haystackbrowser/models.py | 5e8d9bf2e45c5c418883af394de4584f3d9d0dd1 | [] | no_license | Shawnmhy/BEProject | 3b2eaff1737a8bc08af87e6d1e078912449531dd | 06bff81c5b8942c3b713263b0d417c4d19f1d7b3 | refs/heads/master | 2020-03-10T03:18:53.341055 | 2018-05-23T21:51:16 | 2018-05-23T21:51:16 | 129,161,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,794 | py | # -*- coding: utf-8 -*-
import logging
from copy import deepcopy
try:
from urllib import quote_plus
except ImportError: # > Python 3
from django.utils.six.moves.urllib import parse
quote_plus = parse.quote_plus
from operator import itemgetter
from itertools import groupby
from collections import namedtuple
from django.db import models
try:
from django.utils.encoding import force_text
except ImportError: # < Django 1.5
from django.utils.encoding import force_unicode as force_text
from django.utils.safestring import mark_safe
from django.utils.html import strip_tags
from django.urls import NoReverseMatch, reverse
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
logger = logging.getLogger(__name__)
class HaystackResults(models.Model):
    """ Our fake model, used for mounting :py:class:`~haystackbrowser.admin.HaystackResultsAdmin`
    onto the appropriate AdminSite.
    .. note::
        the model is marked as unmanaged, so will never get created via ``syncdb``.
    """
    class Meta:
        # Unmanaged: Django never creates or migrates a table for this model;
        # it exists only so the admin has something to register.
        managed = False
        verbose_name = _('Search result')
        verbose_name_plural = _('Search results')
class SearchResultWrapper(object):
    """Value object which consumes a standard Haystack SearchResult, and the
    current admin site, and exposes additional methods and attributes for
    displaying the data appropriately.

    :param obj: the item to be wrapped.
    :type obj: object
    :param admin_site: the parent site instance (its URL namespace name).
    :type admin_site: AdminSite object
    """
    def __init__(self, obj, admin_site=None):
        self.admin = admin_site
        self.object = obj
        if getattr(self.object, 'searchindex', None) is None:
            # < Haystack 1.2 exposed indexes via a global site registry.
            from haystack import site
            self.object.searchindex = site.get_index(self.object.model)

    def __repr__(self):
        # (dead, unused 'obj' key removed from the format mapping)
        return '<%(module)s.%(cls)s [%(app)s.%(model)s pk=%(pk)r]>' % {
            'module': self.__class__.__module__,
            'cls': self.__class__.__name__,
            'app': self.object.app_label,
            'model': self.object.model_name,
            'pk': self.object.pk,
        }

    def get_app_url(self):
        """Resolves a given object's app into a link to the app administration.

        .. warning::
            This link may return a 404, as pretty much anything may
            be reversed and fit into the ``app_list`` urlconf.

        :return: string or None
        """
        try:
            return reverse('%s:app_list' % self.admin, kwargs={
                'app_label': self.object.app_label,
            })
        except NoReverseMatch:
            return None

    def get_model_url(self):
        """Generates a link to the changelist for a specific Model in the administration.

        :return: string or None
        """
        try:
            parts = (self.admin, self.object.app_label, self.object.model_name)
            return reverse('%s:%s_%s_changelist' % parts)
        except NoReverseMatch:
            return None

    def get_pk_url(self):
        """Generates a link to the edit page for a specific object in the administration.

        :return: string or None
        """
        try:
            parts = (self.admin, self.object.app_label, self.object.model_name)
            return reverse('%s:%s_%s_change' % parts, args=(self.object.pk,))
        except NoReverseMatch:
            return None

    def get_detail_url(self):
        """Link to this app's own read-only detail view for the result.

        :return: string or None
        """
        try:
            urlname = '%s:haystackbrowser_haystackresults_change' % self.admin
            return reverse(urlname, kwargs={
                'content_type': '.'.join([self.object.app_label,
                                          self.object.model_name]),
                'pk': self.object.pk})
        except NoReverseMatch:
            return None

    def get_model_attrs(self):
        """Map each search-index field name to its declared ``model_attr``
        source, for every index field that has one.

        :return: dictionary of field name -> model attribute name.
        """
        outfields = {}
        try:
            fields = self.object.searchindex.fields
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed.  Any index without usable `fields`
            # simply yields an empty mapping.
            fields = {}
        for key, field in fields.items():
            has_model_attr = getattr(field, 'model_attr', None)
            if has_model_attr is not None:
                outfields[key] = force_text(has_model_attr)
        return outfields

    def get_stored_fields(self):
        """Stored fields with both the raw value and a tag-stripped, safe
        value, plus the originating ``model_attr`` where one is declared.

        :return: dictionary of field names to value dictionaries.
        """
        stored_fields = {}
        model_attrs = self.get_model_attrs()
        for key, value in self.object.get_stored_fields().items():
            safe_value = force_text(value).strip()
            stored_fields[key] = {
                'raw': safe_value,
                'safe': mark_safe(strip_tags(safe_value))
            }
            if key in model_attrs:
                stored_fields[key].update(model_attr=model_attrs.get(key))
        return stored_fields

    def get_additional_fields(self):
        """Find all fields in the Haystack SearchResult which have not already
        appeared in the stored fields.

        :return: dictionary of field names and values.
        """
        additional_fields = {}
        stored_fields = self.get_stored_fields().keys()
        model_attrs = self.get_model_attrs()
        for key, value in self.object.get_additional_fields().items():
            if key not in stored_fields:
                safe_value = force_text(value).strip()
                additional_fields[key] = {
                    'raw': safe_value,
                    'safe': mark_safe(strip_tags(safe_value))
                }
                if key in model_attrs:
                    additional_fields[key].update(model_attr=model_attrs.get(key))
        return additional_fields

    def get_content_field(self):
        """Find the name of the main content field in the Haystack SearchIndex
        for this object.

        :return: string representing the attribute name.
        """
        return self.object.searchindex.get_content_field()

    def get_content(self):
        """Given the name of the main content field in the Haystack Search Index
        for this object, get the named attribute on this object.

        :return: whatever is in ``self.object.<content_field_name>``
        """
        return getattr(self.object, self.get_content_field())

    def get_stored_field_count(self):
        """Number of stored fields on this search result.

        :rtype: integer
        """
        return len(self.object.get_stored_fields())

    def get_additional_field_count(self):
        """Number of additional (non-stored) fields on this search result.

        :rtype: integer
        """
        return len(self.get_additional_fields())

    def __getattr__(self, attr):
        # Anything not defined on the wrapper falls through to the wrapped
        # Haystack SearchResult.
        return getattr(self.object, attr)
class FacetWrapper(object):
    """Thin adapter over ``sqs.facet_counts()``.

    Drops zero-count and empty-value entries and reshapes the remaining
    facet data into dictionaries the templates can consume directly.
    """
    __slots__ = ('dates', 'fields', 'queries', '_total_count', '_querydict')

    def __init__(self, facet_counts, querydict):
        self.dates = facet_counts.get('dates', {})
        self.fields = facet_counts.get('fields', {})
        self.queries = facet_counts.get('queries', {})
        self._total_count = sum(
            (len(self.dates), len(self.fields), len(self.queries)))
        # `querydict` comes from the cleaned form data; drop the page
        # parameter so facet links never carry a stale page number.
        if querydict is not None and 'p' in querydict:
            querydict.pop('p')
        self._querydict = querydict

    def __repr__(self):
        return '<%s.%s fields=%r dates=%r queries=%r>' % (
            self.__class__.__module__, self.__class__.__name__,
            self.fields, self.dates, self.queries)

    def get_facets_from(self, x):
        """Yield template-ready dicts for every non-empty, non-zero facet
        value stored under attribute *x* ('dates', 'queries' or 'fields')."""
        if x not in ('dates', 'queries', 'fields'):
            raise AttributeError('Wrong field, silly.')
        for field_name, pairs in getattr(self, x).items():
            for raw_value, hits in pairs:
                value = raw_value.strip()
                if hits <= 0 or not value:
                    continue
                yield {
                    'field': field_name,
                    'value': value,
                    'count': hits,
                    'fieldvalue': quote_plus('%s:%s' % (field_name, value)),
                    'facet': Facet(field_name, querydict=self._querydict),
                }

    def get_grouped_facets_from(self, x):
        """Lazily group the facets for *x* by field name."""
        by_field = itemgetter('field')
        flat = sorted(self.get_facets_from(x), key=by_field)
        return ({'grouper': Facet(field_name), 'list': list(group)}
                for field_name, group in groupby(flat, key=by_field))

    def get_field_facets(self):
        return self.get_grouped_facets_from('fields')

    def get_date_facets(self):
        return self.get_grouped_facets_from('dates')

    def get_query_facets(self):
        return self.get_grouped_facets_from('queries')

    def __bool__(self):
        """True when at least one facet field of any kind is present
        (Python 3 protocol; ``__nonzero__`` aliases it for Python 2)."""
        return self._total_count > 0
    __nonzero__ = __bool__

    def __len__(self):
        """Number of distinct facet fields across all three kinds."""
        return self._total_count
class AppliedFacet(namedtuple('AppliedFacet', 'field value querydict')):
    """One facet currently applied to the search, parsed from a
    ``selected_facets`` querystring entry of the form ``field:value``."""
    __slots__ = ()

    def title(self):
        return self.value

    @property
    def facet(self):
        """ a richer object """
        return Facet(self.raw)

    @property
    def raw(self):
        """ the original data, rejoined """
        return '%s:%s' % (self.field, self.value)

    @property
    def narrow(self):
        """Haystack ``narrow()`` template: ``field:"{cleaned_value}"``."""
        return '{0}:"{{cleaned_value}}"'.format(self.field)

    def link(self):
        """Querystring selecting *only* this facet (page reset)."""
        new_qd = self.querydict.copy()
        page_key = 'p'
        if page_key in new_qd:
            new_qd.pop(page_key)
        new_qd['selected_facets'] = self.raw
        new_qd['possible_facets'] = self.field
        return '?%s' % new_qd.urlencode()

    def remove_link(self):
        """Querystring with this facet removed (page reset)."""
        new_qd = self.querydict.copy()
        # remove page forcibly ...
        page_key = 'p'
        if page_key in new_qd:
            new_qd.pop(page_key)
        # remove self from the existing querydict/querystring ...
        key = 'selected_facets'
        selected = new_qd.getlist(key) if key in new_qd else []
        if self.raw in selected:
            # BUG FIX: QueryDict.getlist() returns a *copy*, so the previous
            # `new_qd.getlist(key).remove(...)` mutated a throwaway list and
            # the facet was never actually removed from the querystring.
            # Prune the copy and write it back with setlist().
            selected.remove(self.raw)
            new_qd.setlist(key, selected)
        return '?%s' % new_qd.urlencode()
class AppliedFacets(object):
    """Collection of the facets currently applied via ``selected_facets``
    querystring entries; entries without a ``:`` separator are skipped."""
    __slots__ = ('_applied',)

    def __init__(self, querydict):
        self._applied = {}
        if 'selected_facets' not in querydict:
            return
        for raw_facet in querydict.getlist('selected_facets'):
            if ":" not in raw_facet:
                continue
            field, value = raw_facet.split(":", 1)
            self._applied[raw_facet] = AppliedFacet(
                field=field, value=value, querydict=querydict)

    def __iter__(self):
        return iter(self._applied.values())

    def __len__(self):
        return len(self._applied)

    def __contains__(self, item):
        return item in self._applied

    def __repr__(self):
        raw = tuple(entry.raw for entry in self._applied.values())
        return '<{cls!s}.{name!s} selected_facets={raw}>'.format(
            cls=self.__class__.__module__,
            name=self.__class__.__name__, raw=raw)

    def __str__(self):
        raw = [entry.facet.get_display() for entry in self._applied.values()]
        return '{name!s} {raw!s}'.format(name=self.__class__.__name__, raw=raw)
class Facet(object):
    """Lightweight descriptor for a single facet field name
    (e.g. ``thing_exact``), optionally bound to the current querydict."""
    __slots__ = ('fieldname', '_querydict')

    def __init__(self, fieldname, querydict=None):
        self.fieldname = fieldname
        self._querydict = querydict

    def __repr__(self):
        return '<%s.%s - %s>' % (self.__class__.__module__,
                                 self.__class__.__name__, self.fieldname)

    def link(self):
        """Querystring for the bound querydict, or a bare '?' when unbound."""
        if self._querydict is None:
            return '?'
        return '?%s' % self._querydict.urlencode()

    def get_display(self):
        """Human-readable label: underscores to spaces, title-cased."""
        return self.fieldname.replace('_', ' ').title()

    def choices(self):
        """(value, label) pair suitable for a Django choices list."""
        return (self.fieldname, self.get_display())
| [
"shawnmhy@gmail.com"
] | shawnmhy@gmail.com |
4d057261b30773fd2e2100f2226e925e29b70202 | 7e48f2d3592dc815e06956bfaba78adbda58626c | /sibu/sample_programs/python/Python/37~48강/alzio07.py | da2f5c72d175385629b371779f69b2799919125c | [] | no_license | joonholee95/Sibu-taikai | bec247a1a1d480f6b659d885b1b7e8dc89cdfd39 | 75ca616daab89f2016b75d340dfe8b49ec3af3c8 | refs/heads/master | 2020-03-22T15:25:34.664224 | 2018-10-01T02:30:15 | 2018-10-01T02:30:15 | 140,251,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,831 | py |
# coding: utf-8
# Exported from a Jupyter notebook (nbconvert); the "# In[n]:" markers
# delimit the original cells.  Bare expressions such as `df` were notebook
# display statements and have no effect when run as a plain script.
# In[1]:
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import font_manager, rc
# Use a Korean-capable font (Malgun Gothic); this path is Windows-only.
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
# # Working with data in pandas
# ## Transformation
# - removing duplicates
# - mapping
# - replacement
# - handling outliers
# ### Removing duplicates
# In[2]:
df = DataFrame({'key1': list("가가가나나다다다"), 'key2': list("13122414")})
df
# In[3]:
df.duplicated()
# In[4]:
df.drop_duplicates()
# In[5]:
df.drop_duplicates(keep='last')
# In[6]:
df.drop_duplicates('key1')
# In[7]:
df.drop_duplicates('key2')
# ### Mapping
# In[8]:
data = {
    '이름': list("가나다라마"),
    '점수': np.arange(5) * 10
}
df = DataFrame(data)
df
# In[9]:
# Name -> gender lookup table used with Series.map below.
gender = {
    "나": "여",
    "마": "여",
    "가": "남",
    "라": "남",
    "다": "여",
}
# In[10]:
df['성별'] = df['이름'].map(gender)
df
# In[11]:
df['점수'] = df['점수'].map("{}점".format)
df
# ### Replacement
# In[12]:
sr = Series([2000, -1, 2001, 2002, -1, 0, 2003])
sr
# In[13]:
sr.replace(-1, np.nan)
# In[14]:
sr.replace([-1, 0], np.nan)
# In[15]:
sr.replace({-1: np.nan, 0: 2000})
# ### Handling outliers
# In[16]:
np.random.seed(1)
df = DataFrame(np.random.randn(1000, 5))
df.describe()
# In[17]:
plt.hist(df[2], bins=100)
plt.show();
# In[18]:
abs(df[2]) > 2.5
# In[19]:
df[abs(df[2]) > 2.5]
# In[20]:
# Clip outliers: cap any |value| > 2.5 at +/-2.5, preserving the sign.
df[abs(df[2]) > 2.5] = np.sign(df) * 2.5
# In[21]:
plt.hist(df[2], bins=100)
plt.show();
# # Drawing graphs with matplotlib
# In[22]:
from numpy.random import randn
# ## Basics
# In[23]:
plt.plot(randn(50).cumsum())
plt.show();
# ## Subplots
# In[24]:
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(randn(100), color='k')
axes[0, 1].plot(randn(100).cumsum(), c='red')
axes[1, 0].scatter(randn(100), randn(100), color='green')
axes[1, 1].bar(np.arange(100), np.random.randint(1, 100, size=100), color='blue')
plt.show();
# ## Adjusting the spacing between subplots
# In[25]:
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
axes[0, 0].hist(randn(1000), color='k')
axes[1, 0].hist(randn(1000), color='r')
axes[0, 1].hist(randn(1000), color='g')
axes[1, 1].hist(randn(1000), color='b')
plt.subplots_adjust(wspace=0, hspace=0)
plt.show();
# ## Figure size, markers and line styles
# In[26]:
plt.figure(figsize=(12, 6))
plt.plot(randn(50).cumsum(), linestyle='-', color='k', marker='*')
plt.plot(randn(50).cumsum(), linestyle='--', color='r', marker='o')
plt.plot(randn(50).cumsum(), linestyle='-.', color='g', marker='v')
plt.plot(randn(50).cumsum(), linestyle=':', color='b', marker='D')
plt.show();
# ## Ticks
# In[27]:
plt.plot(randn(500).cumsum())
plt.show();
# In[28]:
plt.plot(randn(500).cumsum())
plt.xticks([0, 250, 500])
plt.show();
# In[29]:
plt.plot(randn(500).cumsum())
plt.xticks([0, 250, 500], ["2000년 1월 1일", "7월 1일", "2001년 1월 1일"], rotation=45)
plt.show();
# ## Labels and legend
# In[30]:
plt.plot(randn(50).cumsum(), linestyle='-', color='k', label="검정")
plt.plot(randn(50).cumsum(), linestyle='--', color='r', label="빨강")
plt.plot(randn(50).cumsum(), linestyle='-.', color='g', label="초록")
plt.xlabel("스텝")
plt.ylabel("값")
plt.title("제목")
plt.legend(loc="lower left")
plt.show();
# ## Saving
# In[32]:
fig = plt.figure()
plt.plot(randn(50).cumsum(), linestyle='-', color='k', label="검정")
plt.plot(randn(50).cumsum(), linestyle='--', color='r', label="빨강")
plt.plot(randn(50).cumsum(), linestyle='-.', color='g', label="초록")
plt.xlabel("스텝")
plt.ylabel("값")
plt.title("제목")
plt.legend(loc="lower left")
plt.savefig("test.jpg", dpi=300);
plt.show();
# (the notebook ended with an embedded output-image reference)
| [
"joonho.lee@human.ait.kyushu-u.ac.jp"
] | joonho.lee@human.ait.kyushu-u.ac.jp |
65522eea8575a938acfbe8a6f92ea0e0fbd28d33 | d36ea3f3a03299d0572914dd31c63a74c9da2c45 | /listapar.py | 217a2e0645c11b0ef1054b356536875010eb7a4c | [] | no_license | BrauCamacho/Mis_Trabajos_python | a05b3954751e37e5b2cbede0d8746faa774a7e1d | e6e0168b33e87c9b8924cf36441d0b585c3aee76 | refs/heads/master | 2022-11-30T11:42:43.520350 | 2020-08-15T04:52:05 | 2020-08-15T04:52:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | lista = []
for i in range(2,21,2):
    lista.append(i)
print(lista)
# The same thing, written as a list comprehension.
lista2= [val for val in range(2,21,2)]
print(lista2)
valor = 0
# Conditional expression ("ternary if").
valor = 10
Edad = 10 if valor < 20 else 0
print (Edad)
# String slicing and common str methods.
nombre = " Juan "
print(nombre[0:2])
print(nombre[:3])
print(nombre[-1])
print(nombre[::-1])  # reversed copy via a negative-step slice
print(lista[::-1])
# print(nombre.suorted())  <- dead code: str has no .suorted(); sorted() below works
print(sorted(nombre))
print(nombre.upper())
print(nombre.lower())
print(nombre.split("a"))
print(nombre.replace("a","A"))
print(nombre.strip()) | [
"noreply@github.com"
] | BrauCamacho.noreply@github.com |
be4bda3f34c623e89442a9c1b39b272bef2b621d | 65b3d3230cd6b828eeb664429997468cd39db92c | /GUI/venv/bin/pip3.6 | e8ca7da5cd04e5723cc80121403f3f1dbfb27657 | [] | no_license | shaniphankar/IR-Assignments | 6bbd0c5e9d52704e67a8c49ee2551d2f693f6578 | db9120861c8dca83d11340c57240e3dc53f660b1 | refs/heads/master | 2021-06-05T09:49:16.734160 | 2020-02-04T15:30:24 | 2020-02-04T15:30:24 | 147,077,391 | 1 | 3 | null | 2018-10-23T13:35:59 | 2018-09-02T11:01:01 | Python | UTF-8 | Python | false | false | 222 | 6 | #!/home/cross/GUI/venv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for pip (written by the installer
# into the virtualenv's bin/); regenerate rather than edit by hand.
# NOTE(review): `from pip import main` only exists on pip < 10; later
# versions moved the entry point -- confirm the pinned pip version.
import re
import sys
from pip import main
if __name__ == '__main__':
    # Normalise argv[0]: strip a trailing "-script.py(w)" or ".exe" suffix
    # (Windows launcher convention) before handing control to pip.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"vishalcross36@gmail.com"
] | vishalcross36@gmail.com |
afbea8d9ea26a5aca621937e13f4651df94afc0f | 51e61e09fc0440acc56e94446873a686e5f19a9b | /sentiment_analysis.py | 580864d55f9c0138a144fed68b75040afa71a50c | [] | no_license | Yunhan0816/band-lyrics-analysis | f6d3c1981e0718dc53a1cd2908f4f19d28a93371 | e4e0b5cca125dfa70ea1b1f699d595956e2c7b97 | refs/heads/master | 2020-06-23T09:46:56.817147 | 2019-08-15T09:02:15 | 2019-08-15T09:02:15 | 198,588,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import config
import matplotlib.pyplot as plt
import pandas as pd

# Per-artist sentiment breakdown of cleaned lyric files, plotted as a
# stacked bar chart of positive / neutral / negative sentence percentages.
artists = config.artists
# 'neutral' was misspelled 'neautral' in the original column header.
df = pd.DataFrame(columns=('artist', 'positive', 'neutral', 'negative'))
analyzer = SentimentIntensityAnalyzer()
for row, artist in enumerate(artists):
    pos = neu = neg = 0
    # `with` guarantees the file is closed (the original leaked a handle
    # per artist); iterating the handle streams line by line.
    with open('lyrics/' + artist + '_cleaned', 'rb') as f:
        for sentence in f:
            compound = analyzer.polarity_scores(sentence.decode('utf-8'))['compound']
            # NOTE(review): the commonly documented VADER thresholds are
            # +/-0.05; this script deliberately(?) uses +/-0.5 -- confirm.
            if compound >= 0.5:
                pos += 1
            elif -0.5 < compound < 0.5:
                neu += 1
            else:
                neg += 1
    num_total = pos + neu + neg
    if num_total == 0:
        # Empty lyric file: record zeros instead of dividing by zero.
        df.loc[row] = (artist, 0.0, 0.0, 0.0)
        continue
    df.loc[row] = (artist,
                   pos * 100.0 / num_total,
                   neu * 100.0 / num_total,
                   neg * 100.0 / num_total)
df.plot.bar(x='artist', stacked=True)
plt.show()
| [
"yunhanh@bu.edu"
] | yunhanh@bu.edu |
5497eed0b98d3d44dc25ed39c7376e7800f9fcaa | 350cb6c7c7a7842e80aa06ee32bfffc5bc35ee03 | /programming/language/python/python-pillow/actions.py | 09179eb1424c0d20883d92c49aeb6480d96ba765 | [] | no_license | LimeLinux/Packages-2 | f41d11343e8b39274ccd85b9850d0f4e76830031 | 356975df129f2097f12dbed3bc2604cadb5a6c64 | refs/heads/master | 2021-04-30T23:25:31.121967 | 2017-01-21T21:46:54 | 2017-01-21T21:46:54 | 79,139,920 | 0 | 2 | null | 2017-01-21T21:46:55 | 2017-01-16T17:02:37 | Python | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
#WorkDir="Imaging-%s" % get.srcVERSION()
def install():
    """PiSi build action: patch FreeType include paths, install the Python
    module, and ship the public Imaging headers plus documentation."""
    # Point the FreeType includes at the freetype2/ directory layout before
    # the C extension is built.
    pisitools.dosed("_imagingft.c", "<freetype/freetype.h>", "<freetype2/freetype.h>")
    pisitools.dosed("_imagingft.c", "<freetype/fterrors.h>", "<freetype2/fterrors.h>")
    pythonmodules.install()
    # Sane sub-module install disabled:
    #shelltools.cd("Sane")
    #pythonmodules.install()
    #shelltools.cd("..")
    # Expose the C headers so other packages can compile against Imaging.
    for header in ["Imaging.h","ImPlatform.h"]:
        pisitools.insinto("/usr/include/%s" % get.curPYTHON(), "libImaging/%s" % header)
    pisitools.dodoc("README.rst")
| [
"ergunsalman@hotmail.com"
] | ergunsalman@hotmail.com |
712213e339f5e2cbd3b35fc7dbce59681eaeb484 | 3c968361997cc8555bf69f8dcb2283831ac99832 | /pubmed_prepro/train_test_splits.py | a303ab18a02a896f2d53af5c4ab517f90ff903ed | [] | no_license | yellajaswanth/Pubset | 01014c4b99ae32561741c14c8c638f60daa36a8a | 650bd925c82e11aa44ae7be2dc8c10f4401ec074 | refs/heads/main | 2023-03-21T07:06:32.503514 | 2021-03-05T22:01:49 | 2021-03-05T22:01:49 | 340,588,695 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | #%%
import shelve
from tqdm import tqdm
import json
from sklearn.model_selection import train_test_split
# Build (abstract_key, sentence_index) pairs for every document that has
# BERT-tokenised sentences, then split them into train/val/test index sets.
db = shelve.open('/home/aniljegga1/bigdataserver/pubmed_docs/abstracts/pubmed_small/pubmed.db', 'r')
keys = list(db.keys())
all_indices = []
for key in tqdm(keys):
    # NOTE(review): db[key] is deserialised twice per iteration; caching it
    # in a local would halve the shelve reads.
    if 'bert' in db[key]:
        num_sents = len(db[key]['bert'])
        # num_sents-6: the last 6 sentence positions are excluded --
        # presumably to guarantee a full context window after each index;
        # confirm the intended window size.
        all_indices.extend([(key, i) for i in range(num_sents-6)])
# 75/25 train/test split, then 15% of the training part carved off as val.
train, test = train_test_split(all_indices, test_size=0.25)
train, val = train_test_split(train, test_size=0.15)
#%%
# (The handles below are *output* files despite the `fin` name.)
with open('/home/aniljegga1/bigdataserver/pubmed_docs/abstracts/pubmed_small/all_indices.json', 'w') as fin:
    json.dump(all_indices, fin)
with open('/home/aniljegga1/bigdataserver/pubmed_docs/abstracts/pubmed_small/train_indices.json', 'w') as fin:
    json.dump(train, fin)
with open('/home/aniljegga1/bigdataserver/pubmed_docs/abstracts/pubmed_small/test_indices.json', 'w') as fin:
    json.dump(test, fin)
with open('/home/aniljegga1/bigdataserver/pubmed_docs/abstracts/pubmed_small/val_indices.json', 'w') as fin:
    json.dump(val, fin)
| [
"yellajaswanth@gmail.com"
] | yellajaswanth@gmail.com |
aeb4299c83b8a609952126db23c62ac404ef1759 | b422f6330d969887db3e6f7cafa52b681bd181de | /bank_system/decorators/client_decorator.py | ee7bd500164ccc60d932da471a15587876bf2d0e | [] | no_license | weslleymberg/eispatterns-examples | 0346f78cdfcb0729fae72465662d9b797a0aa4b9 | ae1a1759886fb1e05067ea26589e0e229fc646f0 | refs/heads/master | 2021-01-17T16:49:34.464541 | 2011-12-06T17:21:10 | 2011-12-06T17:21:10 | 1,979,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from should_dsl import should
from domain.base.decorator import Decorator
from domain.node.person import Person
from domain.resource.operation import operation
from domain.supportive.rule import rule
from domain.supportive.association_error import AssociationError
class ClientDecorator(Decorator):
    '''A general-purpose Client decorator.

    Decorates a node with client behaviour; ``decoration_rules`` lists the
    rule name(s) the framework checks before applying the decoration.
    '''
    # Rule names are resolved by the decoration framework at decoration time.
    decoration_rules = ['should_be_instance_of_person']
    def __init__(self):
        Decorator.__init__(self)
        self.description = "Supplies the basis for representing clients"
        # Accounts held by the decorated client.
        self.accounts = []
    def generate_register(self, register):
        '''Stores *register* as the client's register number.'''
        self.register = register
| [
"weslleym.lisboa@gmail.com"
] | weslleym.lisboa@gmail.com |
c63f6c71799ea453d1f3eec67be2aff4089d9177 | bc41457e2550489ebb3795f58b243da74a1c27ae | /fabfile.py | 36e45dab0037e8a64b682e70626dadcb3e9d14de | [] | no_license | SEL-Columbia/ss_sql_views | 28a901d95fe779b278d2a51aec84d6bf51245c02 | d146fd96849a4d165f3dc3f197aadda804a2f60a | refs/heads/master | 2021-01-01T19:35:18.999147 | 2012-05-10T18:43:36 | 2012-05-10T18:43:36 | 3,020,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 1,649 | py | '''
fabfile for offline gateway tasks
'''
import datetime as dt
from fabric.api import local, lcd, run, env
env.hosts = ['gateway.sharedsolar.org']
env.user = 'root'
def sync_db():
    """Pull today's gateway DB dump, rebuild the local `gateway` database
    inside a temp dir, recreate the SQL views, clean up, then report the
    remote disk usage."""
    time = dt.datetime.now().strftime('%y%m%d')
    file = 'gateway.' + time + '.sql.zip'
    url = 'root@gateway.sharedsolar.org'
    # No leading slash: download_db prepends ':/' when building the scp path.
    path = 'var/lib/postgresql/backups/'
    local('mkdir temp')
    with lcd('temp'):
        download_db(url, path, file)
        # NOTE(review): load_db reads `path + file` relative to ./temp, but
        # unzip extracts into ./temp directly -- confirm the dump location.
        load_db(path, file)
        create_views()
    local('rm -rf temp')
    show_disk_space()
def download_db(url, path, file):
    """scp the zipped dump from the gateway into the current (temp)
    directory and unzip it there."""
    # The temp folder itself is created by sync_db(); this print is
    # informational only.
    print 'Creating temporary folder ./temp'
    print 'Downloading database from gateway'
    local('scp ' + url + ':/' + path + file + ' .')
    print 'Expanding database'
    local('unzip ' + file)
def load_db(path, file):
    """Recreate the local `gateway` database from the unzipped dump."""
    # NOTE(review): dropdb errors if the database does not exist; a
    # previous sync is assumed to have created it.
    local('dropdb gateway')
    local('createdb gateway')
    print 'Loading database'
    # file[:-4] strips the '.zip' suffix to get the extracted .sql filename.
    local('psql -d gateway -f ' + path + file[:-4])
def create_views():
print 'Executing create_views'
# execute all sql files
local('psql -d gateway -f views/create_view_primary_log.sql')
local('psql -d gateway -f views/create_view_midnight.sql')
local('psql -d gateway -f views/create_view_meter.sql')
local('psql -d gateway -f views/create_view_alarms.sql')
local('psql -d gateway -f views/create_view_solar.sql')
local('psql -d gateway -f views/create_view_recharge.sql')
def show_disk_space():
    """Print disk usage on the remote gateway host (fabric `run`)."""
    run('df -h')
"danielrsoto@gmail.com"
] | danielrsoto@gmail.com |
de2749d93e0cc76fcd54f2d8c178db2908200109 | b92db3e3eae34b69dce099f78ebe8c0446439faf | /models.py | 30e97d1a6a4e3ae8d3438702123b4f5d29f413e0 | [] | no_license | gayathri44/Portfolio-project | 06834ab0ca261d96f56e80809c4c138c1332dd69 | 260bf23700888fc18fcd0d808d57dd0d8d6650f9 | refs/heads/main | 2023-03-30T22:24:46.645209 | 2021-03-29T17:39:27 | 2021-03-29T17:39:27 | 352,729,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from django.db import models
class Project(models.Model):
    """Portfolio project entry displayed on the site."""
    title = models.CharField(max_length=100)
    description = models.TextField()
    technology = models.CharField(max_length=20)
    # NOTE(review): FilePathField(path="/img") expects an absolute directory
    # that exists on the server's filesystem -- confirm the intended path.
    image = models.FilePathField(path="/img")
# Create your models here.
| [
"noreply@github.com"
] | gayathri44.noreply@github.com |
491e15f5ff3feba66ba8f3a0cb4ca00a15def0fe | 58b37bcd7f406034274bfaf64fafe446ca7d07bd | /Asset_Management/Asset_Management/settings.py | 6822e9cc1a8f9552726787cca43a7ce874115c1c | [] | no_license | jmarigondon/django-repo | cfb06c6e18f85334f15150085df6748633590d56 | 48a8271c76fca27a3a095a19586d79d684afe9c2 | refs/heads/master | 2021-01-20T12:44:59.729698 | 2013-10-15T11:04:48 | 2013-10-15T11:04:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | # Django settings for Asset_Management project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '/home/jmarigondon/workspace/Asset_Management/sqlite.db', # Or path to database file if using sqlite3.
        'USER': 'jmarigondon', # Not used with sqlite3.
        # SECURITY: credentials are committed to version control; move them
        # to environment variables before switching away from sqlite3.
        'PASSWORD': '12345Ye@h', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY: this key is committed to version control; rotate it and load it
# from the environment for any non-local deployment.
SECRET_KEY = '@$id$eos_^=iz645bj_gb#)tq)!$-15d&i59io5kcku5#a=%xl'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'Asset_Management.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"jbmarigondon@gmail.com"
] | jbmarigondon@gmail.com |
c67befeec81e23a656e21cce799a95202898726a | 51d602577affebc8d91ffe234f926469d389dc75 | /lis/specimen/lab_result_item/migrations/0001_initial.py | 69d94bf24a380776b43692bb1088907fc36bac4f | [] | no_license | botswana-harvard/lis | 5ac491373f74eaf3855f173580b000539d7f4740 | 48dc601ae05e420e8f3ebb5ea398f44f02b2e5e7 | refs/heads/master | 2020-12-29T01:31:07.821681 | 2018-06-24T06:06:57 | 2018-06-24T06:06:57 | 35,820,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,300 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: create the ResultItem and ResultItemAudit tables.

    South migration entry point.  ``orm`` is South's frozen ORM snapshot
    (built from the ``models`` dict on this Migration class).  The column
    definitions below must stay in sync with that frozen snapshot, so do
    not edit them by hand.
    """
    # Adding model 'ResultItemAudit' — the audit-trail copy of ResultItem.
    # Note it has its own auto-increment primary key (_audit_id) and keeps
    # the original row's 'id' as a plain (non-PK) CharField, so multiple
    # audit rows can exist per ResultItem.
    db.create_table('bhp_lab_core_resultitem_audit', (
        # Standard bookkeeping columns: creation/modification timestamps
        # plus the user and hostname that touched the row.
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
        ('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
        ('hostname_created', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
        ('hostname_modified', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
        ('id', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
        # Audit FKs use a distinct related_name ('_audit_resultitem') so
        # they do not clash with the live model's reverse accessors.
        ('result', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_resultitem', to=orm['lab_result.Result'])),
        ('test_code', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_resultitem', to=orm['lab_test_code.TestCode'])),
        ('result_item_value', self.gf('django.db.models.fields.CharField')(max_length=25, db_index=True)),
        ('result_item_quantifier', self.gf('django.db.models.fields.CharField')(default='=', max_length=25)),
        ('result_item_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
        ('result_item_operator', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
        # Validation workflow columns; status defaults to 'P'
        # (presumably "pending" — confirm against the model's choices).
        ('validation_status', self.gf('django.db.models.fields.CharField')(default='P', max_length=10, db_index=True)),
        ('validation_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('validation_username', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
        ('validation_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('comment', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('result_item_source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_resultitem', to=orm['lab_result.ResultSource'])),
        ('result_item_source_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('error_code', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        # Audit-specific columns: change type ('_audit_change_type') and
        # an indexed timestamp, keyed by an auto PK.
        ('_audit_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('_audit_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('_audit_change_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
    ))
    # Tell South/Django the table for this model now exists.
    db.send_create_signal('lab_result_item', ['ResultItemAudit'])

    # Adding model 'ResultItem' — the live table; same columns as the
    # audit table minus the _audit_* fields, with 'id' as a UUID-style
    # CharField primary key.
    db.create_table('bhp_lab_core_resultitem', (
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
        ('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
        ('hostname_created', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
        ('hostname_modified', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
        ('id', self.gf('django.db.models.fields.CharField')(max_length=36, primary_key=True)),
        ('result', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lab_result.Result'])),
        ('test_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lab_test_code.TestCode'])),
        ('result_item_value', self.gf('django.db.models.fields.CharField')(max_length=25, db_index=True)),
        ('result_item_quantifier', self.gf('django.db.models.fields.CharField')(default='=', max_length=25)),
        ('result_item_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
        ('result_item_operator', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
        ('validation_status', self.gf('django.db.models.fields.CharField')(default='P', max_length=10, db_index=True)),
        ('validation_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
        ('validation_username', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
        ('validation_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('comment', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('result_item_source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lab_result.ResultSource'])),
        ('result_item_source_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('error_code', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
    ))
    db.send_create_signal('lab_result_item', ['ResultItem'])
def backwards(self, orm):
    """Reverse the migration: drop the tables created by ``forwards``.

    Tables are dropped in the same order they were created (audit
    table first, then the live table).
    """
    for table_name in ('bhp_lab_core_resultitem_audit',
                       'bhp_lab_core_resultitem'):
        db.delete_table(table_name)
models = {
'bhp_research_protocol.fundingsource': {
'Meta': {'ordering': "['name']", 'object_name': 'FundingSource'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'})
},
'bhp_research_protocol.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'})
},
'bhp_research_protocol.protocol': {
'Meta': {'ordering': "['protocol_identifier']", 'object_name': 'Protocol'},
'date_opened': ('django.db.models.fields.DateField', [], {}),
'date_registered': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'funding_source': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['bhp_research_protocol.FundingSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_title': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'protocol_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
'research_title': ('django.db.models.fields.TextField', [], {'max_length': '250'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'site_name_fragment': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'bhp_research_protocol.site': {
'Meta': {'ordering': "['site_identifier']", 'object_name': 'Site'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'site_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'})
},
'lab_account.account': {
'Meta': {'ordering': "['account_name']", 'object_name': 'Account', 'db_table': "'bhp_lab_registration_account'"},
'account_closedate': ('django.db.models.fields.DateField', [], {}),
'account_holder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_account.AccountHolder']"}),
'account_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'account_opendate': ('django.db.models.fields.DateField', [], {}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_account.accountholder': {
'Meta': {'ordering': "['last_name', 'first_name']", 'unique_together': "(['last_name', 'first_name'],)", 'object_name': 'AccountHolder', 'db_table': "'bhp_lab_registration_accountholder'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_aliquot.aliquot': {
'Meta': {'object_name': 'Aliquot', 'db_table': "'bhp_lab_core_aliquot'"},
'aliquot_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 30, 13, 11, 14, 896689)'}),
'aliquot_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'aliquot_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_aliquot.AliquotType']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'default': '10', 'to': "orm['lab_aliquot.AliquotCondition']"}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'current_measure': ('django.db.models.fields.DecimalField', [], {'default': "'5.00'", 'max_digits': '10', 'decimal_places': '2'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'measure_units': ('django.db.models.fields.CharField', [], {'default': "'mL'", 'max_length': '25'}),
'medium': ('django.db.models.fields.CharField', [], {'default': "'TUBE'", 'max_length': '25'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'original_measure': ('django.db.models.fields.DecimalField', [], {'default': "'5.00'", 'max_digits': '10', 'decimal_places': '2'}),
'parent_identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_aliquot.Aliquot']", 'to_field': "'aliquot_identifier'", 'null': 'True', 'blank': 'True'}),
'receive': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_receive.Receive']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'available'", 'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_aliquot.aliquotcondition': {
'Meta': {'ordering': "['short_name']", 'object_name': 'AliquotCondition', 'db_table': "'bhp_lab_core_aliquotcondition'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '35'})
},
'lab_aliquot.aliquottype': {
'Meta': {'ordering': "['name']", 'object_name': 'AliquotType', 'db_table': "'bhp_lab_core_aliquottype'"},
'alpha_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_reference': ('django.db.models.fields.IntegerField', [], {}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_order.order': {
'Meta': {'object_name': 'Order', 'db_table': "'bhp_lab_core_order'"},
'aliquot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_aliquot.Aliquot']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_reference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'order_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'panel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_panel.Panel']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_panel.panel': {
'Meta': {'object_name': 'Panel', 'db_table': "'bhp_lab_core_panel'"},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lab_account.Account']", 'symmetrical': 'False'}),
'aliquot_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lab_aliquot.AliquotType']", 'symmetrical': 'False'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_panel_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'panel_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_panel.PanelGroup']"}),
'test_code': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lab_test_code.TestCode']", 'symmetrical': 'False'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_panel.panelgroup': {
'Meta': {'object_name': 'PanelGroup', 'db_table': "'bhp_lab_core_panelgroup'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_patient.patient': {
'Meta': {'ordering': "['subject_identifier']", 'unique_together': "(['subject_identifier'],)", 'object_name': 'Patient', 'db_table': "'bhp_lab_registration_patient'"},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lab_account.Account']", 'null': 'True', 'blank': 'True'}),
'art_status': ('django.db.models.fields.CharField', [], {'default': "'UNKNOWN'", 'max_length': '10'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'hiv_status': ('django.db.models.fields.CharField', [], {'default': "'UNKNOWN'", 'max_length': '10'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'is_dob_estimated': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'simple_consent': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lab_patient.SimpleConsent']", 'null': 'True', 'blank': 'True'}),
'subject_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25', 'db_index': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_patient.simpleconsent': {
'Meta': {'ordering': "['consent_startdate']", 'object_name': 'SimpleConsent', 'db_table': "'bhp_lab_registration_simpleconsent'"},
'consent_enddate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'consent_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Site']"}),
'consent_startdate': ('django.db.models.fields.DateField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'may_store_samples': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'protocol': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Protocol']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_receive.receive': {
'Meta': {'object_name': 'Receive', 'db_table': "'bhp_lab_core_receive'"},
'clinician_initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'datetime_drawn': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'dmis_reference': ('django.db.models.fields.IntegerField', [], {}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_patient.Patient']"}),
'protocol': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Protocol']"}),
'receive_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 30, 13, 11, 14, 891588)', 'db_index': 'True'}),
'receive_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Site']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'lab_result.result': {
'Meta': {'ordering': "['result_identifier', 'order', 'result_datetime']", 'object_name': 'Result', 'db_table': "'bhp_lab_core_result'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_result_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_order.Order']"}),
'release_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'release_status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '25', 'db_index': 'True'}),
'release_username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'result_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_result.resultsource': {
'Meta': {'object_name': 'ResultSource', 'db_table': "'bhp_lab_core_resultsource'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '35'})
},
'lab_result_item.resultitem': {
'Meta': {'object_name': 'ResultItem', 'db_table': "'bhp_lab_core_resultitem'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_result.Result']"}),
'result_item_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'result_item_operator': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_quantifier': ('django.db.models.fields.CharField', [], {'default': "'='", 'max_length': '25'}),
'result_item_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_result.ResultSource']"}),
'result_item_source_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_value': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'test_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_test_code.TestCode']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'validation_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'validation_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '10', 'db_index': 'True'}),
'validation_username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'lab_result_item.resultitemaudit': {
'Meta': {'ordering': "['-_audit_timestamp']", 'object_name': 'ResultItemAudit', 'db_table': "'bhp_lab_core_resultitem_audit'"},
'_audit_change_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'_audit_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'_audit_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_resultitem'", 'to': "orm['lab_result.Result']"}),
'result_item_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'result_item_operator': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_quantifier': ('django.db.models.fields.CharField', [], {'default': "'='", 'max_length': '25'}),
'result_item_source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_resultitem'", 'to': "orm['lab_result.ResultSource']"}),
'result_item_source_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_value': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'test_code': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_resultitem'", 'to': "orm['lab_test_code.TestCode']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'validation_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'validation_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '10', 'db_index': 'True'}),
'validation_username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'lab_test_code.testcode': {
'Meta': {'ordering': "['name']", 'object_name': 'TestCode', 'db_table': "'bhp_lab_test_code_testcode'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_decimal_places': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'formula': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_absolute': ('django.db.models.fields.CharField', [], {'default': "'absolute'", 'max_length': "'15'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'test_code_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_test_code.TestCodeGroup']"}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_test_code.testcodegroup': {
'Meta': {'ordering': "['code']", 'object_name': 'TestCodeGroup', 'db_table': "'bhp_lab_test_code_testcodegroup'"},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
}
}
complete_apps = ['lab_result_item']
| [
"ckgathi@gmail.com"
] | ckgathi@gmail.com |
1a7c77ff09ef224b7bba1da2aa9c3122854922d9 | 1c1269eff0161d23845444ce0254d2ed195d6fb4 | /app.py | d01b14ee1672d92ed61693f1e504dc1dfedcda9b | [] | no_license | dich1123/tasks | abf5aa9cdf17be0ac245eb27e99e3cc894238ec1 | 3c9aeccb172c9272f573b5831c8c6b8b0c7f2ba6 | refs/heads/master | 2021-06-27T01:15:48.574125 | 2019-11-20T11:11:09 | 2019-11-20T11:11:09 | 222,911,800 | 0 | 0 | null | 2021-03-20T02:13:02 | 2019-11-20T10:29:34 | Python | UTF-8 | Python | false | false | 1,756 | py | from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow())
def __repr__(self):
return f'Task {self.id}'
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
task_content = request.form['content']
new_task = Todo(content=task_content)
try:
db.session.add(new_task)
db.session.commit()
return redirect('/')
except:
return 'There were troubles with adding your task'
else:
tasks = Todo.query.order_by(Todo.date_created).all()
return render_template('index.html', tasks=tasks)
@app.route('/delete/<int:id>')
def delete(id):
task_to_delete = Todo.query.get_or_404(id)
try:
db.session.delete(task_to_delete)
db.session.commit()
return redirect('/')
except:
return 'There were troubles with deleting your task'
@app.route('/update/<int:id>', methods=['GET', 'POST'])
def update(id):
task = Todo.query.get_or_404(id)
if request.method == 'POST':
task.content = request.form['content']
try:
db.session.commit()
return redirect('/')
except:
return 'There were troubles with updating your task'
else:
return render_template('update.html', task=task)
if __name__ == '__main__':
app.run(debug=True) | [
"cherenkov1123@gmail.com"
] | cherenkov1123@gmail.com |
6132ba796a9e7e8e7a7f159210569d88c3858189 | eda48a0bb8322bb5bb107cea8324aafaf105bf46 | /SVNDev/comments/setting.py | 57e88967a9aeb8bb9c2c471c717f9eda2ae6cfe3 | [] | no_license | sougannkyou/pyWorks | c171dfc2c213bd7d75dc168b987eb96cbcbfd7e1 | 2402b3da4521a17d7f828504c0745ff55cb031be | refs/heads/master | 2020-04-04T06:23:28.678959 | 2017-05-02T09:06:04 | 2017-05-02T09:06:19 | 48,786,947 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # coding=utf-8
# Local-development defaults, kept for reference (production values below):
# MONGODB_SERVER = '127.0.0.1'
# MONGODB_PORT = 27017
# REDIS_SERVER = 'redis://127.0.0.1/15'
MONGODB_SERVER = '192.168.16.223'
MONGODB_PORT = 37017
# Separate "direct" MongoDB endpoint -- presumably bypasses a proxy/router;
# TODO confirm against the consuming code.
MONGODB_SERVER_DIRECT = '192.168.149.39'
MONGODB_PORT_DIRECT = 37017
REDIS_SERVER = 'redis://192.168.187.55/15'
CONFIG_ID = '37556'  # site-wide crawler configuration ID
OK_PERCENT = 0.8  # success-ratio threshold; NOTE(review): usage not visible here
UNKOWN = '未知'  # "unknown" placeholder label (name is misspelled; kept for compatibility)
PAGE_SIZE = 20
"syq314159"
] | syq314159 |
869e9c59cdc059f3f26e0773ffa1001df5e61962 | ad7e79fd9538dede52604603dde6a014a95ce425 | /urls.py | 805049b0f48b475a5e5771c4cffbfcfdad869490 | [] | no_license | coco-ty/Course_Registration_Automation | b66ebf3ac19a0a7e6d1d1aafeb2afa3350b936ab | 80fab09af70d67e74896f76f233601bd7b2077e2 | refs/heads/master | 2021-01-10T13:15:01.923534 | 2015-10-10T06:30:57 | 2015-10-10T06:30:57 | 43,997,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | from django.conf.urls.defaults import *
# Legacy Django URLconf (``patterns()`` style): each entry maps a URL regex to
# a view name resolved against the ``main.views`` module.  Order matters --
# the first matching regex wins.
urlpatterns = patterns('main.views',
    (r'^$', 'home'),
    (r'^login/$', 'login'),
    (r'^logout/$', 'logout'),
    (r'^process/?$', 'process'),
    (r'^authenticate/?$', 'authenticate'),
    (r'^rate/?$', 'rate'),
    # Browsing endpoints: named capture groups feed the view arguments.
    (r'^sites/$','sites'),
    (r'^sites/(?P<url>.+)/$','sites'),
    (r'^courses/$','courses'),
    (r'^courses/(?P<code>.+)/$','courses'),
    (r'^sections/(?P<id>.+)/$','sections'),
    (r'^classrooms/(?P<id>.+)/$','classrooms'),
    (r'^depts/$','department'),
    (r'^depts/(?P<deptcode>.+)/$','department'),
    (r'^books/$','textbook'),
    (r'^books/(?P<isbn>.+)/$','textbook'),
)
| [
"eliz.clair914@gmail.com"
] | eliz.clair914@gmail.com |
5c33f8c1cfcbce6f594edaa5d44c32aebcdf7169 | 0fe2847bf222a3df0847a08de244000207514d05 | /src/seabreeze/pyseabreeze/features/introspection.py | 3422b402411c9583f102c62d5d08ac6c55d658e4 | [
"MIT"
] | permissive | asenchristov/python-seabreeze | 3656161eb2bf2be082839700f021a5957b81f00b | 573bae1d9de4e819611b2f5b9c66f98d7d0fe066 | refs/heads/master | 2022-12-01T09:39:46.079901 | 2020-08-18T09:07:30 | 2020-08-18T09:07:30 | 288,403,712 | 0 | 0 | MIT | 2020-08-18T08:49:58 | 2020-08-18T08:49:57 | null | UTF-8 | Python | false | false | 669 | py | from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeIntrospectionFeature(SeaBreezeFeature):
    """Pixel-layout introspection feature stub.

    Every query raises NotImplementedError here; concrete device drivers
    are expected to subclass and override all four methods.
    """
    identifier = "introspection"

    def number_of_pixels(self):
        """Total pixel count of the detector."""
        raise NotImplementedError("implement in derived class")

    def get_active_pixel_ranges(self):
        """Index ranges of the active (light-sensitive) pixels."""
        raise NotImplementedError("implement in derived class")

    def get_optical_dark_pixel_ranges(self):
        """Index ranges of the optically dark pixels."""
        raise NotImplementedError("implement in derived class")

    def get_electric_dark_pixel_ranges(self):
        """Index ranges of the electrically dark pixels."""
        raise NotImplementedError("implement in derived class")
| [
"andreas@poehlmann.io"
] | andreas@poehlmann.io |
64e6333fe55a93a5646cccf205ff50466adc18b0 | 8ed27d2b651e07d7d9cb489abe9686b7d158741e | /tasks.py | ceffdc72dbddf8e1f82cfa923555c1c7ddb68a3c | [] | no_license | Seven4ME/tgBot | 6874b887849b0ae6ecceb6fae62b561d199dd1a3 | 1f51e41fd0c5c826e315c4f1f5e01b428b1cd528 | refs/heads/master | 2022-12-16T09:03:35.252717 | 2020-08-26T21:11:36 | 2020-08-26T21:11:36 | 163,940,738 | 0 | 0 | null | 2022-12-08T01:53:49 | 2019-01-03T07:53:21 | Python | UTF-8 | Python | false | false | 155 | py |
from celery.schedules import crontab
# https://api.telegram.org/bot1215880984:AAHuLxPx8vEuOVPIznPhOWBCKkBFUlZbKgs/sendMessage?chat_id=194650240&text=Hi | [
"kobun2341@gmail.com"
] | kobun2341@gmail.com |
48a939c169881e193fbae7563fa3afb68881c6e0 | f4c49fb03e1358c675f6d29ad265ac34fe8edee4 | /LeducPoker/LeducPokerGame.py | d0b91936149476f7e540124f8e3e6ad39683a9af | [] | no_license | mzktbyjc2016/nfsp-pytorch | 4b36afd0db8c8634ef71be5cf4b6b97fe814da3f | 125268908919661a508abc7bddc1015a92116f96 | refs/heads/master | 2020-06-25T09:25:20.115407 | 2019-03-03T02:46:49 | 2019-03-03T02:46:49 | 199,271,302 | 2 | 0 | null | 2019-07-28T10:09:19 | 2019-07-28T10:09:19 | null | UTF-8 | Python | false | false | 9,630 | py | from typing import Tuple, Optional, List
import random
import numpy as np
import copy
class PlayerActions:
    """Enumerates the legal Leduc betting moves and their short-hand letters."""

    FOLD = 0
    CHECK_CALL = 1
    BET_RAISE = 2

    # Kept in fold/check/raise order so the list index equals the action value.
    ALL_ACTIONS = [FOLD, CHECK_CALL, BET_RAISE]

    # Single-character codes used when serialising betting histories.
    ACTION_TO_CHAR = {
        FOLD: "f",
        CHECK_CALL: "c",
        BET_RAISE: "r",
    }
class LeducNode(object):
    """Betting/board state shared by Leduc game states and infosets.

    A hand has two betting rounds: round 0 before the public board card
    (bet size 2) and round 1 after it (bet size 4), with at most two raises
    per round.  Actions are recorded as two tuples in ``bet_sequences``.
    Antes (1 chip each) count toward the pot but are deliberately not
    charged through ``add_action`` (kept consistent with the reference Lua
    implementation, per the comments below).
    """

    def __init__(
            self,
            bet_sequences: List[Tuple[PlayerActions]],
            board_card: Optional[int]):
        # Exactly one action tuple per betting round.
        assert len(bet_sequences) == 2
        self._bet_sequences = bet_sequences
        self.board_card = board_card
        # Once round-1 betting has begun the board card must already be dealt.
        if self.game_round == 1:
            assert self.board_card is not None or len(self.bet_sequences[1]) == 0

    @property
    def game_round(self) -> int:
        # Round 0 is finished once it holds >= 2 actions ending in check/call.
        return 1 if len(self.bet_sequences[0]) >= 2 and self.bet_sequences[0][-1] == PlayerActions.CHECK_CALL else 0

    def can_take_action(self, action) -> bool:
        """Whether ``action`` is legal in the current betting state."""
        if action == PlayerActions.CHECK_CALL:
            return True
        elif action == PlayerActions.FOLD:
            return self.can_fold
        elif action == PlayerActions.BET_RAISE:
            return self.can_raise
        raise RuntimeError("Bad action")

    @property
    def can_raise(self) -> bool:
        # At most two raises per round; the first two actions may always raise.
        relevant_bet_sequences = self._relevant_bet_sequence()
        if len(relevant_bet_sequences) <= 1:
            return True
        else:
            return relevant_bet_sequences.count(PlayerActions.BET_RAISE) < 2

    def fixup_action(self, action: PlayerActions) -> PlayerActions:
        """Coerce an illegal fold/raise into a check/call instead of failing."""
        if action == PlayerActions.FOLD and not self.can_fold:
            return PlayerActions.CHECK_CALL
        elif action == PlayerActions.BET_RAISE and not self.can_raise:
            return PlayerActions.CHECK_CALL
        else:
            return action

    @property
    def can_fold(self) -> bool:
        # Folding is only allowed when facing an outstanding bet/raise.
        relevant_bet_sequence = self._relevant_bet_sequence()
        if len(relevant_bet_sequence) == 0:
            return False
        else:
            return relevant_bet_sequence[-1] == PlayerActions.BET_RAISE

    def _relevant_bet_sequence(self) -> Tuple[PlayerActions]:
        # The action history of the round currently being bet.
        if self.game_round == 0:
            relevant_bet_sequence = self.bet_sequences[0]
        else:
            relevant_bet_sequence = self.bet_sequences[1]
        return relevant_bet_sequence

    @property
    def bet_sequences(self) -> List[Tuple[PlayerActions]]:
        return self._bet_sequences

    @property
    def is_terminal(self) -> bool:
        # A fold in round 0 ends the hand immediately.
        if len(self._bet_sequences[0]) > 0 and self._bet_sequences[0][-1] == PlayerActions.FOLD:
            return True
        # Round 1 needs at least two actions before it can close.
        if len(self._bet_sequences[1]) <= 1:
            return False
        # Round 1 is over on anything but an outstanding raise
        # (i.e. a fold or the call/check that closes the betting).
        if self._bet_sequences[1][-1] != PlayerActions.BET_RAISE:
            return True
        return False

    @property
    def player_to_act(self) -> int:
        """0/1 for a player; -1 for the chance node that deals the board card."""
        if self.game_round == 1 and self.board_card is None:
            return -1  # Chance
        # Players alternate; player 0 always opens a round.
        relevant_bet_sequence = self._relevant_bet_sequence()
        return len(relevant_bet_sequence) % 2

    # Returns cost of taking action
    def add_action(self, action: PlayerActions) -> Tuple[int, PlayerActions]:
        """Append (a possibly fixed-up) ``action``; return (chips paid, action taken)."""
        action = self.fixup_action(action)
        game_round = self.game_round
        retval = 0
        if game_round == 0:
            # Lua code doesn't charge for antes
            # if len(self.bet_sequences[0]) < 2:
            #     retval = 1 # Antes
            # Facing a raise costs 2 to call in round 0; raising adds another 2.
            if len(self.bet_sequences[0]) > 0 and self.bet_sequences[0][-1] == PlayerActions.BET_RAISE:
                retval += 2 # 2 to call
            if action == PlayerActions.BET_RAISE:
                retval += 2
            self.bet_sequences[0] = self.bet_sequences[0] + (action,)
        else:
            # Round-1 bet size is 4.
            if len(self.bet_sequences[1]) > 0 and self.bet_sequences[1][-1] == PlayerActions.BET_RAISE:
                retval = 4 # 4 to call
            if action == PlayerActions.BET_RAISE:
                retval += 4
            self.bet_sequences[1] = self.bet_sequences[1] + (action,)
        # Sanity: the board card exists iff a player is acting in round 1.
        if self.game_round == 1 and self.player_to_act != -1:
            assert self.board_card is not None
        else:
            assert self.board_card is None
        # one fixup: if they folded
        # if action == PlayerActions.FOLD:
        #     if game_round == 0 and len(self.bet_sequences[0]) <= 2:
        #         retval = 1 # Ante
        #     else:
        #         retval = 0
        # Lua code doesn't charge for antes
        if action == PlayerActions.FOLD:
            retval = 0
        # return the action cost and the fixed-up action
        return retval, action

    def _get_half_pot(self) -> float:
        """One player's total contribution to the pot (contributions are symmetric)."""
        half_pot = 1 # Antes
        to_call = 0
        # Round 0 replay: a raise leaves 2 outstanding; call/raise first matches it.
        for action in self._bet_sequences[0]:
            if action == PlayerActions.FOLD:
                return half_pot
            elif action == PlayerActions.CHECK_CALL:
                half_pot += to_call
                to_call = 0
            elif action == PlayerActions.BET_RAISE:
                half_pot += to_call
                to_call = 2
        to_call = 0
        # Round 1 replay with bet size 4.
        for action in self._bet_sequences[1]:
            if action == PlayerActions.FOLD:
                return half_pot
            elif action == PlayerActions.CHECK_CALL:
                half_pot += to_call
                to_call = 0
            elif action == PlayerActions.BET_RAISE:
                half_pot += to_call
                to_call = 4
        return float(half_pot)

    def _get_winner(self, player_cards: List[int]) -> Optional[int]:
        """Index of the winning player, or None for a split pot."""
        # A fold in either round awards the pot to the other player;
        # action index parity identifies who folded (player 0 opens each round).
        try:
            fold_idx = self._bet_sequences[0].index(PlayerActions.FOLD)
            unfolded_player = (fold_idx + 1) % 2
            return unfolded_player
        except ValueError:
            pass
        try:
            fold_idx = self._bet_sequences[1].index(PlayerActions.FOLD)
            unfolded_player = (fold_idx + 1) % 2
            return unfolded_player
        except ValueError:
            pass
        # Showdown
        assert self.board_card is not None
        # Suits are irrelevant: compare ranks (card % 3); pairing the board wins.
        player_normalized_cards = [player_cards[0] % 3, player_cards[1] % 3]
        board_normalized_card = self.board_card % 3
        if player_normalized_cards[0] == player_normalized_cards[1]:
            return None
        elif player_normalized_cards[0] == board_normalized_card:
            return 0
        elif player_normalized_cards[1] == board_normalized_card:
            return 1
        else:
            return 0 if player_normalized_cards[0] > player_normalized_cards[1] else 1

    def get_payoffs(self, player_cards: List[int]) -> np.ndarray:
        """Gross payout per player: the winner takes the whole pot, ties split it."""
        if not self.is_terminal:
            raise RuntimeError("Can't get payoffs for non-terminal")
        half_pot = self._get_half_pot()
        winner = self._get_winner(player_cards)
        if winner is None:
            return np.array([half_pot, half_pot])
        if winner == 0:
            return np.array([half_pot * 2.0, 0.0])
        elif winner == 1:
            return np.array([0.0, half_pot * 2.0])
class LeducInfoset(LeducNode):
    """One player's view of the game: own card, board card, betting history."""

    def __init__(
            self,
            card: int,
            bet_sequences: List[Tuple],
            board_card: Optional[int]):
        super().__init__(bet_sequences=bet_sequences, board_card=board_card)
        # This player's private (hole) card, 0..5; rank is card % 3.
        self.card = card

    def __str__(self):
        """Canonical key, e.g. ``QK:/crc/r:`` -- hole[+board] card, then the
        round-0 (and, if reached, round-1) action letters."""
        # Suits don't matter in Leduc, so cards collapse to rank via % 3.
        card_to_char = {
            0: "J",
            1: "Q",
            2: "K"
        }
        retval = card_to_char[self.card % 3]
        if self.board_card is not None:
            retval += card_to_char[self.board_card % 3]
        retval += ":/"
        retval += "".join(PlayerActions.ACTION_TO_CHAR[a] for a in self.bet_sequences[0])
        if self.game_round == 1:
            retval += "/"
            retval += "".join(PlayerActions.ACTION_TO_CHAR[a] for a in self.bet_sequences[1])
        retval += ":"
        return retval

    def __eq__(self, other):
        # NOTE(review): assumes ``other`` is a LeducInfoset or None; also,
        # defining __eq__ without __hash__ makes instances unhashable in
        # Python 3 -- confirm no caller puts infosets in sets/dict keys.
        if other is None:
            return False
        return (self.card == other.card and self._bet_sequences == other.bet_sequences
                and self.board_card == other.board_card)
class LeducGameState(LeducNode):
    """Full (omniscient) game state: both hole cards plus per-player infosets."""

    def __init__(
            self,
            player_cards: List[int],
            bet_sequences: List[Tuple],
            board_card: Optional[int]):
        self.player_cards = player_cards
        super().__init__(bet_sequences=bet_sequences, board_card=board_card)
        self.infosets = None
        self._update_infosets()

    def _update_infosets(self):
        # Rebuild both players' views; deepcopy so the infosets do not alias
        # this state's mutable betting history.
        self.infosets = tuple(
            LeducInfoset(card=card, bet_sequences=copy.deepcopy(self._bet_sequences), board_card=self.board_card) for card in
            self.player_cards)

    def deal_board_card(self):
        """Draw the public card uniformly from the cards not held by a player."""
        # Only legal at the chance node between betting rounds.
        assert self.board_card is None and self.player_to_act == -1
        deck = list(LeducPokerGame.DECK)
        deck.remove(self.player_cards[0])
        deck.remove(self.player_cards[1])
        self.board_card = random.choice(deck)
        self._update_infosets()

    def get_payoffs(self):
        # Delegate to the base class using the known hole cards.
        return LeducNode.get_payoffs(self, self.player_cards)

    def add_action(self, action: PlayerActions):
        """Apply ``action``; automatically deal the board card at the chance node."""
        retval = super().add_action(action)
        if self.player_to_act == -1:
            self.deal_board_card()
        else:
            self._update_infosets()
        return retval
class LeducPokerGame(object):
    """Entry point for one Leduc hand: owns the deck, hole cards and root state."""

    NUM_CARDS = 6
    # Six cards = three ranks (J/Q/K) in two suits; rank is card % 3.
    DECK = tuple(range(6))

    def __init__(self, player_cards: Optional[List[int]] = None):
        """Start a new hand.

        :param player_cards: explicit hole cards for both players; when
            omitted, two distinct cards are dealt uniformly at random.

        Bug fix: the original only assigned ``self.player_cards`` inside the
        random-deal branch, so passing explicit cards raised AttributeError
        when constructing the game state below.
        """
        if player_cards is None:
            player_cards = random.sample(self.DECK, 2)
        self.player_cards = player_cards
        self.game_state = LeducGameState(self.player_cards, [(), ()], board_card=None)
| [
"thomas.j.johnson@gmail.com"
] | thomas.j.johnson@gmail.com |
ca725619d876cdd4db8f6f079b26a3b27c7f51e9 | e16a00326983750985f0d308b2b8cb156d2b7048 | /DesignPatterns/Strategy.py | 241764acd6a53fea462f8e5394fa31ed73a4ded5 | [] | no_license | farabbit/learning_area | ff6b6cee6de5551a58068dd73b34f427903d3973 | e553186ececb7ef2149356465044138cd84588c5 | refs/heads/master | 2020-05-22T22:45:58.115383 | 2019-09-27T08:23:13 | 2019-09-27T08:23:13 | 186,551,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | import abc
""" Traditional way """
# every strategy is as class that inherits base stategy
print("Traditional: ")
class Strategy(abc.ABC):
    """Abstract strategy interface.

    Fix: ``@abc.abstractmethod`` is only enforced when the class's metaclass
    is ``ABCMeta``; inheriting ``abc.ABC`` makes instantiating an incomplete
    strategy fail loudly with TypeError instead of silently succeeding.
    """

    @abc.abstractmethod
    def operation(self, context):
        """Combine the context's operands and return the result."""
# concrete strategies
class StrategyAdd(Strategy):
    # Adds the two context operands.
    def operation(self, context): return context.intA + context.intB
class StrategyMultiple(Strategy):
    # Multiplies the two context operands.
    def operation(self, context): return context.intA * context.intB
class Context:
    """Holds two operands and delegates the computation to a pluggable strategy."""

    def __init__(self, intA, intB):
        self.intA = intA
        self.intB = intB

    def setStrategy(self, strategy):
        """Install the strategy object used by executeStrategy()."""
        self.strategy = strategy

    def executeStrategy(self):
        """Run the currently installed strategy against this context."""
        return self.strategy.operation(self)
# Demo: one context, two interchangeable strategies swapped at runtime.
context = Context(2, 3)
context.setStrategy(StrategyAdd())  # using add
print("ADD:", context.executeStrategy())  # ADD: 5
context.setStrategy(StrategyMultiple())  # using multiple
print("MUL:", context.executeStrategy())  # MUL: 6
""" Restruct using functional programming """
# concrete stragegies had no internal status -> performanced like functions
print("Restructed: ")
def StrategyAdd_res(context):
    """Function-form strategy: return the sum of the context operands."""
    return context.intA + context.intB
def StrategyMultiple_res(context):
    """Function-form strategy: return the product of the context operands."""
    return context.intA * context.intB
class Context_res:
    """Variant of Context whose strategy is a plain callable, not an object."""

    def __init__(self, intA, intB):
        self.intA = intA
        self.intB = intB

    def setStrategy(self, strategy):  # syntax keeps the same but strategy here is function now, not a class
        """Install the strategy function used by executeStrategy()."""
        self.strategy = strategy

    def executeStrategy(self):
        """Call the installed strategy function with this context."""
        return self.strategy(self)
# Same demo as above, but the strategies are bare functions.
context_res = Context_res(2, 3)
context_res.setStrategy(StrategyAdd_res)  # using add
print("ADD:", context_res.executeStrategy())  # ADD: 5
context_res.setStrategy(StrategyMultiple_res)  # using multiple
print("MUL:", context_res.executeStrategy())  # MUL: 6
""" best strategy """
# additional functionalities
print("Choose best strategy among all: ")
strategies = (StrategyAdd_res,StrategyMultiple_res)
def bestStrategy(context):
    """Evaluate every registered strategy on *context* and return the best result."""
    scores = [candidate(context) for candidate in strategies]
    return max(scores)
print("MAX(ADD(2,3), MUL(2,3)):", bestStrategy(Context_res(2,3))) # 6
""" Find all strategies """
# using globals(): returns every global variable that in current module (module that defines that function/method)
strategies = (globals()[name] for name in globals() if name.endswith('_res') and name.startswith('Strategy'))
print(list(strategies)) # [StrategyAdd_res,StrategyMultiple_res]
"""out:
Traditional
ADD: 5
MUL: 6
Restructed:
ADD: 5
MUL: 6
Choose strategy among all
MAX(ADD(2,3), MUL(2,3)): 6
[<function StrategyAdd_res at 0x00000299A4A33E18>, <function StrategyMultiple_res at 0x00000299A4D5B378>]
"""
| [
"noreply@github.com"
] | farabbit.noreply@github.com |
a3166ac3d5064df4705a84f1e7ae3c94963ca5bd | b55801df5a6f4fe8abfbd0bb61b92906cbf2a510 | /pyLTM/pyltm/learner/mixed_clique_sufficient_statistics.py | 8a8e2f6c68b7fd2a1e8bb6f0072d30275d2902ec | [] | no_license | rezaarmand/ltvae-release | 51735fb12a33c685cb0198909355025cc84736f0 | 3a26e276e2a57363c0fb84cc1f8e492ff53bb5db | refs/heads/master | 2022-02-18T10:16:04.736113 | 2019-09-11T02:23:26 | 2019-09-11T02:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,416 | py | '''
Created on 12 Sep 2018
@author: Bryan
'''
from .sufficient_statistics import SufficientStatistics
from pyltm.model.potential.cgpotential import CGPotential
from pyltm.model import JointContinuousVariable, CGParameter
import collections
import numpy as np
from pyltm.model.parameter import cgparameter
from pyltm.model.variable.discrete_variable import DiscreteVariable
from pyltm.model.parameter.cptparameter import CPTParameter
from pyltm.util.utils import logsumexp
class MixedCliqueSufficientStatistics(SufficientStatistics):
    """Sufficient statistics for a mixed (discrete x Gaussian) clique.

    Keeps, per discrete state ``k``:

    * ``p[k]``     -- expected counts
    * ``mu[k]``    -- sufficient sum (count-weighted mean), shape (D,)
    * ``covar[k]`` -- sufficient sum of squares (count-weighted second
      moment), shape (D, D)

    so EM updates can be accumulated over batches (``add``/``update``) and
    converted back into clique parameters via ``computePotential``.
    """

    def __init__(self, node, batch_size):
        """
        node: Clique exposing jointVariable, discreteVariable and potential.
        batch_size: number of samples the initial statistics represent.
        """
        jointVariables = node.jointVariable
        discreteVariable = node.discreteVariable
        if isinstance(jointVariables, JointContinuousVariable):
            jointVariables = list(jointVariables.variables)
        # Fix: ``collections.Iterable`` was deprecated in Python 3.3 and
        # removed in Python 3.10; the ABC lives in ``collections.abc``.
        elif isinstance(jointVariables, collections.abc.Iterable):
            jointVariables = list(jointVariables)
        assert isinstance(jointVariables, list)
        self._continuousVariables = jointVariables
        self._discreteVariable = discreteVariable
        self.resetParameters(node.potential, batch_size)

    def resetParameters(self, cliquepotential, batch_size):
        """Initialise the statistics from a clique potential, scaled to batch_size."""
        # With no discrete parent the clique behaves like a single component.
        cardinality = 1 if self._discreteVariable is None else self._discreteVariable.getCardinality()
        self.size = cardinality
        logp = cliquepotential.logp.copy()  # (K, )
        logconstant = logsumexp(logp)
        self.p = np.exp(logp - logconstant)  # normalize
        self.mu = cliquepotential.mu.copy()  # (K, D)
        self.covar = cliquepotential.covar.copy()  # (K, D, D)
        # self.normalize()
        self.p = self.p * batch_size  # sufficient counts
        for i in range(cardinality):
            # sufficient sum of squares: (cov + mu mu^T) * count
            self.covar[i] = (self.covar[i] + np.outer(self.mu[i], self.mu[i])) * self.p[i]
            # sufficient sum: mean * count
            self.mu[i] = self.mu[i] * self.p[i]

    def normalize(self, constant=None):
        """Divide the counts by ``constant`` (total count when omitted); return it."""
        if constant is None:
            constant = np.sum(self.p)
        self.p /= constant
        return constant

    def reset(self):
        """Zero all accumulated statistics in place."""
        self.p[:] = 0
        self.mu[:] = 0
        self.covar[:] = 0

    def add(self, potential):
        '''potential: batched cliquepotential'''
        batch_size = potential.logp.shape[0]
        # Normalise per sample in case the caller hasn't.
        logp = potential.logp - logsumexp(potential.logp, axis=1, keepdims=True)
        for i in range(potential.size):
            weight = np.expand_dims(np.exp(logp[:, i]), axis=1)  # (N, 1)
            self.p[i] += np.sum(weight)
            self.mu[i] += np.sum(potential.mu[:, i, :] * weight, axis=0)  # (N, D) x (N, 1)
            # Weighted outer products accumulated over the batch.
            self.covar[i] += np.sum(np.concatenate([np.expand_dims(np.outer(potential.mu[j, i, :], potential.mu[j, i, :]) * weight[j], axis=0)
                                                    for j in range(batch_size)], axis=0), axis=0)

    def update(self, batchStatistics, learning_rate):
        """Stochastic EM step: move the statistics toward batchStatistics."""
        assert(self.size==batchStatistics.size)
        self.p[:] = self.p + learning_rate * (batchStatistics.p - self.p)
        self.mu[:] = self.mu + learning_rate * (batchStatistics.mu - self.mu)
        self.covar[:] = self.covar + learning_rate * (batchStatistics.covar - self.covar)

    def computePotential(self, variable, parent):
        """Convert the accumulated statistics back into model parameters."""
        if isinstance(variable, JointContinuousVariable):
            parameters = [None]*self.size
            for i in range(self.size):
                parameters[i] = CGParameter(1, len(self.mu[i]), self.computeMean(self.p[i], self.mu[i]),
                                            self.computeCovariance(self.p[i], self.mu[i], self.covar[i]))
            return parameters
        elif isinstance(variable, DiscreteVariable):
            # only possibility is that variable is root
            parameter = CPTParameter(self.size)
            parameter.prob[:] = self.p
            parameter.normalize()
            return parameter

    def computeMean(self, p, mu):
        """Mean = sufficient sum / count; zeros as a safe fallback at zero count."""
        if p == 0:
            return np.zeros_like(mu)
        return mu / p

    def computeCovariance(self, p, mu, covar):
        """Covariance = E[x x^T] - mean mean^T; ones as fallback at zero count."""
        if p == 0:
            return np.ones_like(covar)
        mu = self.computeMean(p, mu)
        return covar / p - np.outer(mu, mu)
| [
"eelxpeng@gmail.com"
] | eelxpeng@gmail.com |
4979767ebde362092d71c72285624162e5330f9b | ba269272596a2427d46c21d3cebe0270775557c7 | /setup.py | 0b9e961c8d7687192ef0bc78f6f9eccb8dd148e7 | [
"MIT"
] | permissive | renatoliveira/fixer-cli | e5e65177e7af38c5f478c181f2f151292ca716ed | d1a7a9d96fbc28392f600342e201f3098a6aa94a | refs/heads/master | 2020-12-02T07:40:33.844985 | 2017-07-22T18:31:28 | 2017-07-22T18:31:28 | 96,712,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | """Setup module"""
from setuptools import setup
def readfile(filename):
    """Return the full text content of *filename*."""
    # Fix: open read-only. The original 'r+' (read/write) mode needlessly
    # required write permission and would fail on read-only checkouts.
    with open(filename, 'r') as readmefile:
        return readmefile.read()
# Package metadata: long_description and license text are read from disk at
# build time, and the console entry point exposes fixer.handle_options.
setup(
    name="fixer",
    version="3.1.0",
    description="",
    long_description=readfile('README.md'),
    author="Renato O.",
    url="",
    py_modules=['fixer', 'history'],
    license=readfile('LICENSE'),
    entry_points={
        'console_scripts': [
            'fixer = fixer:handle_options'
        ]
    },
)
| [
"ren811@gmail.com"
] | ren811@gmail.com |
cf72bc593892466402f683cff40798c3ae48bb49 | 5c13b223102d2f7559f2855eb6b8716de0708638 | /pythonCollections/tuple/tuple.py | 3d197cc7541880a3cd8938b71f2598bf122184ab | [] | no_license | Akhilvijayanponmudy/pythondjangoluminar | c0ab8e9ea1f2ef1ba034732de15b479d9f7a24da | 1fc73790c530518f4a747ed6a4fea3bfbe27687e | refs/heads/master | 2023-04-01T18:37:35.112270 | 2021-04-01T03:24:08 | 2021-04-01T03:24:08 | 328,891,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | #define()
#stores different types of data
#insertion order is preserved
#duplicates are allowed
#tuple objects are immutable (they do not support updates)
"akhilvijayanponmudy@gmail.com"
] | akhilvijayanponmudy@gmail.com |
80cff87d5c4547d5aa8707328225342560d7d397 | 7620eb38a3744d33842ef867a4dda40748f370ba | /src/apiwrappers/exceptions.py | 0537acd446dca141a49012ba99dcb5bb4827d2f4 | [
"MIT"
] | permissive | unmade/apiwrappers | f0f57d2f2d8a02ee9de2d52575b3e9b48809abfa | eb004fd86cbbc1d468030544472851a262b196dc | refs/heads/master | 2022-09-10T10:27:23.233400 | 2022-01-15T15:27:39 | 2022-01-15T15:27:39 | 231,653,904 | 18 | 1 | MIT | 2022-08-29T07:24:51 | 2020-01-03T19:43:47 | Python | UTF-8 | Python | false | false | 220 | py | class DriverError(Exception):
"""Base class for driver-specific errors."""
class ConnectionFailed(DriverError):
"""A Connection error occurred."""
class Timeout(DriverError):
"""The request timed out."""
| [
"lesha.maslakov@gmail.com"
] | lesha.maslakov@gmail.com |
b620e42042438f0ddf82969a5e2f05dcf02a8e23 | 3922c05b9434bb5a96f7833a987c50c8e3e29107 | /news/admin.py | 6881fe98a61e98e3099d1a8b53bfb646d84da9fa | [
"MIT"
] | permissive | jasonmuchiri/moringa-tribune | e7769dca9aa2e7a9cdc62be56c3071104ba30f33 | ceabe0cf9cc136b6eb5072253aef09f43bea7040 | refs/heads/master | 2020-05-23T23:19:55.111831 | 2019-05-18T21:32:44 | 2019-05-18T21:32:44 | 186,990,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.contrib import admin
from .models import Article,tags
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
    # Use the two-pane selector widget for the many-to-many ``tags`` field.
    filter_horizontal = ('tags',)
# Expose the models in the Django admin; Article uses the customised options.
admin.site.register(Article,ArticleAdmin)
admin.site.register(tags)
| [
"jasonmkinyua@gmail.com"
] | jasonmkinyua@gmail.com |
1daeac4ecb17e7a2f26115ace37be515b4198865 | 897cb969990a5ae319547fd572a262d58a1e33a8 | /scripts/get_lumi.py | cf7754d90251baeaf51cecd347b356800a3c9aac | [] | no_license | KIT-CMS/Excalibur | cc5a028bf6ad29a636536c3dfc0ebdc0eacfbbb7 | 8c27e2fdd7b7d5a0439f6e63be2299b16f5291c0 | refs/heads/master | 2023-07-24T05:28:08.156998 | 2023-07-17T15:29:15 | 2023-07-17T15:29:15 | 29,307,758 | 1 | 5 | null | 2023-05-24T11:41:22 | 2015-01-15T16:59:28 | Python | UTF-8 | Python | false | false | 4,646 | py | #!/usr/bin/python
# standard library imports
import os
import sys
import json
import subprocess
import argparse
import errno
# third party imports
# application/library imports
# Command-line interface: one positional run definition plus brilws/brilcalc
# tuning options, grouped for nicer --help output.
CLI = argparse.ArgumentParser(
    description="Get/Calculate integrated luminosity for given runs",
    epilog="This tool uses the brilcalc suite to extract luminosity information,"
           "\nautomating the queries and environment setup."
           "\n"
           "\nThe brilcalc documentation can be found at"
           "\nhttp://cms-service-lumi.web.cern.ch/cms-service-lumi/brilwsdoc.html",
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
CLI_runs = CLI.add_argument_group("run definition")
CLI_runs.add_argument(
    "runs",
    help="runs in CMS JSON file format; either a file path (quoted runs) or raw string (unquoted runs)"
)
CLI_bril = CLI.add_argument_group("brilsw/brilcalc settings")
CLI_bril.add_argument(
    "--brilconda-path",
    default="/afs/cern.ch/cms/lumi/brilconda-1.0.3",
    help="path to the brilconda suite (contains bin and lib directories)"
)
CLI_bril.add_argument(
    "--brilws-path",
    default="~/.local",
    help="pip virtual env of brilws"
)
CLI_bril.add_argument(
    "--lumi-unit",
    default="/pb",
    help="unit of lumi output, e.g. /fb, /pb or 1e39/cm2"
)
CLI_bril.add_argument(
    "--normtag",
    default=None,
    help="lumi calibration/correction function or json"
)
def get_bril_env(brilconda_path, brilws_path):
    """
    Create the env for running bril commands

    :param brilconda_path: path of the brilconda suite (contains bin and lib directories)
    :type brilconda_path: str
    :param brilws_path: pip virtual env of brilws
    :type brilws_path: str
    :returns: env for processes using brilws to run in
    :rtype: dict
    """
    print >> sys.stderr, "Preparing bril environment"
    # construct dedicated env for bril commands: brilws takes precedence over
    # brilconda, which takes precedence over the inherited PATH
    bril_env = os.environ.copy()
    bril_env["PATH"] = ":".join((
        os.path.join(os.path.expanduser(brilws_path), "bin"),
        os.path.join(os.path.expanduser(brilconda_path), "bin"),
        bril_env["PATH"],
    ))
    # make sure brilws is available (installs into the user prefix if missing)
    get_proc_output(
        ['pip', 'install', '--install-option=--prefix=$HOME/.local', 'brilws'],
        env=bril_env,
    )
    return bril_env
def get_lumi(run_str, bril_env, unit="/pb", normtag=None):
    """
    Get the lumi for a specific run string from brilcalc

    :param run_str: runs in CMS JSON format (file path or raw string)
    :param bril_env: environment dict prepared by ``get_bril_env``
    :param unit: unit of the reported luminosity, e.g. "/pb"
    :param normtag: optional lumi calibration/correction function or json
    :returns: the brilcalc CSV summary row as a header->value dict
    """
    print >> sys.stderr, "Querying brilcalc"
    # use CSV output for easier parsing
    command = [
        "brilcalc",
        "lumi", "-i", run_str,
        "--output-style", "csv",
        "-u", unit,
    ]
    # Bug fix: the original wrote ``[...] + [...] if normtag is not None
    # else []``, which parses as ``([...] + [...]) if ... else []`` -- so
    # with the default normtag=None the ENTIRE command collapsed to [].
    if normtag is not None:
        command += ["--normtag", normtag]
    bril_out, bril_err = get_proc_output(command, env=bril_env)
    bril_iter, header, values = iter(bril_out.splitlines()), None, None
    while True:
        line = bril_iter.next()
        # we only care about the summary for the runs
        if not line.startswith('#Summary:'):
            continue
        header = bril_iter.next()
        values = bril_iter.next()
        break
    # Strip the unit suffix and the leading comment marker from the header row.
    header = header.replace("(%s)" % unit, "")
    header = header[1:].split(",")
    values = [
        float(value) if "." in value else int(value)
        for value in values[1:].split(",")
    ]
    return dict(zip(header, values))
def main():
    """Parse the CLI options, query brilcalc and print the summary as JSON."""
    opts = CLI.parse_args()
    # all bril commands execute with brilws suite
    bril_env = get_bril_env(
        brilconda_path=opts.brilconda_path,
        brilws_path=opts.brilws_path
    )
    lumi_dict = get_lumi(
        run_str=opts.runs,
        bril_env=bril_env,
        unit=opts.lumi_unit,
        normtag=opts.normtag,
    )
    print json.dumps(lumi_dict)
# -- Helpers -------------------------------------------------------------------
class CalledProcessError(Exception):
    """Raised when a subprocess exits with a non-zero return code."""
    def __init__(self, returncode, cmd="<unknown>", output=None):
        self.returncode, self.cmd, self.output = returncode, cmd, output
    def __str__(self):
        return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def get_proc_output(*popenargs, **kwargs):
    """
    Tweaked version of subprocess.check_output (which is not in py2.6 anyways)

    :param popenargs: arguments to Popen
    :param kwargs: keyword arguments to Popen
    :returns: stdout and stderr of the process
    :raises ValueError: if a ``stdout`` keyword is supplied
    :raises EnvironmentError: if the executable cannot be found (ENOENT)
    :raises CalledProcessError: if the process exits with a non-zero code
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    try:
        # capture both streams; communicate() below reads them fully, avoiding pipe deadlocks
        process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE, *popenargs, **kwargs)
    except OSError as oserr:
        if oserr.errno == errno.ENOENT:
            # translate the cryptic ENOENT into a readable message naming the command
            raise EnvironmentError(
                "Executable for '%s' not found" % kwargs.get("args", popenargs[0])
            )
        else:
            raise
    stdout, stderr = process.communicate()
    if process.poll(): # check retcode != 0
        print stdout, stderr
        raise CalledProcessError(
            returncode=process.poll(),
            cmd=kwargs.get("args", popenargs[0]),
            output=stdout,
        )
    return stdout, stderr
# standard script entry point
if __name__ == "__main__":
    main()
| [
"max.fischer@kit.edu"
] | max.fischer@kit.edu |
6bd5fb8e2cc28159a3d0726aa5efc0e21295b713 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contour/_textsrc.py | 43bd0d62ed17e92c16a553b953658aaf6d67f0be | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 393 | py | import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``textsrc`` attribute of ``contour`` traces."""

    def __init__(self, plotly_name="textsrc", parent_name="contour", **kwargs):
        # default the edit type unless the caller overrides it
        edit_type = kwargs.pop("edit_type", "none")
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
6ad6148e7378b35ec5dbeeb2a493dbad852d7119 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/61f29ff81b664630acba807a6f4f14e9.py | b34cc9defb4248705933ec5d41f5347dc783be44 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 286 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
    """Return Bob's answer to *what* (exercism "Bob" exercise).

    - shouting (all caps with at least one letter): "Whoa, chill out!"
    - a question (ends with '?'): "Sure."
    - silence (empty or whitespace-only): "Fine. Be that way!"
    - anything else: "Whatever."
    """
    if what.upper() == what and any(c.isalpha() for c in what):
        return "Whoa, chill out!"
    if what != '' and what[-1] == '?':
        return "Sure."
    # BUGFIX: silence is an empty/whitespace-only string; the original
    # `len(what) < 7` wrongly treated any short input (e.g. "Hi.") as silence
    if not what.strip():
        return "Fine. Be that way!"
    return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
338c46e909035b0796f32985fc0a5b1bd0f7175c | 2438cb198fc03a1de169e87a711692045df03ccd | /coffee.py | 223762c681b226d47103c27f06908dabfaa7fa38 | [] | no_license | Om1627/corrcoef | 7138db6aa6f425af21f53ad4b01b8b353a33f9d9 | ab2d088344ec078be851fe51df259174d64876ca | refs/heads/main | 2023-02-21T22:18:55.323855 | 2021-01-28T09:05:09 | 2021-01-28T09:05:09 | 333,700,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | import pandas as pd
import plotly.express as px
import numpy as np
import csv
def getDataSource(data_path):
    """Read the coffee/sleep CSV and return {"x": coffee-ml values, "y": sleep-hours values}.

    :param data_path: path to a CSV with "Coffee in ml" and "sleep in hours" columns
    :returns: dict with parallel float lists under "x" (coffee) and "y" (sleep)
    """
    sleep = []
    coffee = []
    with open(data_path) as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            # BUGFIX: the original appended coffee readings to the Sleep list
            # and sleep readings to the Coffee list (swapped assignments);
            # harmless for the symmetric correlation, but wrong for any other use
            coffee.append(float(row["Coffee in ml"]))
            sleep.append(float(row["sleep in hours"]))
    return {"x": coffee, "y": sleep}
def findCorrelation(datasource):
    """Print the Pearson correlation between the "x" and "y" series of *datasource*."""
    matrix = np.corrcoef(datasource["x"], datasource["y"])
    # off-diagonal entry is the correlation between the two series
    print("correlation=", matrix[0, 1])
def plot():
    """Show a scatter plot of coffee intake vs. sleep from coffee.csv."""
    frame = pd.read_csv("coffee.csv")
    px.scatter(frame, x="Coffee in ml", y="sleep in hours").show()
def setup():
    """Load ./coffee.csv and print the correlation of its two columns."""
    findCorrelation(getDataSource("./coffee.csv"))
# script entry: report the correlation, then display the scatter plot
setup()
plot()
| [
"noreply@github.com"
] | Om1627.noreply@github.com |
557c96972141d1a75b7f45e4289a642a6390440e | 08dfaf714830a6310742dcd50848790d595e838e | /位运算/code_01_EvenTimesOddTimes.py | c16881e90ab21aa241caa096e317d2dd06fa949c | [] | no_license | Tokyo113/leetcode_python | d9e0fb96a76efaadcec7aad08f5ef542d898d434 | e86b3fb26aef1cf63727e3e5c9fd4ddc9bedb7f1 | refs/heads/master | 2020-08-10T15:36:10.364714 | 2020-04-13T08:28:53 | 2020-04-13T08:28:53 | 214,369,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | #coding:utf-8
'''
@Time: 2019/12/4 21:54
@author: Tokyo
@file: code_01_EvenTimesOddTimes.py
@desc:
1.一个数组中有一种数出现了奇数次,其他数都出现了偶数次,怎么找到这一个数
2.一个数组中有两种数出现了奇数次,其他数都出现了偶数次,怎么找到这两个数
'''
def findOddTimes1(arr):
    """Return the one value in *arr* that occurs an odd number of times.

    XOR of all elements: values occurring an even number of times cancel
    pairwise, leaving only the odd-count value.
    """
    acc = 0
    for value in arr:
        acc ^= value
    return acc
def findOddTimes2(arr):
    """Return the two distinct values in *arr* that occur an odd number of times.

    XOR of all elements yields a ^ b.  Its lowest set bit is a position
    where a and b differ, so XOR-ing only the elements with that bit
    clear isolates one of the two; the other follows by XOR.
    """
    combined = 0
    for value in arr:
        combined ^= value
    # lowest set bit of a ^ b (two's complement trick: x & -x)
    low_bit = combined & -combined
    first = 0
    for value in arr:
        if value & low_bit == 0:
            first ^= value
    return first, first ^ combined
if __name__ == '__main__':
a = [1,2,3,2,1,2,4,4,3,2,5]
print(findOddTimes1(a))
b = [4, 3, 4, 2, 2, 1, 4, 1, 1, 1, 3, 3, 1, 1, 1, 4, 2, 2]
print(findOddTimes2(b))
print(find2(b)) | [
"21810179@zju.edu.cn"
] | 21810179@zju.edu.cn |
604d6590d6ef3fce8b32b65c1c5a36acd2fa899e | 4bbc78dfcb36bad90f4b5a4d1a1567bc820f22df | /Flaskweb1/venv/bin/pip3.6 | 6947d567b45a8cb1b6a17c1892873c702ec5d1ec | [] | no_license | Ernestbengula/python | 38d09dad271cbc966eca65804ea7bcee2c248915 | 168d385e3258cc16db811cf34af1b269d7f055c7 | refs/heads/master | 2020-07-24T03:34:18.863179 | 2019-10-30T12:34:21 | 2019-10-30T12:34:21 | 207,789,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | 6 | #!/root/PycharmProjects/Flaskweb1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.6'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.6')()
)
| [
"ernestbengula2017@gmail.com"
] | ernestbengula2017@gmail.com |
39c078ee69d1098e1c91f37879882232c475e2f0 | 59b0ebc4249f20edd0e87dc63784c6e8c138c7fd | /.history/fibonacci_20180603232558.py | 0f355ae930f9f8d834a1e6a158738d3573e77163 | [] | no_license | Los4U/first_python_programs | f397da10be3ef525995f3f220e3b60012a6accaa | c3fc33a38c84abd292cb2e86de63e09434fc7fc4 | refs/heads/master | 2020-03-22T08:09:40.426118 | 2018-07-04T17:17:58 | 2018-07-04T17:17:58 | 139,748,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | i = 0
j = 1
k = 0
fib = 0
user_input = int(input("How many numbers print out? : "))
for fn in range(user_input):
#if i < 30:
print('{0:2d} {1:>10}'.format(fn, fib))
#print(fib)
fib = j+k
j = k
k = fib
#else:
# print("3")
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
6b1337a8db31e35ab1f588c870e7418838a2eb92 | 3d45daee8b81777a25d54456659d73d58b9ce81e | /patient_main.py | b8d8fe64df98631c7f89f814e55cd4c008db2305 | [] | no_license | benrprince/ref-range-test-patients | 480016f78ceda9a02d203db218675b72fd49f426 | 9757a6fc38c6596b24effa73d6207d39e7c643db | refs/heads/main | 2023-06-23T18:36:50.883701 | 2021-07-02T12:44:12 | 2021-07-02T12:44:12 | 308,037,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | # Author: Ben Prince
# Version: 1.2
# Description: Used to figure out the number of test patients needed
# based on the sex and age ranges given in the DCW
import patient_sort as ps
import xlwt
import openpyxl
# TODO: Update xlwt to use openpyxl....currently works, but pulling in redundant library
def parse_patients(filename, overlap):
    """Used to separate the excel rows into 3 lists containing
    each sex category. Skips first excel row because it is
    assumed to be a header row. Returns: male, female, and
    undifferentiated lists

    Each entry is [sex(str), low(int), high(int)] from columns 1-3.
    NOTE(review): `overlap` is accepted for interface compatibility but
    is not used here.
    """
    m_list = []
    f_list = []
    u_list = []

    # open excel file and get the first sheet
    xl_workbook = openpyxl.open(filename)
    sheet = xl_workbook.worksheets[0]

    # iterate through the rows and sort the data into the three lists
    # BUGFIX: openpyxl rows are 1-indexed and max_row is inclusive, so
    # range(2, sheet.max_row) silently skipped the final data row
    for i in range(2, sheet.max_row + 1):
        entry = [
            str(sheet.cell(i, 1).value),
            int(sheet.cell(i, 2).value),
            int(sheet.cell(i, 3).value),
        ]
        if entry[0] == 'Male':
            m_list.append(entry)
        elif entry[0] == 'Female':
            f_list.append(entry)
        else:
            u_list.append(entry)

    return m_list, f_list, u_list
def get_patients_wb(filename):
    """Runs the algorithm on the patient_sort file and
    arranges the data into a workbook. Returns: the
    formatted workbook with test patients"""

    # minutes for 1 week. This can change
    overlap = 10080

    # use above function to split out the age and sex lines
    m_list, f_list, u_list = parse_patients(filename, overlap)

    # Write to a new workbook
    wb = xlwt.Workbook()
    patients = wb.add_sheet('Patients')

    # Set up Doc
    patients.write(0, 0, 'Sex', xlwt.Style.easyxf("font: bold on"))
    patients.write(0, 1, 'Age', xlwt.Style.easyxf("font: bold on"))

    # Get test patient data into lists from patient_sort file
    m_list = ps.test_patient_list(m_list, overlap)
    f_list = ps.test_patient_list(f_list, overlap)
    u_list = ps.test_patient_list(u_list, overlap)

    # Write all three groups in order with a single running row counter.
    # BUGFIX: the original `for i in range(1, m_len)` wrote only entries
    # 0..m_len-2, silently dropping the last male patient (the female and
    # undifferentiated loops wrote all of their entries — the asymmetry
    # confirms the off-by-one); the offset arithmetic is also gone.
    row = 1
    for group in (m_list, f_list, u_list):
        for entry in group:
            patients.write(row, 0, entry[0])
            patients.write(row, 1, entry[1])
            row += 1

    return wb
| [
"noreply@github.com"
] | benrprince.noreply@github.com |
d190d2073bc71b201cd5a4cdf229f13d5ed9a0b0 | 43b5d7bdcc9ec7a7b38e25ce67d0a3a618362275 | /netdata/workers/worker_storage.py | c8bb6340fb8f56a87dc7c9535ace93fd81a71298 | [
"Apache-2.0"
] | permissive | mincode/netdata | c8b64d1ab99601823fba2d3c9d70b4960cd26ac9 | 4369a3bfb473509eff92083e03f214d5b75f6074 | refs/heads/master | 2021-03-22T04:33:04.076065 | 2018-07-27T19:54:48 | 2018-07-27T19:54:48 | 113,805,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | # Storage for a table of worker instances
from netdata.workers.json_storage import JSONStorage
class WorkerStorage(JSONStorage):
    """
    Table of worker instances stored in a json file;
    consisting of a list of pairs {'ip': ..., 'instance_id': ...}
    """
    # label in the dict to store the list of {'ip': ip_string, 'instance_id': id_string}
    _instances_label = 'instances'

    def __init__(self, path, name):
        """
        Initialize.

        :param path: path to the storage file; empty means the current directory.
        :param name: file name, json file.
        """
        super(WorkerStorage, self).__init__(path, name)
        # make sure the instance table exists on first use
        if self._instances_label not in self.data:
            self.set(self._instances_label, [])

    @property
    def instances(self):
        """
        List of instances.

        :return list of {'ip':..., 'instance_id':....}
        """
        return self.get(self._instances_label)

    @property
    def all_ids(self):
        """
        List all instance ids.

        :return list of all instance ids.
        """
        return [entry['instance_id'] for entry in self.instances]

    @property
    def all_ips(self):
        """
        List all instance ips.

        :return list of all instance ips.
        """
        return [entry['ip'] for entry in self.instances]

    def insert(self, index, ip, instance_id):
        """
        Insert new instance at given index.

        :param index: index to insert at.
        :param ip: ip address of new instance.
        :param instance_id: id of new instance.
        """
        # list.insert already appends when index == len(list), so the
        # original special-case branch for that index was redundant
        self.instances.insert(index, {'ip': ip, 'instance_id': instance_id})
        self.dump()

    def delete(self, index):
        """
        Delete entry.

        :param index: index of entry to be deleted.
        """
        del self.instances[index]
        self.dump()

    def delete_all(self):
        """
        Delete all entries.
        """
        self.set(self._instances_label, [])
"manfred@minimair.org"
] | manfred@minimair.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.