text stringlengths 8 6.05M |
|---|
from flask import g, abort
from functools import wraps
__all__ = ['AdminPermissionException', 'check_admin', 'check_moderator', 'check_reviewer',
'level_compare', 'is_level', 'check_level']
LEVELS = ['user', 'reviewer', 'moderator', 'admin']
class AdminPermissionException(Exception):
    """Raised when an operation requires admin rights the caller lacks.

    NOTE(review): not raised anywhere in this module -- presumably used by
    importers of this module; confirm before removing.
    """
    pass
def level_compare(current, required):
    """Return True when *current* ranks at or above *required*.

    Rank is the position in the module-level LEVELS list; an unknown
    level name raises ValueError (from list.index).
    """
    return LEVELS.index(current) >= LEVELS.index(required)
# decorator to check admin
def _level_required(required):
    """Build a view decorator that enforces *required* admin level.

    The wrapped view calls check_level(required) first, which aborts the
    request with 403 when the current user is not privileged enough.
    """
    def decorator(view):
        @wraps(view)
        def wrapped(*args, **kw):
            check_level(required)
            return view(*args, **kw)
        return wrapped
    return decorator


# The three public decorators are the same guard parameterized by level;
# the previous code repeated the wrapper three times verbatim.
check_admin = _level_required('admin')
check_moderator = _level_required('moderator')
check_reviewer = _level_required('reviewer')
def is_level(level='admin'):
    """True when the current request's admin level is at least *level*."""
    current = g.admin_level
    return level_compare(current, level)
def check_level(required):
    """Abort the request with 403 unless an admin of at least *required* level."""
    allowed = g.admin and level_compare(g.admin_level, required)
    if not allowed:
        abort(403)
|
# Moderate dtObs and non-0 Q.
# Named 'm30' in Datum.
from common import *
from mods.Lorenz63.sak12 import *
# Coarser observation interval than the base sak12 setup -- presumably
# observe every 15 model steps (dkObs convention); confirm against DAPPER docs.
t.dkObs = 15
# Non-zero model noise (the "non-0 Q" of the header).
f['noise'] = 2
# Initial-state covariance: diagonal with variance 0.5 in every dimension.
X0.C = CovMat(0.5*ones(m))
# Record this config file's path (relative to mods/) as the experiment name.
other = {'name': os.path.relpath(__file__,'mods/')}
HMM = HiddenMarkovModel(f,h,t,X0,**other)
####################
# Suggested tuning
####################
|
import argparse
import json
import os
from random import choice
# Files exchanged with the game engine (command/placement/state) and the
# bot's own persistence files (data/stack), which live two directories up.
command_file = "command.txt"
place_ship_file = "place.txt"
game_state_file = "state.json"
output_path = '.'
map_size = 0  # filled in from the game state each turn
data_file = "data.txt"
stack_file = "stack.txt"
def main(player_key):
    """Read the current game state and either place ships or fire a shot."""
    global map_size
    state_path = os.path.join(output_path, game_state_file)
    with open(state_path, 'r') as f_in:
        state = json.load(f_in)
    map_size = state['MapDimension']
    if state['Phase'] == 1:
        place_ships()
    else:
        fire_shot(state['OpponentMap']['Cells'])
def _push_new_neighbors(opponent_map, stack, x, y, verbose=False):
    """Append un-shot cells orthogonally adjacent to (x, y) that are not
    already queued; return True if any were added.

    Iterates opponent_map in order, so the append order matches the
    original quadruplicated inline version exactly.
    """
    wanted = {(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)}
    added = False
    for cell in opponent_map:
        if cell['Damaged'] or cell['Missed']:
            continue
        coord = (cell['X'], cell['Y'])
        if coord not in wanted or coord in stack:
            continue
        if verbose:
            print("added", coord[0], coord[1])
        stack.append(coord)
        added = True
    return added


def fire_shot(opponent_map):
    """Pick and emit the next shot using a hunt/target strategy.

    State between turns is kept in two files two directories up:
    data.txt holds "x,y,mode" of the previous shot, stack.txt holds the
    queue of candidate target cells (one "x,y" per line).

    To send through a command please pass through the following <code>,<x>,<y>
    Possible codes: 1 - Fireshot, 0 - Do Nothing (please pass through
    coordinates if code 1 is your choice).
    """
    # Load the previous shot and targeting mode recorded last turn.
    with open(os.path.join("../..", data_file), 'r') as f:
        last_cell_x, last_cell_y, last_state = f.read().split(',')
    last_cell_x = int(last_cell_x)
    last_cell_y = int(last_cell_y)
    print("data file", last_cell_x, last_cell_y, last_state)
    # Locate the cell we fired at last turn.
    # NOTE(review): presumably always found; if not, the original raised
    # NameError here -- now it would fail on the subscript below instead.
    last_cell = None
    for cell in opponent_map:
        if cell['X'] == last_cell_x and cell['Y'] == last_cell_y:
            last_cell = cell
            break
    print("damaged" if last_cell['Damaged'] else "missed")
    # Load the persisted target queue.
    stack = []
    with open(os.path.join("../..", stack_file), 'r') as f:
        print("isi stack")
        for line in f:
            x, y = line[:-1].split(',')
            x = int(x)
            y = int(y)
            stack.append((x, y))
            print(x, y)
    print("current mode :", last_state)
    print("last st", last_state, "damaged :", last_cell['Damaged'])
    if last_state == "hunt" and last_cell['Damaged']:
        # A hit while hunting: queue the neighbors and switch to target mode.
        print("adding aronund", last_cell['X'], last_cell['Y'])
        if _push_new_neighbors(opponent_map, stack, last_cell_x, last_cell_y,
                               verbose=True):
            last_state = "target"
    elif last_state == "target" and last_cell['Damaged']:
        # Another hit while targeting: extend the queue; fall back to hunt
        # mode when nothing is left to try.
        print("adding aronund", last_cell['X'], last_cell['Y'])
        _push_new_neighbors(opponent_map, stack, last_cell_x, last_cell_y)
        if stack == []:
            last_state = "hunt"
    if last_state == "hunt":
        # Parity hunt: only cells where (x+y) is odd -- every ship of
        # length >= 2 must cover at least one such cell.
        targets = []
        for cell in opponent_map:
            if not cell['Damaged'] and not cell['Missed'] and ((cell['X'] + cell['Y']) % 2 == 1):
                targets.append((cell['X'], cell['Y']))
        target = choice(targets)
        with open(os.path.join("../..", data_file), 'w') as f:
            f.write("{},{},{}".format(target[0], target[1], "hunt"))
        output_shot(*target)
    elif last_state == "target":
        # NOTE(review): an empty stack here (target mode after a miss with
        # nothing queued) raises IndexError, as in the original -- confirm
        # the engine never produces that sequence.
        target = stack.pop(0)
        with open(os.path.join("../..", data_file), 'w') as f:
            f.write("{},{},{}".format(target[0], target[1], "target"))
        output_shot(*target)
    # Persist the (possibly shortened) queue for the next turn.
    with open(os.path.join("../..", stack_file), 'w') as f:
        for s in stack:
            f.write("{},{}".format(s[0], s[1]))
            f.write("\n")
def output_shot(x, y):
    """Write a fire command ('1,x,y') for the game engine to pick up."""
    fire_code = 1  # engine command code for "fire shot"
    command_path = os.path.join(output_path, command_file)
    with open(command_path, 'w') as f_out:
        f_out.write('{},{},{}\n'.format(fire_code, x, y))
def place_ships():
    """Write the fixed ship placement and reset the bot's persistence files.

    Engine placement format: '<Shipname> <x> <y> <direction>' per line.
    Ship names: Battleship, Cruiser, Carrier, Destroyer, Submarine.
    Directions: north east south west.
    """
    ships = ['Battleship 1 0 north',
             'Carrier 3 1 East',
             'Cruiser 4 2 north',
             'Destroyer 7 3 north',
             'Submarine 1 8 East'
             ]
    # Start in hunt mode at (0,0) with an empty target stack.
    with open(os.path.join("../..", data_file), 'w') as f_out:
        f_out.write("0,0,hunt")
    with open(os.path.join("../..", stack_file), 'w') as f_out:
        f_out.write("")
    with open(os.path.join(output_path, place_ship_file), 'w') as f_out:
        for ship in ships:
            f_out.write(ship + '\n')
    return
if __name__ == '__main__':
    # CLI contract defined by the game engine: player key plus the
    # per-game working directory that holds state.json etc.
    parser = argparse.ArgumentParser()
    parser.add_argument('PlayerKey', nargs='?', help='Player key registered in the game')
    parser.add_argument('WorkingDirectory', nargs='?', default=os.getcwd(), help='Directory for the current game files')
    args = parser.parse_args()
    assert (os.path.isdir(args.WorkingDirectory))
    output_path = args.WorkingDirectory
    main(args.PlayerKey)
|
import argparse
import subprocess
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from time import time
from unet2d import UNet
from unet3d import UNet3D
# Command-line options for the benchmark run.
parser = argparse.ArgumentParser(description='UNet3D benchmark')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disable CUDA')
parser.add_argument('--batch-size', default=16, type=int,
                    help='batch size')
parser.add_argument('--in-channel', default=32, type=int,
                    help='input channel')
parser.add_argument('--prof', action='store_true', default=False,
                    help='enable autograd profiler')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Warm-up iterations are excluded from the reported timings.
num_warmups = 1
num_iterations = 10
if args.cuda:
    import torch.backends.cudnn as cudnn
    # Let cuDNN pick the fastest kernels for the fixed input shape.
    cudnn.benchmark = True
    # Query the GPU model name. Decode the subprocess output instead of
    # splitting str(bytes) on the literal '\\n' of its repr, which only
    # worked by accident.
    p = subprocess.check_output('nvidia-smi --query-gpu=name --format=csv', shell=True)
    device_name = p.decode().splitlines()[1]
else:
    # CPU model name from /proc/cpuinfo ("model name : <name>").
    p = subprocess.check_output('cat /proc/cpuinfo | grep name | head -n 1', shell=True)
    device_name = p.decode().split(':')[1].strip()
print('Running on device: %s' % (device_name))
def main():
    """Time forward/backward/update passes of UNet3D on synthetic data."""
    # Synthetic input: batch of n c-channel 128^3 volumes.
    n = args.batch_size
    c = args.in_channel
    h = 128
    w = 128
    d = 128
    print('Model UNet, [N,C,H,W,D] = [%d,%d,%d,%d,%d]' % (n, c, h, w, d))
    data_ = torch.randn(n, c, h, w, d)
    target_ = torch.arange(1, n+1).long()
    #net = UNet(3, depth=5, merge_mode='concat')
    net = UNet3D(in_channel=args.in_channel, n_classes=6)
    optimizer = optim.SGD(net.parameters(), lr=0.01)
    if args.cuda:
        data_, target_ = data_.cuda(), target_.cuda()
        net.cuda()
    # NOTE(review): eval() while still calling backward() -- presumably to
    # freeze dropout/batchnorm for stable timings; confirm intent.
    net.eval()
    data, target = Variable(data_), Variable(target_)
    # Warm-up passes, excluded from the timing totals.
    for i in range(num_warmups):
        optimizer.zero_grad()
        output = net(data)
        output.mean().backward()
    time_fwd, time_bwd, time_upt = 0, 0, 0
    for i in range(num_iterations):
        optimizer.zero_grad()
        t1 = time()
        output = net(data)
        t2 = time()
        output.mean().backward()
        t3 = time()
        optimizer.step()
        t4 = time()
        time_fwd += t2 - t1
        time_bwd += t3 - t2
        time_upt += t4 - t3
        # Cumulative (not per-iteration) times so far.
        print("iteration %d forward %10.2f ms, backward %10.2f ms" % (i, time_fwd*1000, time_bwd*1000))
    time_fwd_avg = time_fwd / num_iterations * 1000
    time_bwd_avg = time_bwd / num_iterations * 1000
    time_upt_avg = time_upt / num_iterations * 1000
    # NOTE(review): "total" deliberately excludes optimizer-update time? confirm.
    time_total = time_fwd_avg + time_bwd_avg
    print("%10s %10s %10s" % ('direction', "time(ms)", "imgs/sec"))
    print("%10s %10.2f %10.2f" % (':forward:', time_fwd_avg, n*1000/time_fwd_avg))
    print("%10s %10.2f" % (':backward:', time_bwd_avg))
    print("%10s %10.2f" % (':update:', time_upt_avg))
    print("%10s %10.2f %10.2f" % (':total:', time_total, n*1000/time_total))
if __name__ == '__main__':
    if args.prof:
        # Profile the whole run and dump the table to profile.txt.
        with torch.autograd.profiler.profile() as prof:
            main()
        # Context manager so the report file is always closed (the
        # original leaked the file handle).
        with open('profile.txt', 'w') as f:
            f.write(str(prof))
    else:
        main()
|
import os
import pwd
import grp
# Constant configuration parameters for the judger.
# Base workspace directory for judger runs.
JUDGER_WORKSPACE_BASE = "/judger/run"
# Base log directory.
LOG_BASE = "/log"
# Compiler log path.
COMPILER_LOG_PATH = os.path.join(LOG_BASE, "compile.log")
# Judger run log path.
JUDGER_RUN_LOG_PATH = os.path.join(LOG_BASE, "judger.log")
# Judge server log path.
SERVER_LOG_PATH = os.path.join(LOG_BASE, "judge_server.log")
# Run-time user/group ids resolved from the "code" system account
# (raises KeyError at import when the account does not exist).
RUN_USER_UID = pwd.getpwnam("code").pw_uid
RUN_GROUP_GID = grp.getgrnam("code").gr_gid
# Compiler user/group ids resolved from the "compiler" system account.
COMPILER_USER_UID = pwd.getpwnam("compiler").pw_uid
COMPILER_GROUP_GID = grp.getgrnam("compiler").gr_gid
# Special-judge (SPJ) user/group ids resolved from the "spj" system account.
SPJ_USER_UID = pwd.getpwnam("spj").pw_uid
SPJ_GROUP_GID = grp.getgrnam("spj").gr_gid
# NOTE: these directories are created by entrypoint.sh, except TEST_CASE_DIR.
# Test-case directory.
TEST_CASE_DIR = "/test_case"
# Special-judge source directory.
SPJ_SRC_DIR = "/judger/spj"
# Special-judge executable directory (same as the source directory).
SPJ_EXE_DIR = "/judger/spj"
|
#!/usr/bin/python
import numpy as np
import math
from roboclaw import *
speed = 0
# The original issued the same trio of commands four times verbatim;
# a loop makes the repetition explicit and editable in one place.
# NOTE(review): M1Forward(128,...) is sent but M1Forward(129,...) never is --
# confirm whether controller 129's M1 channel is intentionally unused.
for _ in range(4):
    M1Forward(128, 0)   # stop motor 1 on controller at address 128
    M2Forward(128, 0)   # stop motor 2 on controller at address 128
    M2Forward(129, 0)   # stop motor 2 on controller at address 129
|
import pytest
from bromine.utils.geometry import RectSize
def test_adding_two_rect_sizes():
    """Addition is component-wise."""
    total = RectSize(1, 2) + RectSize(3, 4)
    assert total == RectSize(4, 6)
def test_subtracting_two_rect_sizes():
    """Subtraction is component-wise and may go negative."""
    difference = RectSize(1, 2) - RectSize(1, 4)
    assert difference == RectSize(0, -2)
class TestRectSizeDecorator():
    """Tests for RectSize.wrapped, which coerces a tuple return into RectSize."""

    def test_undecorated_value(self, undecorated_function):
        plain_result = undecorated_function()
        assert not isinstance(plain_result, RectSize)

    def test_decorated_value(self, decorated_function):
        wrapped_result = decorated_function()
        assert isinstance(wrapped_result, RectSize)

    def test_decorated_function(self, decorated_function, undecorated_function):
        # Wrapping must preserve the function's identity metadata.
        assert decorated_function.__name__ == undecorated_function.__name__

    @pytest.fixture(name='undecorated_function')
    def undecorated_fixture(self):
        def some_function_returning_a_tuple():
            return (1, 2)
        return some_function_returning_a_tuple

    @pytest.fixture(name='decorated_function')
    def decorated_fixture(self, undecorated_function):
        return RectSize.wrapped(undecorated_function)
|
import uuid
import boto3
import botocore
from io import BytesIO
from datetime import datetime, timedelta
import os
BUCKETNAME = 'my-userdata'
# Module-level S3 handles, created once at import time.
bucket = boto3.resource('s3').Bucket(BUCKETNAME)
client = boto3.client('s3')
class NoFileException(Exception):
    """Raised when no S3 object exists for the requested file id."""
    pass
class WrongTypeException(Exception):
    """Raised when the data passed to saveFile cannot be buffered."""
    pass
class FileOut():
    """Thin wrapper pairing a readable body with its original filename."""

    def __init__(self, data, filename):
        self.data = data
        self.filename = filename

    def read(self):
        """Return the content of the underlying stream."""
        return self.data.read()
class FileManager:
    """Stores and retrieves user files in the module-level S3 bucket."""

    def saveFile(self, file, filename):
        """Upload *file* (io.BytesIO or bytes-like) under a fresh UUID key.

        Returns the generated id string. Raises WrongTypeException when the
        data cannot be written into a BytesIO buffer.
        """
        file_id = str(uuid.uuid4())  # renamed: `id` shadowed the builtin
        if isinstance(file, BytesIO):
            toSave = file
        else:
            toSave = BytesIO()
            try:
                toSave.write(file)
            except TypeError:  # was a bare except: -- only write() can raise here
                raise WrongTypeException("cannot save the data in the type given")
            # Rewind so the upload reads from the start; without this the
            # original uploaded 0 bytes (pointer was at end-of-buffer).
            toSave.seek(0)
        delta = timedelta(seconds=1000)
        # NOTE(review): utcnow() is naive; S3 Expires presumably expects UTC -- confirm.
        bucket.upload_fileobj(toSave, file_id,
                              ExtraArgs={"Expires": datetime.utcnow() + delta,
                                         "Metadata": {"filename": filename}})
        # Track every stored id in the process environment, pipe-separated.
        ids = os.environ.get("savedIds", "")
        os.environ["savedIds"] = file_id if not ids else ids + "|" + file_id
        return file_id

    def loadFile(self, fileID):
        """Fetch the object for *fileID*; raise NoFileException on a 404."""
        try:
            resp = client.get_object(Bucket=BUCKETNAME, Key=fileID)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                raise NoFileException("No file with id " + str(fileID))
            # Anything other than a plain 404 propagates unchanged.
            raise
        else:
            return FileOut(resp["Body"], resp["Metadata"]["filename"])
|
# Placeholder/smoke-test script: prints a greeting and exits.
print("hello dhana")
|
import socket
import hashlib
# Client side of a simple challenge-response handshake: the server sends a
# random challenge; we reply with sha1(secret + challenge) before the real
# conversation starts.
sk = socket.socket()
sk.connect(('127.0.0.1',43))
# Receive the server's random challenge bytes.
yanzheng = sk.recv(1024)
# b'043' is the shared secret prefix -- presumably matched server-side; confirm.
sha = hashlib.sha1(b'043')
sha.update(yanzheng)
ret = sha.hexdigest().encode('utf-8')
sk.send(ret)
# First real message after a successful handshake.
msg = sk.recv(1024)
print(msg)
from random import randint
# Number-guessing game (Portuguese UI): the computer picks 0..10 and the
# player keeps guessing with higher/lower hints until correct.
computador = randint(0, 10)
print(7*'=', 'Adivinhação', 7*'=')
print('Sou seu computador e pensei em um número...\n Tente advinhar')
acertou = False
while not acertou:
    numeroJogador = int(input('É o número: '))
    if numeroJogador == computador:
        acertou = True
    elif numeroJogador < computador:
        # Guess too low -> "higher".
        print('Mais...')
    elif numeroJogador > computador:
        # Guess too high -> "lower".
        print('Menos')
print('Parabéns você acertou!!!!! é o número {}'.format(computador))
import nmap
# Interactive port-scan loop: prompts for a host and port spec, runs an
# nmap scan, and dumps the raw result dict. Runs until interrupted.
while True:
    nmScan = nmap.PortScanner()
    host = input("host(ip/url :)")
    port = input("port:")
    output = nmScan.scan(host,port)
    print(output)
# -*- coding: utf-8 -*-
import os
import pytest
from scrapy.http import TextResponse
from coral.spiders.github import parse_release_link, parse_release_links
@pytest.fixture
def landing_page(page_path):
    """HTML of a repository landing page, loaded from the fixture directory."""
    fixture_path = os.path.join(page_path, 'landing_page.html')
    with open(fixture_path, 'r') as fh:
        return fh.read()
@pytest.fixture
def release_page(page_path):
    """HTML of a repository releases page, loaded from the fixture directory."""
    fixture_path = os.path.join(page_path, 'release_page.html')
    with open(fixture_path, 'r') as fh:
        return fh.read()
def test_parse_release_link(landing_page):
    """The landing page yields the relative link to the releases page."""
    response = TextResponse(
        url='https://github.com/vim/vim',
        body=landing_page,
        encoding='utf-8')
    assert parse_release_link(response) == '/vim/vim/releases'
def test_parse_release_links(release_page):
    """A releases page yields at least one release link."""
    response = TextResponse(
        url='https://github.com/vim/vim/releases',
        body=release_page,
        encoding='utf-8')
    release_urls = parse_release_links(response)
    assert len(release_urls) > 0
|
#!/usr/bin/python
import pygame
from glm import vec3, ivec2
import random
import math
from game.entities.message import Message
from game.entities.weapons import WEAPONS
from game.base.entity import Entity
from game.constants import *
class Powerup(Message):
    """A floating collectible letter: a weapon pickup, a heart, or a star.

    Rendered as a blinking colored glyph that bobs vertically and drifts
    toward the player along +z.
    """

    def __init__(self, app, scene, letter, **kwargs):
        """Create a powerup for *letter*; None picks a random weapon or heart."""
        self.letter = letter
        color = None
        if self.letter == "heart":
            self.letter = "♥"
        if self.letter == "star":
            self.letter = "*"
        if letter is None:  # random powerup
            # no default weapon and add hearts
            powerups = list(w.letter for w in WEAPONS[1:]) + ["♥"]
            self.letter = random.choice(powerups)
        self.heart = self.letter == "♥"
        self.star = self.letter == "*"
        # get color of item
        if self.heart:
            color = pygame.Color("red")
        elif self.star:
            color = pygame.Color("white")
        else:
            for wpn in WEAPONS:
                if self.letter == wpn.letter:
                    color = pygame.Color(wpn.color)
                    break
        # Every letter must have mapped to a known color by now.
        assert color
        super().__init__(app, scene, self.letter, color, **kwargs)
        self.solid = True
        self.size = (10, 10)  # about the same as the butterflies
        self.collision_size = vec3(100, 100, 300)
        self.time = 0
        self.offset = vec3(0)
        # Drift toward the player (the original assigned this three times).
        self.velocity.z = 100

    def __call__(self, script):
        """Script coroutine: blink between gray and the item color forever."""
        color = self.color
        while True:
            self.set(self.letter, "gray")
            yield script.sleep(0.2)
            self.set(self.letter, color)
            yield script.sleep(0.2)

    def update(self, dt):
        """Advance the local clock and bob vertically with a sine wave."""
        super().update(dt)
        self.time += dt
        self.offset.y = math.sin(self.time * math.tau)

    def render(self, camera):
        """Scale the glyph with distance and draw it; despawn when off-camera."""
        half_diag = vec3(-self.size[0], self.size[1], 0) / 2
        world_half_diag = camera.rel_to_world(half_diag) - camera.position
        pos_tl = camera.world_to_screen(self.position + world_half_diag)
        pos_bl = camera.world_to_screen(self.position - world_half_diag)
        if None in (pos_tl, pos_bl):
            # behind the camera
            self.scene.remove(self)
            return
        self.font_size = ivec2(pos_bl.xy - pos_tl.xy) / 2
        # fade = 2 == twice bright
        super().render(camera, None, self.position + self.offset, fade=2)
|
import argparse
import select
import socket as socketlib
from gamelib.utils import patch_default_subcommand
from .game import SnakeGame
def runclient(args):
    """Connect to the local game server and send a single message.

    NOTE(review): PORT is not defined by this module's visible imports --
    confirm where it is meant to come from (or add a constant here).
    """
    # Create the socket before the try block so the finally-close cannot
    # raise NameError if socket creation itself fails.
    sock = socketlib.socket(socketlib.AF_INET, socketlib.SOCK_STREAM)
    try:
        host = socketlib.gethostname()
        sock.connect((host, PORT))
        # argparse produces str; sendall requires bytes (the original
        # raised TypeError here).
        sock.sendall(args.message.encode())
    finally:
        sock.close()
def runserver(args):
    """Run a dedicated game server.

    NOTE(review): Server is not defined by this module's visible imports --
    presumably exported by a sibling module; confirm.
    """
    server = Server()
    server.run()
def runsingle(args):
    """Start a local single-player game."""
    SnakeGame().run()
def ArgParser():
    """Build the CLI parser with server/client/single sub-modes.

    'single' is patched in as the default when no mode is given.
    """
    parser = argparse.ArgumentParser(main.__doc__)
    subparsers = parser.add_subparsers(title='Modes', description='Mode to run game in.')

    server_parser = subparsers.add_parser('server', help='Create a server.')
    server_parser.set_defaults(func=runserver)

    client_parser = subparsers.add_parser('client', help='Connect to game server.')
    client_parser.add_argument('message', help='message to send server.')
    client_parser.set_defaults(func=runclient)

    single_parser = subparsers.add_parser('single', help='Single player mode.')
    single_parser.set_defaults(func=runsingle)

    patch_default_subcommand(parser, 'single')
    return parser
def main():
    """
    Snake clone in pygame with aspirations of online multiplayer.
    """
    args = ArgParser().parse_args()
    args.func(args)
|
#!/usr/bin/env python3.6
# Copyright (c) 2019 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
from typing import ClassVar, List, Dict, Optional
from deepstate.core import FuzzerFrontend, FuzzFrontendError
L = logging.getLogger("deepstate.frontend.libfuzzer")
L.setLevel(os.environ.get("DEEPSTATE_LOG", "INFO").upper())
class LibFuzzer(FuzzerFrontend):
    """DeepState frontend that drives a libFuzzer-instrumented harness."""

    NAME: ClassVar[str] = "clang++"  # placeholder, set as harness binary later
    COMPILER: ClassVar[str] = "clang++"

    @classmethod
    def parse_args(cls) -> None:
        """Register libFuzzer-specific CLI options, then defer to the base parser."""
        parser: argparse.ArgumentParser = argparse.ArgumentParser(description="Use libFuzzer as a backend for DeepState")

        # Execution options
        parser.add_argument("--mem_limit", type=int, default=50, help="Child process memory limit in MB (default is 50).")
        parser.add_argument("--runtime", type=int, default=0, help="Total time to run fuzzer for (default is 0 for indefinite).")
        parser.add_argument("--dictionary", type=str, help="Optional fuzzer dictionary for libFuzzer.")
        parser.add_argument("--use_counters", action="store_true", help="Use perf counters.")
        parser.add_argument("--use_ascii", action="store_true", help="Use only ASCII characters for generated input seeds.")
        parser.add_argument("--print_pcs", action="store_true", help="Print program counters during fuzzer execution.")

        # Misc. post-processing
        parser.add_argument("--minimize_crash", action="store_true", help="Automatically minimize crashing testcases after fuzzer execution.")
        parser.add_argument("--post_stats", action="store_true", help="Output post-fuzzing stats.")

        cls.parser = parser
        super(LibFuzzer, cls).parse_args()

    def compile(self) -> None:  # type: ignore
        """Compile the harness against the libFuzzer flavor of the DeepState runtime."""
        lib_path: str = "/usr/local/lib/libdeepstate_LF.a"
        flags: List[str] = ["-ldeepstate_LF"]
        if self.compiler_args:
            flags += [arg for arg in self.compiler_args.split(" ")]
        super().compile(lib_path, flags, self.out_test_name + ".lfuzz")

    def pre_exec(self) -> None:
        """
        Perform argparse and environment-related sanity checks.
        """
        super().pre_exec()

        # first, redefine and override fuzzer as harness executable
        self.fuzzer = self.binary  # type: ignore

        seeds: str = self.input_seeds  # type: ignore
        # check if seeds are present if specified
        # NOTE(review): a non-existent seeds path passes silently here --
        # confirm whether that should also raise.
        if seeds:
            if os.path.exists(seeds):
                if len([name for name in os.listdir(seeds)]) == 0:
                    raise FuzzFrontendError(f"Seeds path specified but none present in directory.")

    @property
    def cmd(self):
        """
        Initializes a command for an in-process libFuzzer instance that runs
        indefinitely until an interrupt.
        """
        cmd_dict: Dict[str, str] = dict()

        # Empty key -- presumably rendered by the base class as the
        # positional corpus-directory argument; confirm.
        if self.input_seeds:
            cmd_dict[""] = self.input_seeds

        # preserve timeout, since libfuzzer exits after crash
        cmd_dict.update({
            "-max_len": str(self.max_input_size),
            "-timeout": str(self.timeout),
            "-rss_limit_mb": str(self.mem_limit),
            "-max_total_time": str(self.runtime),
            "-artifact_prefix": "deepstate_"
        })

        if self.dictionary:
            cmd_dict["-dict"] = self.dictionary
        if self.use_counters:
            # NOTE(review): stores the boolean itself, unlike the "1" strings
            # used by the other flags -- confirm downstream stringification.
            cmd_dict["-use_counters"] = self.use_counters
        if self.use_ascii:
            cmd_dict["-only_ascii"] = "1"
        if self.print_pcs:
            cmd_dict["-print_pcs"] = "1"
        if self.post_stats:
            cmd_dict["-print_final_stats"] = "1"
        if self.minimize_crash:
            cmd_dict["-minimize_crash"] = "1"

        return cmd_dict
def main():
    """Instantiate the LibFuzzer frontend, parse CLI args, and run it."""
    frontend = LibFuzzer()
    frontend.parse_args()
    frontend.run()
    return 0


if __name__ == "__main__":
    exit(main())
|
# A monotonic stack looks tempting at first (per the problem tags), but equal
# attack values make it awkward, so sort instead:
# - attack descending
# - defense ascending within equal attack, so that a character can never be
#   counted weak against a peer with the same attack value
class Solution:
    """LeetCode 1996: count "weak" characters.

    A character is weak when some other character has strictly higher
    attack AND strictly higher defense. Sort by attack descending with
    defense ascending on ties (so equal-attack peers can never mark each
    other weak), then one scan tracking the max defense seen so far.
    """

    def numberOfWeakCharacters(self, properties: list[list[int]]) -> int:
        """Return the number of weak characters; O(n log n), mutates input order."""
        # Note: the original annotated with bare `List`, which raises
        # NameError outside environments that pre-import typing.List.
        properties.sort(key=lambda p: (-p[0], p[1]))
        weak, max_defense = 0, -1
        for _, defense in properties:
            if max_defense > defense:
                weak += 1
            else:
                max_defense = defense
        return weak
|
# Shared pytest fixture modules pulled in from the fixtures package.
pytest_plugins = [
    'fixtures.fixture_user',
    'fixtures.fixture_data',
]
|
import getpass
import io
import os
from django.contrib.auth import get_user_model
from django.core.files import File
from django.core.files.base import ContentFile
from django.shortcuts import get_object_or_404, redirect, render
from django.views import generic
from django.urls import reverse
from urllib.parse import urlencode
from excelapp.forms.upload import ServiceForm
from excelapp.models import Tm_Department, Tm_Service
from excelapp.utils.base64_util import CustomBase64
from excelapp.utils.file_util import CustomFile
User = get_user_model()
def index(request):
    """Render the upload top page."""
    template = 'excelapp/upload/index.html'
    return render(request, template)
class ServiceList(generic.ListView):
    """List view of registered Tm_Service rows."""
    template_name = 'excelapp/upload/list.html'
    model = Tm_Service

    def get_context_data(self, **kwargs):
        """Drop any stale upload-wizard session data when the list is shown."""
        context = super().get_context_data(**kwargs)
        # Removed a leftover debug print of the full context dict.
        if 'details' in self.request.session:
            del self.request.session['details']
        return context
def input(request):
    """Upload wizard step 1: show and validate the service form.

    On a valid POST the form data plus the saved file's url/path are
    stashed in the session and the user is redirected to the confirm step.
    """
    details = request.session.get('details', None)
    if request.method == 'GET':
        # Re-populate the form when the user returns from the confirm page.
        initial = {}
        if details and 'form_data' in details:
            form_data = details['form_data']
            initial['department'] = form_data['department']
            initial['service_name'] = form_data['service_name']
        form = ServiceForm(None, initial=initial, details=details)
    else:
        form = ServiceForm(data=request.POST, files=request.FILES, details=details)
        if form.is_valid():
            # Persist the uploaded file first, then remember everything in the session.
            file_url, file_path = form.upload_file_save()
            details = {}
            details['form_data'] = request.POST
            details['file_url'] = file_url
            details['file_path'] = file_path
            request.session['details'] = details
            redirect_url = reverse('excelapp:upload_confirm')
            # 'back' tells the confirm page where to return for edits.
            parameters = urlencode({'back': 'excelapp:upload_input'})
            url = f'{redirect_url}?{parameters}'
            return redirect(url)
            # return redirect('excelapp:upload_confirm', )
    context = {
        'form': form
    }
    # Show the previously uploaded file when re-rendering the form.
    if details:
        context['file_url'] = details['file_url']
        context['file_path'] = details['file_path']
    return render(request, 'excelapp/upload/input_field.html', context)
def update(request, pk):
    """Edit an existing Tm_Service: form on GET, save (incl. file swap) on POST."""
    details = request.session.get('details', None)
    tm_service = Tm_Service.objects.get(pk=pk)
    if request.method == 'GET':
        initial = {}
        # Prefer values stashed in the session, else fall back to the DB row.
        if details and 'form_data' in details:
            form_data = details['form_data']
            initial['department'] = form_data['department']
            initial['service_name'] = form_data['service_name']
        elif tm_service:
            initial['department'] = tm_service.department
            initial['service_name'] = tm_service.service_name
        form = ServiceForm(None, initial=initial, details=details)
    else:
        form = ServiceForm(data=request.POST, files=request.FILES, details=details)
        if form.is_valid():
            tm_service.department = form.cleaned_data['department']
            tm_service.service_name = form.cleaned_data['service_name']
            upload_file = form.cleaned_data['upload_file']
            # Replace the stored file: delete the old blob, save the new one.
            tm_service.upload_file.delete(save=False)
            tm_service.upload_file.save(upload_file.name, upload_file)
            tm_service.save()
            # tm_servicecopy = Tm_Service()
            # tm_servicecopy.save()
            return redirect('excelapp:upload_List')
        else:
            # Invalid form falls through and is re-rendered with errors.
            pass
    context = {
        'form': form
    }
    return render(request, 'excelapp/upload/update.html', context)
def confirm(request):
    """Show the confirmation page for the pending upload held in the session.

    Redirects back to the input form when there is no pending upload.
    """
    # Removed a leftover HTTP_REFERER debug print.
    details = request.session.get('details', None)
    if details is None:
        return redirect('excelapp:upload_input')
    form_data = details['form_data']
    file_url = details['file_url']
    file_path = details['file_path']
    context = {
        'form': ServiceForm(form_data),
        'uploadfile_url': file_url,
        'uploadfile_path': file_path,
        'uploadfile_name': os.path.basename(file_path),
    }
    return render(request, 'excelapp/upload/confirm.html', context)
def create(request):
    """Persist the session-held upload as a new Tm_Service row, then show the list."""
    # pop() clears the wizard state so a refresh cannot create a duplicate.
    details = request.session.pop('details', None)
    if details is None:
        # Nothing pending (e.g. direct access): restart the wizard.
        return redirect('excelapp:upload_input')
    form_data = details['form_data']
    file_url = details['file_url']
    file_path = details['file_path']
    tm_service = Tm_Service()
    tm_service.department = Tm_Department.objects.get(pk=form_data['department'])
    tm_service.service_name = form_data['service_name']
    # Wrap the temp file on disk into a Django FileField value.
    data = CustomFile.localfile_to_filefield(file_path)
    tm_service.upload_file = data
    tm_service.save()
    # ret = CustomFile.remove_dir(os.path.dirname(file_path))
    return redirect('excelapp:upload_List')
|
# -*- coding: utf-8 -*-
"""
pyxdevkit.event
~~~~~~~~~~~~~
module that implements the event object that is used in xdevkit
"""
from xbox_thread import XboxThread
class EventInfo(object):
    """Holds the properties of a single debug event from the Xbox devkit.

    *properties* is a list of string tokens (mostly 'key=value'); only the
    ones other code needs (thread, stop flag, start/addr) are parsed here.
    """

    def __init__(self, properties, ip_addr):
        """
        Basically the event just holds information that we can use in our
        notify function. Note: these are not all the parameters, since the
        parameters are always different; they are some of the ones that
        are needed for other things.
        """
        self.properties = properties
        self.ip_addr = ip_addr
        self.is_stopped = False
        self.start = None
        self.addr = None
        # Initialize so callers can test `event.thread is None` instead of
        # hitting AttributeError when no 'thread=' token was present.
        self.thread = None
        for prop in properties:
            if 'thread=' in prop:
                # Thread object gives access to registers and the like.
                self.thread = XboxThread(
                    int(prop.replace('thread=', ''), 16), self.ip_addr)
            # NOTE(review): substring match -- any token containing 'stop'
            # (e.g. 'stopped') sets the flag; confirm that is intended.
            if 'stop' in prop:
                self.is_stopped = True
            if 'start=' in prop:
                self.start = int(prop.replace('start=', ''), 16)
            if 'addr=' in prop:
                self.addr = int(prop.replace('addr=', ''), 16)
|
# Collatz sequence: print every value from n down to 1
# (n -> n/2 when even, n -> 3n+1 when odd).
n = int(input())
arr = [n]  # the sequence so far, starting with n itself
if n == 1:
    print(1)
else:
    while n > 1:
        if n % 2 == 0:
            n = n // 2
        else:
            n = 3*n + 1
        arr.append(n)
    print(*arr,sep = " ")
'''
编写注释的主要目的是阐述代码要做什么,以及是如何做的。在开发项目期间,你对各个部分如何协同工作了如指掌,但过段时间后,有些细节你可能不记得了。当然,你总是
可以通过研究代码来确定各个部分的工作原理,但通过编写注释,以清晰的自然语言对解决方案进行概述,可节省很多时间。
要成为专业程序员或与其他程序员合作,就必须编写有意义的注释。当前,大多数软件都是合作编写的,编写者可能是同一家公司的多名员工,也可能是众多致力于同一个开源
项目的人员。训练有素的程序员都希望代码中包含注释,因此你最好从现在开始就在程序中添加描述性注释。作为新手,最值得养成的习惯之一是,在代码中编写清晰、简洁的
注释。
如果不确定是否要编写注释,就问问自己,找到合理的解决方案前,是否考虑了多个解决方案。如果答案是肯定的,就编写注释对你的解决方案进行说明吧。相比回过头去再添
加注释,删除多余的注释要容易得多。从现在开始,本书的示例都将使用注释来阐述代码的工作原理
'''
'''
The Zen of Python, by Tim Peters
Beautiful is better than ugly. 漂亮总好过丑陋
Explicit is better than implicit. 明确总好过含糊
Complex is better than complicated. 复杂总好过混乱
Flat is better than nested. 顺序总好过嵌套
Sparse is better than dense. 稀疏总好过密集
Readability counts. 可读性很重要
Special cases aren't special enough to break the rules. 特殊情况不足以打破规则
Although practicality beats purity. 实用性第一
Errors should never pass silently. 错误不该被默默地通过
Unless explicitly silenced. 除非明确沉默
In the face of ambiguity, refuse the temptation to guess. 面对歧义,不轻易通过猜测下结论
There should be one-- and preferably only one --obvious way to do it. 应该有且最好只有一种显而易见的方式来解决问题
Although that way may not be obvious at first unless you're Dutch. 尽管这种方式可能一开始并不明显
Now is better than never. 现在总比没有好
Although never is often better than *right* now. 尽管从来没有比现在更好
If the implementation is hard to explain, it's a bad idea. 如果实现方式很难解释,那它并不是一个好的方案
If the implementation is easy to explain, it may be a good idea. 如果实现方式容易解释,那它可能是一个好的方案
Namespaces are one honking great idea -- let's do more of those! 命名空间是一个好的想法,让我们做更多的事吧
''' |
import datetime
import pytest
from prereise.gather.winddata.rap.noaa_api import NoaaApi
@pytest.fixture
def noaa():
    """NoaaApi instance covering a continental-US bounding box."""
    bounding_box = {
        "north": 49.8203,
        "south": 25.3307,
        "west": -122.855,
        "east": -96.2967,
    }
    return NoaaApi(bounding_box)
# Two-day window exercised by the URL-listing test below.
start_date = "2018-03-05"
end_date = "2018-03-06"
start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
def test_get_url_list(noaa):
    """First and last grb2 paths span the whole requested date range."""
    urls = noaa.get_path_list(start, end)
    expected_first = "201803/20180305/rap_130_20180305_0000_000.grb2"
    expected_last = "201803/20180306/rap_130_20180306_2300_000.grb2"
    assert urls[0] == expected_first
    assert urls[-1] == expected_last
def test_box_query_set(noaa):
    """All four bounding-box keys appear in the query parameters."""
    param_keys = {param[0] for param in noaa.params}
    assert {"north", "south", "east", "west"} <= param_keys
def test_url_fallback_default(noaa):
    """The fallback flag switches to the 'old' archive endpoint."""
    primary = noaa.build_url("month/day/filename")
    fallback = noaa.build_url("month/day/filename", fallback=True)
    assert "old" not in primary
    assert "old" in fallback
def test_box_validation():
    """Non-dict boxes raise TypeError; bad keys/values raise ValueError."""
    for bad_type in (None, [], "box", ("north", 4)):
        with pytest.raises(TypeError):
            NoaaApi(bad_type)
    bad_values = {"foo": 5, "west": "whatever"}
    incomplete = {"north": 49.8203, "west": -122.855, "east": -96.2967}
    for bad_box in (bad_values, incomplete):
        with pytest.raises(ValueError):
            NoaaApi(bad_box)
|
import patch
import validata_core
import requests
import yaml
import functools
from urllib.parse import urlencode
from collections import defaultdict
import csv
import datetime
import sys
import json
import os
import textwrap
# Local CSV holding all previously recorded validation runs.
CSV_PATH = "data/data.csv"
# Title of the discussion thread the bot opens on data.gouv.fr.
COMMENT_SUBJECT = "Conformité au schéma"
# Slug of the bot account that posts the comments.
USER_SLUG = "validation-data-gouv-fr"
DATAGOUV_API = "https://www.data.gouv.fr/api/1"
@functools.lru_cache()
def schemas_details():
    """Download and cache the schema catalog from schema.data.gouv.fr."""
    response = requests.get("https://schema.data.gouv.fr/schemas/schemas.yml")
    with response:
        response.raise_for_status()
        return yaml.safe_load(response.content)
@functools.lru_cache()
def existing_data():
    """Read (and cache) every row of the history CSV as dicts."""
    with open(CSV_PATH, "r") as history:
        return list(csv.DictReader(history))
def file_is_new(file_url):
    """True when *file_url* has never been recorded in the history CSV."""
    return not any(row["file_url"] == file_url for row in existing_data())
def get_schema_url(slug):
    """Return the latest_url of the single schema registered for *slug*.

    The pipeline only supports single-schema catalog entries.
    """
    entry = schemas_details()[slug]["schemas"]
    assert len(entry) == 1
    return entry[0]["latest_url"]
def get_schema_version(slug):
    """Latest published version string for *slug*."""
    catalog_entry = schemas_details()[slug]
    return catalog_entry["latest_version"]
def get_details(dataset_id, slug):
    """Fetch dataset metadata from data.gouv.fr and build report/validation links.

    NOTE(review): only resources[0] is considered -- presumably the
    consolidated file is always the first resource; confirm.
    """
    response = requests.get(f"{DATAGOUV_API}/datasets/{dataset_id}/")
    response.raise_for_status()
    dataset_url = response.json()["resources"][0]["url"]
    schema_url = get_schema_url(slug)
    return {
        "schema_url": schema_url,
        "schema_slug": slug,
        "schema_version": get_schema_version(slug),
        "dataset_id": dataset_id,
        "name": response.json()["title"],
        "dataset_url": dataset_url,
        "report_url": f"https://validata.etalab.studio/table-schema?input=url&schema_url={schema_url}&url={dataset_url}&repair=true",
    }
def enrich_report(report, columns):
    """Add per-column, per-code counts of value errors to the first table.

    Mutates *report* in place (under error-stats/value-errors) and also
    returns it for convenience.
    """
    counts_by_column = {}
    for error in report["tables"][0]["errors"]:
        if error["tag"] != "value":
            continue
        column_name = columns[error["column-number"] - 1]
        counts_by_column.setdefault(column_name, defaultdict(int))[error["code"]] += 1
    report["tables"][0]["error-stats"]["value-errors"][
        "count-by-col-and-code"
    ] = counts_by_column
    return report
def validate(source, schema):
    """Run Validata on *source* against *schema* and enrich the raw report."""
    raw_report = validata_core.validate(source, schema)
    header_columns = raw_report["tables"][0]["headers"]
    return enrich_report(raw_report, header_columns)
def build_report(report):
    """Derive a status, an error percentage and a badge URL from a report row.

    Status thresholds: 0% errors -> "ok", <=10% -> "warning",
    otherwise -> "invalid".
    """
    def badge_url(nb_errors, color):
        params = {
            "label": "Consolidation",
            "message": f"{nb_errors} erreurs",
            "color": color,
            "style": "flat-square",
        }
        # See documentation on https://shields.io
        return "https://img.shields.io/static/v1?" + urlencode(params)

    percentage = int(report["nb_errors"] * 100 / report["nb_rows"])
    if percentage == 0:
        status, color = "ok", "green"
    elif percentage <= 10:
        status, color = "warning", "orange"
    else:
        status, color = "invalid", "red"
    enriched = dict(report)
    enriched.update(
        status=status,
        error_percentage=percentage,
        badge_url=badge_url(report["nb_errors"], color),
    )
    return enriched
def build_details(details, report):
    """Flatten a dataset's metadata and its validation report into one
    history row (the shape written to the CSV)."""
    error_stats = report["tables"][0]["error-stats"]
    row = {
        "date": datetime.date.today().isoformat(),
        "dataset_id": details["dataset_id"],
        "name": details["name"],
        "schema_slug": details["schema_slug"],
        "schema_version": details["schema_version"],
        "file_url": details["dataset_url"],
        "report_url": details["report_url"],
        "nb_rows": report["tables"][0]["row-count"],
        "nb_errors": error_stats["count"],
        "nb_rows_with_errors": error_stats["value-errors"]["rows-count"],
        "errors_report": json.dumps(error_stats),
    }
    return row
def post_comment(details):
    """Open (or continue) a data.gouv.fr discussion reporting validation results.

    Reuses the bot's existing open discussion on the dataset when one exists;
    otherwise creates a new one titled COMMENT_SUBJECT.
    Requires the DATAGOUV_API_KEY environment variable.
    """
    def find_existing_discussion(dataset_id):
        # Page through the dataset's open discussions, newest first,
        # looking for one previously opened by this bot account.
        url = f"{DATAGOUV_API}/discussions/?for={dataset_id}&closed=false&sort=-created"
        while True:
            r = requests.get(url)
            r.raise_for_status()
            data = r.json()
            for discussion in data["data"]:
                if (
                    discussion["title"] == COMMENT_SUBJECT
                    and discussion["user"]["slug"] == USER_SLUG
                ):
                    return discussion["id"]
            if data["next_page"] is None:
                break
            url = data["next_page"]
        return None
    def plural(count, word):
        # Naive pluralization: append "s" whenever count != 1.
        if count != 1:
            return f"{count} {word}s"
        return f"{count} {word}"
    schema_doc_url = f"https://schema.data.gouv.fr/{details['schema_slug']}/latest.html"
    comment = f"""\
Bonjour,
Vous recevez ce message car ce jeu de données est une consolidation qui se veut conforme au schéma [{details['schema_slug']}]({schema_doc_url}), ce qui a déclenché un contrôle automatique de vos données par notre robot de validation.
[Le fichier]({details["file_url"]}) que vous venez de publier ou mettre à jour comporte {plural(details["nb_errors"], "erreur")} sur un total de {plural(details["nb_rows"], "ligne")} par rapport au [schéma de référence]({schema_doc_url}).
Vous pouvez consulter le [dernier rapport de validation]({details["report_url"]}) pour vous aider à corriger les erreurs.
Une fois un fichier valide publié, vous pouvez clore cette discussion.
Une question ? Écrivez à validation@data.gouv.fr en incluant l'URL du jeu de données concerné.
"""
    existing_discussion_id = find_existing_discussion(details["dataset_id"])
    headers = {
        "X-API-KEY": os.environ["DATAGOUV_API_KEY"],
        "User-Agent": "https://github.com/etalab/monitor-consolidation",
    }
    if not existing_discussion_id:
        # Creating a new discussion
        requests.post(
            f"{DATAGOUV_API}/discussions/",
            headers=headers,
            json={
                "title": COMMENT_SUBJECT,
                "comment": textwrap.dedent(comment),
                "subject": {"id": details["dataset_id"], "class": "Dataset"},
            },
        ).raise_for_status()
    else:
        # Adding a comment to an existing discussion
        requests.post(
            f"{DATAGOUV_API}/discussions/{existing_discussion_id}/",
            headers=headers,
            json={"comment": textwrap.dedent(comment)},
        ).raise_for_status()
# Walk the whole schema catalog, validate each consolidated dataset, and
# record the results: a JSON report, a CSV history row, and (for files not
# seen before) a comment on the data.gouv.fr dataset.
daily_data = []
json_report = {}
for slug, data in schemas_details().items():
    # Only Table Schema schemas are supported right now
    # when finding out the quality of a consolidation
    if data["type"] != "tableschema":
        continue
    if data["consolidation"] and data["consolidation"]["dataset_id"]:
        dataset_id = data["consolidation"]["dataset_id"]
        details = get_details(dataset_id, slug)
        report = validate(details["dataset_url"], details["schema_url"])
        details = build_details(details, report)
        daily_data.append(details)
        json_report[dataset_id] = build_report(details)
        # If the file is new, post a comment on the dataset
        # to report the validation's result
        if file_is_new(details["file_url"]):
            post_comment(details)
# Write today's data to a JSON file
with open("data/report.json", "w") as f:
    json.dump(json_report, f, indent=2, ensure_ascii=False)
# Append daily data to a CSV file — but only when something was validated:
# the original unconditionally read daily_data[0].keys(), which raised
# IndexError on a run with no consolidated datasets.
if daily_data:
    with open(CSV_PATH, "a") as f:
        writer = csv.DictWriter(f, daily_data[0].keys(), lineterminator="\n")
        if f.tell() == 0:
            writer.writeheader()
        writer.writerows(daily_data)
|
__author__ = 'Greg Ziegan'
from rest_framework import serializers
from rest_framework import pagination
from .models import User, Location
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes User instances, identified by a hyperlink ('url')."""
    class Meta:
        model = User
        fields = ('url', 'phone', 'first_name', 'age', 'profile_picture', 'current_location', 'is_publishing')
class LocationSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes Location instances, identified by a hyperlink ('url')."""
    class Meta:
        model = Location
        fields = ('url', 'point', 'country_name', 'locality', 'postal_code')
class PaginatedUserSerializer(pagination.PaginationSerializer):
    """Paginated wrapper around UserSerializer results.

    NOTE(review): pagination.PaginationSerializer only exists in DRF < 3.1 —
    confirm the pinned rest_framework version before upgrading.
    """
    class Meta:
        object_serializer_class = UserSerializer
import asyncio
import time
# Shared request counter: incremented by the clients, reset by the monitor.
n = 0
async def monitor():
    """Print the number of requests completed during each 1-second window."""
    global n
    while True:
        await asyncio.sleep(1)
        print(f"{n} reqs/sec")
        # Reset the shared counter; client() increments it per response.
        n = 0
async def client(address):
    """Open one connection to *address* and issue requests in a tight loop.

    Each iteration sends the payload b'10000', waits for a reply of up to
    100000 bytes, and bumps the shared counter read by monitor().
    """
    global n
    reader, writer = await asyncio.open_connection(*address)
    while True:
        writer.write(b'10000')
        await writer.drain()
        resp = await reader.read(100000)
        #print(resp)
        # NOTE(review): an empty resp (peer closed) is not detected, so this
        # loop would spin on a dead connection — confirm acceptable for a
        # throwaway benchmark.
        n += 1
async def main():
    """Launch 16 concurrent benchmark clients plus the throughput monitor."""
    tasks = [client(('localhost', 25000)) for _ in range(16)]
    tasks.append(monitor())
    await asyncio.gather(*tasks)
# Entry point: run clients + monitor until interrupted (Ctrl-C).
asyncio.run(main())
def solution(n):
    """Round *n* to the nearest multiple of 0.5.

    Ties follow Python's round() semantics on the doubled value
    (banker's rounding).
    """
    doubled = round(n * 2)
    return doubled / 2.0
# (Ported from an Arduino sketch that used Adafruit_NeoPixel.h; this is
# MicroPython code — the line below is kept only as provenance.)
from machine import Pin, SPI, ADC
from neopixel import NeoPixel
from time import sleep
import urandom
import util
import ustruct
import utime
import ntptime as np
leds = 8  # number of pixels in the strip (one 8-pixel column)
width = 15
pixel = NeoPixel(Pin(14, Pin.OUT), leds) #D5
dimFactor = 4
num = "32"
# Pick a random hue and convert it to RGB via the color-wheel helper.
color = urandom.getrandbits(8)
[r, g, b] = util.colorWheel(color)
# 5x8 column-bitmap font; draw_char_line() skips a 2-byte file header.
font_name='font5x8.bin'
fontFile = open(font_name, 'rb')
font_width, font_height = (5, 8)
# Initial NTP sync; secondCounter seeds the periodic re-sync check below.
np.settime()
secondCounter = utime.localtime()[5]
def draw_text(text):
    """Render *text* one character at a time on the LED column."""
    for character in text:
        draw_char(character)
def draw_char(ch):
    """Render a single character, sweeping through its font columns."""
    for column in range(font_width):
        draw_char_line(ch, column)
def draw_char_line(ch, pos):
    """Light one font column of *ch* on the strip, hold briefly, then blank.

    pos: column index within the 5x8 glyph (0 .. font_width - 1).
    """
    # Grab the byte for the current column of font data.
    # (The font file has a 2-byte header before the glyph bitmaps.)
    fontFile.seek(2 + (ord(ch) * font_width) + pos)
    line = ustruct.unpack('B', fontFile.read(1))[0]
    # Go through each row in the column byte.
    for y_pos in range(font_height):
        # Draw a pixel for each bit that's flipped on.
        if (line >> y_pos) & 0x1:
            pixel[y_pos] = [64, 16, 0]
    pixel.write()
    sleep(0.03)
    # Blank the strip so successive columns don't smear together.
    setPixels(0, 0, 0)
def setPixels(r, g, b):
    """Fill the whole strip with one RGB color and push it to the LEDs."""
    for index in range(leds):
        pixel[index] = [r, g, b]
    pixel.write()
while True:
    # Re-sync the clock from NTP roughly every 21600 s (presumably 6 hours).
    # NOTE: the original compared utime.localtime()[5] — a 0-59 seconds
    # field — against 21600, so the condition could never be true, and it
    # called the undefined name settime() (ntptime is imported as np),
    # which would have raised NameError had it ever fired.
    if (utime.time() - secondCounter) > 21600:
        np.settime()
        secondCounter = utime.time()
    hours = utime.localtime()[3]
    minutes = utime.localtime()[4]
    # Zero-pad minutes so e.g. 9:05 renders as "905", not "95".
    # (Hours are left unpadded, matching the original display.)
    timeString = str(hours) + "%02d" % minutes
    draw_text(timeString)
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/24 14:16
# @Author : Monica
# @Email : 498194410@qq.com
# @File : Common_Datas.py
# Global: system access address — login URL (beta environment)
web_login_url = "https://www-beta.mycloudhawk.com/login"
|
"""
6. Faça um Programa que peça o raio de um círculo, calcule e mostre sua área.
"""
from math import pi

# Read the radius, compute the circle's area, and print it to 2 decimals.
raio = float(input("Digite o raio do círculo: "))
area = pi * (raio ** 2)
print(f"A área do círculo é: {area:.2f} m2")
from django.contrib import admin
from .models import Event
# Register your models here.
class EventAdmin(admin.ModelAdmin):
    """Admin changelist for Event: columns, search, inline publish toggle."""
    list_display = ('event_name', 'is_published', 'event_start', 'event_end', 'event_country', 'event_city', 'event_state', 'get_partners')
    list_display_links = ('event_name',)
    search_fields = ('event_name', 'event_country', 'event_city', 'event_state', 'event_partners')
    list_editable = ('is_published',)
    list_per_page = 25
    def get_partners(self, obj):
        # Comma-separated partner names for the changelist column.
        return ', '.join([p.partner_name for p in obj.event_partners.all()])
    get_partners.short_description = 'Partners'
admin.site.register(Event, EventAdmin)
import pandas as pd
import numpy as np
from tensorflow import keras
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler, QuantileTransformer
# Cols, not stored in file because its easier for date parsing. timestamp,Ttl Volume,Avg Volume,Ttl Through,
# Ttl Left Turn,Ttl Right Turn,Ttl Wrong Way,Overall Avg Speed, Zone 2, Zone 3, Zone 4, Zone 5, Zone 2.1, Zone 3.1,
# Zone 4.1, Zone 5.1,Class 1: 0-22ft,Class 2: 22-36ft,Class 3: 36-Up, , .1, 04 Eb Through #1, 04 Eb Through #3,
# 07 Eb Left Turn #1, 07 Eb Left Turn #2, 04 Eb Through #1.1, 04 Eb Through #3.1, 07 Eb Left Turn #1.1, 07 Eb Left
# Turn #2.1
# establish hyper parameters
percent_train = 0.95
units = 64
dropout = 0.20
optimizer = 'adam'
loss = 'mae'
epochs = 100
sequential_entries = 96 # 48 hours worth of memory
# Read the CSV source (first column becomes the datetime index, so the
# remaining columns are labeled 1..N).
data_source = "combined_csv.csv"
df_data = pd.read_csv(
    data_source, parse_dates=True, index_col=0, header=None
)
# print(df_data.head())
# Display data on chart
fig, ax = plt.subplots()
df_data.plot(legend=False, ax=ax)
plt.show()
# Normalize data
# NOTE(review): df_data[[1, df_data.shape[1]]] selects exactly TWO columns
# (label 1 and the last label), not the whole range — confirm whether all
# columns were meant to be scaled (e.g. df_data.loc[:, 1:]).
scaler = MinMaxScaler()
scaler.fit(df_data[[1, df_data.shape[1]]])
df_data[[1, df_data.shape[1]]] = scaler.transform(df_data[[1, df_data.shape[1]]])
# Prep train and test data: chronological split, no shuffling.
train_size = int(df_data.shape[0] * percent_train)
test_size = len(df_data) - train_size
train, test = df_data.iloc[0:train_size], df_data.iloc[train_size:len(df_data)]
print('Train shape:', train.shape)
print('Test shape: ', test.shape)
# helper function to create the dataset for out model, straight from tf website
def create_dataset(X, y, time_steps=1):
    """Build overlapping sliding-window samples for sequence models.

    X: feature DataFrame; y: target Series aligned with X.
    Returns (samples, targets) where samples has shape
    [n_samples, time_steps, n_features] and targets[i] is the y value
    immediately following window i.
    """
    samples, targets = [], []
    for start in range(len(X) - time_steps):
        end = start + time_steps
        samples.append(X.iloc[start:end].values)
        targets.append(y.iloc[end])
    return np.array(samples), np.array(targets)
# We’ll create sequences with 96 rows of historical data, 48 hours
# reshape to 3D [n_samples, n_steps, n_features]
# Features exclude the first data column (label 1), which is used as the
# prediction target below.
trainable_data_cols = df_data.columns[1:]
X_train, y_train = create_dataset(train[trainable_data_cols], train[1], sequential_entries)
X_test, y_test = create_dataset(test[trainable_data_cols], test[1], sequential_entries)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
# Random model for now just to try to get things working
# NOTE(review): the hyper-parameter variables defined above (units, dropout,
# optimizer, loss, epochs) are NOT used here — the literals 64 / 0.2 /
# 'adam' / 'mae' / 10 are hard-coded. Confirm which set is authoritative
# before tuning.
model = keras.Sequential()
model.add(keras.layers.LSTM(
    units=64, input_shape=(X_train.shape[1], X_train.shape[2])
))
model.add(keras.layers.Dropout(rate=0.2))
# Encoder-decoder shape: repeat the encoding once per input timestep.
model.add(keras.layers.RepeatVector(n=X_train.shape[1]))
model.add(keras.layers.LSTM(units=64, return_sequences=True))
model.add(keras.layers.Dropout(rate=0.2))
model.add(
    keras.layers.TimeDistributed(
        keras.layers.Dense(units=X_train.shape[2])
    )
)
model.compile(loss='mae', optimizer='adam')
history = model.fit(
    X_train, y_train,
    epochs=10,
    batch_size=32,
    validation_split=0.1,
    shuffle=False
)
# history for loss
plt.figure(figsize=(10, 5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
from _typeshed import Incomplete
# Type stubs only (no runtime bodies).
# NOTE(review): signatures match networkx.algorithms.smallworld
# (random_reference / lattice_reference / sigma / omega) — confirm source.
def random_reference(
    G, niter: int = 1, connectivity: bool = True, seed: Incomplete | None = None
): ...
def lattice_reference(
    G,
    niter: int = 5,
    D: Incomplete | None = None,
    connectivity: bool = True,
    seed: Incomplete | None = None,
): ...
# sigma/omega: small-world coefficient stubs (nrand = number of random
# reference graphs averaged over).
def sigma(G, niter: int = 100, nrand: int = 10, seed: Incomplete | None = None): ...
def omega(G, niter: int = 5, nrand: int = 10, seed: Incomplete | None = None): ...
|
import datetime
# Print a greeting followed by the current date and time.
print("Introduction to CI")
current_time = datetime.datetime.now()
print("Date& Time: ")
print(current_time.strftime("%Y/%m/%d - %H:%M:%S"))
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the DBCore database abstraction.
"""
import os
import shutil
import sqlite3
import unittest
from test import _common
from beets import dbcore
from tempfile import mkstemp
# Fixture: concrete database and model classes. For migration tests, we
# have multiple models with different numbers of fields.
class SortFixture(dbcore.query.FieldSort):
    """Minimal named sort used to exercise Model._sorts lookup."""
    pass


class QueryFixture(dbcore.query.NamedQuery):
    """Minimal named query used to exercise Model._queries lookup."""
    def __init__(self, pattern):
        self.pattern = pattern

    def clause(self):
        # No SQL clause: forces the slow (Python-side) matching path.
        return None, ()

    def match(self):
        return True
class ModelFixture1(dbcore.Model):
    """Base test model: two fixed fields (int + string), one typed flex
    field, a named sort and a named query."""
    _table = 'test'
    _flex_table = 'testflex'
    _fields = {
        'id': dbcore.types.PRIMARY_ID,
        'field_one': dbcore.types.INTEGER,
        'field_two': dbcore.types.STRING,
    }
    _types = {
        'some_float_field': dbcore.types.FLOAT,
    }
    _sorts = {
        'some_sort': SortFixture,
    }
    _queries = {
        'some_query': QueryFixture,
    }

    @classmethod
    def _getters(cls):
        return {}

    def _template_funcs(self):
        return {}
class DatabaseFixture1(dbcore.Database):
    """Database hosting the base two-field model."""
    _models = (ModelFixture1,)
    pass


class ModelFixture2(ModelFixture1):
    """Same table as ModelFixture1, but field_two is an INTEGER."""
    _fields = {
        'id': dbcore.types.PRIMARY_ID,
        'field_one': dbcore.types.INTEGER,
        'field_two': dbcore.types.INTEGER,
    }


class DatabaseFixture2(dbcore.Database):
    _models = (ModelFixture2,)
    pass


class ModelFixture3(ModelFixture1):
    """Three-field variant for schema-migration tests."""
    _fields = {
        'id': dbcore.types.PRIMARY_ID,
        'field_one': dbcore.types.INTEGER,
        'field_two': dbcore.types.INTEGER,
        'field_three': dbcore.types.INTEGER,
    }


class DatabaseFixture3(dbcore.Database):
    _models = (ModelFixture3,)
    pass


class ModelFixture4(ModelFixture1):
    """Four-field variant for schema-migration tests."""
    _fields = {
        'id': dbcore.types.PRIMARY_ID,
        'field_one': dbcore.types.INTEGER,
        'field_two': dbcore.types.INTEGER,
        'field_three': dbcore.types.INTEGER,
        'field_four': dbcore.types.INTEGER,
    }


class DatabaseFixture4(dbcore.Database):
    _models = (ModelFixture4,)
    pass


class AnotherModelFixture(ModelFixture1):
    """Second model living in its own table, for multi-model databases."""
    _table = 'another'
    _flex_table = 'anotherflex'
    _fields = {
        'id': dbcore.types.PRIMARY_ID,
        'foo': dbcore.types.INTEGER,
    }


class ModelFixture5(ModelFixture1):
    """Model whose fixed fields cover string, float and boolean types."""
    _fields = {
        'some_string_field': dbcore.types.STRING,
        'some_float_field': dbcore.types.FLOAT,
        'some_boolean_field': dbcore.types.BOOLEAN,
    }


class DatabaseFixture5(dbcore.Database):
    _models = (ModelFixture5,)
    pass


class DatabaseFixtureTwoModels(dbcore.Database):
    """Database with two models, to test per-model table creation."""
    _models = (ModelFixture2, AnotherModelFixture)
    pass


class ModelFixtureWithGetters(dbcore.Model):
    """Model exposing a computed (getter-backed) attribute."""
    @classmethod
    def _getters(cls):
        return {'aComputedField': (lambda s: 'thing')}

    def _template_funcs(self):
        return {}
@_common.slow_test()
class MigrationTest(unittest.TestCase):
    """Tests the ability to change the database schema between
    versions.
    """
    @classmethod
    def setUpClass(cls):
        # Build a template database once; each test works on a copy.
        handle, cls.orig_libfile = mkstemp('orig_db')
        os.close(handle)
        # Set up a database with the two-field schema.
        old_lib = DatabaseFixture2(cls.orig_libfile)
        # Add an item to the old library.
        old_lib._connection().execute(
            'insert into test (field_one, field_two) values (4, 2)'
        )
        old_lib._connection().commit()
        old_lib._connection().close()
        del old_lib

    @classmethod
    def tearDownClass(cls):
        os.remove(cls.orig_libfile)

    def setUp(self):
        # Throwaway copy of the template database for this test only.
        handle, self.libfile = mkstemp('db')
        os.close(handle)
        shutil.copyfile(self.orig_libfile, self.libfile)

    def tearDown(self):
        os.remove(self.libfile)

    def test_open_with_same_fields_leaves_untouched(self):
        new_lib = DatabaseFixture2(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        c.connection.close()
        self.assertEqual(len(row.keys()), len(ModelFixture2._fields))

    def test_open_with_new_field_adds_column(self):
        new_lib = DatabaseFixture3(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        c.connection.close()
        self.assertEqual(len(row.keys()), len(ModelFixture3._fields))

    def test_open_with_fewer_fields_leaves_untouched(self):
        # Opening with a smaller schema must not drop existing columns.
        new_lib = DatabaseFixture1(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        c.connection.close()
        self.assertEqual(len(row.keys()), len(ModelFixture2._fields))

    def test_open_with_multiple_new_fields(self):
        new_lib = DatabaseFixture4(self.libfile)
        c = new_lib._connection().cursor()
        c.execute("select * from test")
        row = c.fetchone()
        c.connection.close()
        self.assertEqual(len(row.keys()), len(ModelFixture4._fields))

    def test_extra_model_adds_table(self):
        new_lib = DatabaseFixtureTwoModels(self.libfile)
        try:
            c = new_lib._connection()
            c.execute("select * from another")
            c.close()
        except sqlite3.OperationalError:
            self.fail("select failed")
class TransactionTest(unittest.TestCase):
    """Transactions bump the database revision on writes, not on reads."""
    def setUp(self):
        self.db = DatabaseFixture1(':memory:')

    def tearDown(self):
        self.db._connection().close()

    def test_mutate_increase_revision(self):
        old_rev = self.db.revision
        with self.db.transaction() as tx:
            tx.mutate(
                'INSERT INTO {} '
                '(field_one) '
                'VALUES (?);'.format(ModelFixture1._table),
                (111,),
            )
        self.assertGreater(self.db.revision, old_rev)

    def test_query_no_increase_revision(self):
        old_rev = self.db.revision
        with self.db.transaction() as tx:
            tx.query('PRAGMA table_info(%s)' % ModelFixture1._table)
        self.assertEqual(self.db.revision, old_rev)
class ModelTest(unittest.TestCase):
    """Model CRUD, flexible attributes, typed null handling and
    revision-based reloading against an in-memory database."""
    def setUp(self):
        self.db = DatabaseFixture1(':memory:')

    def tearDown(self):
        self.db._connection().close()

    def test_add_model(self):
        model = ModelFixture1()
        model.add(self.db)
        rows = self.db._connection().execute('select * from test').fetchall()
        self.assertEqual(len(rows), 1)

    def test_store_fixed_field(self):
        model = ModelFixture1()
        model.add(self.db)
        model.field_one = 123
        model.store()
        row = self.db._connection().execute('select * from test').fetchone()
        self.assertEqual(row['field_one'], 123)

    def test_revision(self):
        # Each store() bumps the database revision past the model's own;
        # load() only refetches when the database moved ahead.
        old_rev = self.db.revision
        model = ModelFixture1()
        model.add(self.db)
        model.store()
        self.assertEqual(model._revision, self.db.revision)
        self.assertGreater(self.db.revision, old_rev)
        mid_rev = self.db.revision
        model2 = ModelFixture1()
        model2.add(self.db)
        model2.store()
        self.assertGreater(model2._revision, mid_rev)
        self.assertGreater(self.db.revision, model._revision)
        # revision changed, so the model should be re-loaded
        model.load()
        self.assertEqual(model._revision, self.db.revision)
        # revision did not change, so no reload
        mod2_old_rev = model2._revision
        model2.load()
        self.assertEqual(model2._revision, mod2_old_rev)

    def test_retrieve_by_id(self):
        model = ModelFixture1()
        model.add(self.db)
        other_model = self.db._get(ModelFixture1, model.id)
        self.assertEqual(model.id, other_model.id)

    def test_store_and_retrieve_flexattr(self):
        model = ModelFixture1()
        model.add(self.db)
        model.foo = 'bar'
        model.store()
        other_model = self.db._get(ModelFixture1, model.id)
        self.assertEqual(other_model.foo, 'bar')

    def test_delete_flexattr(self):
        model = ModelFixture1()
        model['foo'] = 'bar'
        self.assertTrue('foo' in model)
        del model['foo']
        self.assertFalse('foo' in model)

    def test_delete_flexattr_via_dot(self):
        model = ModelFixture1()
        model['foo'] = 'bar'
        self.assertTrue('foo' in model)
        del model.foo
        self.assertFalse('foo' in model)

    def test_delete_flexattr_persists(self):
        model = ModelFixture1()
        model.add(self.db)
        model.foo = 'bar'
        model.store()
        model = self.db._get(ModelFixture1, model.id)
        del model['foo']
        model.store()
        model = self.db._get(ModelFixture1, model.id)
        self.assertFalse('foo' in model)

    def test_delete_non_existent_attribute(self):
        model = ModelFixture1()
        with self.assertRaises(KeyError):
            del model['foo']

    def test_delete_fixed_attribute(self):
        # Deleting a fixed field resets it to the field type's null value.
        model = ModelFixture5()
        model.some_string_field = 'foo'
        model.some_float_field = 1.23
        model.some_boolean_field = True
        for field, type_ in model._fields.items():
            self.assertNotEqual(model[field], type_.null)
        for field, type_ in model._fields.items():
            del model[field]
            self.assertEqual(model[field], type_.null)

    def test_null_value_normalization_by_type(self):
        model = ModelFixture1()
        model.field_one = None
        self.assertEqual(model.field_one, 0)

    def test_null_value_stays_none_for_untyped_field(self):
        model = ModelFixture1()
        model.foo = None
        self.assertEqual(model.foo, None)

    def test_normalization_for_typed_flex_fields(self):
        model = ModelFixture1()
        model.some_float_field = None
        self.assertEqual(model.some_float_field, 0.0)

    def test_load_deleted_flex_field(self):
        model1 = ModelFixture1()
        model1['flex_field'] = True
        model1.add(self.db)
        model2 = self.db._get(ModelFixture1, model1.id)
        self.assertIn('flex_field', model2)
        del model1['flex_field']
        model1.store()
        model2.load()
        self.assertNotIn('flex_field', model2)

    def test_check_db_fails(self):
        with self.assertRaisesRegex(ValueError, 'no database'):
            dbcore.Model()._check_db()
        with self.assertRaisesRegex(ValueError, 'no id'):
            ModelFixture1(self.db)._check_db()
        dbcore.Model(self.db)._check_db(need_id=False)

    def test_missing_field(self):
        with self.assertRaises(AttributeError):
            ModelFixture1(self.db).nonExistingKey

    def test_computed_field(self):
        model = ModelFixtureWithGetters()
        self.assertEqual(model.aComputedField, 'thing')
        with self.assertRaisesRegex(KeyError, 'computed field .+ deleted'):
            del model.aComputedField

    def test_items(self):
        model = ModelFixture1(self.db)
        model.id = 5
        self.assertEqual({('id', 5), ('field_one', 0), ('field_two', '')},
                         set(model.items()))

    def test_delete_internal_field(self):
        model = dbcore.Model()
        del model._db
        with self.assertRaises(AttributeError):
            model._db

    def test_parse_nonstring(self):
        with self.assertRaisesRegex(TypeError, "must be a string"):
            dbcore.Model._parse(None, 42)
class FormatTest(unittest.TestCase):
    """Human-readable formatting of fixed, flexible and unset fields."""
    def test_format_fixed_field_integer(self):
        model = ModelFixture1()
        model.field_one = 155
        value = model.formatted().get('field_one')
        self.assertEqual(value, '155')

    def test_format_fixed_field_integer_normalized(self):
        """The normalize method of the Integer class rounds floats
        """
        model = ModelFixture1()
        model.field_one = 142.432
        value = model.formatted().get('field_one')
        self.assertEqual(value, '142')
        model.field_one = 142.863
        value = model.formatted().get('field_one')
        self.assertEqual(value, '143')

    def test_format_fixed_field_string(self):
        model = ModelFixture1()
        model.field_two = 'caf\xe9'
        value = model.formatted().get('field_two')
        self.assertEqual(value, 'caf\xe9')

    def test_format_flex_field(self):
        model = ModelFixture1()
        model.other_field = 'caf\xe9'
        value = model.formatted().get('other_field')
        self.assertEqual(value, 'caf\xe9')

    def test_format_flex_field_bytes(self):
        # Bytes values must be decoded to str for display.
        model = ModelFixture1()
        model.other_field = 'caf\xe9'.encode()
        value = model.formatted().get('other_field')
        self.assertTrue(isinstance(value, str))
        self.assertEqual(value, 'caf\xe9')

    def test_format_unset_field(self):
        model = ModelFixture1()
        value = model.formatted().get('other_field')
        self.assertEqual(value, '')

    def test_format_typed_flex_field(self):
        model = ModelFixture1()
        model.some_float_field = 3.14159265358979
        value = model.formatted().get('some_float_field')
        self.assertEqual(value, '3.1')
class FormattedMappingTest(unittest.TestCase):
    """Behavior of the mapping returned by Model.formatted()."""
    def test_keys_equal_model_keys(self):
        model = ModelFixture1()
        formatted = model.formatted()
        self.assertEqual(set(model.keys(True)), set(formatted.keys()))

    def test_get_unset_field(self):
        # Subscription raises for unknown fields...
        model = ModelFixture1()
        formatted = model.formatted()
        with self.assertRaises(KeyError):
            formatted['other_field']

    def test_get_method_with_default(self):
        # ...while .get() falls back to the empty string by default.
        model = ModelFixture1()
        formatted = model.formatted()
        self.assertEqual(formatted.get('other_field'), '')

    def test_get_method_with_specified_default(self):
        model = ModelFixture1()
        formatted = model.formatted()
        self.assertEqual(formatted.get('other_field', 'default'), 'default')
class ParseTest(unittest.TestCase):
    """Model._parse string-to-value conversion per field type."""
    def test_parse_fixed_field(self):
        value = ModelFixture1._parse('field_one', '2')
        self.assertIsInstance(value, int)
        self.assertEqual(value, 2)

    def test_parse_flex_field(self):
        value = ModelFixture1._parse('some_float_field', '2')
        self.assertIsInstance(value, float)
        self.assertEqual(value, 2.0)

    def test_parse_untyped_field(self):
        # Unknown fields stay strings.
        value = ModelFixture1._parse('field_nine', '2')
        self.assertEqual(value, '2')
class QueryParseTest(unittest.TestCase):
    """Parsing of single query parts into (key, pattern, query class)."""
    def pqp(self, part):
        # Helper: parse one part with 'year' as the only typed field and
        # ':' as the regexp prefix.
        return dbcore.queryparse.parse_query_part(
            part,
            {'year': dbcore.query.NumericQuery},
            {':': dbcore.query.RegexpQuery},
        )[:-1]  # remove the negate flag

    def test_one_basic_term(self):
        q = 'test'
        r = (None, 'test', dbcore.query.SubstringQuery)
        self.assertEqual(self.pqp(q), r)

    def test_one_keyed_term(self):
        q = 'test:val'
        r = ('test', 'val', dbcore.query.SubstringQuery)
        self.assertEqual(self.pqp(q), r)

    def test_colon_at_end(self):
        q = 'test:'
        r = ('test', '', dbcore.query.SubstringQuery)
        self.assertEqual(self.pqp(q), r)

    def test_one_basic_regexp(self):
        q = r':regexp'
        r = (None, 'regexp', dbcore.query.RegexpQuery)
        self.assertEqual(self.pqp(q), r)

    def test_keyed_regexp(self):
        q = r'test::regexp'
        r = ('test', 'regexp', dbcore.query.RegexpQuery)
        self.assertEqual(self.pqp(q), r)

    def test_escaped_colon(self):
        q = r'test\:val'
        r = (None, 'test:val', dbcore.query.SubstringQuery)
        self.assertEqual(self.pqp(q), r)

    def test_escaped_colon_in_regexp(self):
        q = r':test\:regexp'
        r = (None, 'test:regexp', dbcore.query.RegexpQuery)
        self.assertEqual(self.pqp(q), r)

    def test_single_year(self):
        q = 'year:1999'
        r = ('year', '1999', dbcore.query.NumericQuery)
        self.assertEqual(self.pqp(q), r)

    def test_multiple_years(self):
        q = 'year:1999..2010'
        r = ('year', '1999..2010', dbcore.query.NumericQuery)
        self.assertEqual(self.pqp(q), r)

    def test_empty_query_part(self):
        q = ''
        r = (None, '', dbcore.query.SubstringQuery)
        self.assertEqual(self.pqp(q), r)
class QueryFromStringsTest(unittest.TestCase):
    """Combining parsed query parts into an AndQuery over a model."""
    def qfs(self, strings):
        return dbcore.queryparse.query_from_strings(
            dbcore.query.AndQuery,
            ModelFixture1,
            {':': dbcore.query.RegexpQuery},
            strings,
        )

    def test_zero_parts(self):
        # No parts collapses to a single always-true subquery.
        q = self.qfs([])
        self.assertIsInstance(q, dbcore.query.AndQuery)
        self.assertEqual(len(q.subqueries), 1)
        self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery)

    def test_two_parts(self):
        q = self.qfs(['foo', 'bar:baz'])
        self.assertIsInstance(q, dbcore.query.AndQuery)
        self.assertEqual(len(q.subqueries), 2)
        self.assertIsInstance(q.subqueries[0], dbcore.query.AnyFieldQuery)
        self.assertIsInstance(q.subqueries[1], dbcore.query.SubstringQuery)

    def test_parse_fixed_type_query(self):
        q = self.qfs(['field_one:2..3'])
        self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery)

    def test_parse_flex_type_query(self):
        q = self.qfs(['some_float_field:2..3'])
        self.assertIsInstance(q.subqueries[0], dbcore.query.NumericQuery)

    def test_empty_query_part(self):
        q = self.qfs([''])
        self.assertIsInstance(q.subqueries[0], dbcore.query.TrueQuery)

    def test_parse_named_query(self):
        # 'some_query' resolves through ModelFixture1._queries.
        q = self.qfs(['some_query:foo'])
        self.assertIsInstance(q.subqueries[0], QueryFixture)
class SortFromStringsTest(unittest.TestCase):
    """Building Sort objects from 'field+' / 'field-' specifications."""
    def sfs(self, strings):
        return dbcore.queryparse.sort_from_strings(
            ModelFixture1,
            strings,
        )

    def test_zero_parts(self):
        s = self.sfs([])
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(s, dbcore.query.NullSort())

    def test_one_parts(self):
        s = self.sfs(['field+'])
        self.assertIsInstance(s, dbcore.query.Sort)

    def test_two_parts(self):
        s = self.sfs(['field+', 'another_field-'])
        self.assertIsInstance(s, dbcore.query.MultipleSort)
        self.assertEqual(len(s.sorts), 2)

    def test_fixed_field_sort(self):
        # Fixed fields sort in SQL...
        s = self.sfs(['field_one+'])
        self.assertIsInstance(s, dbcore.query.FixedFieldSort)
        self.assertEqual(s, dbcore.query.FixedFieldSort('field_one'))

    def test_flex_field_sort(self):
        # ...while flexible fields fall back to Python-side sorting.
        s = self.sfs(['flex_field+'])
        self.assertIsInstance(s, dbcore.query.SlowFieldSort)
        self.assertEqual(s, dbcore.query.SlowFieldSort('flex_field'))

    def test_special_sort(self):
        # 'some_sort' resolves through ModelFixture1._sorts.
        s = self.sfs(['some_sort+'])
        self.assertIsInstance(s, SortFixture)
class ParseSortedQueryTest(unittest.TestCase):
    """parse_sorted_query: OR-splitting on commas plus sort extraction."""
    def psq(self, parts):
        return dbcore.parse_sorted_query(
            ModelFixture1,
            parts.split(),
        )

    def test_and_query(self):
        q, s = self.psq('foo bar')
        self.assertIsInstance(q, dbcore.query.AndQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 2)

    def test_or_query(self):
        q, s = self.psq('foo , bar')
        self.assertIsInstance(q, dbcore.query.OrQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 2)

    def test_no_space_before_comma_or_query(self):
        q, s = self.psq('foo, bar')
        self.assertIsInstance(q, dbcore.query.OrQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 2)

    def test_no_spaces_or_query(self):
        # A comma with no surrounding spaces is part of the term, not an OR.
        q, s = self.psq('foo,bar')
        self.assertIsInstance(q, dbcore.query.AndQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 1)

    def test_trailing_comma_or_query(self):
        q, s = self.psq('foo , bar ,')
        self.assertIsInstance(q, dbcore.query.OrQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 3)

    def test_leading_comma_or_query(self):
        q, s = self.psq(', foo , bar')
        self.assertIsInstance(q, dbcore.query.OrQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 3)

    def test_only_direction(self):
        # A bare '-' is treated as a query term, not a sort direction.
        q, s = self.psq('-')
        self.assertIsInstance(q, dbcore.query.AndQuery)
        self.assertIsInstance(s, dbcore.query.NullSort)
        self.assertEqual(len(q.subqueries), 1)
class ResultsIteratorTest(unittest.TestCase):
    """Iteration, indexing and re-iteration semantics of fetch Results."""
    def setUp(self):
        # Two rows with flex attribute foo: 'baz' (first) and 'bar' (second).
        self.db = DatabaseFixture1(':memory:')
        model = ModelFixture1()
        model['foo'] = 'baz'
        model.add(self.db)
        model = ModelFixture1()
        model['foo'] = 'bar'
        model.add(self.db)

    def tearDown(self):
        self.db._connection().close()

    def test_iterate_once(self):
        objs = self.db._fetch(ModelFixture1)
        self.assertEqual(len(list(objs)), 2)

    def test_iterate_twice(self):
        # Results are re-iterable after exhaustion.
        objs = self.db._fetch(ModelFixture1)
        list(objs)
        self.assertEqual(len(list(objs)), 2)

    def test_concurrent_iterators(self):
        # Two independent iterators over the same Results don't interfere.
        results = self.db._fetch(ModelFixture1)
        it1 = iter(results)
        it2 = iter(results)
        next(it1)
        list(it2)
        self.assertEqual(len(list(it1)), 1)

    def test_slow_query(self):
        q = dbcore.query.SubstringQuery('foo', 'ba', False)
        objs = self.db._fetch(ModelFixture1, q)
        self.assertEqual(len(list(objs)), 2)

    def test_slow_query_negative(self):
        q = dbcore.query.SubstringQuery('foo', 'qux', False)
        objs = self.db._fetch(ModelFixture1, q)
        self.assertEqual(len(list(objs)), 0)

    def test_iterate_slow_sort(self):
        s = dbcore.query.SlowFieldSort('foo')
        res = self.db._fetch(ModelFixture1, sort=s)
        objs = list(res)
        self.assertEqual(objs[0].foo, 'bar')
        self.assertEqual(objs[1].foo, 'baz')

    def test_unsorted_subscript(self):
        objs = self.db._fetch(ModelFixture1)
        self.assertEqual(objs[0].foo, 'baz')
        self.assertEqual(objs[1].foo, 'bar')

    def test_slow_sort_subscript(self):
        s = dbcore.query.SlowFieldSort('foo')
        objs = self.db._fetch(ModelFixture1, sort=s)
        self.assertEqual(objs[0].foo, 'bar')
        self.assertEqual(objs[1].foo, 'baz')

    def test_length(self):
        objs = self.db._fetch(ModelFixture1)
        self.assertEqual(len(objs), 2)

    def test_out_of_range(self):
        objs = self.db._fetch(ModelFixture1)
        with self.assertRaises(IndexError):
            objs[100]

    def test_no_results(self):
        self.assertIsNone(self.db._fetch(
            ModelFixture1, dbcore.query.FalseQuery()).get())
def suite():
    """Aggregate this module's tests for the 'suite' entry point."""
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
import numpy as np
MAX = 26  # size of the lowercase English alphabet (one count slot per letter)
def compare(arr1, arr2):
    """Return True when the two letter-count arrays are element-wise equal.

    Bug fixed: the original called .count(0) on the result of np.subtract,
    but ndarrays have no list-style .count() method, so every call raised
    AttributeError (the commented-out list() conversion hinted at this).
    """
    diff = np.subtract(arr1, arr2)
    # All-zero difference means the counts match exactly.
    return not np.any(diff)
class Solution(object):
    def findAnagrams(self, s, p):
        """Return the start indices of all anagrams of p within s.

        Sliding-window letter counts over lowercase a-z. Fixes from the
        original: (1) the empty-result path did a bare `return` (None)
        instead of returning []; (2) window comparison now uses direct
        list equality instead of the module-level compare() helper, whose
        ndarray .count() call raised AttributeError at runtime.
        """
        M = len(p)
        N = len(s)
        if N < M:
            return []
        countP = [0] * 26   # letter counts for p (a-z)
        countTW = [0] * 26  # letter counts for the current window of s
        res = []
        for i in range(M):
            countP[ord(p[i]) - 97] += 1
            countTW[ord(s[i]) - 97] += 1
        for i in range(M, N):
            # countTW currently covers the window s[i-M:i].
            if countP == countTW:
                res.append(i - M)
            # Slide the window one character to the right.
            countTW[ord(s[i]) - 97] += 1
            countTW[ord(s[i - M]) - 97] -= 1
        # Check the final window s[N-M:N].
        if countP == countTW:
            res.append(N - M)
        return res
#!/usr/bin/python
def divide(n):
    """Length of the recurring cycle in the decimal expansion of 1/n.

    Performs decimal long division, recording the scaled remainders; when a
    remainder repeats, the cycle length is the distance back to its first
    occurrence.  Returns 0 for terminating decimals.

    Bug fix: the original returned ``len(history)``, which also counted the
    non-repeating prefix (e.g. it reported 2 for 1/6, whose cycle is 1).
    """
    remainder = 10
    history = [10]
    while remainder > 0:
        # One long-division step: keep the remainder, shift one decimal place.
        remainder = (remainder % n) * 10
        if remainder in history:
            return len(history) - history.index(remainder)
        history.append(remainder)
    return 0
# Project Euler 26: find the denominator d < 1000 whose unit fraction 1/d
# has the longest recurring decimal cycle.
index = 0
maxNum = 0
for candidate in range(1, 1000):
    cycle_len = divide(candidate)
    if cycle_len > maxNum:
        maxNum, index = cycle_len, candidate
print(index)
|
import json
from django.core import serializers
from django.http import HttpResponse
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse_lazy as r
from django.views.generic import ListView, DetailView
from django.views.generic import UpdateView, DeleteView
from .mixins import NameSearchMixin
from .models import Person, Phone
from .forms import PersonForm
def home(request):
    """Render the site landing page."""
    return render(request, 'index.html')
class PersonList(NameSearchMixin, ListView):
    """Paginated person listing.

    NameSearchMixin (from .mixins) presumably narrows the queryset by a
    name search parameter — confirm in .mixins.
    """
    model = Person
    paginate_by = 5

    def get_context_data(self, **kwargs):
        # Expose an empty PersonForm so the list template can render a
        # quick-create form alongside the list.
        context = super(PersonList, self).get_context_data(**kwargs)
        context['form'] = PersonForm()
        return context
# Generic read-only detail page for a single Person.
person_detail = DetailView.as_view(model=Person)
def person_create(request):
    """Create a Person.

    AJAX POST: validate and save, answering plain 'OK' or HTTP 400.
    Non-AJAX POST: re-renders the form bound to the submitted data.
    GET: renders an empty form.

    NOTE(review): request.is_ajax() was deprecated in Django 3.1 and removed
    in 4.0 — confirm the project's Django version.
    NOTE(review): the non-AJAX POST branch never validates or saves the
    form — confirm this is intentional.
    """
    if request.method == 'POST':
        if request.is_ajax():
            form = PersonForm(request.POST)
            if form.is_valid():
                form.save()
                return HttpResponse('OK')
            else:
                return HttpResponse(status=400)
        else:
            return render(request, 'core/person_form.html', {'form': PersonForm(request.POST)})
    return render(request, 'core/person_form.html', {'form': PersonForm()})
# Generic edit view reusing the same PersonForm as creation.
person_update = UpdateView.as_view(model=Person, form_class=PersonForm)
# Generic confirm-and-delete view; redirects back to the list on success.
person_delete = DeleteView.as_view(
    model=Person,
    success_url=r('core:person_list')
)
def person_phones(request, pk):
    """Return the given person's phone numbers as a serialized JSON array."""
    queryset = Phone.objects.filter(person=pk)
    payload = serializers.serialize('json', queryset)
    return HttpResponse(payload, content_type='application/json')
def person_phone_create(request):
    """Create a phone number for a person (POST fields: `person` pk, `phone`).

    Returns the created phone number as JSON.

    Fixes:
      * the response body is now valid JSON — the original returned the raw
        phone string under an application/json content type (the otherwise
        unused module-level `json` import was clearly intended for this);
      * non-POST requests now get a 405 instead of hitting the undefined
        `data` variable (NameError -> HTTP 500).
    """
    if request.method == 'POST':
        person = Person.objects.get(pk=request.POST['person'])
        phone = request.POST['phone']
        Phone.objects.create(person=person, phone=phone)
        return HttpResponse(json.dumps(phone), content_type='application/json')
    return HttpResponse(status=405)
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the Kaggle-style train/test CSVs; TARGET exists only in train.
train = pd.read_csv("../input/train.csv", index_col=None)
# train.head()
test = pd.read_csv("../input/test.csv", index_col=None)
# test.head()
target = train.TARGET
print (target.describe())
# Quick look at the target distribution.
plt.hist(target)
plt.ylabel("freq")
plt.xlabel("target value")
plt.show()
# Columns present in only one of the two frames (expected: just TARGET).
uncomList = list(set(train.columns) ^ set(test.columns))
print (uncomList)
# Stack train on top of test so feature transformations apply consistently.
train.drop("TARGET", axis=1, inplace=True)
combi = pd.concat([train, test], axis=0)
print ("train.shape[0] + test.shape[0]:", train.shape[0]+test.shape[0])
print ("combi.shape:", combi.shape)
# Partition columns by dtype (float / int / everything else).
floatList = []
intList = []
objectList = []
for t in combi.columns:
    if combi[t].dtypes==np.float64 or combi[t].dtypes==np.float32:
        floatList.append(t)
    elif combi[t].dtypes==np.int64 or combi[t].dtypes==np.int32:
        intList.append(t)
    else:
        objectList.append(t)
print ("The number of float columns:", len(floatList))
print ("The number of int columns:", len(intList))
print ("The number of non-numeric columns:", len(objectList))
# Count missing values per column and report any column containing NaNs.
combiNan = np.sum(combi.isnull())
combiNanCounter = 0
combiNanCol = []
for n in range(len(combiNan)):
    if combiNan[n] > 0:
        print (combiNan.index[n])
        combiNanCol.append(n)
        combiNanCounter += 1
# NOTE(review): the counter only increments for NaN-bearing columns, so
# "Checked columns" and "Columns with Nan" always print the same number.
print ("Checked columns:", combiNanCounter)
print ("Columns with Nan:", len(combiNanCol))
# Bucket columns by cardinality.
# Bug fix: the second branch used bitwise `&`, which binds tighter than
# comparisons, so the condition parsed as
#   nunique() > (10 & nunique()) <= 100
# instead of the intended range test.  Python's chained comparison
# `10 < x <= 100` expresses it correctly (and nunique() is now computed once).
uniq10 = []
uniq100 = []
uniqMany = []
for u in combi.columns:
    n_unique = combi[u].nunique()
    if n_unique <= 10:
        uniq10.append(u)
    elif 10 < n_unique <= 100:
        uniq100.append(u)
    else:
        uniqMany.append(u)
print ("The number of columns with <= 10 unique values:", len(uniq10))
print ("The number of columns with 10<x<=100 unique values", len(uniq100))
print ("The number of columns with >100 unique values:", len(uniqMany))
# Check only for the colums with float values to avoid categoricals to be incorporated
combiFloat = combi[floatList]
# <Removed from ver3>
# May not need this feature scaling (0-1) since probably matplotlib could deal with it
# But basically pearson correlation coefficient is sensitive to scale so this is just to make sure
# from sklearn.preprocessing import MinMaxScaler
# combiFloat = combiFloat.apply(lambda x: MinMaxScaler().fit_transform(x))
# get correlation coefficient as a matrix
corrFloat = combiFloat.corr()
print ("The shape of correlation coefficient matrix:", corrFloat.shape)
# Below code URL: https://stanford.edu/~mwaskom/software/seaborn/examples/many_pairwise_correlations.html
# Generate a mask for the upper triangle
# Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin bool is the documented replacement.
mask = np.zeros_like(corrFloat, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(20, 17))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corrFloat, mask=mask, cmap=cmap, vmin=-1, vmax=1,
            square=True, xticklabels=5, yticklabels=5,
            linewidths=.5, cbar_kws={"shrink": .8}, ax=ax)
# threshold is arbitrary, but in this example threshold = +/-0.8 (Pearson's correlation coefficient)
# check the number of unique combinations of 2 variables
import itertools
pairs = list(itertools.combinations(corrFloat.columns, 2))
print ("Variables pairs:", len(pairs))
# For each highly-correlated (|r| > 0.8) pair, remember one of the two
# columns so it can be dropped below.
hiCor = []
hiCorCounter = 0
for i in range(corrFloat.shape[1]):
    for j in range(0, i):
        # .iloc avoids the deprecated integer-position fallback of
        # label-based Series indexing (corrFloat[col][j]).
        value = corrFloat.iloc[j, i]
        if value > 0.8 or value < -0.8:
            hiCor.append(corrFloat.index[j])
            hiCorCounter += 1
# get unique values from the list
hiCor = list(set(hiCor))
print ("Checked pairs:", hiCorCounter)
print ("Columns to be removed due to high correlation:", len(hiCor))
combi.drop(hiCor, axis=1, inplace=True)
print ("New combi shape:", combi.shape)
# Fix: .copy() so the TARGET assignment below modifies an independent frame
# instead of a view of `combi` (SettingWithCopyWarning / silent-no-op risk).
train = combi[:train.shape[0]].copy()
train["TARGET"] = target
test = combi[train.shape[0]:]
print ("new train shape:", train.shape)
print ("new test shape:", test.shape)
# Persist and reload to sanity-check the written files.
train.to_csv("../input/train_03_04.csv", index=False)
test.to_csv("../input/test_03_04.csv", index=False)
train_new = pd.read_csv("../input/train_03_04.csv", index_col=None)
# train.head()
test_new = pd.read_csv("../input/test_03_04.csv", index_col=None)
# test.head()
print ("new train shape:", train_new.shape)
print ("new test shape:", test_new.shape)
|
'''
This module defines
-- the layout of figures
-- and the non-data presentation settings
1. "non-data" settings means line style, color, and so on
2. axes-wide information must be defined externally
'''
import matplotlib.pyplot as plt
import numpy as np
import my_ax
class Fig1:
    """Single-panel figure: one matplotlib figure wrapping one my_ax.Axis."""
    # Rainbow-ish palette used to distinguish successive data series.
    color_list = ['#FF0000',
                  '#FF7D00',
                  '#FFFF00',
                  '#00FF00',
                  '#00FFFF',
                  '#0000FF',
                  '#FF00FF']

    def __init__(self):
        fig, ax = plt.subplots()
        self.fig = fig
        self.ax = my_ax.Axis(ax)

    def fig_ax1(self, data):
        """Plot every (x, y) series in *data* on the single axis.

        Fix/generalization: the color index now wraps around
        (i % len(color_list)), so plotting more than seven series no
        longer raises IndexError.
        """
        for i, series in enumerate(data):
            parameters = {'x': series[0],
                          'y': series[1],
                          'linestyle': '',
                          'color': self.color_list[i % len(self.color_list)],
                          'linewidth': '',
                          'marker': '',
                          'markercolor': '',
                          'markersize': '',
                          'label': ''
                          }
            self.ax.object1(parameters)

    def fig_ax1_pre(self, parameters):
        """Forward axis-level settings to the Axis wrapper."""
        self.ax.ax_pre(parameters)

    def show(self):
        """Display the figure window."""
        plt.show()
class Fig2:
    """2x2 grid of my_ax.Axis panels sharing one matplotlib figure.

    Refactor: the four byte-identical fig_axXY bodies are collapsed into the
    shared _plot helper; the public methods and their behavior are unchanged.
    """

    def __init__(self):
        fig, axs = plt.subplots(nrows=2, ncols=2)
        self.fig = fig
        self.ax00 = my_ax.Axis(axs[0, 0])
        self.ax01 = my_ax.Axis(axs[0, 1])
        self.ax10 = my_ax.Axis(axs[1, 0])
        self.ax11 = my_ax.Axis(axs[1, 1])

    @staticmethod
    def _plot(axis, data):
        # Build the parameter dict (style fields intentionally left blank,
        # exactly as in the original) and forward it to the Axis wrapper.
        parameters = {'x': data[0],
                      'y': data[1],
                      'linestyle': '',
                      'color': '',
                      'linewidth': '',
                      'marker': '',
                      'markercolor': '',
                      'markersize': '',
                      'label': ''
                      }
        axis.object1(parameters)

    def fig_ax00(self, data):
        """Plot (data[0], data[1]) on the top-left panel."""
        self._plot(self.ax00, data)

    def fig_ax00_pre(self, parameters):
        self.ax00.ax_pre(parameters)

    def fig_ax01(self, data):
        """Plot (data[0], data[1]) on the top-right panel."""
        self._plot(self.ax01, data)

    def fig_ax01_pre(self, parameters):
        self.ax01.ax_pre(parameters)

    def fig_ax10(self, data):
        """Plot (data[0], data[1]) on the bottom-left panel."""
        self._plot(self.ax10, data)

    def fig_ax10_pre(self, parameters):
        self.ax10.ax_pre(parameters)

    def fig_ax11(self, data):
        """Plot (data[0], data[1]) on the bottom-right panel."""
        self._plot(self.ax11, data)

    def fig_ax11_pre(self, parameters):
        self.ax11.ax_pre(parameters)

    def show(self):
        """Display the figure window."""
        plt.show()
|
def palindrome(string):
    """Return True if the characters of *string* (lower-cased, spaces
    ignored) can be rearranged into a palindrome — i.e. at most one
    character occurs an odd number of times."""
    counts = {}
    for ch in string.lower():
        if ch != " ":
            counts[ch] = counts.get(ch, 0) + 1
    odd_seen = False
    for count in counts.values():
        if count % 2 == 1:
            if odd_seen:
                return False
            odd_seen = True
    return True
print(palindrome("racerac"))
|
# Demonstrate Python's numeric types and float rounding behavior.
int1 = 1
float2 = 1.0
float3 = 1.1
int4 = -1
print(int1,type(int1))
print(float2,type(float2))
print(float3,type(float3))
print(int4,type(int4))
print("0.4+0.6=",0.4 + 0.6)
# The trailing bare string below is a no-op expression statement meaning
# "floating-point arithmetic is imprecise" (kept verbatim).
print("0.4+0.55=",0.4 + 0.55);'浮点数计算不精确'
|
import re
from random import uniform
import amath.Computation.relationship as _gcd
import amath.Computation.trig as _trig
import amath.constants as const
from amath.Computation.Basic import sqrt
from amath.Computation.num_properties import factors
from amath.Computation.rounding import round
from amath.algebra.Function import Function
_superscript_map = {
"0": "⁰", "1": "¹", "2": "²", "3": "³", "4": "⁴", "5": "⁵", "6": "⁶",
"7": "⁷", "8": "⁸", "9": "⁹"}
_trans = str.maketrans(
''.join(_superscript_map.keys()),
''.join(_superscript_map.values()))
def _format(coe):
coe = coe[::-1]
string = ""
if coe == [0]:
return "0"
for i in range(len(coe)):
exp = str(len(coe) - i - 1).translate(_trans)
if exp == _superscript_map['1']:
exp = ''
if coe[i] == 0:
# if i == len(coe) - 1:
# string += " "
continue
string += f"{coe[i]}x{exp} + "
return string[:-5]
def _duplicates(seq):
dupes = [i for n, i in enumerate(seq) if i not in seq[:n]]
return [(x, seq.count(x)) for x in dupes]
class Polynomial(Function):
    def __init__(self, coe):
        """Initializes Polynomial with list of coefficients
        List of coef goes from largest to smallest degree
        3x³ + 2x² - 5x - 3 -> [3, 2, -5, -3]
        :param coe: list of Coefficients of polynomial
        :type coe: list
        """
        self.coe = coe[::-1]  # Stored from smallest to largest degree
        self.normalize()
        # The zero polynomial gets the conventional degree -1.
        if self.coe == [0]:
            self.degree = -1
        else:
            self.degree = len(self.coe) - 1
    def __repr__(self):
        # Human-readable form, highest degree first (see _format).
        return _format(self.coe)
def __call__(self, x):
return sum(self.coe[i] * pow(x, i) for i in range(self.degree + 1))
def __eq__(self, other):
if isinstance(other, Polynomial):
if self.coe == other.coe:
return True
return False
def normalize(self):
"""Normalizes Polynomial to remove extra degrees with 0 as coefficient"""
while self.coe and self.coe[-1] == 0:
self.coe.pop()
if not self.coe:
self.coe.append(0)
    def find_roots(self):
        """Finds all roots of Polynomial
        Uses Aberth Method for polynomials of degree 4 or above. Otherwise uses formulas for polynomials of
        degree 1, 2, or 3.
        :return: list of all roots of polynomial
        :rtype: list
        Uses Formulas for low degrees
        >>> Polynomial([1, 3, -4]).find_roots()
        [-4.0, 1.0]
        >>> Polynomial([2, -9, 4, 15]).find_roots()
        [3.0, -1.0, 2.5]
        Uses Aberth's method for higher degrees
        >>> Polynomial([1, -8, 17, 14, -84, 72]).find_roots()
        [3.0, 2.0, 2.0, -2.0, 3.0]
        """
        # Constant polynomial: no roots.
        if self.degree == 0:
            return []
        # Linear: single root of bx + c.
        if self.degree == 1:
            return [-self.coe[0] / self.coe[1]]
        # Quadratic formula (sqrt may yield complex values).
        if self.degree == 2:
            a = self.coe[2]
            b = self.coe[1]
            c = self.coe[0]
            return [(0.5 * (-b - sqrt(b ** 2 - 4. * a * c))) / a, (0.5 * (-b + sqrt(b ** 2 - 4. * a * c))) / a]
        # Closed-form cubic solution.
        if self.degree == 3:
            a = self.coe[3]
            b = self.coe[2]
            c = self.coe[1]
            d = self.coe[0]
            r1 = -b / (3. * a) - (2 ** (1 / 3) * (-b ** 2 + 3 * a * c)) / (3. * a * (
                    -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d + sqrt(4 * (-b ** 2 + 3 * a * c) ** 3 + (
                    -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d) ** 2)) ** (1 / 3)) + (
                         -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d + sqrt(
                     4 * (-b ** 2 + 3 * a * c) ** 3 + (
                             -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d) ** 2)) ** (1 / 3) / (
                         3. * 2 ** (1 / 3) * a)
            r2 = -b / (3. * a) + ((1 + 1j * sqrt(3)) * (-b ** 2 + 3 * a * c)) / (
                    3. * 2 ** (2 / 3) * a * (-2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d + sqrt(
                4 * (-b ** 2 + 3 * a * c) ** 3 + (
                        -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d) ** 2)) ** (1 / 3)) - (
                         (1 - 1j * sqrt(3)) * (-2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d + sqrt(
                     4 * (-b ** 2 + 3 * a * c) ** 3 + (
                             -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d) ** 2)) ** (1 / 3)) / (
                         6. * 2 ** (1 / 3) * a)
            r3 = -b / (3. * a) + ((1 - 1j * sqrt(3)) * (-b ** 2 + 3 * a * c)) / (
                    3. * 2 ** (2 / 3) * a * (-2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d + sqrt(
                4 * (-b ** 2 + 3 * a * c) ** 3 + (
                        -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d) ** 2)) ** (1 / 3)) - (
                         (1 + 1j * sqrt(3)) * (-2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d + sqrt(
                     4 * (-b ** 2 + 3 * a * c) ** 3 + (
                             -2 * b ** 3 + 9 * a * b * c - 27 * a ** 2 * d) ** 2)) ** (1 / 3)) / (
                         6. * 2 ** (1 / 3) * a)
            # Round away numerical noise, then strip zero imaginary parts below.
            r1, r2, r3 = complex(round(r1.real, 12), round(r1.imag, 12)), \
                         complex(round(r2.real, 12), round(r2.imag, 12)), \
                         complex(round(r3.real, 12), round(r3.imag, 12))
            return [r1 if abs(r1.imag) > 0 else r1.real, r2 if abs(r2.imag) > 0 else r2.real,
                    r3 if abs(r3.imag) > 0 else r3.real]
        # Degree >= 4: Aberth simultaneous iteration, seeded with random
        # points whose radii lie within the root bounds.
        roots = []
        lower, upper = self.get_root_bounds()
        for i in range(self.degree):
            radius = uniform(lower, upper)
            angle = uniform(0, const.pi * 2)
            roots.append(complex(radius * _trig.cos(angle), radius * _trig.sin(angle)))
        derivative = self.derivative()
        # NOTE(review): the loop only stops when every offset is exactly
        # zero; confirm this always terminates for ill-conditioned inputs.
        while True:
            valid = 0
            for k, r in enumerate(roots):
                ratio = self(r) / derivative(r)
                offset = ratio / (1 - (ratio * sum(1 / (r - x) for j, x in enumerate(roots) if j != k)))
                if offset.real == 0 and offset.imag == 0:
                    valid += 1
                roots[k] -= offset
            if valid == len(roots):
                break
        return [complex(round(r.real, 6), round(r.imag, 6)) if round(r.imag, 6) != 0 else round(r.real, 6) for r in
                roots]
    def find_root(self, *, guess=None):
        """Finds singular root of function
        If no initial guess given, uses Rational Roots Theorem to attempt to find a root, otherwise uses
        Newton's method initialized with random value within root range.
        If guess given, uses Newton's method.
        :param guess: Initial guess for Newton's method
        :type guess: float
        :return: A root of the polynomial
        :rtype: float
        """
        if self.num_roots() > 0:
            if guess is None:
                # Rational Root Theorem: try every ratio (and its negation)
                # of factors of the constant and leading coefficients.
                a0 = factors(abs(self.coe[0]))
                an = factors(abs(self.coe[-1]))
                for f in a0:
                    for f2 in an:
                        if self(f / f2) == 0:
                            return f / f2
                        if self(-f / f2) == 0:
                            return -f / f2
            # Fall back to the base class's Newton's method.
            return super(Polynomial, self).find_root(guess=guess)
        return None
    def get_root_bounds(self):
        """Finds the bounds at which the roots can be in
        :return: tuple of negative bound and positive bound
        :rtype: tuple
        """
        # Cauchy's bound: 1 + max |a_i / a_n| over the non-leading coefficients.
        u = 1 + max(abs(self.coe[self.degree - i] / self.coe[-1]) for i in range(1, self.degree + 1))
        return -u, u
def derivative(self, value=None):
"""Finds the derivative of the Polynomial
Given value, it will return an exact value of the derivative at that point
:param value: x-value
:return: Polynomial or slope at x-value
>>> Polynomial([1, 3, 3]).derivative()
2x + 3
>>> Polynomial([4, 3, 2, 1, 4, 3]).derivative()
20x⁴ + 12x³ + 6x² + 2x + 4
>>> Polynomial([1, 4, 5]).derivative(5)
14
"""
final = []
for i in range(1, self.degree + 1):
final.append(self.coe[i] * i)
der = Polynomial(final[::-1])
if value is not None:
return der(value)
return der
def integrate(self, a=None, b=None):
"""Finds the integral of the Polynomial
Given a AND b, it will return a definite integral from a to b
:param a: lower bound
:param b: upper bound
:return: Integral or result of definite integral
>>> Polynomial([2, 3]).integrate()
1.0x² + 3.0x
>>> Polynomial([6, 2, 1]).integrate()
2.0x³ + 1.0x² + 1.0x
>>> Polynomial([5, 6]).integrate(0, 10)
310.0
"""
final = []
for i in range(self.degree + 1):
final.append(self.coe[i] / (i + 1))
final.insert(0, 0)
integ = Polynomial(final[::-1])
if a is not None:
return integ(b) - integ(a)
return integ
def isNumber(self):
"""Checks is Polynomial is just a number
:rtype: bool
:return: true or false
>>> Polynomial([2, 3]).isNumber()
False
>>> Polynomial([2]).isNumber()
True
"""
if len(self.coe) == 1:
return True
return False
@staticmethod
def _combine_list(l1: list, l2: list, f) -> list:
final = l1.copy() if len(l1) >= len(l2) else l2.copy()
other = l2.copy() if len(l1) >= len(l2) else l1.copy()
for i in range(len(final)):
try:
final[i] = f(final[i], other[i])
except IndexError:
pass
return final
def __add__(self, other):
"""Adds Polynomials
:param other: Polynomial or numeric value
:return: Polynomial
>>> Polynomial([2, 3]) + Polynomial([3, 4])
5x + 7
>>> Polynomial([2, 3, 5]) + Polynomial([2, 4])
2x² + 5x + 9
>>> Polynomial([4, 5]) + 7
4x + 12
"""
if isinstance(other, Polynomial):
return Polynomial(self._combine_list(self.coe, other.coe, lambda x, y: x + y)[::-1])
else:
return Polynomial(self._combine_list(self.coe, [other], lambda x, y: x + y)[::-1])
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, Polynomial):
return Polynomial(self._combine_list(self.coe, other.coe, lambda x, y: x - y)[::-1])
else:
return Polynomial(self._combine_list(self.coe, [other], lambda x, y: x - y)[::-1])
def __rsub__(self, other):
if isinstance(other, Polynomial):
return Polynomial(self._combine_list(other.coe, self.coe, lambda x, y: x - y)[::-1])
else:
return Polynomial(self._combine_list([other], self.coe, lambda x, y: x - y)[::-1])
def __mul__(self, other):
if isinstance(other, Polynomial):
final = []
for i in range(self.degree + 1):
iteration = [0] * (i + other.degree + 1)
for j in range(other.degree + 1):
iteration[i + j] = self.coe[i] * other.coe[j]
final = self._combine_list(final, iteration, lambda x, y: x + y)
return Polynomial(final[::-1])
else:
final = self.coe.copy()
for i in range(self.degree + 1):
final[i] *= other
return Polynomial(final[::-1])
__rmul__ = __mul__
    def __truediv__(self, other):
        """Polynomial long division: returns the QUOTIENT only (the
        remainder is discarded; see __mod__ for the remainder).  Dividing
        by a numeric value scales every coefficient by its reciprocal."""
        if isinstance(other, Polynomial):
            num = self.coe[:]
            den = other.coe[:]
            if len(num) >= len(den):
                # Left-pad the divisor so both lists share the same length.
                shiftlen = len(num) - len(den)
                den = [0] * shiftlen + den
            else:
                # Divisor of higher degree: the quotient is zero.
                return Polynomial([0])
            q = []
            divisor = float(den[-1])
            for i in range(shiftlen + 1):
                # Peel off one quotient coefficient per step, highest first.
                mult = num[-1] / divisor
                q = [mult] + q
                if mult != 0:
                    d = [mult * u for u in den]
                    num = [u - v for u, v in zip(num, d)]
                num.pop()
                den.pop(0)
            return Polynomial(q[::-1])
        else:
            return self * (1 / other)
    def __mod__(self, other):
        """Polynomial long division: returns the REMAINDER only.

        NOTE(review): the scalar branch returns self * (1/other), identical
        to __truediv__ — probably a placeholder; confirm intended semantics.
        """
        if isinstance(other, Polynomial):
            num = self.coe[:]
            den = other.coe[:]
            if len(num) >= len(den):
                shiftlen = len(num) - len(den)
                den = [0] * shiftlen + den
            else:
                # Divisor of higher degree: the remainder is self itself.
                return Polynomial(num[::-1])
            q = []
            divisor = float(den[-1])
            for i in range(shiftlen + 1):
                mult = num[-1] / divisor
                q = [mult] + q
                if mult != 0:
                    d = [mult * u for u in den]
                    num = [u - v for u, v in zip(num, d)]
                num.pop()
                den.pop(0)
            # Whatever is left in num after the division is the remainder.
            return Polynomial(num[::-1])
        else:
            return self * (1 / other)
def __abs__(self):
final = [-x if x < 0 else x for x in self.coe]
return Polynomial(final[::-1])
def __pow__(self, power, modulo=None):
result = self
for i in range(power - 1):
result *= self
return result
def limit(self, direction=True):
if self.degree % 2 == 0:
if self.coe[-1] > 0:
return float("inf")
else:
return float("-inf")
else:
if self.coe[-1] > 0:
if direction:
return float("inf")
else:
return float("-inf")
else:
if direction:
return float("-inf")
else:
return float("inf")
    def num_roots(self):
        """Upper bound on the number of real roots via Descartes' rule of
        signs: sign changes in p(x) plus sign changes in p(-x).

        NOTE(review): zero coefficients are treated as negative by the
        `> 0` test, so they can be counted as sign changes — confirm this
        is acceptable for the intended inputs.
        """
        changes = 0
        current = True if self.coe[-1] > 0 else False
        for i in range(self.degree, -1, -1):
            if (self.coe[i] > 0) != current:
                current = True if self.coe[i] > 0 else False
                changes += 1
        # Repeat for p(-x): negating odd-degree coefficients.
        coe = self.coe[:]
        for i in range(self.degree + 1):
            if i % 2 == 1:
                coe[i] = -coe[i]
        current = True if coe[-1] > 0 else False
        for i in range(self.degree, -1, -1):
            if (coe[i] > 0) != current:
                current = True if coe[i] > 0 else False
                changes += 1
        return changes
def absmax(self):
if self.limit(True) == float("inf") or self.limit(False) == float("inf"):
return float("inf")
fprime = self.derivative()
critical_points = [x for x in fprime.find_roots() if not isinstance(x, complex)]
curmax = [0, float("-inf")]
for cp in critical_points:
if self(cp) > curmax[1]:
curmax = [cp, self(cp)]
return curmax[0], curmax[1]
def absmin(self):
if self.limit(True) == float("-inf") or self.limit(False) == float("-inf"):
return float("-inf")
fprime = self.derivative()
critical_points = [x for x in fprime.find_roots() if not isinstance(x, complex)]
curmin = [0, float("inf")]
for cp in critical_points:
if self(cp) < curmin[1]:
curmin = [cp, self(cp)]
return curmin[0], curmin[1]
def max(self, a, b):
fprime = self.derivative()
critical_points = [x for x in fprime.find_roots() if not isinstance(x, complex) and a < x < b]
critical_points += [a, b]
curmax = [0, float("-inf")]
for cp in critical_points:
if self(cp) > curmax[1]:
curmax = [cp, self(cp)]
return curmax[0], curmax[1]
def min(self, a, b):
fprime = self.derivative()
critical_points = [x for x in fprime.find_roots() if not isinstance(x, complex) and a < x < b]
critical_points += [a, b]
curmin = [0, float("inf")]
for cp in critical_points:
if self(cp) < curmin[1]:
curmin = [cp, self(cp)]
return curmin[0], curmin[1]
    def factor(self):
        """Factor into linear terms by repeatedly dividing out a found root.

        Returns a list of Polynomial factors; any non-unit quotient left at
        the end is appended last.

        NOTE(review): find_root may return None when no root is found, which
        would make Polynomial([1, -root]) fail — confirm the input domain.
        """
        p = Polynomial(self.coe[::-1])
        poly = []
        while p.degree >= 1:
            root = p.find_root()
            poly.append(Polynomial([1, -root]))
            p = p / poly[-1]
        if p.coe != [1]:
            poly.append(p)
        return poly
def factor_str(self):
facs = self.factor()
fac_set = _duplicates(facs)
string = ""
for i in range(len(fac_set)):
exponent = str(fac_set[i][1]).translate(_trans) if fac_set[i][1] > 1 else ""
string += f"({fac_set[i][0]}){exponent}"
return string
    @staticmethod
    def from_string(string: str):
        """Parse a string in _format's output form (e.g. "2x² + 3x + 1")
        back into a Polynomial."""
        # Map superscript digits back to ASCII so exponents can be int()'d.
        sub = str.maketrans("⁰¹²³⁴⁵⁶⁷⁸⁹", "0123456789")
        string = string.translate(sub).split(' + ')
        try:
            length = string[0].split('x')[1]
        except IndexError:
            # No 'x' at all: a bare constant.
            return Polynomial([float(string[0])])
        # '' after 'x' means the leading exponent is 1.
        l = [0] * (2 if length == '' else int(length) + 1)
        for term in string:
            try:
                coe, exp = term.split('x')
            except ValueError:
                # Constant term: no 'x' to split on.
                coe, exp = term, 0
            try:
                coe, exp = float(coe), int(exp)
            except ValueError:
                # Empty exponent string means exponent 1.
                coe, exp = float(coe), 1
            l[exp] = coe
        return Polynomial(l[::-1])
@staticmethod
def from_factors(facs):
p = Polynomial([1])
for f in facs:
p *= f
return p
    @staticmethod
    def from_factor_str(facs):
        """Parse a factored string like "(1x + -2.0)²(1x + 1.0)" into a
        Polynomial by multiplying its parenthesized factors."""
        matches = re.findall(r"(\(.+?\))(\d+)?", facs.translate(str.maketrans("⁰¹²³⁴⁵⁶⁷⁸⁹", "0123456789")))
        p = Polynomial([1])
        for match in matches:
            # A trailing digit run is a repeat count: queue that many plain
            # copies onto the list being iterated (picked up later).
            if match[-1] != '':
                matches.extend([(match[0], '')] * int(match[1]))
                continue
            p *= Polynomial.from_string(match[0][1:-1])
        return p
    @staticmethod
    def gcd(a, b):
        """Polynomial GCD via the Euclidean algorithm, normalized by
        dividing out the signed integer content (see _gcd)."""
        while b.coe != [0]:
            a, b = b, a % b
        return a / Polynomial._gcd(a)
    @staticmethod
    def lcm(a, b):
        """Least common multiple computed as |a*b| / gcd(a, b).

        NOTE(review): abs() takes coefficient-wise absolute values, which is
        not a standard polynomial normalization — confirm intent.
        """
        return abs(a * b) / Polynomial.gcd(a, b)
    @staticmethod
    def _gcd(a):
        """Signed integer content of *a*: the gcd of its coefficients,
        computed by the amath relationship module (imported as `_gcd`).

        NOTE(review): `any(x >= 0 ...)` yields a positive content whenever a
        single coefficient is non-negative; `all` may have been intended.
        """
        if any([x >= 0 for x in a.coe]):
            return _gcd.gcd(*a.coe)
        else:
            return -_gcd.gcd(*a.coe)
def __neg__(self):
return Polynomial([-x for x in self.coe[::-1]])
def monomial_list(self):
poly = []
for i in range(self.degree + 1):
monomial = [0] * (self.degree + 1)
monomial[i] = self.coe[i]
poly.append(Polynomial(monomial[::-1]))
return poly[::-1]
    def discriminant(self):
        """Discriminant of the polynomial, from the closed-form expressions
        for degrees 1 through 5; zero iff the polynomial has a repeated
        root.  Returns None for constants, and implicitly None for degree
        greater than 5 (no formula implemented)."""
        if self.degree == 0:
            return None
        if self.degree == 1:
            return 1
        if self.degree == 2:
            a = self.coe[2]
            b = self.coe[1]
            c = self.coe[0]
            return b ** 2 - 4 * a * c
        if self.degree == 3:
            a = self.coe[3]
            b = self.coe[2]
            c = self.coe[1]
            d = self.coe[0]
            return b ** 2 * c ** 2 - 4 * a * c ** 3 - 4 * b ** 3 * d + 18 * a * b * c * d - 27 * a ** 2 * d ** 2
        if self.degree == 4:
            a = self.coe[4]
            b = self.coe[3]
            c = self.coe[2]
            d = self.coe[1]
            e = self.coe[0]
            return b ** 2 * c ** 2 * d ** 2 - 4 * a * c ** 3 * d ** 2 - 4 * b ** 3 * d ** 3 + 18 * a * b * c * d ** 3 - 27 * a ** 2 * d ** 4 - 4 * b ** 2 * c ** 3 * e + \
                   16 * a * c ** 4 * e + 18 * b ** 3 * c * d * e - 80 * a * b * c ** 2 * d * e - 6 * a * b ** 2 * d ** 2 * e + 144 * a ** 2 * c * d ** 2 * e - \
                   27 * b ** 4 * e ** 2 + 144 * a * b ** 2 * c * e ** 2 - 128 * a ** 2 * c ** 2 * e ** 2 - 192 * a ** 2 * b * d * e ** 2 + 256 * a ** 3 * e ** 3
        if self.degree == 5:
            a = self.coe[5]
            b = self.coe[4]
            c = self.coe[3]
            d = self.coe[2]
            e = self.coe[1]
            f = self.coe[0]
            return b ** 2 * c ** 2 * d ** 2 * e ** 2 - 4 * a * c ** 3 * d ** 2 * e ** 2 - 4 * b ** 3 * d ** 3 * e ** 2 + 18 * a * b * c * d ** 3 * e ** 2 - \
                   27 * a ** 2 * d ** 4 * e ** 2 - 4 * b ** 2 * c ** 3 * e ** 3 + 16 * a * c ** 4 * e ** 3 + 18 * b ** 3 * c * d * e ** 3 - 80 * a * b * c ** 2 * d * e ** 3 - \
                   6 * a * b ** 2 * d ** 2 * e ** 3 + 144 * a ** 2 * c * d ** 2 * e ** 3 - 27 * b ** 4 * e ** 4 + 144 * a * b ** 2 * c * e ** 4 - 128 * a ** 2 * c ** 2 * e ** 4 - \
                   192 * a ** 2 * b * d * e ** 4 + 256 * a ** 3 * e ** 5 - 4 * b ** 2 * c ** 2 * d ** 3 * f + 16 * a * c ** 3 * d ** 3 * f + 16 * b ** 3 * d ** 4 * f - \
                   72 * a * b * c * d ** 4 * f + 108 * a ** 2 * d ** 5 * f + 18 * b ** 2 * c ** 3 * d * e * f - 72 * a * c ** 4 * d * e * f - 80 * b ** 3 * c * d ** 2 * e * f + \
                   356 * a * b * c ** 2 * d ** 2 * e * f + 24 * a * b ** 2 * d ** 3 * e * f - 630 * a ** 2 * c * d ** 3 * e * f - 6 * b ** 3 * c ** 2 * e ** 2 * f + \
                   24 * a * b * c ** 3 * e ** 2 * f + 144 * b ** 4 * d * e ** 2 * f - 746 * a * b ** 2 * c * d * e ** 2 * f + 560 * a ** 2 * c ** 2 * d * e ** 2 * f + \
                   1020 * a ** 2 * b * d ** 2 * e ** 2 * f - 36 * a * b ** 3 * e ** 3 * f + 160 * a ** 2 * b * c * e ** 3 * f - 1600 * a ** 3 * d * e ** 3 * f - \
                   27 * b ** 2 * c ** 4 * f ** 2 + 108 * a * c ** 5 * f ** 2 + 144 * b ** 3 * c ** 2 * d * f ** 2 - 630 * a * b * c ** 3 * d * f ** 2 - \
                   128 * b ** 4 * d ** 2 * f ** 2 + 560 * a * b ** 2 * c * d ** 2 * f ** 2 + 825 * a ** 2 * c ** 2 * d ** 2 * f ** 2 - 900 * a ** 2 * b * d ** 3 * f ** 2 - \
                   192 * b ** 4 * c * e * f ** 2 + 1020 * a * b ** 2 * c ** 2 * e * f ** 2 - 900 * a ** 2 * c ** 3 * e * f ** 2 + 160 * a * b ** 3 * d * e * f ** 2 - \
                   2050 * a ** 2 * b * c * d * e * f ** 2 + 2250 * a ** 3 * d ** 2 * e * f ** 2 - 50 * a ** 2 * b ** 2 * e ** 2 * f ** 2 + 2000 * a ** 3 * c * e ** 2 * f ** 2 + \
                   256 * b ** 5 * f ** 3 - 1600 * a * b ** 3 * c * f ** 3 + 2250 * a ** 2 * b * c ** 2 * f ** 3 + 2000 * a ** 2 * b ** 2 * d * f ** 3 - \
                   3750 * a ** 3 * c * d * f ** 3 - 2500 * a ** 3 * b * e * f ** 3 + 3125 * a ** 4 * f ** 4
|
import numpy as np
from scipy import linalg
from scipy.integrate import dblquad
#For Olivier and qcm
#Copyright Charles-David Hebert
#MIT License; use it as you see fit, but please give attribution to the author.
class ModelNambu:
""" """
def __init__(self, t: float, tp: float, tpp:float, mu: float, z_vec, sEvec_c) -> None:
""" """
self.t = t ; self.tp = tp; self.tpp = tpp; self.mu = mu ;
self.z_vec = z_vec ; self.sEvec_c = sEvec_c
self.cumulants = self.build_cumulants()
return None
    def t_value(self, kx: float, ky: float) : # This is t_ij(k_tilde)
        """this t_value is only good if tpp = 0.0"""
        # NOTE(review): the docstring above claims validity only for
        # tpp == 0, yet tpp terms are included below — confirm which holds.
        t = self.t ; tp = self.tp; tpp = self.tpp
        # Spin-up 4x4 hopping block over the four cluster sites.
        t_val_up = np.zeros((4, 4), dtype=complex)
        # Bloch phases towards the neighbouring 2x2 clusters.
        ex = np.exp(-2.0j*kx) ; emx = np.conjugate(ex)
        ey = np.exp(-2.0j*ky) ; emy = np.conjugate(ey)
        # Intra-cluster hopping: -t between nearest neighbours, -tp across diagonals.
        tloc = np.array([[0.0, -t, -t, -tp],
                         [-t, 0.0, -tp, -t],
                         [-t, -tp, 0.0, -t],
                         [-tp, -t, -t, 0.0]])
        t_val_up += tloc
        # Inter-cluster contributions, site row by site row.
        t_val_up[0, 0] += -tpp*(ex+emx+ey+emy); t_val_up[0, 1] += -t*ex; t_val_up[0, 3] += -tp*(ex + ey + ex*ey); t_val_up[0, 2] += -t*ey
        t_val_up[1, 0] += -t*emx; t_val_up[1, 1] += -tpp*(ex+emx+ey+emy); t_val_up[1, 3] += -t*ey; t_val_up[1, 2] += -tp*(emx + ey + emx*ey)
        t_val_up[3, 0] += -tp*(emx + emy + emx*emy); t_val_up[3, 1] += -t*emy; t_val_up[3, 3] += -tpp*(ex+emx+ey+emy); t_val_up[3, 2] += -t*emx
        t_val_up[2, 0] += -t*emy; t_val_up[2, 1] += -tp*(ex + emy + ex*emy); t_val_up[2, 3] += -t*ex; t_val_up[2, 2] += -tpp*(ex+emx+ey+emy)
        # Nambu structure: the hole (down) block is the negative of the up block.
        t_val_down = -t_val_up.copy()
        zeros = np.zeros((4,4), dtype=complex)
        tmp1 = np.concatenate((t_val_up, zeros), axis=0)
        tmp2 = np.concatenate((zeros, t_val_down), axis=0)
        t_val = np.concatenate((tmp1, tmp2), axis=1)
        return (t_val)
def build_gf_ktilde(self, kx: float, ky: float, ii: int):
""" """
gf_ktilde = np.zeros((8,8), dtype=complex)
(zz, mu, sE) = (self.z_vec[ii], self.mu, self.sEvec_c[ii])
gf_ktilde[0, 0] = gf_ktilde[1, 1] = gf_ktilde[2, 2] = gf_ktilde[3, 3] = (zz + mu)
gf_ktilde[4, 4] = gf_ktilde[5, 5] = gf_ktilde[6, 6] = gf_ktilde[7, 7] = -np.conjugate(-np.conjugate(zz) + mu)
gf_ktilde -= self.t_value(kx, ky)
gf_ktilde -= sE
gf_ktilde = linalg.inv(gf_ktilde.copy())
return gf_ktilde
    def build_cumulants(self):
        """ """
        # Cumulant per frequency: the same (z + mu) diagonal as
        # build_gf_ktilde but WITHOUT the hopping term, i.e.
        # M(z) = [z + mu - Sigma]^(-1).
        cumulants = np.zeros(self.sEvec_c.shape, dtype=complex)
        for ii in range(cumulants.shape[0]):
            tmp = np.zeros((8, 8), dtype=complex)
            (zz, mu, sE) = (self.z_vec[ii], self.mu, self.sEvec_c[ii])
            tmp[0, 0] = tmp[1, 1] = tmp[2, 2] = tmp[3, 3] = (zz + mu)
            # Hole (Nambu) block: particle-hole transformed frequency.
            tmp[4, 4] = tmp[5, 5] = tmp[6, 6] = tmp[7, 7] = -np.conjugate(-np.conjugate(zz) + mu)
            tmp -= sE
            tmp = linalg.inv(tmp.copy())
            cumulants[ii] = tmp.copy()
        return cumulants
    def Y1Limit(self, x: float) -> float:
        # Constant lower bound (-pi) of the inner integral for
        # scipy.integrate.dblquad; `x` is required by the callback
        # signature but unused.
        return -np.pi
    def Y2Limit(self, x: float) -> float:
        # Constant upper bound (+pi) of the inner integral for
        # scipy.integrate.dblquad; `x` is unused (see Y1Limit).
        return np.pi
    def periodize(self, kx: float, ky: float, arg):
        """ """
        # Periodize an 8x8 cluster Nambu matrix onto the two momenta k and
        # k + Q with Q = (pi, pi).  The result is a 4x4 matrix laid out as
        # [[G_up, F], [F_dag, G_down]] on the (k, k+Q) basis.
        ex = np.exp(1.0j*kx)
        ey = np.exp(1.0j*ky)
        exQ = np.exp(1.0j*(kx+np.pi))
        eyQ = np.exp(1.0j*(ky+np.pi))
        # Phase vectors over the four cluster sites.
        vk = np.array([1.0, ex, ey, ex*ey], dtype=complex)
        vkQ = np.array([1.0, exQ, eyQ, exQ*eyQ], dtype=complex)
        nambu_periodized = np.zeros((4, 4), dtype=complex)
        # Split the 8x8 matrix into its four 4x4 Nambu blocks.
        gup = arg[:4:, :4:]
        gdown = arg[4::, 4::]
        ff = arg[:4:, 4::]
        ffdag = arg[4::, :4:]
        llgreen = [gup, ff, gdown, ffdag]
        llperiodized = [None]*4
        for ii in range(4):
            # 2x2 projection of each block: <v_a| block |v_b> for a, b in {k, k+Q}.
            llperiodized[ii] = np.array([
                [np.dot(np.conjugate(vk), np.dot(llgreen[ii], vk)), np.dot(np.conjugate(vk), np.dot(llgreen[ii], vkQ))],
                [np.dot(np.conjugate(vkQ), np.dot(llgreen[ii], vk)), np.dot(np.conjugate(vkQ), np.dot(llgreen[ii], vkQ))]
            ],
                dtype=complex)
        nambu_periodized[:2:, :2:] = llperiodized[0]
        nambu_periodized[:2:, 2::] = llperiodized[1]
        nambu_periodized[2::, 2::] = llperiodized[2]
        nambu_periodized[2::, :2:] = llperiodized[3]
        # 1/Nc normalization with Nc = 4 cluster sites.
        return (0.25*nambu_periodized)
    def periodize_orbitale(self, kx: float, ky: float, arg):
        # Periodize each 4x4 Nambu block on a two-sublattice ("orbital")
        # basis instead of the (k, k+Q) momentum basis.
        # NOTE(review): sites {0, 3} and {1, 2} are grouped as the two
        # sublattices below — confirm against the cluster geometry.
        ex = np.exp(1.0j*kx)
        ey = np.exp(1.0j*ky)
        # NOTE(review): vkx and vky are built but never used below.
        vkx = np.array([1.0, ex], dtype=complex)
        vky = np.array([1.0, ey], dtype=complex)
        vkxy = np.array([ex, ey], dtype=complex)
        vk1xy = np.array([1.0, ex*ey], dtype=complex)
        nambu_periodized = np.zeros((4, 4), dtype=complex)
        gup = arg[:4:, :4:]
        gdown = arg[4::, 4::]
        ff = arg[:4:, 4::]
        ffdag = arg[4::, :4:]
        llgreen = [gup, ff, gdown, ffdag]
        llperiodized = [None]*4
        for ii in range(4):
            # Intra-sublattice (0,3) projection.
            block00 = np.array([
                [llgreen[ii][0, 0], llgreen[ii][0, 3]],
                [llgreen[ii][3, 0], llgreen[ii][3, 3]]
            ])
            gAB00 = np.dot(np.conjugate(vk1xy), np.dot(block00, vk1xy))
            block01 = np.array([
                [llgreen[ii][0, 1], llgreen[ii][0, 2]],
                [llgreen[ii][3, 1], llgreen[ii][3, 2]]
            ])
            gAB01 = np.dot(np.conjugate(vkxy), np.dot(block01, vkxy))
            block10 = np.array([
                [llgreen[ii][1, 0], llgreen[ii][2, 0]],
                [llgreen[ii][1, 3], llgreen[ii][2, 3]]
            ])
            gAB10 = np.dot(np.conjugate(vkxy), np.dot(block10, vkxy))
            # Intra-sublattice (1,2) projection.
            block11 = np.array([
                [llgreen[ii][1, 1], llgreen[ii][1, 2]],
                [llgreen[ii][2, 1], llgreen[ii][2, 2]]
            ])
            gAB11 = np.dot(np.conjugate(vk1xy), np.dot(block11, vk1xy))
            llperiodized[ii] = np.array([[gAB00, gAB01], [gAB10, gAB11]] ,dtype=complex)
        nambu_periodized[:2:, :2:] = llperiodized[0]
        nambu_periodized[:2:, 2::] = llperiodized[1]
        nambu_periodized[2::, 2::] = llperiodized[2]
        nambu_periodized[2::, :2:] = llperiodized[3]
        Nc = 2.0 #Should be 2.0
        return (nambu_periodized/Nc)
def periodize_nambu(self, kx: float, ky: float, ii: int): # Green periodization
""" """
nambu_ktilde = self.build_gf_ktilde(kx, ky, ii)
return self.periodize(kx, ky, nambu_ktilde)
    def stiffness(self, kx: float, ky: float, ii: int) -> float:
        """ """
        # Momentum-space integrand of the interlayer (c-axis) stiffness,
        # built from anomalous components of the (k, k+Q)-periodized GF and
        # weighted by the interlayer hopping tperp(k) = -(cos kx - cos ky)².
        nambu_periodized = self.periodize(kx, ky, self.build_gf_ktilde(kx, ky, ii)) #self.periodize_nambu(kx, ky, ii)
        coskx: float = np.cos(kx)
        cosky: float = np.cos(ky)
        tperp = -(coskx - cosky)*(coskx - cosky) # t_perp = -1.0
        tperp_squared = 2.0*tperp*tperp # integrated over kz (integrate cos(kz)**2.0 = 2.0)
        return (-1.0 * np.real(-tperp_squared*
                               (2.0*nambu_periodized[0, 2]*nambu_periodized[2, 0] +
                                4.0*(nambu_periodized[0, 3]*nambu_periodized[3, 0]) + # the sign here depends on whether there is AFM coupling along z.
                                2.0*nambu_periodized[1, 3]*nambu_periodized[3, 1]
                                )
                               )
                )
def stiffness_orbital(self, kx, ky, ii):
nambu_periodized = self.periodize_orbitale(kx, ky, self.build_gf_ktilde(kx, ky, ii)) #self.periodize_nambu(kx, ky, ii)
coskx: float = np.cos(kx)
cosky: float = np.cos(ky)
tperp = -(coskx - cosky)*(coskx - cosky) # t_perp = -1.0
tperp_squared = 2.0*tperp*tperp # integrated over kz (integrate cos(kz)**2.0 = 2.0)
return (-1.0 * np.real(-tperp_squared*
4.0*(nambu_periodized[0, 3]*nambu_periodized[2, 1] + nambu_periodized[1, 2]*nambu_periodized[3, 0])
)
)
def eps0(self, kx, ky):
return (-2.0*self.t*(np.cos(kx) + np.cos(ky)) - 2.0*self.tp*(np.cos(kx+ky) + np.cos(kx-ky)) -2.0*self.tpp*(np.cos(2.0*kx)+np.cos(2.0*ky)) )
def periodize_cumulant(self, kx: float, ky: float, ii: int): # cumulant periodization
""" """
tmp = linalg.inv(self.periodize(kx, ky, self.cumulants[ii]))
tmp[0, 0] -= self.eps0(kx, ky); tmp[1, 1] -= self.eps0(kx+np.pi, ky+np.pi)
tmp[2, 2] += self.eps0(kx, ky); tmp[3, 3] += self.eps0(kx+np.pi, ky+np.pi)
return linalg.inv(tmp)
def stiffness_cum(self, kx: float, ky: float, ii: int) -> float:
""" """
nambu_periodized = self.periodize_cumulant(kx, ky, ii) #linalg.inv(tmp.copy())
coskx: float = np.cos(kx)
cosky: float = np.cos(ky)
tperp = -(coskx - cosky)*(coskx - cosky) # t_perp = -1.0
tperp_squared = 2.0*tperp*tperp # integrated over kz (integrate cos(kz)**2.0 = 2.0)
return (-1.0 * np.real(-tperp_squared*
(2.0*nambu_periodized[0, 2]*nambu_periodized[2, 0] +
4.0*nambu_periodized[0, 3]*nambu_periodized[3, 0] +
2.0*nambu_periodized[1, 3]*nambu_periodized[3, 1]
)
)
)
# for d_z p-wave SC, nambu space is not enlarged.
def stiffness_trace(self, kx: float, ky: float, ii: int) -> float:
"""4/N_c Trace(F F^Dag) """
gf_ktilde = self.build_gf_ktilde(kx, ky, ii)
trace = np.trace(np.dot(gf_ktilde[:4:, 4::], gf_ktilde[4::, :4:]))
coskx: float = np.cos(kx)
cosky: float = np.cos(ky)
tperp = -(coskx - cosky)*(coskx - cosky) # t_perp = -1.0
tperp_squared = 2.0*tperp*tperp # integrated over kz (integrate cos(kz)**2.0 = 2.0)
return (tperp_squared*np.real(trace))
|
from django.contrib.auth.hashers import check_password
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from .models import Student, Faculty
# Resolve the active user model once at import time (supports custom user models).
User = get_user_model()
class StudentBackend:
    """Authenticate students by registration number "batch-program-number".

    Unknown registration numbers get an account auto-created (behavior kept
    from the original implementation).
    """

    def authenticate(self, request, username=None, password=None):
        # Django tries every backend; username may be None or belong to
        # another backend. The original raised TypeError on None username.
        if not username or "-" not in username:
            return None
        registration_number = username.split("-")
        # Malformed usernames (not exactly batch-program-number) previously
        # raised an uncaught IndexError.
        if len(registration_number) != 3:
            return None
        # get user based on registration_number
        try:
            student = Student.objects.get(batch__name=registration_number[0],
                                          program__name=registration_number[1],
                                          number=registration_number[2])
        except Student.DoesNotExist:
            # NOTE(review): auto-creating a staff account for any unknown
            # registration number mirrors the original behavior -- confirm
            # this is intended.
            user = User.objects.create_user(username, username, password)
            user.is_superuser = False
            user.is_staff = True
            user.save()
            return user
        user = student.user
        # check password of the linked user, if any
        if user and check_password(password, user.password):
            return user
        return None

    def get_user(self, user_id):
        """Return the user for *user_id*, or None (backend contract)."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
class FacultyBackend:
    """Authenticate faculty members by email address.

    Unknown emails get an account auto-created (behavior kept from the
    original implementation).
    """

    def authenticate(self, request, username=None, password=None):
        # Django tries every backend; bail out instead of creating a user
        # with a None username (the original fell into create_user(None, ...)).
        if not username:
            return None
        # get user based on email
        try:
            user = User.objects.get(email=username)
        except User.DoesNotExist:
            # NOTE(review): auto-creating a staff account for any unknown
            # email mirrors the original behavior -- confirm intended.
            user = User.objects.create_user(username, username, password)
            user.is_superuser = False
            user.is_staff = True
            user.save()
            return user
        # .get() never returns None, so no extra None check is needed here.
        if user.check_password(password):
            return user
        return None

    def get_user(self, user_id):
        """Return the user for *user_id*, or None (backend contract)."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
# Print terms of an arithmetic progression in batches of 10 (then however many
# more the user asks for), stopping when the user requests 0 additional terms.
term = int(input('Primeiro termo: '))
step = int(input('Razão da PA: '))
printed = 1
target = 0
batch = 10
while batch != 0:
    target += batch
    while printed <= target:
        term += step
        printed += 1
        print('{}...'.format(term), end='')
    print('PAUSA')
    batch = int(input('Quer adicionar quantos mais termos? '))
|
from datetime import datetime

from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine, or_, text

from .app_config import SQLALCHEMY_DATABASE_URI
from . import create_app
# Module-level application, ORM session and raw-SQL engine used by the routes.
app = create_app()
db = SQLAlchemy(app)
# NOTE(review): convert_unicode is deprecated/removed in newer SQLAlchemy --
# confirm the pinned version still accepts it.
engine = create_engine(SQLALCHEMY_DATABASE_URI, convert_unicode=True)
from api.models import SuperHero, Status
@app.route('/heroes/', methods=['GET'])
def heroes_list():
    """List all heroes as JSON, optionally filtered by query-string args.

    Supported filters: started_after/started_before, finished_after/
    finished_before (YYYY-MM-DD), income_below/income_above/income_equal,
    and status. Returns (json list, 200).

    SECURITY FIX: the original interpolated request args straight into the
    SQL string (SQL injection); all filter values are now bound parameters.
    This also fixes the `status` filter, which was interpolated without
    quotes and produced invalid SQL for string values.
    """
    query = '''
    SELECT
      superhero.id,
      superhero_alias,
      email_address,
      first_name,
      last_name,
      to_char(started_on, 'YYYY-MM-DD') as started_on,
      to_char(finished_on, 'YYYY-MM-DD') as finished_on,
      CAST(income AS VARCHAR) AS income,
      status.status
    FROM superhero
    JOIN status
      ON superhero.status_id=status.id
    '''
    params = {}
    if request.args:
        query += ' WHERE superhero_alias is not null'
        # (arg name, SQL fragment with a matching bind parameter)
        filters = (
            ('started_after', " AND started_on > to_date(:started_after, 'YYYY-MM-DD')"),
            ('started_before', " AND started_on < to_date(:started_before, 'YYYY-MM-DD')"),
            ('finished_after', " AND finished_on > to_date(:finished_after, 'YYYY-MM-DD')"),
            ('finished_before', " AND finished_on < to_date(:finished_before, 'YYYY-MM-DD')"),
            ('income_below', ' AND income < :income_below'),
            ('income_above', ' AND income > :income_above'),
            ('income_equal', ' AND income = :income_equal'),
            ('status', ' AND status = :status'),
        )
        for arg_name, clause in filters:
            value = request.args.get(arg_name)
            if value:
                query += clause
                params[arg_name] = value
    sql_results = engine.execute(text(query), **params).fetchall()
    jsonify_data = [dict(r) for r in sql_results]
    return jsonify(jsonify_data), 200
@app.route('/heroes', methods=['POST'])
def heroes_create():
    """Create a superhero from the JSON body, creating its Status on demand.

    Returns 201 on success, 409 when the alias or email already exists.
    """
    payload = request.get_json()
    status = Status.query.filter_by(status=payload['status']).first()
    if status is None:
        status = Status(status=payload['status'])
        db.session.add(status)
        db.session.commit()
    # https://docs.sqlalchemy.org/en/latest/orm/tutorial.html#common-filter-operators
    duplicate = (db.session.query(SuperHero)
                 .filter(or_(SuperHero.superhero_alias == payload.get('superhero_alias'),
                             SuperHero.email_address == payload.get('email_address')))
                 .first())
    if duplicate:
        # https://httpstatuses.com/409
        return 'ZOWIE! A hero with the same superhero_alias or email_address already exists.', 409
    new_hero = SuperHero(superhero_alias=payload.get('superhero_alias'),
                         email_address=payload.get('email_address'),
                         first_name=payload.get('first_name'),
                         last_name=payload.get('last_name'),
                         started_on=payload.get('started_on', datetime.utcnow()),
                         finished_on=payload.get('finished_on'),
                         income=payload.get('income'),
                         status=status)
    db.session.add(new_hero)
    db.session.commit()
    return 'KER-SPLOOSH! New hero added to the data trust.', 201
@app.route('/heroes/<int:hero_id>', methods=['GET'])
def heroes_detail(hero_id):
    """Return a single hero serialized as JSON, or a 404."""
    hero = db.session.query(SuperHero).get(hero_id)
    if hero is None:
        return 'Not found', 404
    return jsonify(hero.serialize()), 200
@app.route('/api/health', methods=['GET'])
def health():
    """Liveness probe.

    BUGFIX: returns 200 OK; the original returned 201 Created, which is
    wrong for a GET health check and can confuse monitors expecting 200.
    """
    return 'BAM! Successful request.', 200
# Run the Flask development server when executed directly
# (use a real WSGI server in production).
if __name__ == '__main__':
    app.run()
class Solution(object):
    """
    https://leetcode.com/problems/rotate-list/
    Measure the list, reduce k modulo the length, then cut the list
    (length - k) nodes in and splice the tail back onto the old head.
    """

    def rotateRight(self, head, k):
        """Rotate the linked list right by k places; returns the new head."""
        if head is None:
            return None
        # Walk to the tail while counting nodes.
        length = 1
        tail = head
        while tail.next:
            tail = tail.next
            length += 1
        shift = k % length
        if shift == 0:
            return head
        # The node just before the new head is (length - shift - 1) hops in.
        prev = head
        for _ in range(length - shift - 1):
            prev = prev.next
        new_head = prev.next
        prev.next = None
        tail.next = head
        return new_head
"""
Priority Queue in Python
1. Use heapq module
The heapq implements a min-heap sort algorithm suitable for use with Python's lists.
2. Use queue.PriorityQueue
Note: The PriorityQueue uses the same heapq implementation internally
"""
# Use heapq
import heapq
customers = []
heapq.heappush(customers, (2, "Harry"))
heapq.heappush(customers, (3, "Charles"))
heapq.heappush(customers, (1, "Riya"))
heapq.heappush(customers, (4, "Stacy"))
while customers:
print(heapq.heappop(customers))
# Will print names in the order: Riya, Harry, Charles, Stacy.
# Can use heapify() to turn a list into a heap.
a = [3, 5, 1, 2, 6, 8, 7]
heapq.heapify(a)
print(a) # [1, 2, 3, 5, 6, 8, 7]
# Use queue.PriorityQueue
from queue import PriorityQueue
# we initialise the PQ class instead of using a function to operate upon a list.
customers = PriorityQueue()
customers.put((2, "Harry"))
customers.put((3, "Charles"))
customers.put((1, "Riya"))
customers.put((4, "Stacy"))
while not customers.empty():
print(customers.get())
# Will print names in the order: Riya, Harry, Charles, Stacy.
|
import numpy as np


# BUGFIX: the original class body referenced `arr` before it existed
# (NameError at import time) and did not inherit from Exception, so the
# `except IncorrectArraySize` clause itself would have raised TypeError.
class IncorrectArraySize(Exception):
    """Raised when the input array is not a square matrix."""


def check_square(matrix):
    """Raise IncorrectArraySize unless *matrix* is a square 2-D array."""
    shape = list(matrix.shape)
    if len(shape) != 2 or shape[0] != shape[1]:
        raise IncorrectArraySize('Input should be a square matrix')


arr = np.array([[1, 2, 3, 7],
                [4, 5, 6, 8],
                [5, 8, 9, 7],
                [4, 3, 5, 2]])
try:
    check_square(arr)
    n = len(arr)
    # main (top-left to bottom-right) diagonal
    total_right = sum(int(arr[i][i]) for i in range(n))
    # anti (top-right to bottom-left) diagonal
    total_left = sum(int(arr[i][-(i + 1)]) for i in range(n))
    total = total_right + total_left
    print('The sum of the diagonals is {}'.format(total))
except IncorrectArraySize:
    print('Error')
from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.fsl as fsl
from nipype.algorithms.misc import TSNR
import nipype.interfaces.utility as util
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.afni as afni
import nipype.algorithms.rapidart as ra
from compcor import extract_noise_components
from normalize_timeseries import time_normalizer
from nuissance_regression import create_filter_matrix
def create_denoise_pipeline(name='denoise'):
    """Build the resting-state denoising workflow.

    Steps: binarize/resample brain mask, mask + detrend the functional data,
    compute tSNR, threshold a noise mask, run artefact detection, build an
    eroded WM/CSF mask, extract compcor components, assemble a nuisance
    design matrix, regress it out, bandpass filter, and time-normalize.

    Parameters
    ----------
    name : str
        Name of the returned nipype Workflow.

    Returns
    -------
    nipype.pipeline.engine.Workflow
    """
    # workflow
    # BUGFIX: honor the caller-supplied name (the original hardcoded 'denoise',
    # making the `name` parameter dead).
    denoise = Workflow(name=name)
    # Define nodes
    inputnode = Node(interface=util.IdentityInterface(fields=['func', #realigned and coregistered
                                                              'motion_parameters',
                                                              'highpass_sigma',
                                                              'lowpass_sigma',
                                                              'resamp_brain',
                                                              'brain_seg',
                                                              'tr']),
                     name='inputnode')
    outputnode = Node(interface=util.IdentityInterface(fields=['tsnr_file',
                                                               'noise_mask',
                                                               'wmcsf_mask',
                                                               'brain_mask_resamp',
                                                               'compcor_components',
                                                               'combined_motion',
                                                               'outlier_files',
                                                               'intensity_files',
                                                               'outlier_stats',
                                                               'outlier_plots',
                                                               'nuissance_regressors',
                                                               'denoised_file',
                                                               'bandpassed_file',
                                                               'normalized_file']),
                      name='outputnode')
    # binarize resampled brain
    binarize = Node(fs.Binarize(min=0.5,
                                out_type='nii.gz',
                                binary_file='brain_mask_resamp.nii.gz'),
                    name='binarize')
    denoise.connect([(inputnode, binarize, [('resamp_brain', 'in_file')]),
                     (binarize, outputnode, [('binary_file', 'brain_mask_resamp')])
                     ])
    # mask functional data
    mask_epi = Node(fsl.ApplyMask(out_file='rest_masked.nii.gz'),
                    name='mask_epi')
    denoise.connect([(inputnode, mask_epi, [('func', 'in_file')]),
                     (binarize, mask_epi, [('binary_file', 'mask_file')])
                     ])
    # detrend epi (2nd-order polynomial)
    detrend = Node(afni.Detrend(args='-polort 2',
                                outputtype='NIFTI_GZ'),
                   name='detrend')
    denoise.connect([(mask_epi, detrend, [('out_file', 'in_file')])])
    # calculate tsnr file
    tsnr = Node(TSNR(), name='tsnr')
    denoise.connect([(detrend, tsnr, [('out_file', 'in_file')]),
                     (tsnr, outputnode, [('tsnr_file', 'tsnr_file')])])
    # threshold the tsnr stddev file to 98th percentile as noise mask for compcor
    getthresh = Node(interface=fsl.ImageStats(op_string='-p 98'),
                     name='getthreshold')
    threshold_stddev = Node(fsl.Threshold(out_file='noise_mask.nii.gz'),
                            name='threshold')
    denoise.connect([(tsnr, threshold_stddev, [('stddev_file', 'in_file')]),
                     (tsnr, getthresh, [('stddev_file', 'in_file')]),
                     (getthresh, threshold_stddev, [('out_stat', 'thresh')]),
                     (threshold_stddev, outputnode, [('out_file','noise_mask')])
                     ])
    # perform artefact detection
    artefact=Node(ra.ArtifactDetect(save_plot=True,
                                    parameter_source='FSL',
                                    mask_type='file',
                                    norm_threshold=1,
                                    zintensity_threshold=3,
                                    ),
                  name='artefact')
    denoise.connect([(inputnode, artefact, [('func', 'realigned_files'),
                                            ('motion_parameters', 'realignment_parameters')]),
                     (binarize, artefact, [('binary_file', 'mask_file')]),
                     (artefact, outputnode, [('norm_files', 'combined_motion'),
                                             ('outlier_files', 'outlier_files'),
                                             ('intensity_files', 'intensity_files'),
                                             ('statistic_files', 'outlier_stats'),
                                             ('plot_files', 'outlier_plots')])])
    # extract eroded wmcsf mask for a compcor
    csf=[10,11,12,13,14,17,18] #14 4th ventricle
    wm=[46,47,48] #43 brain stem 38, 39 are thalamus,
    wmcsfmask = Node(fs.Binarize(match = wm+csf,
                                 out_type = 'nii.gz',
                                 binary_file='wmcsf_mask.nii.gz'),
                     name='wmcsfmask')
    # resample wmcsf mask (nearest-neighbour keeps it binary)
    resamp_wmcsf = Node(afni.Resample(outputtype='NIFTI_GZ',
                                      resample_mode='NN',
                                      out_file='wmcsf_mask_resamp.nii.gz'),
                        name='resamp_wmcsf')
    # erode wmcsf mask to avoid partial-volume voxels at tissue borders
    erode_wmcsf = Node(fs.Binarize(min = 0.5,
                                   erode = 1,
                                   out_type = 'nii.gz',
                                   binary_file='wmcsf_mask.nii.gz'),
                       name='erode_wmcsf')
    denoise.connect([(inputnode, wmcsfmask, [('brain_seg', 'in_file')]),
                     (wmcsfmask, resamp_wmcsf, [('binary_file', 'in_file')]),
                     (inputnode, resamp_wmcsf, [('resamp_brain', 'master')]),
                     (resamp_wmcsf, erode_wmcsf, [('out_file', 'in_file')]),
                     (erode_wmcsf, outputnode, [('binary_file', 'wmcsf_mask')])
                     ])
    # extracting physiological noise components using both acompcor and tcompcor
    compcor = Node(util.Function(input_names=['realigned_file',
                                              'noise_mask_file',
                                              'num_components',
                                              'csf_mask_file',
                                              'realignment_parameters',
                                              'outlier_file',
                                              'selector',
                                              'regress_before_PCA'],
                                 output_names=['noise_components','pre_svd'],
                                 function=extract_noise_components),
                   name='compcor')
    compcor.inputs.num_components = 6
    compcor.inputs.regress_before_PCA = True #regress out motion and outliers before deriving components
    compcor.inputs.selector = [True,True]  # [acompcor, tcompcor] selector
    denoise.connect([(inputnode, compcor, [('func', 'realigned_file'),
                                           ('motion_parameters', 'realignment_parameters')]),
                     (threshold_stddev, compcor, [('out_file', 'noise_mask_file')]),
                     (erode_wmcsf, compcor, [('binary_file', 'csf_mask_file')]),
                     (compcor, outputnode, [('noise_components', 'compcor_components')]),
                     (artefact, compcor, [('outlier_files', 'outlier_file')])
                     ])
    # create design matrix for nuissance regression
    designmatrix = Node(util.Function(input_names=['motion_params',
                                                   'composite_norm',
                                                   'compcorr_components',
                                                   'global_signal',
                                                   'art_outliers',
                                                   'selector',
                                                   'demean'],
                                      output_names=['filter_file'],
                                      function=create_filter_matrix),
                        name='designmatrix')
    #[motion_params, composite_norm, compcorr_components, global_signal, art_outliers, motion derivatives]
    designmatrix.inputs.selector=[True, False, True, False, True, True]
    designmatrix.inputs.demean=False
    denoise.connect([(artefact, designmatrix, [('outlier_files','art_outliers'),
                                               ('norm_files', 'composite_norm'),
                                               ('intensity_files', 'global_signal'),
                                               ]),
                     (compcor, designmatrix, [('noise_components', 'compcorr_components')]),
                     (inputnode, designmatrix, [('motion_parameters', 'motion_params')]),
                     (designmatrix, outputnode, [('filter_file', 'nuissance_regressors')])
                     ])
    # filter out noise from detrended file
    remove_noise = Node(fsl.FilterRegressor(filter_all=True,
                                            out_file='rest_denoised.nii.gz'),
                        name='remove_noise')
    denoise.connect([(detrend, remove_noise, [('out_file', 'in_file')]),
                     (designmatrix, remove_noise, [('filter_file', 'design_file')]),
                     (remove_noise, outputnode, [('out_file', 'denoised_file')])
                     ])
    # bandpass filter denoised file
    bandpass_filter = Node(fsl.TemporalFilter(out_file='rest_denoised_bandpassed.nii.gz'),
                           name='bandpass_filter')
    denoise.connect([(inputnode, bandpass_filter,[( 'highpass_sigma','highpass_sigma'),
                                                  ('lowpass_sigma', 'lowpass_sigma')]),
                     (remove_noise, bandpass_filter, [('out_file', 'in_file')]),
                     (bandpass_filter, outputnode, [('out_file', 'bandpassed_file')])
                     ])
    # normalize scans over time
    normalize_time=Node(util.Function(input_names=['in_file','tr'],
                                      output_names=['out_file'],
                                      function=time_normalizer),
                        name='normalize_time')
    denoise.connect([(inputnode, normalize_time, [('tr', 'tr')]),
                     (bandpass_filter, normalize_time, [('out_file', 'in_file')]),
                     (normalize_time, outputnode, [('out_file', 'normalized_file')])
                     ])
    return denoise
|
def removevow(string1):
    """Return *string1* with all ASCII vowels removed, case-insensitively."""
    vowels = ('a', 'e', 'i', 'o', 'u')
    kept = [ch for ch in string1 if ch.lower() not in vowels]
    return "".join(kept)


print(removevow("Vivek"))
|
import requests as Re
import execjs as jsexe
import re
import time
from smtp import send_email
from html_escape_sequence import escape2normal
user=""
pd=""#你的账号密码
rsa_key=""
lt_str=""
execution=""
vatify_code=""
s=Re.Session()
login_flag=0
class_list_info=[]
pwd="/root/lazy_student_assist/"
records_file=pwd+"inform_records.dat"
log_file=pwd+"log.dat"
column_type2name={"study":"课程学习","discuss":"答疑讨论"}
#python参数传递 永远是传递指针而不是新分配空间 再拷贝,因此对函数内部的参数进行修改,会修改传入的变量
#python变量作用域 全局变量在函数中使用时,只能读,不能写(改) 需要写,就要在函数里声明 global 全局变量名
#log函数 level等级越低 记录越不重要
def write_log(log, level=0):
    """Append a timestamped entry to the global log file.

    Lower ``level`` means a less important entry (0 = routine).
    """
    # `with` guarantees the handle is closed even if write() raises
    # (the original left the file open on an exception).
    with open(log_file, "a+") as f:
        f.write("{} {} {}\n".format(
            str(level),
            str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))),
            log))
# Look up a forum thread in the local records file: return its fields if it
# was recorded already, otherwise an empty list.
def syntax_class_forum(forumid):
    """Return [thread_id, title, timestamp, sender] for *forumid* from
    records.dat, or [] when the thread has not been recorded yet."""
    with open(records_file, "r") as f:
        lines = f.readlines()
    for line in lines:
        m = re.match("(forum )([\d]+)( )([\S]+?)( )([\d]+)( )(.+)", line)
        if m and int(forumid) == int(m[2]):
            return [m[2], m[4], m[6], m[8]]
    return []
# Look up a course section's column id in records.dat; on a cache miss,
# scrape the course layout page for it and append it to records.dat.
def syntax_class_column_id(session,column_type,classid):
    """Return the column id for (column_type, classid), or -1 if not found.

    column_type is a key of column_type2name ("study" or "discuss").
    """
    f=open(records_file,"r")
    lines=f.readlines()
    f.close()
    ret=-1
    for x in lines:
        # cached line format: "study_column_id 10722 7753" -> type, classid, columnid
        result=re.match("("+column_type+"_column_id )([\d]+)( )([\d]+)",x)
        if(result):
            if(int(result[2])==int(classid)):
                ret=int(result[4])
                break
    if(ret==-1):
        # cache miss: fetch the course layout page and scan its column links
        res=session.get("http://jxpt.whut.edu.cn:81/meol/jpk/course/layout/newpage/index.jsp",\
            params={"courseId":str(classid)})
        ctx=re.compile("(columnId=)([\d]+)([\s\S]{20,160})(<span>)(.*?)(</span>)")
        result=ctx.findall(res.text)
        for x in result:
            # pick the column whose visible name matches the wanted section
            if(column_type2name[column_type]==str(x[4])):
                f=open(records_file,"a+")
                f.write("{} {} {}\n".format(column_type+"_column_id",str(classid),str(x[1])))
                f.close()
                ret=int(x[1])
                break
    return ret
# Return the Unix timestamp exactly n days before the current time.
def last_n_day_timestamp(n):
    """Return int(now) - n days, in seconds since the epoch.

    (The original comment claimed "first of last month", but the code has
    always computed a plain n-days-ago timestamp.)
    """
    seconds_per_day = 24 * 60 * 60
    return int(time.time()) - seconds_per_day * n
# Return the timestamp of the most recent already-notified item of this class,
# falling back to 30 days ago when no record exists.
def syntax_class_records_last_time(select_type, classid):
    """Return the newest recorded timestamp for (select_type, classid).

    select_type is the record tag (e.g. "inform"); classid is compared as a
    string, matching how records are written.
    """
    with open(records_file, "r") as f:
        lines = f.readlines()
    newest = last_n_day_timestamp(30)
    for line in lines:
        m = re.match("(" + select_type + " )([\d]+)( )(.+)", line)
        if m:
            if m[2] == classid and int(m[4]) > newest:
                newest = int(m[4])
                break
    return newest
# Fetch and unescape the body text of one notification.
def get_info_content(session,informid):
    """Return the unescaped content of notification *informid* (or a
    placeholder string when the page holds no content)."""
    res=session.get("http://jxpt.whut.edu.cn:81/meol/common/inform/message_content.jsp",params={"nid":str(informid)})
    # the content is embedded as the value of an <input id="..._content" value='...'>
    origin_content=re.search("(id=.*?_content.*?value=')(.*?)(')",res.text)
    if(origin_content):
        origin_content=origin_content[2]
    else:
        origin_content="暂无内容"
    origin_content=escape2normal(origin_content)
    return origin_content
# Acquire the SSO login cookie for this requests session.
def login_index(session):
    """Log in to the WHUT SSO portal and set login_flag on success.

    Scrapes the `lt` and `execution` hidden form tokens, encrypts
    user+password+lt with the portal's DES javascript (des.js via execjs),
    and posts the login form. Mutates the module-level login globals.
    """
    global login_flag
    global class_list_info
    global rsa_key
    global lt_str
    global execution
    global vatify_code
    res=session.get("http://zhlgd.whut.edu.cn/tpass/login?service=http%3A%2F%2Fjxpt.whut.edu.cn%3A81%2Fmeol%2Fhomepage%2Fcommon%2Fsso_login.jsp")
    # e.g. <input type="hidden" id="lt" name="lt" value="LT-...-tpass" />
    lt_str=re.search("(<.*?id.*?\"lt\".*?value.*?\")([^\"\n]*?)(\")",res.text)[2]  # group 2 is the lt token
    # e.g. <input type="hidden" name="execution" value="e3s2" />
    execution=re.search("(<.*?name.*?execution.*?value.*?\")(.*?)(\")",res.text)[2]
    f=open(pwd+"des.js","r")
    jsfile=f.read()
    f.close()
    ctx=jsexe.compile(jsfile)
    rsa_key=ctx.call("strEnc",user+pd+lt_str,"1","2","3")
    data_to_send={"rememberName":"on",\
                  "rsa":rsa_key,\
                  "ul":str(len(user)),\
                  "pl":str(len(pd)),\
                  "lt":lt_str,\
                  "execution":execution,\
                  "_eventId":"submit"}
    # Captcha branch kept disabled; it downloaded the verification image and
    # prompted for manual input:
    # if(res.text.find("id=\"vali\"")>0):
    #     print("要输入验证码!")
    #     write_log("此次登录需要验证码")
    #     img_src="http://zhlgd.whut.edu.cn/tpass/code"
    #     res=session.get(img_src)
    #     f=open(pwd+"vatify.jpeg","wb+")
    #     f.write(res.content)
    #     f.close()
    #     vatify_code=str(input("请查看并输入验证码!保存在/home/lsm/vatify.jpeg"))
    #     data_to_send["code"]=vatify_code
    res=session.post("http://zhlgd.whut.edu.cn/tpass/login?service=http%3A%2F%2Fzhlgd.whut.edu.cn%2Ftp_up%2F",data=data_to_send)
    # a redirect plus the "online users" marker on the landing page => success
    if(len(res.history)>0 and res.text.find("在线人数")>0):
        print("login success")
        write_log("login success")
        login_flag=1
    else:
        print("login fail")
        # BUGFIX: the original wrote "login success" to the log on failure.
        write_log("login fail")
# Open (mark read) and upvote every unread thread;
# class_list_info[index][5] holds the unread-thread list for that course.
def view_and_support_unread_forum(session,class_list_info,index):
    """Visit each unread thread, 'support' (like) it, and record it locally.

    A thread is appended to records.dat once the site confirms the support,
    or reports it was already supported.
    """
    for x in class_list_info[index][5]:
        # viewing the thread marks it as read
        res=session.get("http://jxpt.whut.edu.cn:81/meol/homepage/threadAction.do",params={"threadid":str(x[0])})
        res=session.post("http://jxpt.whut.edu.cn:81/meol/common/faq/forumnSupport_do.jsp",params={"threadId":str(x[0])})
        if(res.text.find("已支持")!=-1):
            write_log("已读已点赞帖子:{} {} {}".format(x[0],x[1],x[3]))
            f=open(records_file,"a+")
            f.write("forum {} {} {} {}\n".format(x[0],x[1],x[2],x[3]))
            f.close()
        elif(res.text.find("不能重复支持")!=-1):
            # already supported earlier: just record it so it is skipped next time
            f=open(records_file,"a+")
            f.write("forum {} {} {} {}\n".format(x[0],x[1],x[2],x[3]))
            f.close()
# Open a course section page; column_type names the section ("study"/"discuss").
def view_class_column_page(session,classid,column_type):
    """Return the HTML of the given column page of course *classid*.

    Visiting the page also registers course activity on the site.
    """
    res=session.get("http://jxpt.whut.edu.cn:81/meol/jpk/course/layout/newpage/index.jsp",\
        params={"courseId":str(classid)})
    columnid=syntax_class_column_id(session,column_type,classid)
    res=session.get("http://jxpt.whut.edu.cn:81/meol/jpk/course/course_column_preview_transfer.jsp",\
        params={"tagbug":"client","columnId":str(columnid)})
    return res.text
# Ping the online-time listener to accrue study time.
def study_class_over_time(session,classid):
    """Send one online-time heartbeat for course *classid* and log it on success."""
    res=session.post("http://jxpt.whut.edu.cn:81/meol/lesson/onlinetime_listener.jsp",\
        data={"lessId":str(classid)})
    if(res.text.find("success\",\"status\":0")>0):
        write_log("学习一分钟,发送请求")
# Parse the total online-study minutes out of the course score-detail HTML.
def syntax_study_info(study_html_text):
    """Return a one-element list [total_study_time] scraped from the page."""
    match = re.search("(本课程网络学习总时长[\S\s]*?needstar\">[\s]+)([\d]+)", study_html_text)
    return [match[2]]
# Fetch and parse a student's score/progress page for one course.
def get_study_info(session,classid,uid):
    """Return the parsed study-info list for course *classid*, student *uid*."""
    res=session.get("http://jxpt.whut.edu.cn:81/meol/common/newscoremanagement/stu_course_detail.jsp",\
        params={"lid":str(classid),"uid":str(uid)})
    return syntax_study_info(res.text)
# Parse one course <li> fragment into
# [name, teacher_name, course_id, inform_list, discuss_list].
def class_syntax(class_li_text):
    """Extract course name, teacher and id from a course-list <li> snippet.

    The last two entries are empty lists, filled later with unread
    notifications and unread discussion threads.
    """
    # e.g. <span class='realname'>张进</span> and a link containing courseId=
    class_name = re.search("(title[\S\s]*?>[\S\s]*?>[\s]*)([\S]*)", class_li_text)[2]
    teacher_name = re.search("(realname.*?>)([\S]*)(</span)", class_li_text)[2]
    class_id = re.search("(courseId=)([\d]+)", class_li_text)[2]
    return [class_name, teacher_name, class_id, [], []]
# Populate the global class_list_info: one entry per course =
# [name, teacher, course_id, inform_list, discuss_list, study_info].
def get_all_class_info(session):
    """Fetch the course list and fill the global class_list_info.

    Requires a prior successful login (login_flag == 1). Each course entry
    is extended with its study-time info and echoed to stdout and the log.
    """
    global login_flag
    global class_list_info
    if(login_flag==1):
        # fetch the student's course list page
        res=session.get("http://jxpt.whut.edu.cn:81/meol/welcomepage/student/course_list_v8.jsp")
        pattern=re.compile("<li>[\s\S]*?</li>")
        class_list_text=pattern.findall(res.text)
        for x in class_list_text:
            class_list_info.append(class_syntax(x))
        assert(len(class_list_info)>0)
        # append study duration per course
        # NOTE(review): "159857" looks like a hard-coded student uid -- confirm.
        for index, entry in enumerate(class_list_info):
            entry.append(get_study_info(s, entry[2], "159857"))
            print("编号",index,end=":")
            print(entry)
            # BUGFIX: log the same index that was printed (the original
            # incremented before logging, so the log was off by one).
            write_log("编号 {}:{}".format(index,str(entry)))
# Collect forum threads not yet recorded in records.dat.
def get_class_discuss(session,classid):
    """Return the list of unseen discussion threads for course *classid*.

    Each entry is [thread_id, title, unix_timestamp, sender]; threads already
    present in records.dat (via syntax_class_forum) are skipped.
    """
    discuss_html=view_class_column_page(session,classid,"discuss")
    ctx=re.compile("(<tr>[\s\S\r\n]*?href.*?threadid=)([\d]+)([\s\S\r]*?title=\")(.+)(\"[\S\s]*?<td.*?>[\s]*)(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d)([\s\S]*?<td.*?>[\s]*)([\S]*)")
    result=ctx.findall(discuss_html)
    unread_discuss_list=[]
    if(result):
        for x in result:
            single_forum=[]
            forum_id=x[1]
            forum_title=x[3]
            forum_timestamp=int(time.mktime(time.strptime(x[5],"%Y-%m-%d %H:%M:%S")))
            forum_sender=x[7]
            single_forum.append(forum_id)
            single_forum.append(forum_title)
            single_forum.append(forum_timestamp)
            single_forum.append(forum_sender)
            if(syntax_class_forum(forum_id)==[]):
                unread_discuss_list.append(single_forum)
    # BUGFIX: the original `return` sat inside `if(result):`, so the function
    # returned None when the page had no threads; callers iterate the result,
    # so always return a list.
    return unread_discuss_list
# Build the list of notifications newer than the last recorded read time
# (default: 30 days back when no record exists in records.dat).
def get_class_inform(session,classid):
    """Return unread notifications for course *classid*.

    Each entry is [inform_id, title, sender, unix_timestamp, content].
    """
    res=session.get("http://jxpt.whut.edu.cn:81/meol/common/inform/index_stu.jsp",\
        params={"tagbug":"client","s_order":"0","lid":str(classid),"strStyle":"new06"})
    ctx=re.compile("(<tr>[\s\S]*?<td>[\s\S]*?</td>[\s\S]*?</tr>)")
    class_inform_text=ctx.findall(res.text)
    class_unread_inform_list=[]
    index=0
    for x in class_inform_text:
        single_inform=[]
        informid=re.search("(jsp\?nid=)([\d]+)",x)[2]
        # the two "align_c" cells hold the post time (group 2) and sender (group 4),
        # e.g. class="align_c">罗先平
        inform_time=re.search("(<td[\s\S]*?\"align_c\">)(.*?)(\n[\s\S]*?\"align_c\">)(.*?)(\n)",x)[2]
        inform_time=int(time.mktime(time.strptime(inform_time,"%Y-%m-%d %H:%M:%S")))
        if(inform_time>syntax_class_records_last_time("inform",classid)):
            inform_title=re.search("(title=\")([\S\s]*?)(\"\n *onClick)",x)[2]
            inform_sender=re.search("(<td[\s\S]*?\"align_c\">)(.*?)(\n[\s\S]*?\"align_c\">)(.*?)(\n)",x)[4]
            inform_content=get_info_content(session,informid)
            single_inform.append(informid)
            single_inform.append(inform_title)
            single_inform.append(inform_sender)
            single_inform.append(inform_time)
            single_inform.append(inform_content)
            class_unread_inform_list.append(single_inform)
        index=index+1
    return class_unread_inform_list
# Email each unread notification, then persist its timestamp as "seen".
def send_unread_info(class_list_info,index):
    """Send one email per unread notification of the selected course and
    append an 'inform' record to records.dat so it is not re-sent."""
    for x in class_list_info[index][4]:
        flag=send_email(class_list_info[index][0],"{}-{}-{}".format(x[1],x[2],time.strftime("%Y-%m-%d %H:%M:%S",\
            time.localtime(x[3]))),x[4],"html")
        if(flag==1):
            write_log("通知未读,邮件提醒")
            f=open(records_file,"a+")
            f.write("{} {} {}\n".format("inform",class_list_info[index][2],x[3]))
            f.close()
# Refresh the unread-notification list (slot 4) of the selected course.
def update_unread_list(session, class_list_info, index):
    """Re-fetch unread notifications for course *index* and log the result."""
    course = class_list_info[index]
    course[4] = get_class_inform(session, course[2])
    write_log("更新未读通知列表:" + str(course[4]))
# Refresh the unread-thread list (slot 5) of the selected course.
def update_unread_discuss(session, class_list_info, index):
    """Re-fetch unread discussion threads for course *index* and log them."""
    course = class_list_info[index]
    course[5] = get_class_discuss(session, course[2])
    write_log("更新未读讨论贴列表:" + str(course[5]))
##
## Main loop: log in, let the user pick a course, then poll every 20s for new
## notifications/threads and email alerts. After 22:08 each day, re-login and
## farm ~35 minutes of study time while upvoting unread threads.
##
class_list_info=[]
login_index(s)
get_all_class_info(s)
index=int(input("请选择一个课程自动学习(输入对应的数字编号):"))
update_unread_list(s,class_list_info,index)
view_class_column_page(s,class_list_info[index][2],"study")
update_unread_discuss(s,class_list_info,index)
while(1):
    time.sleep(20)
    try:
        update_unread_list(s,class_list_info,index)
        update_unread_discuss(s,class_list_info,index)
        send_unread_info(class_list_info,index)
        now_tuple=time.localtime(time.time())
        # After the daily trigger time (22:08), start studying and liking posts.
        if(int(now_tuple[3])==22 and int(now_tuple[4])>8):
            # refresh the login state with a brand-new session first
            write_log("到时间了开始学习")
            rsa_key=""
            lt_str=""
            execution=""
            vatify_code=""
            s=Re.Session()
            login_flag=0
            class_list_info=[]
            login_index(s)
            get_all_class_info(s)
            update_unread_list(s,class_list_info,index)
            update_unread_discuss(s,class_list_info,index)
            view_class_column_page(s,class_list_info[index][2],"study")
            view_and_support_unread_forum(s,class_list_info,index)
            cnt=0
            while(1):
                update_unread_list(s,class_list_info,index)
                send_unread_info(class_list_info,index)
                study_class_over_time(s,class_list_info[index][2])
                time.sleep(60)
                cnt=cnt+1
                if(cnt>35):
                    write_log("今天的学习结束")
                    break
    except Exception as e:
        write_log(str(e))
        write_log("崩溃一次")
        continue
|
from django.contrib import admin
# Register your models here.
from .models import Author, BlogPost

# Expose the blog's models in the Django admin using the default ModelAdmin.
admin.site.register(Author)
admin.site.register(BlogPost)
class Solution:
    """Maximum product subarray via two DP arrays: the running maximum and
    minimum products of a subarray ending at each index."""

    def maxProduct(self, nums: List[int]) -> int:
        size = len(nums)
        best_here = [0] * size   # max product of a subarray ending at i
        worst_here = [0] * size  # min product of a subarray ending at i
        best_here[0] = worst_here[0] = nums[0]
        for i in range(1, size):
            x = nums[i]
            if x > 0:
                best_here[i] = max(x, x * best_here[i - 1])
                worst_here[i] = x * worst_here[i - 1]
            else:
                # a non-positive factor swaps the roles of max and min
                best_here[i] = x * worst_here[i - 1]
                worst_here[i] = min(x, x * best_here[i - 1])
        return max(best_here)
class Solution:
    """Maximum product subarray in O(1) space: track the running max and min
    products ending at the current element."""

    def maxProduct(self, nums: List[int]) -> int:
        best = hi = lo = nums[0]
        for x in nums[1:]:
            # the new extremes come from x alone or x times either extreme
            candidates = (x, x * hi, x * lo)
            hi, lo = max(candidates), min(candidates)
            best = max(best, hi)
        return best
|
# Given a list, find its smallest and largest elements and print their sum.
numbers = [3, 1, 5, 2, 6, 3, 10, 32, 5, 21]

# IDIOM FIX: renamed from `min`/`max` -- shadowing the builtins would break
# any later call to min()/max(). Printed output is unchanged.
smallest = numbers[0]
largest = numbers[0]

for number in numbers:
    if number < smallest:
        smallest = number
    print(f"Min number={smallest}")  # prints the running minimum each pass

for number in numbers:
    if number > largest:
        largest = number
    print(f"Max number={largest}")   # prints the running maximum each pass

c = smallest + largest
print(c)
import math


##
# A class to represent a vector in 3D space, with various operations that can
# be applied to it.
##
class Vector3D:
    def __init__(self, x, y, z, cols=None):
        """Store float components and precompute the Euclidean magnitude."""
        self.x = float(x)
        self.y = float(y)
        self.z = float(z)
        self.cn = None   # averaged-normal slot (assigned externally)
        self.spec = 0    # specular value (assigned externally)
        self.col = cols  # optional colour payload, carried through apply()
        self.mag = math.sqrt(x ** 2 + y ** 2 + z ** 2)

    # More formatting for debugging
    def __repr__(self):
        return "<Vector3D x=%s, y=%s, z=%s, cn=%s, mag=%s>" % (self.x, self.y, self.z, self.cn, self.mag)

    ## Overrides '==' (componentwise equality)
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z

    ## Overrides '-'
    def __sub__(self, other):
        return Vector3D(self.x - other.x, self.y - other.y, self.z - other.z)

    ## Overrides '+'
    def __add__(self, other):
        return Vector3D(self.x + other.x, self.y + other.y, self.z + other.z)

    ## Overrides '/' for scalar division.
    # BUGFIX: __div__ is the Python 2 hook and is never invoked by Python 3's
    # '/' operator, so division silently raised TypeError. Define __truediv__
    # and keep __div__ as a backward-compatible alias.
    def __truediv__(self, num):
        return Vector3D(self.x / num, self.y / num, self.z / num)

    __div__ = __truediv__

    ## Overrides '*', we use this to represent dot product.
    def __mul__(self, other):
        return self.x * other.x + self.y * other.y + self.z * other.z

    ## Overrides '**', we use this to represent cross product.
    def __pow__(self, other):
        nx = self.y * other.z - self.z * other.y
        ny = self.z * other.x - self.x * other.z
        nz = self.x * other.y - self.y * other.x
        return Vector3D(nx, ny, nz)

    def __hash__(self):
        return hash((self.x, self.y, self.z))

    ##
    # Returns the average of this vector and the surface normals of *others*.
    # NOTE(review): assumes each element of *others* exposes a .surfaceNormal
    # and that num == len(others) + 1 -- confirm at call sites.
    ##
    def average(self, others, num):
        x = self.x
        y = self.y
        z = self.z
        for vec in others:
            norm = vec.surfaceNormal
            x += norm.x
            y += norm.y
            z += norm.z
        x /= num
        y /= num
        z /= num
        return Vector3D(x, y, z)

    def unit_vector(self):
        """Return this vector scaled to unit length (x-axis unit if degenerate)."""
        if(self.mag <= 0.0):
            return Vector3D(1.0, 0.0, 0.0)
        else:
            return Vector3D(self.x / self.mag, self.y / self.mag, self.z / self.mag)

    def apply(self, transform):
        """Apply a row-major 3x4 (or 4x4) affine transform; colour is preserved."""
        x = transform[0][0] * self.x + transform[0][1] * self.y + transform[0][2] * self.z + transform[0][3]
        y = transform[1][0] * self.x + transform[1][1] * self.y + transform[1][2] * self.z + transform[1][3]
        z = transform[2][0] * self.x + transform[2][1] * self.y + transform[2][2] * self.z + transform[2][3]
        return Vector3D(x, y, z, cols=self.col)
|
__author__ = """Xuanzhe Wang"""
__email__ = 'wangxuanzhealbert@gmail.com'
__version__ = '0.0.1'
from . import app
|
'''
Created on 2017-01-03

Minimal TCP client: connects to a server on this host and prints the first
message received (as a bytes object).

@author: admin
'''
import socket

s = socket.socket()
host = socket.gethostname()
port = 1234

# RESOURCE FIX: use the socket as a context manager so the connection is
# always closed, even if connect() or recv() raises (the original never
# closed the socket).
with s:
    s.connect((host, port))
    print(s.recv(1024))
import cv2
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from scipy import signal
from scipy import ndimage
#reading image
# Step-by-step, visual (plt.show at each stage) preprocessing pipeline for a
# maze photo: read -> grayscale -> blur -> threshold -> dilate -> pick the
# largest blob -> crop -> equalize -> blur -> threshold sweep -> Canny edges
# -> erode -> dilate.
#####################
img = mpimg.imread('maze1.jpg')
print("img=" + str(img))
print("img shape=" + str(img.shape))
plt.imshow(img, cmap='gray', vmin=0, vmax=255)
plt.show()
#####################
#resize
#####################
# img_res = cv2.resize(img, dsize=(200, 400), interpolation=cv2.INTER_CUBIC)
# print("img_res=" + str(img_res))
# print("img_res shape=" + str(img_res.shape))
# plt.imshow(img_res, cmap='gray', vmin=0, vmax=255)
# plt.show()
#####################
#to Grayscale
# NOTE(review): mpimg.imread yields RGB, but COLOR_BGR2GRAY weights channels
# as BGR — R and B weights are swapped here; confirm this is intentional.
#####################
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print("img_gray=" + str(img_gray))
print("img_gray shape=" + str(img_gray.shape))
plt.imshow(img_gray, cmap='gray', vmin=0, vmax=255)
plt.show()
#####################
#contrast enhancement
# NOTE(review): this heavy blur is displayed but never used — the threshold
# below reads img_gray, not img_blur. Confirm the blur is intentionally unused.
#####################
img_blur = cv2.GaussianBlur(img_gray, (45,45), 0)
print(img_blur)
print(img_blur.shape)
plt.imshow(img_blur, cmap='gray', vmin=0, vmax=255)
plt.show()
#####################
#binerize enhancement
#####################
ret,img_thresh = cv2.threshold(img_gray,60,255,cv2.THRESH_BINARY_INV)
print(img_thresh)
print(img_thresh.shape)
plt.imshow(img_thresh, cmap='gray', vmin=0, vmax=1)
plt.show()
#####################
#img dilation
#####################
img_dilation = cv2.dilate(img_thresh, np.ones((11,11)), iterations=2)
print(img_dilation)
print(img_dilation.shape)
plt.imshow(img_dilation, cmap='gray', vmin=0, vmax=1)
plt.show()
#####################
#finding objects
#####################
# Label objects
labeled_image, num_features = ndimage.label(img_dilation)
# Find the location of all objects
objs = ndimage.find_objects(labeled_image)
# Get the height and width
measurements = []
for idx, ob in enumerate(objs):
    measurements.append((int(ob[0].stop - ob[0].start), int(ob[1].stop - ob[1].start)))
    # plt.imshow(measurements[idx], cmap='gray', vmin=0, vmax=255)
    # plt.imshow(img_dilation[ob[0].start:ob[0].stop, ob[1].start:ob[1].stop], cmap='gray', vmin=0, vmax=255)
    # plt.show()
# pick the component with the largest bounding-box area (assumed to be the maze)
sizes = [w * h for w, h in measurements]
biggest_idx = sizes.index(max(sizes))
#####################
#Bounding box
#####################
img_bound = img_gray[objs[biggest_idx][0].start:objs[biggest_idx][0].stop, objs[biggest_idx][1].start:objs[biggest_idx][1].stop]
print(img_bound)
print(img_bound.shape)
plt.imshow(img_bound, cmap='gray', vmin=0, vmax=255)
plt.show()
#####################
#contrast enhancement
####################
img_enhanced = cv2.equalizeHist(img_bound)
print(img_enhanced)
print(img_enhanced.shape)
plt.imshow(img_enhanced, cmap='gray', vmin=0, vmax=255)
plt.show()
####################
# blurring
#####################
img_blur = cv2.GaussianBlur(img_enhanced,(9,9),0)
print("img_blur=" + str(img_blur))
print("img_blur shape=" + str(img_blur.shape))
plt.imshow(img_blur, cmap='gray', vmin=0, vmax=255)
plt.show()
#####################
# binerization
# sweep several fixed thresholds and show each result
#####################
for threshold in list(range(50,100,5)):
    ret,th3 = cv2.threshold(img_blur,threshold,255,cv2.THRESH_BINARY)
    print("threshold = " + str(threshold))
    print("binerization = th3=" + str(th3))
    print("th3 shape=" + str(th3.shape))
    plt.imshow(th3, cmap='gray', vmin=0, vmax=255)
    plt.show()
#####################
# Edge detection
#####################
# get canny thresholds
high_thresh, thresh_im = cv2.threshold(img_blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
lowThresh = 0.5*high_thresh
img_edge = cv2.Canny(img_blur, lowThresh, high_thresh)
print("img_edge=" + str(img_edge))
print("img_edge shape=" + str(img_edge.shape))
plt.imshow(img_edge, cmap='gray', vmin=0, vmax=1)
plt.show()
# #####################
# #img dilation
# #####################
img_erosion = cv2.erode(img_edge, np.ones((3,3)), iterations=1)
print(img_erosion)
print(img_erosion.shape)
plt.imshow(img_erosion, cmap='gray', vmin=0, vmax=1)
plt.show()
#####################
# #img dilation
# #####################
img_dilation = cv2.dilate(img_erosion, np.ones((3,3)), iterations=1)
print(img_dilation)
print(img_dilation.shape)
plt.imshow(img_dilation, cmap='gray', vmin=0, vmax=1)
plt.show()
#####################
# Otsu's thresholding after Gaussian filtering
# blur = cv2.GaussianBlur(img_bound,(9,9),0)
# plt.imshow(blur, cmap='gray', vmin=0, vmax=255)
# plt.show()
# for threshold in list(range(245,250,1)):
#     print(threshold)
#     ret3, image_binarized = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#     print(image_binarized)
#     print(image_binarized.shape)
#     plt.imshow(image_binarized, cmap='gray', vmin=0, vmax=255)
#     plt.show()
#contrast enhancement
#####################
# img_enhanced = cv2.equalizeHist(img_gray)
# print(img_enhanced)
# print(img_enhanced.shape)
# plt.imshow(img_enhanced, cmap='gray', vmin=0, vmax=255)
# plt.show()
#####################
from unittest import TestCase
"""
测试模板
"""
class TestSolver(TestCase):
    # Placeholder test template: always fails via TestCase.fail() until a
    # real assertion is written.
    def test_demo(self):
        self.fail()
|
import pandas as pd
def clean_cov_df(df_tsv, ID):
    """Read a per-exon coverage TSV and return {exon_id: cov, "ID": ID}.

    The "info" column is expected to look like "gene:<gene>;exon_id=<exon>";
    the gene and exon identifiers are parsed out of it.
    """
    cov_table = pd.read_csv(df_tsv, sep="\t", dtype={"Chr": "str"})
    info_parts = cov_table["info"].str.split(";")
    # "gene:<gene>" -> <gene>
    cov_table["parent_gene"] = info_parts.str[0].str.split(":").str[1]
    # "exon_id=<exon>" -> <exon>
    cov_table["exon_id"] = info_parts.str[1].str.split("=").str[1]
    result = {exon: cov for exon, cov in zip(cov_table["exon_id"], cov_table["cov"])}
    result["ID"] = ID
    return result
def norm_cov(df):
    # Normalize per-sample coverage to counts-per-million.
    # NOTE(review): mutates df in place by adding a "normfac" column, and
    # assumes the last two columns after that are ["ID", "normfac"] (i.e. the
    # input's last column is "ID") — confirm against callers.
    df["normfac"]=df.sum(axis=1)          # per-row total coverage
    df["normfac"]=df["normfac"]/1000000   # scale factor: total / 1e6
    df2=df[list(df)[:-2]].divide(list(df["normfac"]), axis='rows')
    df2[["ID","normfac"]]=df[list(df)[-2:]]
    return df2
def Gene_extractor(gff):
    # Extract gene rows (Chr, info, ID_gene) from a GFF file.
    # NOTE(review): skiprows=33 assumes a fixed-length header — verify for
    # the GFF releases in use. MT (mitochondrial) entries and zero-length /
    # duplicate-coordinate features are dropped.
    df=pd.read_csv(gff, sep="\t", skiprows=33, names=["Chr", "Prog", "type", "start", "end", "value", "str","value2", "info"])
    df=df[df["start"]!= df["end"]]
    df=df[df["Chr"]!="MT"]
    df=df.drop_duplicates(subset=["start","end"], keep="first")
    df_gene=df[df.loc[:,("type")]=="gene"][["Chr","info"]].copy()
    # "ID=gene:<id>;..." -> <id>
    df_gene.loc[:,("ID_gene")]=df_gene.loc[:,("info")].str.split(";").str[0].str.split(":").str[1]
    return df_gene
def exon_extractor(gff):
    # Extract exon rows (Chr, info, start, exon_id, ID_transcript) from a GFF
    # file, with the same header-skip and filtering conventions as
    # Gene_extractor (skiprows=33, drop MT / zero-length / duplicate coords).
    df=pd.read_csv(gff, sep="\t", skiprows=33, names=["Chr", "Prog", "type", "start", "end", "value", "str","value2", "info"])
    df=df[df["start"]!= df["end"]]
    df=df[df["Chr"]!="MT"]
    df=df.drop_duplicates(subset=["start","end"], keep="first")
    df_exon=df[df.loc[:,("type")]=="exon"][["Chr","info","start"]].copy()
    # ";exon_id=<id>" -> <id>; "Parent=transcript:<id>" -> <id>
    df_exon["exon_id"]=df_exon["info"].str.split(";").str[1].str.split("=").str[1]
    df_exon["ID_transcript"]=df_exon["info"].str.split(";").str[0].str.split(":").str[1]
    return df_exon
def split_df(df,cores):
if str(len(df)/cores)[str(len(df)/cores).index(".")+1]==0:
chunks=cores*[float(len(df)/cores)]
else:
rest="0."+str(len(df)/cores)[str(len(df)/cores).index(".")+1:]
chunks=(cores-1)*[float(str(len(df)/cores)[:str(len(df)/cores).index(".")])]
chunks.append(float(str(len(df)/cores)[:str(len(df)/cores).index(".")])+((cores))*float(rest))
return chunks
|
import sublime, sublime_plugin
import re
# Plugin Globals
# Sentinel "no match" region (mirrors what sublime's find() returns on failure)
# and the regexes used to locate sentences and tags in an XML/XHTML buffer.
NoRegion = sublime.Region(-1, -1)
openTagRx = r"<\w[^>]+>"
closeTagRx = r"</\w+>"
tagRx = r"<[^>]+>"
tagsNotAllowedInSpanRx = r"<(p|div|br)\b[^>]*>"
startSentenceRx = (
    r"("
    "[“\"(]+" # open quote or parenthesis
    "(<[^>]+>)*" # possibly followed by tags
    ")?" # (maybe)
    "[A-Z0-9]" # followed by a capital letter or digit
    "(?!\w*\s(said|asked|exclaimed|declared|remarked)\.)"
)
closeQuoteRx = r"[”\"’]"
closeQuoteOrParenRx = r"[”\"’)]"
closeQuoteBangOrEllipsesRx = r"(...|[…!”\"’])"
upToNextNonspaceTextRx = r"(\s*<[^>]+>)*\s*(?=[^<>\s])"
upToNextTextRx = r"(<[^>]+>)*(?!=<)"
endSentenceRx = (
    r"("
    "("
    "(?<!Mr)(?<!Mrs)(?<!Ms)(?<!Dr)(?<!Sr)(?<!Jr)" # exclude dots after common abbreviations
    "(?<![A-Z]\.[A-Z])" # exclude dots in doted abbreviations (doesn't catch first dot)
    "(?<!\s[A-Z]\.\s[A-Z])" # exclude dots after initials (doesn't catch first initial)
    "\.+" # final punctuation is "."
    "|" # or
    "[?!]+" # final punctuation is ? or ! (possibly repeated)
    "|" # or
    "…" # final punctuation is ellipses "…". The case of an ellipses followed by a lower case letter will be excluded in code.
    "|"
    " -" # final punctuation is dash " -". The case of a dash not followed by a </p> will be excluded in code.
    ")"
    "[”\"’]?" # sentence may end with a close quote
    "\)?" # sentence may end with a close parentheses
    ")"
    "(?!\w)" # last character of sentence cannot be succeeded directly by a letter (catches first dot in abbreviations)
    "(?!\s[A-Z]\.)" # exclude first dot in spaced initials such as "J. K. Rowling"
)
# a <p> or <span> carrying a 5-digit sentence id attribute like id="s00042"
labellingElemOpenTagRx = r"<(p|span) [\w= \"]*id=\"s\d{5}\"[\w= \"]*>"
labelledRegionRx = labellingElemOpenTagRx + r"(.(?!</\1>))+.</\1>"
# debug command: move the caret to the next sentence start after the selection
class TestStuffCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        v = self.view
        s = v.sel()
        start = _getNextSentenceStartAfter(self, s[0].begin())
        s.clear()
        s.add(sublime.Region(start, start))
# select every sentence already wrapped in a labelling <p>/<span id="sNNNNN">
class SelectLabelledSentences(sublime_plugin.TextCommand):
    def run(self, edit):
        v = self.view
        s = v.sel()
        sentences = v.find_all(labelledRegionRx)
        s.clear()
        s.add_all(sentences)
# select the next sentence after the current selection
class SelectNextSentenceCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        _selectNextSentence(self, edit)
# surround the current selection with <span> opening and closing tags, with id="s00000"
class SurroundSelectionWithSpanCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        _surroundSelectionsWithSpan(self, edit)
# combined workflow command: label the selection (if any) then select the next sentence
class SurroundSelectionAndFindNextSentenceCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        v = self.view
        s = v.sel()
        if len(s) > 0:
            r = s[0] # only support single selection for this command
            if r.empty(): # selection is empty so select the next sentence after cursor
                sentence = _findNextSentenceAfterPoint(self, r.begin())
                _selectRegion(self, sentence)
            else: # we already have a selection
                # surround the current selection with labelling span unless it is already labeled
                if not _regionIsLabeled(self, r):
                    _surroundRegionWithSpan(self, edit, r)
                # select the next sentence
                _selectNextSentence(self, edit)
# renumber every "sNNNNN" id sequentially (delegates to the Insert Nums plugin)
class ReNumberSentenceTagsCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        v = self.view
        s = v.sel()
        tagIds = v.find_all("(?<=\"s)\d{5}")
        if len(tagIds) > 0:
            s.clear()
            s.add_all(tagIds)
            v.run_command("insert_nums", {"format": "1:1~0=5d", "quiet": True})
# ====== private helpers ======
# replace the current selection with `region` and scroll it into view
def _selectRegion(self, region):
    v = self.view
    s = v.sel()
    print("select region: "+textPositionAsString(v, region.begin())+" - "+textPositionAsString(v, region.end()))
    s.clear()
    s.add(region)
    v.show_at_center(region)
# select the next sentence after the current selection
# return whether the operation succeeded
def _selectNextSentence(self, edit):
    v = self.view
    s = v.sel()
    if len(s) > 0:
        r = s[0] # only support single selection for this command
        sentence = _findNextSentenceAfterPoint(self, r.begin()) if r.empty() else _findNextSentenceAfterRegion(self, r)
        if sentence is not None:
            # print("found sentence: "+textPositionAsString(v, sentence.begin())+" - "+textPositionAsString(v, sentence.end()))
            _selectRegion(self, sentence)
            return True
        else:
            print("could not find another sentence after selection")
    else:
        print("this command only works with a single selection")
    return False
# surround the current selection with <span> opening and closing tags, with id="s00000"
# return whether the operation succeeded (stops at the first region that fails)
def _surroundSelectionsWithSpan(self, edit):
    v = self.view
    for r in v.sel():
        if not _surroundRegionWithSpan(self, edit, r):
            return False
    return True
# returns True if the given region contains or is immediately preceded by a sentence labelling element
def _regionIsLabeled(self, region):
    v = self.view
    if region.empty():
        return False
    regionText = v.substr(region)
    # does region contain a labeled span?
    if re.search(labellingElemOpenTagRx, regionText):
        return True
    # is region immediately preceded by labeling span tag?
    precedingOpeningSpan = _findLastOpeningTagBefore(self, "span", region.begin(), [])
    if precedingOpeningSpan.end() == region.begin() and \
        re.search(labellingElemOpenTagRx, v.substr(precedingOpeningSpan)):
        return True
    return False
# surround the given region with <span> opening and closing tags, with id="s00000"
# (the placeholder id is later renumbered by ReNumberSentenceTagsCommand)
# return whether the operation succeeded
def _surroundRegionWithSpan(self, edit, region):
    v = self.view
    if not region.empty():
        regionText = v.substr(region)
        # don't surround if region is already labeled
        if _regionIsLabeled(self, region):
            print("selection is already labeled with a span")
            return False
        # fail if the region contains elements that can't be nested inside a span
        res = re.search(tagsNotAllowedInSpanRx, regionText)
        if res:
            print("elements of type '"+res.group(1)+"' are not allowed inside span elements")
            return False
        # surround region with labeled span tags
        surrounded = "<span id=\"s00000\">" + regionText + "</span>"
        v.replace(edit, region, surrounded)
        return True
    return False
# Find the next sentence starting after the given point
def _findNextSentenceAfterPoint(self, point):
    v = self.view
    sentenceStart = _getNextSentenceStartAfter(self, point)
    if sentenceStart == -1: return None
    return _findSentenceStartingAt(self, sentenceStart)
# returns the position of the next character of non-space text within the xml doc,
# starting from `searchFrom`
def _findNextNonspaceTextStart(view, searchFrom):
    return view.find(upToNextNonspaceTextRx, searchFrom).end()
# returns the position of the next character of text within the xml doc,
# starting from `searchFrom`
def _findNextTextStart(view, searchFrom):
    return view.find(upToNextTextRx, searchFrom).end()
# Find the sentence immediately following the sentence delineated by the given region.
# The command fails with an error message if non-space text is found
# between the end of the given region and the start of the next sentence.
def _findNextSentenceAfterRegion(self, region):
    v = self.view
    sentenceStart = _getNextSentenceStartAfter(self, region.end())
    if sentenceStart == -1: return None
    nextTextStart = _findNextNonspaceTextStart(v, region.end())
    if nextTextStart < sentenceStart:
        print("There is text between the end of the selection and the start of the next sentence.")
        return None
    return _findSentenceStartingAt(self, sentenceStart)
# Delineate the sentence that begins at `sentenceStart`, extending it so that
# any xml tags opened inside are also closed inside (and vice versa).
def _findSentenceStartingAt(self, sentenceStart):
    v = self.view
    firstWord = v.find("[A-Z0-9]", sentenceStart).begin()
    sentenceEnd = _findEndOfSentenceStartingAt(self, sentenceStart)
    if sentenceEnd == -1: return None
    # continue searching for sentence end if sentence is 2 characters long: e.g. "A. first example"
    if sentenceEnd - firstWord == 2:
        sentenceEnd = v.find(endSentenceRx, sentenceEnd).end()
    region = sublime.Region(sentenceStart, sentenceEnd)
    return _extendRegionToValidSentence(self, region)
# Find the end position of the sentence starting at `sentenceStart`,
# recursing past candidate endings that the regex alone cannot reject.
def _findEndOfSentenceStartingAt(self, sentenceStart):
    v = self.view
    candidateR = v.find(endSentenceRx, sentenceStart)
    candidateStr = v.substr(candidateR)
    candidateEnd = candidateR.end()
    nextNonspaceTextStart = _findNextNonspaceTextStart(v, candidateEnd)
    # " -" followed by text before the end of the paragraph is not the end of a sentence
    nextCloseP = v.find(r"</p>", candidateEnd)
    if re.search(" -$", candidateStr):
        if nextNonspaceTextStart < nextCloseP.begin():
            print("dash inside paragraph")
            return _findEndOfSentenceStartingAt(self, candidateEnd)
    # if candidate ends with an ellipses or close quote that isn't followed by a sentence start, extend to next sentence end.
    nextSentenceStart = _getNextSentenceStartAfter(self, candidateEnd)
    if re.search(closeQuoteBangOrEllipsesRx+r"$", candidateStr):
        if nextNonspaceTextStart < nextSentenceStart:
            print("ellipses or close quote not followed by sentence start")
            return _findEndOfSentenceStartingAt(self, candidateEnd)
    return candidateEnd
# Iteratively grow `region` until it is a self-contained sentence:
# balanced tags plus any trailing close quotes/parentheses.
def _extendRegionToValidSentence(self, region):
    v = self.view
    # ensure that opening tags have matching closing tags (and vice versa) inside sentence region
    correctedRegion = _expandRegionToEnsureMatchingTags(self, region)
    # if the next text after candidate end is a close quote or parentheses, extend to include it.
    trailingQuoteOrParen = _findTrailingCloseQuoteOrParenAt(v, correctedRegion.end())
    if trailingQuoteOrParen:
        correctedRegion = correctedRegion.cover(trailingQuoteOrParen)
    # if we had to make adjustments, some conditions may no longer be satisfied so run them again
    if correctedRegion != region:
        return _extendRegionToValidSentence(self, correctedRegion)
    return region
# if the next text after the given position is a string of close quote or close parentheses,
# return the region containing those characters
def _findTrailingCloseQuoteOrParenAt(v, position):
    nextTextStart = _findNextTextStart(v, position)
    closeQuoteOrParen = v.find(closeQuoteOrParenRx+r"+", nextTextStart)
    if nextTextStart == closeQuoteOrParen.begin():
        return closeQuoteOrParen
    return None
# find the next sentence start position after the point specified in startAt
# a sentence can only start in an xml text region (outside a tag)
# and will be the first capital letter or number after a sentence end.
def _getNextSentenceStartAfter(self, startAt):
    v = self.view
    # if startAt is inside a tag, move it to the end of the tag
    nextOpenBracket = v.find(r"<", startAt)
    nextCloseBracket = v.find(r">", startAt)
    if nextCloseBracket.begin() < nextOpenBracket.begin():
        startAt = nextCloseBracket.end()
    nextTag = v.find(tagRx, startAt)
    candidate = v.find(startSentenceRx, startAt).begin()
    if nextTag.begin() == -1 or candidate < nextTag.begin():
        # candidate start position is before next tag, so it's definitely outside a tag
        return candidate
    else:
        # recursively search starting at end of next tag
        return _getNextSentenceStartAfter(self, nextTag.end())
# returns a list of regions that match the given regular expression in the given region
# (recursive: first match plus all matches after it)
def _findMatchesInRegion(self, matchRx, region):
    v = self.view
    nextMatch = v.find(matchRx, region.begin())
    if not nextMatch.empty() and nextMatch.end() <= region.end():
        subsequentRegion = sublime.Region(nextMatch.end(), region.end())
        subsequentMatches = _findMatchesInRegion(self, matchRx, subsequentRegion)
        return [nextMatch] + subsequentMatches
    else:
        return []
# returns a list of regions that delineate the opening xml tags (e.g. <span>) in the given region
def _findOpeningTagsInRegion(self, region):
    return _findMatchesInRegion(self, openTagRx, region)
# returns a list of regions that delineate the closing xml tags (e.g. </span>) in the given region
def _findClosingTagsInRegion(self, region):
    return _findMatchesInRegion(self, closeTagRx, region)
# returns a bool indicating whether the given `regions` intersect the `region`
def _regionsIntersectRegion(self, regions, region):
for r in regions:
if r.intersects(region):
return True
return False
# returns the region enclosing the first closing tag of type `tagName`
# that doesn't intersect any region in `exclusions`
# starting at position `startingAt`
def _findFirstClosingTagAfter(self, tagName, startingAt, exclusions):
    v = self.view
    matchRx = "</" + tagName + ">"
    match = v.find(matchRx, startingAt)
    if _regionsIntersectRegion(self, exclusions, match):
        # this closing tag belongs to an excluded (already-paired) span; skip past it
        return _findFirstClosingTagAfter(self, tagName, match.end(), exclusions)
    else:
        return match
# returns a region whose end is extended forward compared to `region` enough
# such that all tags that open inside the region also close inside the region
def _expandRegionToEncloseMatchingClosingTags(self, region):
    v = self.view
    openingTags = _findOpeningTagsInRegion(self, region)
    openingTags.reverse() # need to add excluded regions from right to left
    exclusions = []
    lastClosingTagEnd = region.end()
    for openingTag in openingTags:
        tagName = v.substr(v.find("\w+", openingTag.begin()))
        closingTag = _findFirstClosingTagAfter(self, tagName, openingTag.end(), exclusions)
        newExclusion = sublime.Region(openingTag.begin(), closingTag.end())
        exclusions.append(newExclusion)
        lastClosingTagEnd = max(lastClosingTagEnd, closingTag.end())
    return sublime.Region(region.begin(), lastClosingTagEnd)
# returns a region enclosing the last opening tag of type `tagName`
# that begins before `endingAt` and which doesn't intersect any of the
# regions in `exclusions`
def _findLastOpeningTagBefore(self, tagName, endingAt, exclusions):
    v = self.view
    matchRx = "<" + tagName + "\\b[^>]*>"
    # finds the last tag of type `tagName` starting in region `region`
    def findLastMatchInRegion(region):
        firstMatch = v.find(matchRx, region.begin())
        # print("first match for '"+matchRx+"' in region = "+str(firstMatch))
        if firstMatch.empty() or firstMatch.begin() > region.end():
            return NoRegion
        lastMatch = findLastMatchInRegion(sublime.Region(firstMatch.end(), region.end()))
        if not lastMatch.empty():
            return lastMatch
        if _regionsIntersectRegion(self, exclusions, firstMatch):
            return NoRegion
        return firstMatch
    # finds the region enclosing the last tag of type `tagName` ending before point `endPoint`,
    # scanning backwards through the buffer in 100-character windows
    def findLastBeforePoint(endPoint):
        v = self.view
        # initially search the 100 characters before endPoint
        startPoint = max(0, endPoint - 100)
        # print("searching for "+tagName+" between "+textPositionAsString(v, startPoint)+" and "+textPositionAsString(v, endPoint))
        searchRegion = sublime.Region(startPoint, endPoint)
        match = findLastMatchInRegion(searchRegion)
        if match.empty():
            if startPoint <= 0: # already searched back to beginning of file
                print("failed to find an opening tag of type '"+tagName+"' before position "+str(endPoint))
                return NoRegion
            # continue searching in the block of 100 characters before this searchRegion
            return findLastBeforePoint(startPoint)
        return match
    return findLastBeforePoint(endingAt)
# Returns a region whose beginning is extended backward compared to `region` enough
# such that all tags that close inside the region also open inside the region.
def _expandRegionToEncloseMatchingOpeningTags(self, region):
    v = self.view
    closingTags = _findClosingTagsInRegion(self, region)
    exclusions = []
    firstOpeningTagBegin = region.begin()
    for closingTag in closingTags:
        tagName = v.substr(v.find("\w+", closingTag.begin()))
        openingTag = _findLastOpeningTagBefore(self, tagName, closingTag.begin(), exclusions)
        newExclusion = sublime.Region(openingTag.begin(), closingTag.end())
        exclusions.append(newExclusion)
        firstOpeningTagBegin = min(firstOpeningTagBegin, openingTag.begin())
    return sublime.Region(firstOpeningTagBegin, region.end())
# Returns a region whose beginning and end are extended outward compared to
# `region` enough that all tags that open inside the region also close
# inside the region and vice versa.
def _expandRegionToEnsureMatchingTags(self, region):
    expanded = _expandRegionToEncloseMatchingClosingTags(self, region)
    expanded = _expandRegionToEncloseMatchingOpeningTags(self, expanded)
    return expanded
# Returns the view position in the form (row,col) where row and col are 1-indexed
def textPositionAsString(view, position):
    row, col = view.rowcol(position)
    return "({},{})".format(row + 1, col + 1)
|
import plugins
import importlib
class Store(object):
    """Registry of all Service objects discovered from the plugins package.

    NOTE: this module is Python 2 (print statement, e.message below).
    """
    def __init__(self):
        """Discover and register every plugin service; failures are logged, not raised.
        """
        #TO-DO: this id broken in the tests, we need to fix the plugin importing for tests
        self.services = []
        try:
            module_list = plugins.get_all_plugins()
            for module in module_list:
                i = importlib.import_module("plugins." + module)
                service = i.init_service_class()
                if service is not None:
                    self.register_service(service)
        except Exception as e:
            print "Failed to load plugin: {}".format(e.message)
    def register_service(self, service):
        """Append a service to the registry.
        """
        #TO-DO: merge if the id exists
        self.services.append(service)
    def get_service_using_id(self, uniqueid):
        """Return the first service whose id matches, or None.
        """
        for service in self.services:
            if service.id == uniqueid:
                return service
    def clean_dict(self):
        """Serialize all registered services to plain dicts.
        """
        return [s.clean_dict() for s in self.services]
class Service(object):
    """A named service grouping a list of UI elements.

    The given name doubles as the service's unique id.
    """
    def __init__(self, name, elements=None):
        """Create the service and register any initial elements.
        """
        self.id = name
        self.name = self.id
        self.elements = []
        for item in (elements or []):
            self.register_element(item)
    def register_element(self, element):
        """Append an element to this service.
        """
        #TO-DO: merge if the id already exists
        self.elements.append(element)
    def get_element_using_id(self, uniqueid):
        """Return the first element whose id matches, or None.
        """
        for candidate in self.elements:
            if candidate.id == uniqueid:
                return candidate
        return None
    def clean_dict(self):
        """Serialize this service (and its elements) to a plain dict.
        """
        return {
            "id": self.id,
            "name": self.name,
            "elements": [child.clean_dict() for child in self.elements],
        }
class Element(object):
    """A UI element owned by a service: named data values plus controls.

    NOTE: clean_dict() uses dict.iteritems — Python 2 only.
    """
    def __init__(self, name, parent, controls=None, data=None):
        """Create the element; `name` doubles as its unique id, `parent` is the owning service id.
        """
        self.id = name
        self.name = self.id
        self.controls = []
        self.parent = parent
        if data is not None:
            self.data = data
        else:
            self.data = {}
        if controls is not None:
            for control in controls:
                self.register_control(control)
    def register_control(self, control):
        """Append a control to this element.
        """
        #TO-DO: merge if the id already exists
        self.controls.append(control)
    def get_control_using_id(self, uniqueid):
        """Return the first control whose id matches, or None.
        """
        for control in self.controls:
            if control.id == uniqueid:
                return control
    def clean_dict(self):
        """Serialize to a plain dict; data becomes a list of {name, value} pairs.
        """
        data = []
        for key, value in self.data.iteritems():
            data.append({"name": key, "value": value})
        return dict(id=self.id, name=self.name, parent=self.parent,
                    data=data, controls=[c.clean_dict() for c in self.controls])
class Control(object):
    """An invokable action exposed by an element.

    NOTE: clean_dict() uses dict.iteritems — Python 2 only.
    """
    def __init__(self, name, parent, desc, action, callback=None, dataset=None):
        """Create the control.

        dataset describes the expected call parameters as
        {name: {"desc": ..., "type": ...}}; callback is invoked by call().
        """
        self.id = name
        self.name = self.id
        self.parent = parent
        self.desc = desc
        self.action = action
        self.callback = callback
        if dataset is not None:
            self.dataset = dataset
        else:
            self.dataset = {}
    def call(self, data=None):
        """Invoke the callback with kwargs built from [{"name":..., "value":...}] entries.

        A missing callback is a silent no-op.
        """
        if self.callback is None:
            return
        if data is None:
            data = []
        kwargs = {}
        for element in data:
            kwargs.update({element["name"]: element["value"]})
        self.callback(**kwargs)
    def clean_dict(self):
        """Serialize to a plain dict; dataset becomes a list of {name, description, type}.
        """
        dataset = []
        for key, value in self.dataset.iteritems():
            dataset.append({"name": key, "description": value["desc"], "type": value["type"]})
        return dict(id=self.id, name=self.name, parent=self.parent,
                    description=self.desc, action=self.action,
                    dataset=dataset)
|
# Canned JSON error bodies for common HTTP failure responses.
Not_found = {"Error": "Not Found"}
Bad_request = {"Error": "Bad Request"}
Not_modified = {"Error": "Not Modified"}
# NOTE(review): message reads "Not Content" — likely meant "No Content" (HTTP 204);
# left unchanged in case clients match on the exact string.
No_Content = {"Error": "Not Content"}
Not_Allowed = {"Error": "Method Not Allowed"}
|
from functools import reduce
from operator import mul
def numbers_with_digit_inside(x, d):
    """Return [count, sum, product] of the numbers in 1..x whose decimal
    representation contains the digit d.

    The product is 0 when no number matches.

    Fixed: the original used the Python 2-only builtins `xrange` and bare
    `reduce`, which raise NameError on Python 3; `range` and
    `functools.reduce` are used instead (behavior unchanged on Python 2
    semantics).
    """
    digit = str(d)
    matches = [n for n in range(1, x + 1) if digit in str(n)]
    product = reduce(mul, matches) if matches else 0
    return [len(matches), sum(matches), product]
|
import matplotlib.pyplot as plt
# Global machine state for the 16-bit simulator.
# Seven general registers R0..R6, addressed by their 3-bit binary names;
# "111" (the flags register) is handled specially via get_flag_val().
reg_val = {"000": 0, "001": 0, "010": 0, "011": 0, "100": 0, "101": 0, "110": 0}
reg_list = ["000", "001", "010", "011", "100", "101", "110"]
# Status flags: V=overflow, L=less-than, G=greater-than, E=equal.
flag_val = {"V": 0, "L": 0, "G": 0, "E": 0}
var_storage = {}
PC = 0                 # program counter (index into list_in)
mem_touched = []       # addresses accessed, paired with cycle_touched for the scatter plot
cycle_touched = []
list_in = []           # 256-word memory: 16-char binary strings read from stdin
output = []            # per-cycle trace rows printed at the end
halted = False
cycle = 0
def BinPC(n):
    """Render the program counter as an 8-character zero-padded binary string."""
    bits = bin(int(n)).replace("0b", "")
    return (8 - len(bits)) * "0" + bits
def last16(x):
    # Keep only the low 16 bits of a binary string (truncation on overflow).
    return x[-16:]
def bin_to_dec(n):
    # Parse a binary string into an int.
    return int(n, 2)
def decimalToBinary(n):
    """Render n as a 16-character zero-padded binary string."""
    bits = bin(int(n)).replace("0b", "")
    return (16 - len(bits)) * "0" + bits
def update_PC(new_PC):
    # Return the new program counter value. Despite the name, this assigns a
    # LOCAL PC and does not modify the module-level PC — callers must assign
    # the return value themselves (as main() does).
    PC = new_PC
    return PC
def left_shift(x, y):
    """Logical left shift of a 16-bit binary string by y positions."""
    return (x + "0" * y)[-16:]
def right_shift(x, y):
    """Logical right shift of a 16-bit binary string by y positions."""
    return ("0" * y + x)[:16]
def bit_wise_not(x):
    """Flip every bit of a binary string ('0' -> '1', anything else -> '0')."""
    return "".join("1" if ch == "0" else "0" for ch in x)
def reset_flag():
    """Clear all four status flags (E, G, L, V) back to 0."""
    for flag in ("E", "G", "L", "V"):
        flag_val[flag] = 0
def compare(x, y):
    # Set exactly one of E/G/L according to x vs y; V is left untouched.
    flag_val["E"] = 0
    flag_val["G"] = 0
    flag_val["L"] = 0
    if x == y:
        flag_val["E"] = 1
    if x > y:
        flag_val["G"] = 1
    if x < y:
        flag_val["L"] = 1
# Decode and execute the single 16-bit instruction at list_in[PC].
# Returns (trace_row, halted) where trace_row is [BinPC(PC), R0..R6, flags].
# Instruction layout: bits 0-4 opcode; register operands are 3-bit fields,
# immediates/addresses are bits 8-15.
# NOTE(review): PC is a local parameter. The jump opcodes below rebind it via
# update_PC(), but main() then advances the global PC by 1 unconditionally, so
# jumps do not appear to redirect the next fetch — verify against the
# intended ISA semantics.
def read_mem(PC):
    global halted
    global cycle
    global cycle_touched
    global mem_touched
    if list_in[PC][0:5] == "00000": # add (clamped to 16 bits; V set on overflow)
        reset_flag()
        if reg_val[list_in[PC][10:13]] + reg_val[list_in[PC][13:16]] <= 65535:
            reg_val[list_in[PC][7:10]] = reg_val[list_in[PC][10:13]] + reg_val[list_in[PC][13:16]]
        else:
            reg_val[list_in[PC][7:10]] = bin_to_dec(last16(decimalToBinary(reg_val[list_in[PC][10:13]] + reg_val[list_in[PC][13:16]])))
            flag_val["V"] = 1
    if list_in[PC][0:5] == "00001": # sub (underflow clamps result to 0)
        reset_flag()
        if (reg_val[list_in[PC][10:13]] - reg_val[list_in[PC][13:16]]) <= 0:
            reg_val[list_in[PC][7:10]] = 0
        else:
            reg_val[list_in[PC][7:10]] = reg_val[list_in[PC][10:13]] - reg_val[list_in[PC][13:16]]
    if list_in[PC][0:5] == "00010": # mov imm
        reset_flag()
        reg_val[list_in[PC][5:8]] = bin_to_dec(list_in[PC][8:16])
    if list_in[PC][0:5] == "00011": # mov reg ("111" source reads the flags register)
        if list_in[PC][13:16] == "111":
            reg_val[list_in[PC][10:13]] = bin_to_dec(get_flag_val())
        else:
            reg_val[list_in[PC][10:13]] = reg_val[list_in[PC][13:16]]
        reset_flag()
    if list_in[PC][0:5] == "00100": # load
        reset_flag()
        reg_val[list_in[PC][5:8]] = bin_to_dec(list_in[bin_to_dec(list_in[PC][8:16])])
        mem_touched.append(bin_to_dec(list_in[PC][8:16]))
        cycle_touched.append(cycle)
    if list_in[PC][0:5] == "00101": # store
        reset_flag()
        list_in[bin_to_dec(list_in[PC][8:16])] = decimalToBinary(reg_val[list_in[PC][5:8]])
        mem_touched.append(bin_to_dec(list_in[PC][8:16]))
        cycle_touched.append(cycle)
    if list_in[PC][0:5] == "00110": # mul (clamped to 16 bits; V set on overflow)
        reset_flag()
        if (reg_val[list_in[PC][10:13]] * reg_val[list_in[PC][13:16]]) <= 65535:
            reg_val[list_in[PC][7:10]] = reg_val[list_in[PC][10:13]] * reg_val[list_in[PC][13:16]]
        else:
            reg_val[list_in[PC][7:10]] = bin_to_dec(
                last16(decimalToBinary(reg_val[list_in[PC][10:13]] * reg_val[list_in[PC][13:16]])))
            flag_val["V"] = 1
    if list_in[PC][0:5] == "00111": # div: quotient -> R0, remainder -> R1
        reset_flag()
        reg_val["000"] = reg_val[list_in[PC][10:13]] // reg_val[list_in[PC][13:16]]
        reg_val["001"] = reg_val[list_in[PC][10:13]] % reg_val[list_in[PC][13:16]]
    if list_in[PC][0:5] == "01000": # right shift
        reset_flag()
        reg_val[list_in[PC][5:8]] = bin_to_dec(
            right_shift(decimalToBinary(reg_val[list_in[PC][5:8]]), bin_to_dec(list_in[PC][8:16])))
    if list_in[PC][0:5] == "01001": # left shift
        reset_flag()
        reg_val[list_in[PC][5:8]] = bin_to_dec(
            left_shift(decimalToBinary(reg_val[list_in[PC][5:8]]), bin_to_dec(list_in[PC][8:16])))
    if list_in[PC][0:5] == "01010": # xor
        reset_flag()
        reg_val[list_in[PC][7:10]] = (reg_val[list_in[PC][10:13]] ^ reg_val[list_in[PC][13:16]])
    if list_in[PC][0:5] == "01011": # or
        reset_flag()
        reg_val[list_in[PC][7:10]] = (reg_val[list_in[PC][10:13]] | reg_val[list_in[PC][13:16]])
    if list_in[PC][0:5] == "01100": # and
        reset_flag()
        reg_val[list_in[PC][7:10]] = (reg_val[list_in[PC][10:13]] & reg_val[list_in[PC][13:16]])
    if list_in[PC][0:5] == "01101": # not
        reset_flag()
        reg_val[list_in[PC][10:13]] = bin_to_dec(bit_wise_not(decimalToBinary(reg_val[list_in[PC][13:16]])))
    if list_in[PC][0:5] == "01110": # cmp
        reset_flag()
        compare(reg_val[list_in[PC][10:13]], reg_val[list_in[PC][13:16]])
    if list_in[PC][0:5] == "01111": # jmp (see NOTE above: only rebinds the local PC)
        PC = update_PC(bin_to_dec(list_in[PC][8:16]))
        reset_flag()
    if list_in[PC][0:5] == "10000": # jlt
        if flag_val["L"] == 1:
            PC = update_PC(bin_to_dec(list_in[PC][8:16]))
        reset_flag()
    if list_in[PC][0:5] == "10001": # jgt
        if flag_val["G"] == 1:
            PC = update_PC(bin_to_dec(list_in[PC][8:16]))
        reset_flag()
    if list_in[PC][0:5] == "10010": # je
        if flag_val["E"] == 1:
            PC = update_PC(bin_to_dec(list_in[PC][8:16]))
        reset_flag()
    if list_in[PC][0:5] == "10011": # hlt
        reset_flag()
        halted = True
    # every fetch counts as a memory access for the trace plot
    mem_touched.append(PC)
    cycle_touched.append(cycle)
    cycle+=1
    # trace row: current PC, all register values, then the flags word
    list1 = []
    list1.append(BinPC(PC))
    for i in reg_list:
        list1.append(decimalToBinary(reg_val[i]))
    list1.append(get_flag_val())
    return list1, halted
def get_flag_val():
    # Flags register as a 16-bit word: 12 zero bits then V, L, G, E.
    return "000000000000" + str(flag_val["V"]) + str(flag_val["L"]) + str(flag_val["G"]) + str(flag_val["E"])
def memory_dump(memory):
    """Print all 256 memory words, padding missing trailing words with zeros."""
    for word in memory:
        print(word)
    for _ in range(len(memory), 256):
        print("0" * 16)
def main():
    # Load the program from stdin (one 16-bit word per line), run until HLT,
    # then print the execution trace, dump memory, and show the memory-access
    # scatter plot.
    global PC
    global cycle
    global cycle_touched
    global mem_touched
    # read program words until EOF, skipping blank lines
    while True:
        try:
            line = input()
            if line != "":
                list_in.append(line)
        except EOFError:
            break
    # zero-fill memory out to 256 words
    for i in range(len(list_in), 256):
        list_in.append("0000000000000000")
    # fetch/execute loop; each iteration records one trace row
    while (True):
        x, halted = read_mem(PC)
        output.append(x)
        PC = update_PC(PC + 1)
        if halted == True:
            break
    plt.scatter(x=cycle_touched,y=mem_touched)
    for i in output:
        print(' '.join(i))
    memory_dump(list_in)
    plt.show()
|
import numpy as np
from keras import backend as K
import scipy
import scipy.misc
from skimage.measure import label, regionprops
def pro_process(temp_img,input_size):
    # Convert the image to float32 and resize to (input_size, input_size, 3).
    # NOTE(review): scipy.misc.imresize was deprecated and removed in
    # SciPy >= 1.3 (it also required Pillow) — this code needs an old pinned
    # SciPy; consider migrating. Confirm the environment's version.
    img = np.asarray(temp_img).astype('float32')
    img = scipy.misc.imresize(img, (input_size, input_size, 3))
    return img
def BW_img(input, thresholding):
    # Binarize `input` at `thresholding`, keep only the largest connected
    # component, and fill its holes. Returns an int binary mask.
    binary = input > thresholding
    label_image = label(binary)
    regions = regionprops(label_image)
    area_list = []
    for region in regions:
        area_list.append(region.area)
    if area_list:
        idx_max = np.argmax(area_list)
        # labels are 1-based; regionprops lists them in ascending label order,
        # so region index idx_max corresponds to label idx_max + 1
        binary[label_image != idx_max+1] = 0
    return scipy.ndimage.binary_fill_holes(np.asarray(binary).astype(int))
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two masks (flattened), smoothed by 1."""
    smooth = 1.
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    total = K.sum(flat_true) + K.sum(flat_pred)
    return (2. * overlap + smooth) / (total + smooth)
def dice_coef2(y_true, y_pred):
    # Mean Dice over the first two channels (cup and disc masks), weighted equally.
    score0 = dice_coef(y_true[:, :, :, 0], y_pred[:, :, :, 0])
    score1 = dice_coef(y_true[:, :, :, 1], y_pred[:, :, :, 1])
    score = 0.5 * score0 + 0.5 * score1
    return score
def dice_coef_loss(y_true, y_pred):
    # Negated two-channel Dice, usable as a Keras loss (lower is better).
    return -dice_coef2(y_true, y_pred)
'''
divide the image into two part by color, segment when no seg_color in current row.
cup:(255,255,0)
disc:(255,0,0)
other:(0,0,0)
'''
def divide_by_color(polar_seg_img, seg_color):
    """Return (top, bottom) row indices of the band painted with seg_color.

    `top` is the first row containing at least one seg_color pixel; `bottom`
    is the first subsequent row containing none. Defaults are (0, height)
    when the color never appears / never disappears.
    """
    height = polar_seg_img.shape[0]
    width = polar_seg_img.shape[1]
    top, bottom = 0, height
    inside_band = False
    for r in range(height):
        row_has_color = any(
            (polar_seg_img[r, c, :] == seg_color).all() for c in range(width)
        )
        if row_has_color:
            if not inside_band:
                top = r
                inside_band = True
        elif inside_band:
            bottom = r
            break
    return top, bottom
'''
divide the image into three part
'''
def divide_three_parts(polar_seg_img, divide_criteria=128):
    """Split a polar segmentation image into cup / disc / background bands.

    Rows are scanned top-down counting pixels of the current target color
    (cup first, then disc); a row whose count drops below `divide_criteria`
    ends the current band.

    Parameters
    ----------
    polar_seg_img : ndarray, shape (H, W, 3)
        Color-coded segmentation (cup=(255,255,0), disc=(255,0,0)).
    divide_criteria : int, optional
        Minimum per-row pixel count for a row to still belong to the current
        band. Default 128 keeps the historical behavior (half of a 256-px
        wide image).

    Returns
    -------
    (int, int)
        Row index of the cup/disc boundary and of the disc/background
        boundary.
    """
    img_shape = polar_seg_img.shape
    cup_color = np.array([255, 255, 0])
    disc_color = np.array([255, 0, 0])
    # Defaults used when a boundary is never detected.
    cup_disc_boundary = 0
    disc_other_boundary = img_shape[0]
    seg_color = cup_color
    for row in range(img_shape[0]):
        count = 0
        for column in range(img_shape[1]):
            if (polar_seg_img[row, column, :] == seg_color).all():
                count += 1
        if count < divide_criteria:
            if (seg_color == cup_color).all():
                cup_disc_boundary = row
                seg_color = disc_color  # cup band ended; start tracking disc
            elif (seg_color == disc_color).all():
                disc_other_boundary = row
                break
    return cup_disc_boundary, disc_other_boundary
|
import json
import os
import pickle
import threading
from functools import wraps
from time import sleep, time
def delayed(timeout):
    """Decorator factory: sleep `timeout` seconds before each call.

    The wrapped function's return value is passed through unchanged.
    """
    def __dec(func):
        @wraps(func)  # preserve name/docstring of the wrapped function
        def __wrapper(*args, **kwargs):
            sleep(timeout)
            return func(*args, **kwargs)
        return __wrapper
    return __dec
def repeated(timeout):
    """Decorator factory: invoke the function forever, sleeping `timeout`
    seconds between iterations.

    The wrapper never returns; intended for worker-loop entry points.
    """
    def __dec(func):
        @wraps(func)  # preserve name/docstring of the wrapped function
        def __wrapper(*args, **kwargs):
            while True:
                func(*args, **kwargs)
                sleep(timeout)
        return __wrapper
    return __dec
def silent(*args, exc=Exception):
    """Decorator: swallow exceptions of type `exc`.

    The wrapper returns the caught exception instance, or None on success
    (the wrapped function's own return value is intentionally discarded).
    Other exception types propagate. Usable both bare (@silent) and
    parametrized (@silent(exc=KeyError)).
    """
    def __dec(func):
        @wraps(func)  # preserve name/docstring of the wrapped function
        def __wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except exc as e:
                return e
            else:
                return None
        return __wrapper
    # Bare usage: @silent passes the function positionally.
    if args:
        func = args[0]
        return __dec(func)
    else:
        return __dec
def memorized(*args, expires=None, file_=None):
    """Decorator: memoize results keyed on the JSON-encoded call arguments.

    Parameters
    ----------
    expires : float, optional
        Seconds after which a cached entry is recomputed; a cache hit
        refreshes the entry's timestamp.
    file_ : str, optional
        Path used to persist the cache across runs via pickle.

    Usable bare (@memorized) or parametrized (@memorized(expires=60)).
    Arguments must be JSON-serializable.
    """
    cache = {}
    if file_ and os.path.exists(file_):
        with open(file_, 'rb') as f:
            cache = pickle.load(f)

    def __save_cache():
        # Persist after every call so a crash loses at most one entry.
        if file_:
            with open(file_, 'wb') as f:
                pickle.dump(cache, f)

    def __dec(func):
        @wraps(func)  # preserve name/docstring of the wrapped function
        def __wrapper(*args, **kwargs):
            key = json.dumps((args, kwargs))
            now = time()
            missing = key not in cache
            expired = expires is not None and \
                not missing and \
                cache[key]['time'] + expires < now
            if missing or expired:
                res = func(*args, **kwargs)
                cache[key] = {
                    'time': now,
                    'res': res
                }
            else:
                # Cache hit: refresh the timestamp and reuse the result.
                cache[key]['time'] = now
                res = cache[key]['res']
            __save_cache()
            return res
        return __wrapper

    # Bare usage: @memorized passes the function positionally.
    if args:
        func = args[0]
        return __dec(func)
    else:
        return __dec
def async_(*args, callback=None):
    """Decorator: run the function in a background thread.

    Parameters
    ----------
    callback : callable, optional
        Invoked with the function's result once it finishes; a tuple result
        is splatted into positional arguments.

    The wrapper returns the started threading.Thread so callers may join()
    it (previously the implicit return was None, so callers that ignored
    the return value are unaffected). Usable bare (@async_) or parametrized.
    """
    def __dec(func):
        @wraps(func)  # preserve name/docstring of the wrapped function
        def __wrapper(*args, **kwargs):
            def __target():
                res = func(*args, **kwargs)
                if callback:
                    if not isinstance(res, tuple):
                        res = (res,)
                    callback(*res)
            thread = threading.Thread(target=__target)
            thread.start()
            return thread
        return __wrapper

    # Bare usage: @async_ passes the function positionally.
    if args:
        func = args[0]
        return __dec(func)
    else:
        return __dec
|
# Copyright 2022 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rcl_interfaces.msg import SetParametersResult
import rclpy
from rclpy.executors import ExternalShutdownException
from rclpy.node import Node
from rclpy.parameter import Parameter
# Example usage: changing param1 successfully will result in setting of param2.
# ros2 service call /set_parameters_callback/set_parameters rcl_interfaces/srv/SetParameters
# "{parameters: [{name: "param1", value: {type: 3, double_value: 1.0}}]}"
# node for demonstrating correct usage of pre_set, on_set
# and post_set parameter callbacks
class SetParametersCallback(Node):
    """Demo node registering pre-set, on-set and post-set parameter callbacks.

    pre-set  : may modify the incoming parameter list (here: changing
               'param1' also queues an update of 'param2' to 4.0).
    on-set   : validates the final list and accepts/rejects it.
    post-set : reacts to successfully applied parameters (here: mirrors
               them into instance attributes).
    """

    def __init__(self):
        super().__init__('set_parameters_callback')
        # tracks 'param1' value
        self.internal_tracked_param_1 = self.declare_parameter('param1', 0.0).value
        # tracks 'param2' value
        self.internal_tracked_param_2 = self.declare_parameter('param2', 0.0).value

        # setting another parameter from the callback is possible
        # we expect the callback to be called for param2
        def pre_set_parameter_callback(parameter_list):
            modified_parameters = parameter_list.copy()
            for param in parameter_list:
                if param.name == 'param1':
                    modified_parameters.append(Parameter('param2', Parameter.Type.DOUBLE, 4.0))
            return modified_parameters

        # validation callback
        def on_set_parameter_callback(parameter_list):
            result = SetParametersResult()
            for param in parameter_list:
                if param.name == 'param1':
                    result.successful = True
                    result.reason = 'success param1'
                elif param.name == 'param2':
                    result.successful = True
                    result.reason = 'success param2'
            return result

        # can change internally tracked class attributes
        def post_set_parameter_callback(parameter_list):
            for param in parameter_list:
                if param.name == 'param1':
                    self.internal_tracked_param_1 = param.value
                elif param.name == 'param2':
                    self.internal_tracked_param_2 = param.value

        # Register the three callbacks with the node.
        self.add_pre_set_parameters_callback(pre_set_parameter_callback)
        self.add_on_set_parameters_callback(on_set_parameter_callback)
        self.add_post_set_parameters_callback(post_set_parameter_callback)
def main(args=None):
    """Spin the demo node until interrupted or externally shut down."""
    rclpy.init(args=args)
    node = SetParametersCallback()
    try:
        rclpy.spin(node)
    except (KeyboardInterrupt, ExternalShutdownException):
        pass
    finally:
        # Always release node resources, even on interrupt.
        node.destroy_node()
        rclpy.try_shutdown()
if __name__ == '__main__':
main()
|
# Read an integer and print all of its divisors separated by spaces.
s = int(input())
for candidate in range(1, s + 1):
    if s % candidate == 0:
        print(candidate, end=" ")
|
#!/usr/bin/env python3
# coding=utf-8
import pdb
# 命令行执行调试 python -m pdb my_script.py
def show_trace():
    """Drop into the pdb debugger, then return a marker string."""
    pdb.set_trace()  # execution pauses here until the user continues
    return "show_trace done"
def outer_func():
    """Call show_trace() (which pauses in pdb) and print the combined marker."""
    var_str = show_trace()
    var_str += "succ"
    print("var_str " + var_str)
if __name__ == "__main__":
outer_func()
|
# services/users/project/api/__init__.py
|
class Demo:
    """Teaching demo: stores two numbers and prints them."""

    # class attribute shared by all instances (not used by the methods)
    Value = 50

    def __init__(self, no1, no2):
        self.i = no1
        self.j = no2

    def fun(self):
        """Print both stored numbers."""
        print(self.i, self.j)

    def gun(self):
        """Print both stored numbers (identical to fun)."""
        print(self.i, self.j)
def main():
    """Create two Demo objects and exercise fun() and gun() on each."""
    first = Demo(11, 21)
    second = Demo(51, 101)
    print("Fun:")
    first.fun()
    second.fun()
    print("Gun:")
    first.gun()
    second.gun()
    print("Done!")
# Script entry point; the trailing line prints a visual separator.
if __name__=="__main__":
    main()
print("="*30)
class Circle:
    """Circle whose methods print radius, area and circumference (PI = 3.14)."""

    PI = 3.14  # class-level constant (stray trailing semicolon removed)

    def __init__(self, radius):
        self.R = radius

    def Accept(self):
        """Print the stored radius."""
        print(self.R)

    def area(self):
        """Print the area, R^2 * PI (literal 3.14 replaced by the class constant)."""
        print(self.R**2 * self.PI)

    def circum(self):
        """Print the circumference, 2 * R * PI."""
        print(2 * self.R * self.PI)

    @classmethod
    def DisplayPI(cls):
        """Print the class-level PI constant."""
        print(cls.PI)
# Interactive driver: read a radius from the user and exercise every method.
#obj = Circle(3)
R = int(input("Enter radius of circle: "))
obj = Circle(R)
print("Radius is:")
obj.Accept()
print("Area is:")
obj.area()
print("Circumference is: ")
obj.circum()
print("Value of PI: ")
obj.DisplayPI()
print("-----------------------------")
class Circle:
    """Circle demo (second variant): methods print labelled results."""

    PI = 3.14  # class-level constant (stray trailing semicolon removed)

    def __init__(self, radius):
        self.R = radius

    def Accept(self):
        """Print the stored radius, labelled then bare."""
        print("Radius is: ", self.R)
        print(self.R)

    def area(self):
        """Print the labelled area, R^2 * PI (literal 3.14 replaced by the constant)."""
        print("Area is: ", self.R**2 * self.PI)

    def circum(self):
        """Print the labelled circumference, 2 * R * PI."""
        print("Circumference is: ", 2 * self.R * self.PI)

    @classmethod
    def DisplayPI(cls):
        """Print the class-level PI constant."""
        print("Value of PI:", cls.PI)
def main():
    """Exercise every Circle method on a radius-3 instance."""
    new_circle = Circle(3)
    # The methods print their own results and return None; wrapping each call
    # in print() used to emit a spurious "None" line after every call.
    new_circle.Accept()
    new_circle.area()
    new_circle.circum()
    new_circle.DisplayPI()
    print("Done!")
if __name__=="__main__":
main()
print("="*30)
class Arithmetic:
    """Demo class that prints the four basic operations on two numbers."""

    Value = 0  # class attribute, unused by the methods

    def __init__(self, Value1, Value2):
        self.i = Value1
        self.j = Value2

    def Accept(self):
        """Echo the two stored operands."""
        print("Accepted Numbers:", self.i, self.j)

    def Addition(self):
        """Print the sum of the operands."""
        print("Addition is:", self.i + self.j)

    def Subtraction(self):
        """Print the difference of the operands."""
        print("Sub is:", self.i - self.j)

    def Multiplication(self):
        """Print the product of the operands."""
        print("Multi is:", self.i * self.j)

    def Division(self):
        """Print the (true) quotient of the operands."""
        print("Div is:", self.i / self.j)
def main():
    """Drive the Arithmetic demo with the fixed operand pair (200, 50)."""
    # Statement-terminating semicolons removed (un-Pythonic carryover).
    calc = Arithmetic(200, 50)
    calc.Accept()
    calc.Addition()
    calc.Subtraction()
    calc.Multiplication()
    calc.Division()
if __name__=="__main__":
main() |
from mod_base import*
class Run(Command):
    """Run a command as another user (and/or on another channel). Usage: [channel] user command"""

    def run(self, win, user, data, caller=None):
        """Execute `data` ("[channel] nick command [args]") as another user.

        Returns the wrapped command's status, or False on invalid input.
        """
        args = Args(data)
        target_win = win
        if len(args) < 2:
            win.Send("provide at least nick, command")
            # Bail out: everything below requires at least nick + command
            # (previously fell through and crashed on args[1]).
            return False
        if len(args) > 2:
            if args.IsChannel(args[0]):
                new_win = self.bot.GetWindow(args[0], create=False)
                if new_win:
                    target_win = new_win
                    args.Drop(0)  # Remove the channel argument from the argument list
                else:
                    win.Send("Can't find that channel.")
                    return False
        target_user = self.bot.FindUser(args[0])
        self.Log("mod", "arg0:" + str(args[0]) + " user:" + str(target_user))
        if not target_user:
            win.Send("user not found")
            return False
        cmd = args[1]
        arg = None
        if len(args) > 2:
            arg = " ".join(args[2:])
        inst = self.bot.GetCommand(cmd)
        if not inst:
            win.Send("no such command")
            return False
        # Query-zone commands are routed to the target user's query window.
        if inst.zone == IRC_ZONE_QUERY:
            status = inst.run(target_user.GetQuery(), target_user, arg, caller=user)
        else:
            status = inst.run(target_win, target_user, arg, caller=user)
        # Propagate the wrapped command's status (previously discarded).
        return status
# Module registration record consumed by the bot's plugin loader.
module = {
    "class": Run,
    "type": MOD_COMMAND,
    "level": 5,  # minimum user level allowed to invoke the command
    "zone": IRC_ZONE_BOTH
}
""" pubsub.py -- simple Publish/Subscribe implementation """
from tl_logger import TLLog
log = TLLog.getLogger( 'pubsub' )
class PubSub(object):
    """Minimal publish/subscribe dispatcher.

    Callbacks may subscribe to one event type, a list of types, or every
    event; publish() fans an event out to all matching callbacks.
    """

    def __init__(self, name='pubsub'):
        self.name = name
        self._dctSubEvents = {}   # event type -> list of callbacks
        self._lstAllEvents = []   # callbacks that receive every event

    def subscribe(self, eventType, cbFunc):
        """Register cbFunc for a single event type (ignoring duplicates)."""
        log.debug('subscribe() %s - eventType:%s cbFunc:%s' % (self.name, eventType, cbFunc))
        callbacks = self._dctSubEvents.setdefault(eventType, [])
        if cbFunc in callbacks:
            log.warn('subscribe() - eventType:%s - callback already defined' % eventType)
        else:
            callbacks.append(cbFunc)

    def subscribeList(self, lstEvents, cbFunc):
        """Register cbFunc for each event type in lstEvents."""
        for evt in lstEvents:
            self.subscribe(evt, cbFunc)

    def subscribeAll(self, cbFunc):
        """Register cbFunc to receive every published event."""
        self._lstAllEvents.append(cbFunc)

    def unsubscribeAll(self, cbFunc):
        """Remove cbFunc from the receive-everything list."""
        self._lstAllEvents.remove(cbFunc)

    def unsubscribe(self, eventType, cbFunc):
        """Remove cbFunc's subscription for one event type."""
        log.debug('unsubscribe() %s - eventType:%s cbFunc:%s' % (self.name, eventType, cbFunc))
        if eventType not in self._dctSubEvents:
            log.warn('unsubscribe() - eventType:%s - has not subscriptions' % eventType)
            return
        callbacks = self._dctSubEvents[eventType]
        if cbFunc in callbacks:
            callbacks.remove(cbFunc)
        else:
            log.warn('unsubscribe() - eventType:%s - callback was not subscribed' % eventType)

    def unsubscribeList(self, lstEvents, cbFunc):
        """Remove cbFunc's subscription for each event type in lstEvents."""
        for evt in lstEvents:
            self.unsubscribe(evt, cbFunc)

    def publish(self, event):
        """Publish an event: notify all-event subscribers, then type-specific ones."""
        log.debug('publish() %s - event:%s' % (self.name, event))
        for cbFunc in self._lstAllEvents:
            cbFunc(event)
        # Dispatch on the event's type attribute.
        evtType = event.evtType
        for cbFunc in self._dctSubEvents.get(evtType, []):
            cbFunc(event)

    def __str__(self):
        return '%s _dctSubEvents:%s' % (self.name, self._dctSubEvents)
if __name__ == '__main__':
    # Manual smoke test (Python 2 syntax: print statements).
    # NOTE(review): publish() dereferences event.evtType, but plain strings
    # are published below -- this demo raises AttributeError on the very
    # first publish; confirm the intended event object type.
    class TestPubSub(object):
        def cb_1(self,event):
            print 'cb_1 - evt:%s' % event
        def cb_2(self,event):
            print 'cb_2 - evt:%s' % event
        def cb_3(self,event):
            print 'cb_3 - evt:%s' % event
    EVT_1 = 'One'
    EVT_2 = 'Two'
    EVT_3 = 'Three'
    EVT_4 = 'Four'
    lstEvents = [EVT_1,EVT_2,EVT_3, EVT_4]
    pbsb = PubSub()
    obj = TestPubSub()
    # Publish with no subscribers, then progressively add/remove callbacks.
    print pbsb
    for event in lstEvents:
        pbsb.publish(event)
    print
    pbsb.subscribe(EVT_1, obj.cb_1)
    print pbsb
    for event in lstEvents:
        pbsb.publish(event)
    print
    pbsb.subscribe(EVT_2, obj.cb_2)
    print pbsb
    for event in lstEvents:
        pbsb.publish(event)
    print
    pbsb.subscribeList([EVT_3,EVT_4], obj.cb_3)
    print pbsb
    for event in lstEvents:
        pbsb.publish(event)
    print
    pbsb.unsubscribe(EVT_1, obj.cb_1)
    print pbsb
    for event in lstEvents:
        pbsb.publish(event)
    print
    pbsb.unsubscribeList([EVT_3,EVT_4], obj.cb_3)
    print pbsb
    for event in lstEvents:
        pbsb.publish(event)
    print
|
import numpy as np
from chardet import detect
import pandas as pd
import collections
import pickle
import csv
# Absolute Windows paths to the debate dataset and the pickled graph data.
path = 'C:/users/anjali/environments/acl/data/handeset.csv'
df = pd.read_csv(path)
names = np.unique(df['name'].values)
with open('C:/users/anjali/environments/acl/theo.pkl', 'rb') as f:
    edge_list, users_list = pickle.load(f)
# Bidirectional user-name <-> index mappings built from the pickled user list.
name_to_ind = dict(map(lambda x: (x[1], x[0]), enumerate(users_list)))
#name_to_ind = dict(map(lambda x:(x[1], x[0]), enumerate(names)))
ind_to_name = dict(map(lambda x: (x[1], x[0]), name_to_ind.items()))
def word2dict(filename):
    """Load a word2vec-format embedding file into two dicts.

    Returns (embed, orig_embed): both map node id -> vector. `embed`
    additionally contains a zero vector for every user in the module-level
    `names` that has no learned embedding; `orig_embed` holds only the
    learned vectors.
    """
    embed = collections.defaultdict()
    orig_embed = collections.defaultdict()
    with open(filename) as f:
        lines = f.read().split('\n')
    # Header line is "<vocab size> <dimension>"; only the dimension is used.
    dim = int(lines[0].split(' ')[1])
    for line in lines[1:]:
        sp = line.strip().split(' ')
        node = sp[0]
        if node == '':
            continue
        #node = ind_to_name[int(node)]
        vec = np.array(list(map(float, sp[1:])))
        embed[node] = vec
        orig_embed[node] = vec
    # Users missing from the embedding file share a single zero vector.
    zero_vec = np.zeros((dim, ))
    for user in names:
        if user not in embed:
            embed[user] = zero_vec
    return embed, orig_embed
# Build the per-debate embedding matrix: one row per CSV data row, keyed on
# column 14 of each row, then pickle the resulting array.
embed, orig_embed = word2dict('C:/users/anjali/environments/node2vec/emb/finalres.emb')
res = []
data = open('C:/users/anjali/environments/acl/data/handeset.csv', encoding='utf-8')
debates = list(csv.reader(data))[1:]  # skip the header row
for row in debates:
    res.append(embed[row[14]])
res = np.array(res)
with open('theoemb.pkl', 'wb') as f:
    pickle.dump(res, f)
#!/bin/env python
"""Expand the directories tagged with argv[1] in list.txt and write the
argv[2]-th contained file path to file.list."""
import os, sys

# Each relevant line of list.txt looks like: "<tag> <directory-prefix> ...".
fileInfo = []
with open('list.txt') as fileList:
    for line in fileList:
        fields = line.split()
        if len(line) > 1 and fields[0] == sys.argv[1]:
            prefix = fields[1]
            # Prefix each directory entry with the listed path.
            fileInfo.extend('%s%s' % (prefix, entry) for entry in os.listdir(prefix))

with open('file.list', 'w') as newFile:
    newFile.write(fileInfo[int(sys.argv[2])])
|
def test_plate_from_zero():
    """End-to-end ESPIM plate test: build a Delaunay mesh, apply a laminate,
    solve the static problem and check the first five buckling eigenvalues."""
    # Plate geometry and laminate data
    a = 0.406
    b = 0.254
    E1 = 1.295e11
    E2 = 9.37e9
    nu12 = 0.38
    G12 = 5.24e9
    G13 = 5.24e9
    G23 = 5.24e9
    plyt = 1.9e-4
    laminaprop = (E1, E2, nu12, G12, G13, G23)
    angles = [0, 45, -45, 90, 90, -45, 45, 0]
    # Generating Mesh
    # ---
    import numpy as np
    from scipy.spatial import Delaunay
    xs = np.linspace(0, a, 8)
    ys = np.linspace(0, b, 8)
    points = np.array(np.meshgrid(xs, ys)).T.reshape(-1, 2)
    tri = Delaunay(points)
    # Using Meshless Package
    # ---
    from scipy.sparse import coo_matrix
    from composites.laminate import read_stack
    from structsolve import solve, lb
    from meshless.espim.read_mesh import read_delaunay
    from meshless.espim.plate2d_calc_k0 import calc_k0
    from meshless.espim.plate2d_calc_kG import calc_kG
    from meshless.espim.plate2d_add_k0s import add_k0s
    mesh = read_delaunay(points, tri)
    nodes = np.array(list(mesh.nodes.values()))
    prop_from_nodes = True
    nodes_xyz = np.array([n.xyz for n in nodes])
    # **Applying properties
    # applying heterogeneous properties
    for node in nodes:
        lam = read_stack(angles, plyt=plyt, laminaprop=laminaprop)
        node.prop = lam
    # **Defining Boundary Conditions**
    #
    DOF = 5
    def bc(K, mesh):
        # Constrain selected DOFs per edge by zeroing the matching
        # rows/columns of K (dof numbers are 1-based within each
        # 5-DOF node block).
        for node in nodes[nodes_xyz[:, 0] == xs.min()]:
            for dof in [1, 3]:
                j = dof-1
                K[node.index*DOF+j, :] = 0
                K[:, node.index*DOF+j] = 0
        for node in nodes[(nodes_xyz[:, 1] == ys.min()) |
                          (nodes_xyz[:, 1] == ys.max())]:
            for dof in [2, 3]:
                j = dof-1
                K[node.index*DOF+j, :] = 0
                K[:, node.index*DOF+j] = 0
        for node in nodes[nodes_xyz[:, 0] == xs.max()]:
            for dof in [3]:
                j = dof-1
                K[node.index*DOF+j, :] = 0
                K[:, node.index*DOF+j] = 0
    # **Calculating Constitutive Stiffness Matrix**
    k0s_method = 'cell-based'
    k0 = calc_k0(mesh, prop_from_nodes)
    add_k0s(k0, mesh, prop_from_nodes, k0s_method, alpha=0.2)
    bc(k0, mesh)
    k0 = coo_matrix(k0)
    # **Defining Load and External Force Vector**
    def define_loads(mesh):
        # Unit load on the x = max edge: full share on interior edge
        # nodes, half share on the two corner nodes.
        loads = []
        load_nodes = nodes[(nodes_xyz[:, 0] == xs.max()) &
                           (nodes_xyz[:, 1] != ys.min()) &
                           (nodes_xyz[:, 1] != ys.max())]
        fx = -1. / (nodes[nodes_xyz[:, 0] == xs.max()].shape[0] - 1)
        for node in load_nodes:
            loads.append([node, (fx, 0, 0)])
        load_nodes = nodes[(nodes_xyz[:, 0] == xs.max()) &
                           ((nodes_xyz[:, 1] == ys.min()) |
                            (nodes_xyz[:, 1] == ys.max()))]
        fx = -1. / (nodes[nodes_xyz[:, 0] == xs.max()].shape[0] - 1) / 2
        for node in load_nodes:
            loads.append([node, (fx, 0, 0)])
        return loads
    n = k0.shape[0] // DOF
    fext = np.zeros(n*DOF, dtype=np.float64)
    loads = define_loads(mesh)
    for node, force_xyz in loads:
        fext[node.index*DOF + 0] = force_xyz[0]
    print('Checking sum of forces: %s' % str(fext.reshape(-1, DOF).sum(axis=0)))
    # **Running Static Analysis**
    d = solve(k0, fext, silent=True)
    total_trans = (d[0::DOF]**2 + d[1::DOF]**2)**0.5
    print('Max total translation', total_trans.max())
    # **Calculating Geometric Stiffness Matrix**
    kG = calc_kG(d, mesh, prop_from_nodes)
    bc(kG, mesh)
    kG = coo_matrix(kG)
    # **Running Linear Buckling Analysis**
    eigvals, eigvecs = lb(k0, kG, silent=True)
    print('First 5 eigenvalues')
    print('\n'.join(map(str, eigvals[:5])))
    # Reference eigenvalues for this mesh/layup (regression check).
    assert np.allclose(eigvals[:5], [
        1004.29332981,
        1822.11577078,
        2898.3728806,
        2947.17499169,
        3297.54959342,
        ]
    )
if __name__ == '__main__':
test_plate_from_zero()
|
def sieve(N):
    """Return all primes below N (sieve of Eratosthenes, odd-only scan).

    Uses // and range so the code runs identically on Python 2 and 3
    (the original N/2 and xrange were Python-2 only).
    """
    # Pre-mark: 0 and 1 composite, 2 prime, then odd=candidate / even=composite.
    s = [0, 0, 1] + [1, 0] * (N // 2)
    i = 3
    while i*i < N:
        if s[i]:
            # Cross out all multiples of the prime i below N.
            for itr in range(i*2, N, i):
                s[itr] = 0
        i += 2
    return [i for i in range(N) if s[i] == 1]
from sys import argv

# Each line of the input file holds one integer N; print the primes below N,
# comma-separated. print() call form works on Python 2 and 3 alike.
with open(argv[1], 'r') as f:
    for line in f:
        print(','.join(str(p) for p in sieve(int(line))))
|
'''
You're given an ancient book that unfortunately has a few pages in the wrong position,
fortunately your computer has a list of every page number in order from 1 to n.
You're supplied with an array of numbers, and should return an array with each page
number that is out of place. Incorrect page numbers will not appear next
to each other. Duplicate incorrect page numbers are possible.
Example:
Given: list = [1,2,10,3,4,5,8,6,7]
Return: [10,8]
Your returning list should have the incorrect page numbers in the order they were found.
'''
def find_page_number(pages):
    """Return the page numbers that are out of place, in encounter order.

    Walks the list tracking the next expected page number; any page that
    does not match the running expectation is reported as misplaced.
    """
    misplaced = []
    expected = 1
    for page in pages:
        if page == expected:
            expected += 1
        else:
            misplaced.append(page)
    return misplaced
|
import pytest
# If you want to assert that some code raises an exception you can use the raises helper:
def f():
    """Helper that always exits with status code 1."""
    raise SystemExit(1)
def test_mytest():
    """Check that f() raises SystemExit, using the pytest.raises helper."""
    with pytest.raises(SystemExit):
        f()
"""
Run in "quiet" reporting mode:
$ py.test -q test_sysexit.py
.
1 passed in 0.12 seconds
"""
|
from django.conf.urls import url
from django.conf.urls import url, include
from django.urls import path
from .bars_merge.views import FindSimilarEP, FindSimilarWP, CreateCheckPoint
from .educational_program.views import DepartmentCreateAPIView, DepartmentListAPIView, DepartmentDetailsView, \
DepartmentDestroyView, DepartmentUpdateView
# Контроллеры
from .expertise.views import ExpertiseCommentCreateView, UserExpertiseCreateView, UserExpertiseListView, \
ExpertiseCommentsView, ChangeUserExpertiseView, \
ChangeExpertiseView, ExpertiseCreateView, ExpertiseWorkProgramView, ExpertiseListView, ExpertiseViewById, \
DeleteUserExpertise
from .folders_ans_statistic.views import FoldersListView, WorkProgramInFolderView, DeleteFolderView, \
CreateFolderView, EditFolderView, AddToFolderView, RemoveFromFolderView, DeleteFolderView, WorkProgramStatistic, \
AcademicPlanInFolderView, AddToFolderAcademicPlanView, RemoveFromFolderAcademicPlanView, \
ModuleInFolderView, AddToFolderModuleView, \
RemoveFromFolderModuleView, IndividualImplementationAcademicPlanInFolderView, \
AddToFolderndividualImplementationAcademicPlanView, RemoveFromFolderImplementationAcademicPlanView
from .notifications.views import NotificationListView
from .op_slection.views import CreateProfessionByKeywords
from .profession.views import ProfessionsListApi, ProfessionCreateAPIView, ProfessionDetailsView, ProfessionDestroyView, \
ProfessionUpdateView, ItemWithProfessions, ItemWithRoles, ProfessionsListWithoutPaginationApi
from .profession.views import RolesListApi, RoleCreateAPIView, RoleDetailsView, RoleDestroyView, RoleUpdateView
from .profession.views import SkillsOfProfessionInProfessionList, SkillsOfProfessionInProfessionCreateAPIView, \
SkillsOfProfessionInProfessionUpdateView, SkillsOfProfessionInProfessionDestroyView
from .profession.views import SkillsOfRoleInRoleList, SkillsOfRoleInRoleCreateAPIView, SkillsOfRoleInRoleUpdateView, \
SkillsOfRoleInRoleDestroyView
from .views import AcademicPlanCreateAPIView, AcademicPlanListAPIView, AcademicPlanDetailsView, AcademicPlanDestroyView, \
AcademicPlanUpdateView, ImplementationAcademicPlanAPIView, DisciplineBlockModuleShortListView, \
DisciplineBlockModuleDetailListView, DisciplineBlockModuleDetailListForUserView, DisciplineBlockModuleDetailView, \
DisciplinesByNumber, InsertModule
from .views import BibliographicReferenceListCreateAPIView, BibliographicReferenceDetailsView, \
BibliographicReferenceDestroyView, \
BibliographicReferenceUpdateView, WorkProgramBibliographicReferenceUpdateView, \
BibliographicReferenceInWorkProgramList, EvaluationToolInWorkProgramList, \
FileUploadWorkProgramAPIView, CompetenceCreateView, CompetencesListView, \
FileUploadWorkProgramOutcomesAPIView
from .views import CloneWorkProgramm
from .views import EvaluationToolListAPI, EvaluationToolDetailAPI, DisciplineSectionListAPI, DisciplineSectionDetailAPI, \
TopicsListAPI, TopicDetailAPI, NewOrdinalNumbersForDesciplineSectionAPI
from .views import FieldOfStudyDetailUpdateDeleteView, FieldOfStudyListCreateView
from .views import ImplementationAcademicPlanDetailsView, ImplementationAcademicPlanDestroyView, \
ImplementationAcademicPlanUpdateView, ImplementationAcademicPlanListAPIView
from .views import IndicatorCreateAPIView, IndicatorListAPIView, IndicatorDetailsView, IndicatorDestroyView, \
IndicatorUpdateView
from .views import NewOrdinalNumbersForTopicAPI, TopicCreateAPI
from .views import OutcomesOfWorkProgramDestroyView, OutcomesOfWorkProgramCreateAPIView, OutcomesOfWorkProgramUpdateView
from .views import PrerequisitesOfWorkProgramDestroyView, PrerequisitesOfWorkProgramCreateAPIView, \
PrerequisitesOfWorkProgramUpdateView, PrerequisitesOfWorkProgramList
from .views import CompetenceListView, CompetenceUpdateView, CompetenceIndicatorDetailView, DeleteIndicatorFromCompetenceView, \
AddIndicatorToCompetenceView, OutcomesOfWorkProgramList
from .views import WorkProgramChangeInDisciplineBlockModuleCreateAPIView, \
WorkProgramChangeInDisciplineBlockModuleListAPIView, WorkProgramChangeInDisciplineBlockModuleDetailsView, \
WorkProgramChangeInDisciplineBlockModuleDestroyView, WorkProgramChangeInDisciplineBlockModuleUpdateView, \
DisciplineBlockModuleCreateAPIView, DisciplineBlockModuleDestroyView, DisciplineBlockModuleUpdateView, \
FileUploadAPIView, WorkProgramInFieldOfStudyListView, FieldOfStudiesForWorkProgramList, \
WorkProgramInFieldOfStudyListAPI, WorkProgramInFieldOfStudyDetailAPI, \
ZunListAPI, ZunDetailAPI, OutcomesForWorkProgramChangeBlock, WorkProgramDetailsWithDisciplineCodeView, \
AcademicPlanListShortAPIView, \
NewRealtionsForWorkProgramsInFieldOfStudyAPI, WorkProgramsWithOutcomesToPrerequisitesForThisWPView, \
WorkProgramsWithPrerequisitesToOutocomesForThisWPView, WorkProgramsWithOutocomesForThisWPView
from .views import WorkProgramCreateAPIView, WorkProgramDetailsView, WorkProgramDestroyView, WorkProgramUpdateView
from .views import EvaluationToolListAPI, EvaluationToolDetailAPI, DisciplineSectionListAPI, DisciplineSectionDetailAPI, TopicsListAPI, TopicDetailAPI, NewOrdinalNumbersForDesciplineSectionAPI
from .views import OutcomesOfWorkProgramDestroyView, OutcomesOfWorkProgramCreateAPIView, OutcomesOfWorkProgramUpdateView
from .views import PrerequisitesOfWorkProgramDestroyView, PrerequisitesOfWorkProgramCreateAPIView, PrerequisitesOfWorkProgramUpdateView, PrerequisitesOfWorkProgramList
from .views import FieldOfStudyDetailUpdateDeleteView, FieldOfStudyListCreateView
from .views import NewOrdinalNumbersForTopicAPI, TopicCreateAPI
from .views import BibliographicReferenceListCreateAPIView, BibliographicReferenceDetailsView, BibliographicReferenceDestroyView, \
BibliographicReferenceUpdateView, WorkProgramBibliographicReferenceUpdateView, BibliographicReferenceInWorkProgramList, EvaluationToolInWorkProgramList, \
FileUploadWorkProgramAPIView, CompetenceCreateView, CompetencesListView, FileUploadWorkProgramOutcomesAPIView
from .views import IndicatorCreateAPIView, IndicatorListAPIView, IndicatorDetailsView, IndicatorDestroyView, IndicatorUpdateView
from .views import ImplementationAcademicPlanAPIView, ImplementationAcademicPlanDetailsView, ImplementationAcademicPlanDestroyView, ImplementationAcademicPlanUpdateView, ImplementationAcademicPlanListAPIView
from .views import AcademicPlanCreateAPIView, AcademicPlanListAPIView, AcademicPlanDetailsView, AcademicPlanDestroyView, AcademicPlanUpdateView, ImplementationAcademicPlanAPIView
from .views import WorkProgramChangeInDisciplineBlockModuleCreateAPIView, WorkProgramChangeInDisciplineBlockModuleListAPIView, WorkProgramChangeInDisciplineBlockModuleDetailsView,\
WorkProgramChangeInDisciplineBlockModuleDestroyView, WorkProgramChangeInDisciplineBlockModuleUpdateView, DisciplineBlockModuleCreateAPIView, DisciplineBlockModuleDestroyView, DisciplineBlockModuleUpdateView,\
FileUploadAPIView, WorkProgramInFieldOfStudyListView, FieldOfStudiesForWorkProgramList, WorkProgramInFieldOfStudyListAPI, WorkProgramInFieldOfStudyDetailAPI, \
ZunListAPI, ZunDetailAPI, OutcomesForWorkProgramChangeBlock, WorkProgramDetailsWithDisciplineCodeView, AcademicPlanListShortAPIView, \
NewRealtionsForWorkProgramsInFieldOfStudyAPI, WorkProgramsWithOutcomesToPrerequisitesForThisWPView, WorkProgramsWithPrerequisitesToOutocomesForThisWPView, WorkProgramsWithOutocomesForThisWPView
from .files_export.views import DocxFileExportView, SyllabusExportView
from .views import CloneWorkProgramm
from .views import WorkProgramsListApi, UserGroups
from .views import СertificationEvaluationToolListAPI, СertificationEvaluationToolDetailAPI
from .views import WorkProgramFullDetailsWithDisciplineCodeView, ZunManyViewSet, WorkProgramInFieldOfStudyForWorkProgramList
# DocxFileExportOldView
from .workprogram_additions.views import CopyContentOfWorkProgram
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'api/zun/many_create',
                ZunManyViewSet, basename='zun_many_create')

urlpatterns = [
    # Legacy application URLs have been removed.
    # API implementation block
    path('api/workprograms/', WorkProgramsListApi.as_view()),
    # Competences and indicators
    path('api/competences', CompetencesListView.as_view(), name='comptence'),
    path('api/competence/create', CompetenceCreateView.as_view(), name='comptence'),
    path('api/competence/', CompetenceListView.as_view(), name='comptence'),
    path('api/competence/<int:pk>', CompetenceUpdateView.as_view(), name='comptence_update'),
    path('api/competenceindicator/<int:pk>', CompetenceIndicatorDetailView.as_view(), name='comptenceindicator'),
    path('api/competenceindicator/indicator/delete', DeleteIndicatorFromCompetenceView.as_view(),
         name='DeleteIndicatorFromCompetenceView'),
    path('api/competenceindicator/indicator/add', AddIndicatorToCompetenceView.as_view(),
         name="AddIndicatorFromCompetenceView"),
    path('api/indicator', IndicatorListAPIView.as_view()),
    path('api/indicator/create', IndicatorCreateAPIView.as_view()),
    path('api/indicator/detail/<int:pk>', IndicatorDetailsView.as_view()),
    path('api/indicator/delete/<int:pk>', IndicatorDestroyView.as_view()),
    path('api/indicator/update/<int:pk>', IndicatorUpdateView.as_view()),
    # NOTE: the duplicate 'api/outcomesofworkprogram/<int:workprogram_id>'
    # registration was removed here; the route lives in the outcomes
    # section below and resolves to the same view.
    # Work program
    path('api/workprogram/create', WorkProgramCreateAPIView.as_view()),
    url(r'^api/workprogram/outcomes/prerequisites/relations/(?P<discipline_code>[0-9.]+)/$',
        WorkProgramsWithOutcomesToPrerequisitesForThisWPView.as_view()),
    url(r'^api/workprogram/prerequisites/outcomes/relations/(?P<discipline_code>[0-9.]+)/$',
        WorkProgramsWithPrerequisitesToOutocomesForThisWPView.as_view()),
    url(r'^api/workprogram/outcomes/relations/(?P<discipline_code>[0-9.]+)/$',
        WorkProgramsWithOutocomesForThisWPView.as_view()),
    path('api/workprogram/detail/<int:pk>', WorkProgramDetailsView.as_view()),
    path('api/workprogram/delete/<int:pk>', WorkProgramDestroyView.as_view()),
    path('api/workprogram/update/<int:pk>', WorkProgramUpdateView.as_view()),
    path('api/workprogram/br/update/<int:pk>', WorkProgramBibliographicReferenceUpdateView.as_view()),
    path('api/workprogram/clone', CloneWorkProgramm),
    path('api/workprogram/merge_content', CopyContentOfWorkProgram),
    path('api/workprogramsinfieldofstudy', WorkProgramInFieldOfStudyListView.as_view()),
    path('api/workprogram/change_relations', NewRealtionsForWorkProgramsInFieldOfStudyAPI),
    url(r'^api/workprogram/itemrelations/(?P<discipline_code>[0-9.]+)/$',
        WorkProgramDetailsWithDisciplineCodeView.as_view()),
    url(r'^api/workprogram/fullitemrelations/(?P<discipline_code>[0-9.]+)/$',
        WorkProgramFullDetailsWithDisciplineCodeView.as_view()),
    path('api/workprogram/getbynumbers', DisciplinesByNumber),
    path('api/workprogram/fieldofstudies/<int:workprogram_id>', FieldOfStudiesForWorkProgramList.as_view()),
    path('api/workprogram/fieldofstudies_for_competences/<int:workprogram_id>', WorkProgramInFieldOfStudyForWorkProgramList.as_view()),
    path('api/workprograminfieldofstudy/', WorkProgramInFieldOfStudyListAPI.as_view()),
    path('api/workprograminfieldofstudy/<int:pk>', WorkProgramInFieldOfStudyDetailAPI.as_view()),
    path('api/zun/', ZunListAPI.as_view()),
    path('api/zun/delete/competence/<int:competences_id>/wp_in_fs/<int:wp_in_fs_id>', ZunDetailAPI.as_view()),
    # Topics and sections
    path('api/tools/', EvaluationToolListAPI.as_view(), name='tools'),
    path('api/tools/<int:pk>', EvaluationToolDetailAPI.as_view(), name='tool_detail'),
    path('api/toolsinworkprogram/<int:workprogram_id>', EvaluationToolInWorkProgramList.as_view()),
    path('api/sections/', DisciplineSectionListAPI.as_view(), name='sections'),
    path('api/sections/<int:pk>', DisciplineSectionDetailAPI.as_view(), name='section_detail'),
    path('api/sections/NewOrdinalNumbers', NewOrdinalNumbersForDesciplineSectionAPI),
    path('api/topics/', TopicsListAPI.as_view(), name='topics'),
    path('api/topics/create', TopicCreateAPI.as_view()),
    path('api/topics/<int:pk>', TopicDetailAPI.as_view(), name='topic_detail'),
    path('api/topics/NewOrdinalNumbers', NewOrdinalNumbersForTopicAPI),
    # Work program outcomes
    path('api/outcomesofworkprogram/<int:workprogram_id>', OutcomesOfWorkProgramList.as_view()),
    path('api/outcomesofworkprogram/create', OutcomesOfWorkProgramCreateAPIView.as_view()),
    path('api/outcomesofworkprogram/delete/<int:pk>', OutcomesOfWorkProgramDestroyView.as_view()),
    path('api/outcomesofworkprogram/update/<int:pk>', OutcomesOfWorkProgramUpdateView.as_view()),
    path('api/outcomesofworkprogramforacademycplan/<int:workprogram_id>', OutcomesForWorkProgramChangeBlock.as_view()),
    # Work program prerequisites
    path('api/prerequisitesofworkprogram/<int:workprogram_id>', PrerequisitesOfWorkProgramList.as_view()),
    path('api/prerequisitesofworkprogram/create', PrerequisitesOfWorkProgramCreateAPIView.as_view()),
    path('api/prerequisitesofworkprogram/delete/<int:pk>', PrerequisitesOfWorkProgramDestroyView.as_view()),
    path('api/prerequisitesofworkprogram/update/<int:pk>', PrerequisitesOfWorkProgramUpdateView.as_view()),
    # Fields of study
    path('api/fieldofstudy/', FieldOfStudyListCreateView.as_view()),
    path('api/fieldofstudy/<int:pk>', FieldOfStudyDetailUpdateDeleteView.as_view()),
    # Bibliographic references
    path('api/BibliographicReference', BibliographicReferenceListCreateAPIView.as_view()),
    path('api/BibliographicReference/create', BibliographicReferenceListCreateAPIView.as_view()),
    path('api/BibliographicReference/detail/<int:pk>', BibliographicReferenceDetailsView.as_view()),
    path('api/BibliographicReference/delete/<int:pk>', BibliographicReferenceDestroyView.as_view()),
    path('api/BibliographicReference/update/<int:pk>', BibliographicReferenceUpdateView.as_view()),
    path('api/bibliographicreferenceinworkprogram/<int:workprogram_id>',
         BibliographicReferenceInWorkProgramList.as_view()),
    # File upload / export
    path('api/upload/wp', FileUploadWorkProgramAPIView.as_view()),
    path('api/upload/wpwithoutcomes', FileUploadWorkProgramOutcomesAPIView.as_view()),
    path('api/upload/csv', FileUploadAPIView.as_view()),
    path('api/export/docx', DocxFileExportView.as_view()),
    path('api/export/docx/<int:pk>/<int:fs_id>/<int:ap_id>/<int:year>', DocxFileExportView.as_view()),
    path('api/export/syllabus/<int:pk>/<int:fs_id>/<int:ap_id>/<int:year>', SyllabusExportView.as_view()),
    # Academic plans
    path('api/academicplan', AcademicPlanListAPIView.as_view()),
    path('api/academicplan/short', AcademicPlanListShortAPIView.as_view()),
    path('api/academicplan/create', AcademicPlanCreateAPIView.as_view()),
    path('api/academicplan/detail/<int:pk>', AcademicPlanDetailsView.as_view()),
    path('api/academicplan/delete/<int:pk>', AcademicPlanDestroyView.as_view()),
    path('api/academicplan/update/<int:pk>', AcademicPlanUpdateView.as_view()),
    # Academic plans and fields of study (implementations)
    path('api/implementationacademicplan', ImplementationAcademicPlanListAPIView.as_view()),
    path('api/implementationacademicplan/create', ImplementationAcademicPlanAPIView.as_view()),
    path('api/implementationacademicplan/detail/<int:pk>', ImplementationAcademicPlanDetailsView.as_view()),
    path('api/implementationacademicplan/delete/<int:pk>', ImplementationAcademicPlanDestroyView.as_view()),
    path('api/implementationacademicplan/update/<int:pk>', ImplementationAcademicPlanUpdateView.as_view()),
    path('api/academicplan/implemention', ImplementationAcademicPlanAPIView.as_view()),
    # Work programs inside academic plans
    path('api/workprogramchangeindisciplineblockmodule', WorkProgramChangeInDisciplineBlockModuleListAPIView.as_view()),
    path('api/workprogramchangeindisciplineblockmodule/create',
         WorkProgramChangeInDisciplineBlockModuleCreateAPIView.as_view()),
    path('api/workprogramchangeindisciplineblockmodule/detail/<int:pk>',
         WorkProgramChangeInDisciplineBlockModuleDetailsView.as_view()),
    path('api/workprogramchangeindisciplineblockmodule/delete/<int:pk>',
         WorkProgramChangeInDisciplineBlockModuleDestroyView.as_view()),
    path('api/workprogramchangeindisciplineblockmodule/update/<int:pk>',
         WorkProgramChangeInDisciplineBlockModuleUpdateView.as_view()),
    # Discipline block modules inside academic plans
    path('api/disciplineblockmodule/create', DisciplineBlockModuleCreateAPIView.as_view()),
    path('api/disciplineblockmodule/delete/<int:pk>', DisciplineBlockModuleDestroyView.as_view()),
    path('api/disciplineblockmodule/update/<int:pk>', DisciplineBlockModuleUpdateView.as_view()),
    path('api/disciplineblockmodule/short', DisciplineBlockModuleShortListView.as_view()),
    path('api/disciplineblockmodule/detail/list', DisciplineBlockModuleDetailListView.as_view()),
    path('api/disciplineblockmodule/detail/list/for_this_user', DisciplineBlockModuleDetailListForUserView.as_view()),
    path('api/disciplineblockmodule/detail/<int:pk>', DisciplineBlockModuleDetailView.as_view()),
    path('api/disciplineblockmodule/insert', InsertModule),
    # Departments
    path('api/Department', DepartmentListAPIView.as_view()),
    path('api/Department/create', DepartmentCreateAPIView.as_view()),
    path('api/Department/detail/<int:pk>', DepartmentDetailsView.as_view()),
    path('api/Department/delete/<int:pk>', DepartmentDestroyView.as_view()),
    path('api/Department/update/<int:pk>', DepartmentUpdateView.as_view()),
    # Expert review (expertise)
    path('api/expertise/user', UserExpertiseListView.as_view()),
    path('api/expertise/user_with_expertise/<int:pk>', UserExpertiseListView.as_view()),
    path('api/expertise/user/create', UserExpertiseCreateView.as_view()),
    path('api/expertise/user/delete/<int:pk>', DeleteUserExpertise.as_view()),
    path('api/expertise/comments/<int:pk>', ExpertiseCommentsView.as_view()),
    path('api/expertise/comments/create', ExpertiseCommentCreateView.as_view()),
    path('api/expertise/create', ExpertiseCreateView.as_view()),
    path('api/expertise', ExpertiseListView.as_view()),
    path('api/expertise/work_program/<int:pk>', ExpertiseWorkProgramView.as_view()),
    path('api/expertise/<int:pk>', ExpertiseViewById.as_view()),
    path('api/expertise/user/update/<int:pk>', ChangeUserExpertiseView.as_view()),
    path('api/expertise/update/<int:pk>', ChangeExpertiseView.as_view()),
    # Professions
    path('api/professions/', ProfessionsListApi.as_view()),
    path('api/professions/without_pagination', ProfessionsListWithoutPaginationApi.as_view()),
    path('api/profession/create', ProfessionCreateAPIView.as_view()),
    path('api/profession/detail/<int:pk>', ProfessionDetailsView.as_view()),
    path('api/profession/delete/<int:pk>', ProfessionDestroyView.as_view()),
    path('api/profession/update/<int:pk>', ProfessionUpdateView.as_view()),
    path('api/profession/create/bykeywords', CreateProfessionByKeywords),
    # Profession skills
    path('api/skillsofprofessioninprofession/<int:profession_id>', SkillsOfProfessionInProfessionList.as_view()),
    path('api/skillsofprofessioninprofession/create', SkillsOfProfessionInProfessionCreateAPIView.as_view()),
    path('api/skillsofprofessioninprofession/delete/<int:pk>', SkillsOfProfessionInProfessionDestroyView.as_view()),
    path('api/skillsofprofessioninprofession/update/<int:pk>', SkillsOfProfessionInProfessionUpdateView.as_view()),
    path('api/skillsofprofessioningroups', ItemWithProfessions.as_view()),
    # Roles
    path('api/roles/', RolesListApi.as_view()),
    path('api/role/create', RoleCreateAPIView.as_view()),
    path('api/role/detail/<int:pk>', RoleDetailsView.as_view()),
    path('api/role/delete/<int:pk>', RoleDestroyView.as_view()),
    path('api/role/update/<int:pk>', RoleUpdateView.as_view()),
    # Role skills
    path('api/skillsofroleinrole/<int:role_id>', SkillsOfRoleInRoleList.as_view()),
    path('api/skillsofroleinrole/create', SkillsOfRoleInRoleCreateAPIView.as_view()),
    path('api/skillsofroleinrole/delete/<int:pk>', SkillsOfRoleInRoleDestroyView.as_view()),
    path('api/skillsofroleinrole/update/<int:pk>', SkillsOfRoleInRoleUpdateView.as_view()),
    path('api/skillsofroleingroups', ItemWithRoles.as_view()),
    # Current user information
    path('api/user/groups', UserGroups),
    # Folders and ratings
    path('api/folders', FoldersListView.as_view()),
    path('api/folders/create', CreateFolderView.as_view()),
    path('api/folders/edit/<int:pk>', EditFolderView.as_view()),
    path('api/folders/delete/<int:pk>', DeleteFolderView.as_view()),
    path('api/folders/work_program/content/<int:pk>', WorkProgramInFolderView.as_view()),
    path('api/folders/work_program/add', AddToFolderView.as_view()),
    path('api/folders/work_program/remove/<int:pk>', RemoveFromFolderView.as_view()),
    # --Folders for academic plans
    path('api/folders/academic_plan/content/<int:pk>', AcademicPlanInFolderView.as_view()),
    path('api/folders/academic_plan/add', AddToFolderAcademicPlanView.as_view()),
    path('api/folders/academic_plan/remove/<int:pk>', RemoveFromFolderAcademicPlanView.as_view()),
    # --Folders for modules
    path('api/folders/block_module/content/<int:pk>', ModuleInFolderView.as_view()),
    path('api/folders/block_module/add', AddToFolderModuleView.as_view()),
    path('api/folders/block_module/remove/<int:pk>', RemoveFromFolderModuleView.as_view()),
    # --Folders for individual trajectories
    path('api/folders/individual_path/content/<int:pk>', IndividualImplementationAcademicPlanInFolderView.as_view()),
    path('api/folders/individual_path/add', AddToFolderndividualImplementationAcademicPlanView.as_view()),
    path('api/folders/individual_path/remove/<int:pk>', RemoveFromFolderImplementationAcademicPlanView.as_view()),
    # --Miscellaneous
    path('api/workprogram/statistic/<int:pk>', WorkProgramStatistic),
    path('api/folders/real_remove/<int:pk>', DeleteFolderView.as_view()),
    # Certification evaluation tools
    # NOTE(review): the leading 'С' in these view names is the CYRILLIC
    # capital Es (U+0421), matching the names exported by .views.
    path('api/certification_tools/', СertificationEvaluationToolListAPI.as_view()),
    path('api/certification_tools/<int:pk>', СertificationEvaluationToolDetailAPI.as_view()),
    # Sub-application URL includes.
    # BUGFIX: 'workprogramsapp.educational_program.urls' was included twice.
    url(r'^', include('workprogramsapp.educational_program.urls')),
    url(r'^', include('workprogramsapp.workprogram_additions.urls')),
    url(r'^', include('workprogramsapp.bars_merge.urls')),
    url(r'^', include('workprogramsapp.individualization.urls')),
    url(r'^', include('workprogramsapp.isu_merge.urls')),
    url(r'^', include('workprogramsapp.statistic.urls')),
    url(r'^', include('workprogramsapp.notifications.urls')),
    url(r'^', include('workprogramsapp.feedback.urls')),
    url(r'^', include('workprogramsapp.user_management.urls')),
    url(r'^', include(router.urls)),
]
|
import numpy as np
from matplotlib import pyplot as plt

## Parameters
# Generate user locations and time-domain channels (H_G, H_r, Hd) for a
# BS-IRS-user setup and persist them under ./Data/.
# Global parameters (Num_User, Num_Data, Num_Ant_BS, Num_Ant_IRS, ...)
# come from Global_paras.
from Global_paras import *

# Distance between BS and the center of the user area /m
Lroom = 200
Wroom = 30
User_position = np.zeros((Num_User, 2))
k1 = np.array([1.0, 0])  # unit vector along x
k2 = np.array([0, 1.0])  # unit vector along y
# Radius of the user area /m
# NOTE(review): R is never applied below -- the random radius `r` is drawn
# from [0, 1) instead of [0, R). Confirm whether `r` should be scaled by R.
R = 10

##
# Generate the user coordinates: random polar offset around the area center.
for index in range(Num_User):
    r = np.random.random_sample()
    theta = np.random.random_sample() * 2 * np.pi  # np.math.pi is deprecated
    px = r * np.cos(theta)
    py = r * np.sin(theta)
    pt = np.array([Lroom, Wroom]) + px * k1 + py * k2
    User_position[index, :] = pt
np.save('./Data/Location/User_position.npy', User_position)
### End of the coordinate generation

## Generate path loss
# BS and IRS locations
BS_positon = np.array([0.0, 0.0])
IRS_positon = np.array([200.0, 0.0])
# Distance between the BS and the IRS
BS_IRS_Dis = np.sqrt(np.sum(np.power(np.abs(BS_positon - IRS_positon), 2)))
# Loss of the BS-to-IRS path (log-distance model) /dB
BS_IRS_Loss = 35.6 + 22 * np.log10(BS_IRS_Dis)
# Loss of the IRS-to-user paths /dB
IRS_User_Loss = np.zeros((Num_User, 1))
for index in range(Num_User):
    temp = User_position[index, :] - IRS_positon
    dis_UI = np.sqrt(np.sum(np.power(np.abs(temp), 2)))
    IRS_User_Loss[index, :] = 35.6 + 22 * np.log10(dis_UI)
##
IRS_User_Loss = IRS_User_Loss + BS_IRS_Loss  # total loss of the BS-IRS-user link
# Direct-link (BS-to-user) loss /dB
BS_User_Loss = np.zeros((Num_User, 1))
for index in range(Num_User):
    temp = User_position[index, :] - BS_positon
    dis_BU = np.sqrt(np.sum(np.power(np.abs(temp), 2)))
    BS_User_Loss[index, :] = 35.6 + 22 * np.log10(dis_BU)
BS_User_Loss = BS_User_Loss + BS_IRS_Loss
IRS_User_Loss = IRS_User_Loss - BS_User_Loss

## Generate the channel (Rician fading)
# Noise power over a 180 kHz band /dBm
Noise = float(-170 + 10 * np.log10(180 * 1000))
# Direct-path scaling, shape (Num_User, 1)
BS_User_Noise = 10 ** ((-(BS_User_Loss + Noise)) / 10)
# IRS-path scaling
IRS_User_Noise = 10 ** ((-Noise - IRS_User_Loss) / 10)
pd = np.sqrt(BS_User_Noise)
pd = np.tile(pd, [1, Num_Ant_BS])   # (Num_User, Num_Ant_BS)
ps = np.sqrt(IRS_User_Noise)
ps = np.tile(ps, [1, Num_Ant_IRS])  # (Num_User, Num_Ant_IRS)

# Initial IRS phases theta_init and direct channel Hd.
# BUGFIX: both arrays must be complex; the original real-dtype arrays
# silently discarded the imaginary part on assignment.
Hd_w = np.zeros((Num_Data, Num_User, Num_Ant_BS), dtype=complex)
theta_init = np.zeros((Num_Data, Num_Ant_IRS, 1), dtype=complex)
for index in range(Num_Data):
    # BUGFIX: scale BOTH real and imaginary parts by sqrt(1/2)
    # (matches the G_sig / Hr_sig construction below; previously only
    # the real part was scaled).
    Hd_temp = np.sqrt(1 / 2) * (np.random.randn(Num_User, Num_Ant_BS)
                                + 1j * np.random.randn(Num_User, Num_Ant_BS))
    Hd_w[index, :, :] = Hd_temp * pd
    # BUGFIX: unit-modulus phases are exp(j * 2*pi*U); the 1j factor was missing.
    theta_temp = np.exp(1j * np.random.rand(Num_Ant_IRS, 1) * 2 * np.pi)
    theta_init[index, :, :] = theta_temp

# Rician factor split: eb1 weights the LoS part, eb2 the scattered part.
eb = 10
eb2 = 1 / (1 + eb)
eb1 = np.sqrt(1 - eb2)
eb2 = np.sqrt(eb2)
AoD_BS = 0.45
AoA_IRS = 0.45
AoD_IRS = np.random.random((Num_User, 1))
# BUGFIX: np.complex was removed from NumPy; use the builtin complex dtype.
G_sig = np.zeros((Num_Data, Num_Ant_IRS, Num_Ant_BS), complex)
Hr_sig = np.zeros((Num_Data, Num_User, Num_Ant_IRS), complex)
for index in range(Num_Data):
    G_sig[index, :, :] = np.sqrt(0.5) * (np.random.randn(
        Num_Ant_IRS, Num_Ant_BS) + 1j * np.random.randn(Num_Ant_IRS, Num_Ant_BS))
    Hr_sig[index, :, :] = np.sqrt(0.5) * (np.random.randn(
        Num_User, Num_Ant_IRS) + 1j * np.random.randn(Num_User, Num_Ant_IRS))
print('end')
### ULA
def channelresponse_ULA(azimuth_angle, NumofAntanna):
    """Return the normalized ULA steering vector.

    Element m is exp(j*pi*m*sin(azimuth_angle)); the vector is divided
    by sqrt(N) so it has unit norm.

    azimuth_angle: azimuth angle in radians.
    NumofAntanna: number of antennas N.
    """
    num = int(NumofAntanna)
    steering = np.zeros(num, dtype=complex)
    # BUGFIX: a ULA has one element per antenna. The original looped over
    # range(int(sqrt(N))) (copied from the UPA routine), leaving the
    # remaining entries of the steering vector at zero.
    for index_m in range(num):
        steering[index_m] = np.exp(1j * np.pi *
                                   (index_m * np.sin(azimuth_angle)))
    steering = steering / (np.sqrt(NumofAntanna))
    return steering
### UPA
def channelresponse_UPA(azimuth_angle, elevater_angel, NumofAntanna):
    """Return the normalized steering vector of a square UPA.

    The array is assumed to be sqrt(N) x sqrt(N); element (m, n) is
    exp(j*pi*(m*sin(az)*sin(el) + n*cos(el))), flattened row-major and
    divided by sqrt(N).

    azimuth_angle: azimuth angle in radians.
    elevater_angel: elevation angle in radians.
    NumofAntanna: total number of antennas N (a perfect square).
    """
    side = int(np.sqrt(NumofAntanna))
    steering = np.zeros(int(NumofAntanna), dtype=complex)
    for index_m in range(side):
        for index_n in range(side):
            # BUGFIX: flatten with index_m * side + index_n. The original
            # used index_m + index_n, so distinct (m, n) pairs collided on
            # the same slot and most of the vector stayed zero.
            steering[index_m * side + index_n] = np.exp(1j * np.pi *
                (index_m * np.sin(azimuth_angle) *
                 np.sin(elevater_angel) +
                 index_n * np.cos(elevater_angel)))
    steering = steering / (np.sqrt(NumofAntanna))
    return steering
# Mix the LoS steering-vector components with the scattered (sig) parts
# generated above, then persist all channels.
At_G = channelresponse_ULA(AoD_BS,Num_Ant_BS) # shape is (Num_Ant_BS,)
Ar_IRS = channelresponse_ULA(AoA_IRS,Num_Ant_IRS)
# Outer product Ar_IRS * At_G^T -> LoS BS-to-IRS channel, (Num_Ant_IRS, Num_Ant_BS)
H_G = np.matmul(np.expand_dims(Ar_IRS,-1),np.expand_dims(At_G,0))
# Rician mix: broadcasts the LoS part over the Num_Data axis of G_sig.
H_G = eb1*H_G + eb2*G_sig
# Per-user IRS departure steering vectors.
At_IRS = np.zeros((Num_User,Num_Ant_IRS),dtype = complex)
for index in range(Num_User):
    At_IRS[index,:] = channelresponse_ULA(AoD_IRS[index],Num_Ant_IRS)
H_r = eb1*At_IRS + eb2*Hr_sig # shape is (Num_Data,Num_User,Num_Ant_IRS)
print('end')
## up-link Phi initialization (UPA steering vector at zero elevation)
Phi_up_init = channelresponse_UPA(AoD_BS,0,Num_Ant_IRS)
np.save('./Data/Channel/Channel_G.npy', H_G)
np.save('./Data/Channel/Channel_Hr.npy',H_r)
np.save('./Data/Channel/Channel_Hd.npy',Hd_w)
np.save('./Data/Channel/Phase_uplink_init.npy',Phi_up_init)
print('shape check',H_G.shape,H_r.shape,Hd_w.shape,Phi_up_init.shape)
|
from django.conf.urls import url
from . import views
# URL routes for this app: the index page plus the gold_here/reset actions.
urlpatterns = [
    url(r'^$', views.index),            # landing page
    url(r'^gold_here$', views.gold_here),
    url(r'^reset$', views.reset),
]
# -*- coding: utf-8 -*-
"""
处理数据的导出
[初始化数据分片]
1 启动事务
2 创建临时表,只有主表的主键
3 记录相关表的 modify_time
4 分批导出 主表的主键, 按page_size 分好,持久化
5 放弃事务
[导出数据]
1 启动只读事务
2 创建临时表
3 关联主表与临时表
4 关联从表与临时表
5 保存数据,(标记为完成)
6 放弃事务
"""
class DBSyncTaskBase(object):
    """Base class for database synchronisation sub-tasks.

    A sync run is broken into many small tasks that a scheduler executes
    in batches.  The execution context consists of a database connection
    plus the data-persistence layer.
    """
    def __init__(self, tbl_mgr, rel_mgr, conn):
        # Keep the collaborators that concrete tasks will need.
        self._conn = conn
        self._rel_mgr = rel_mgr
        self._table_mgr = tbl_mgr
class DBSyncTaskInitDataBatch(DBSyncTaskBase):
    """
    Performs the [initialize data partitions] step.
    """
    def process(self, storage):
        """
        storage: the interface responsible for persistence
        1 read the primary keys of the main table
        """
        # NOTE: Python 2 print statements below; debugging output only.
        main_tbl, sub_main_tbls = self._rel_mgr.get_main_tables()
        print self._rel_mgr.get_main_table_primary()
        print storage
class DBSyncTaskSyncDataPackage(DBSyncTaskBase):
    """Fills in the data for one specific data partition."""
class DBSyncTaskSyncDictData(DBSyncTaskBase):
    """Synchronises dictionary-table data and builds keyword indexes."""

    def process(self, storage):
        """Planned steps (not implemented yet):

        1. Read every dictionary table in the system.
           TODO: allow specifying concrete dictionary tables.
        2. Dynamically load the matching Schema definition and read the
           table-structure information from it.
        3. ...
        """
        pass
# end of file |
# Minimal Python 2 Tkinter demo: one window containing a single label.
import Tkinter
from Tkinter import *
root = Tk()  # create the top-level application window
root.title('A Tk Application')
# No parent widget is passed, so the label attaches to the default root.
Label(text='I am a label').pack(pady=15)
root.mainloop()  # enter the Tk event loop; blocks until the window closes
|
from typing import List
from torch.nn import Parameter
import torch
from torch import nn
class AntisymmetricRNNCell(torch.jit.ScriptModule):
    """One Euler step of an antisymmetric RNN:
    h <- h + eps * tanh((W - W^T - gamma*I) h + Vh x + b).
    """
    def __init__(self, input_dim, hidden_size, eps, gamma, init_W_std=1, bias = True):
        # input_dim: size of the input x; hidden_size: size of the state h.
        # eps: Euler-integration step size; gamma: diffusion strength.
        super(AntisymmetricRNNCell, self).__init__()
        #init Vh: input-to-hidden weights, entries ~ Normal(0, scale=1/input_dim)
        normal_sampler_V = torch.distributions.Normal(torch.Tensor([0]), torch.Tensor([1/input_dim]))
        self.Vh_weight = nn.Parameter(normal_sampler_V.sample((hidden_size, input_dim))[..., 0])
        # NOTE(review): `bias` is stored but never used in forward();
        # both bias vectors below are always created.
        self.bias = bias
        #init W: hidden-to-hidden weights, entries ~ Normal(0, scale=init_W_std/hidden_size)
        normal_sampler_W = torch.distributions.Normal(torch.Tensor([0]), torch.Tensor([init_W_std/hidden_size]))
        self.W = nn.Parameter(normal_sampler_W.sample((hidden_size, hidden_size))[..., 0])
        #init biases (input-side and hidden-side, both added in forward)
        self.Vh_b_i = nn.Parameter(torch.zeros(hidden_size))
        self.Vh_b_h = nn.Parameter(torch.zeros(hidden_size))
        #init diffusion: gamma*I and eps are fixed (non-trainable) parameters
        self.gamma_I = nn.Parameter(torch.eye(hidden_size, hidden_size)*gamma, requires_grad=False)
        self.eps = nn.Parameter(torch.Tensor([eps]), requires_grad=False)
    @torch.jit.script_method
    def forward(self, x, h):
        # (W - WT - gammaI)h  -- the antisymmetric recurrent term
        WmWT_h = torch.matmul(h, (self.W - self.W.transpose(1, 0) - self.gamma_I)).squeeze()
        # Vhx + bh  -- input projection plus both bias vectors
        Vh_x = torch.matmul(self.Vh_weight, x.t()).t() + self.Vh_b_i + self.Vh_b_h
        # (W - WT - gammaI)h + Vhx + bh
        linear_transform = WmWT_h + Vh_x
        # tanh((W - WT - gammaI)h + Vhx + bh)
        f = torch.tanh(linear_transform)
        #eq. 12: explicit Euler update of the hidden state
        h = h + self.eps*f
        return h
class AntisymmetricRNN(torch.jit.ScriptModule):
    """Antisymmetric RNN: unrolls an AntisymmetricRNNCell over a sequence.

    forward(x, h) returns (out, h) where `out` stacks the (squeezed)
    hidden state at every time step and `h` is the final state.  With
    batch_first=True the time axis is x.shape[1], otherwise x.shape[0].
    """
    def __init__(self, input_size, hidden_size=32, eps=0.01, gamma=0.01, use_gating=False, init_W_std=1,
                 is_cuda=True, batch_first = True, num_layers = 1, bias = True):
        # NOTE(review): is_cuda and num_layers are accepted but unused here.
        super(AntisymmetricRNN, self).__init__()
        if use_gating:
            # BUGFIX: was `n_units` (an undefined name); pass hidden_size.
            # NOTE(review): AntisymmetricGatingRNNCell must be defined
            # elsewhere in the project; it is not defined in this file.
            self.cell = AntisymmetricGatingRNNCell(input_size, hidden_size, eps, gamma, init_W_std)
        else:
            self.cell = AntisymmetricRNNCell(input_size, hidden_size, eps, gamma, init_W_std)
        # Expose the cell parameters under conventional attribute names.
        # NOTE(review): this relies on the cell's parameter registration order.
        self._all_weights = [['weight_h', 'weight_x', 'bias_h', 'bias_x', 'gamma_I', 'eps']]
        for name, param in zip(self._all_weights[0], self.parameters()):
            setattr(self, name, param)
        self.hidden_size = hidden_size
        self.bias = bias
        self.batch_first = batch_first

    @torch.jit.script_method
    def forward(self, x, h):
        # Unbind the time dimension into a list of per-step inputs.
        if self.batch_first:
            x_ = x.transpose(0, 1).unbind(0)
        else:
            x_ = x.unbind(0)
        # BUGFIX: `Tensor` was never imported; qualify it as torch.Tensor
        # so the TorchScript annotation resolves.
        outputs = torch.jit.annotate(List[torch.Tensor], [])
        for t in range(len(x_)):
            h = self.cell(x_[t], h)
            outputs += [h.squeeze()]
        out = torch.stack(outputs)  # (time, ...) -- restore batch_first below
        if self.batch_first:
            out = out.transpose(0, 1)
        return out, h

    @property
    def all_weights(self) -> List[List[Parameter]]:
        # BUGFIX: the annotation was List[Parameter], but a nested list
        # (one inner list per weight group) is returned.
        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
|
import pygame
from pygame import *
from pygame.locals import QUIT, KEYDOWN, MOUSEMOTION
import time
from random import choice, randint
import numpy
class Wall(object):
    """An impassable map tile; `is_blockable` marks it as blocking movement."""
    is_blockable = True

    def __init__(self):
        pass
# def generate_rectangles(player, minRec, maxRec, grid, gridx, gridy, maxwidth, maxheight):
# total_rec = randint(minRec, maxRec)
# for i in range(total_rec):
# if i == 0: #make sure a 3x3 is always around the player.
# clearRectangle(grid, gridx, gridy, player.xpos-1, player.ypos+2, 3, 3)
# width = randint(int(maxwidth/float(2)), maxwidth)
# height = randint(int(maxheight/float(2)), maxheight)
# randx = randint(1,gridx-1)
# randy = randint(1, gridy-1) #random coordinate
# clearRectangle(grid, gridx, gridy, randx, randy, width, height)
class Player(object):
    """The player avatar: grid position, last two positions, and key state."""

    def __init__(self, xpos, ypos, hasKey=False):
        self.xpos = xpos
        self.ypos = ypos
        # history = (prev_x, prev_y, prev_prev_x, prev_prev_y); seeded one
        # cell off the start so the first repaint always has something to erase.
        seed = (xpos + 1, ypos + 1)
        self.history = seed + seed
        self.hasKey = hasKey
class Monster(object):
    """A monster: grid position, movement history, and preferred axis."""

    def __init__(self, XorY=0, xposition=60, yposition=60):
        self.xpos = xposition
        self.ypos = yposition
        # history = (prev_x, prev_y, prev_prev_x, prev_prev_y), seeded
        # one cell off the spawn point.
        seed = (xposition + 1, yposition + 1)
        self.history = seed + seed
        # 0 -> try horizontal moves first; otherwise vertical first.
        self.XorY = XorY
class MonsterPack(object):
    """Moves a group of monsters toward the player on a shared grid.

    Cell conventions (inferred from use): 0 = walkable, 1 = wall,
    2 = a cell the pack has marked while stepping around a corner.
    """
    def __init__(self,player,grid,monsters): #monsters is list of monsters
        self.player = player
        self.grid = grid
        self.monsters = monsters
        # Cached (x, y) of every monster; refreshed at the end of move().
        self.coordinates = [(monster.xpos,monster.ypos) for monster in self.monsters]
    def move(self, grid, speed=1):
        """Advance every monster one step toward the player.

        A monster prefers its own axis first (XorY == 0 -> x before y,
        otherwise y before x) and only steps into cells whose value is 0.
        If it could not move at all, it tries a corner work-around and
        marks its previous cell with 2.
        NOTE(review): the `grid` parameter is ignored; self.grid is used.
        """
        for monster in self.monsters:
            # Shift history: (current x, current y, previous x, previous y).
            monster.history = (monster.xpos, monster.ypos, monster.history[0], monster.history[1])
            change_in_position = 0
            if monster.XorY==0:
                # Horizontal-first chase.
                if monster.xpos > self.player.xpos and self.grid[monster.xpos-1,monster.ypos] == 0:
                    monster.xpos -= speed
                    change_in_position = abs(monster.history[0] - monster.history[2])
                # return
                elif monster.xpos < self.player.xpos and self.grid[monster.xpos+1,monster.ypos] == 0:
                    monster.xpos += speed
                    change_in_position = abs(monster.history[0] - monster.history[2])
                # return
                elif monster.ypos > self.player.ypos and self.grid[monster.xpos,monster.ypos-1] == 0:
                    monster.ypos -= speed
                    change_in_position = abs(monster.history[1] - monster.history[3])
                # return
                elif monster.ypos <self.player.ypos and self.grid[monster.xpos,monster.ypos+1] == 0:
                    monster.ypos += speed
                    change_in_position = abs(monster.history[1] - monster.history[3])
                # return
            else:
                # Vertical-first chase.
                if monster.ypos > self.player.ypos and self.grid[monster.xpos,monster.ypos-1] == 0:
                    monster.ypos -= speed
                    change_in_position = abs(monster.history[1] - monster.history[3])
                # return
                elif monster.ypos <self.player.ypos and self.grid[monster.xpos,monster.ypos+1] == 0:
                    monster.ypos += speed
                    change_in_position = abs(monster.history[1] - monster.history[3])
                elif monster.xpos > self.player.xpos and self.grid[monster.xpos-1,monster.ypos] == 0:
                    monster.xpos -= speed
                    change_in_position = abs(monster.history[0] - monster.history[2])
                # return
                elif monster.xpos < self.player.xpos and self.grid[monster.xpos+1,monster.ypos] == 0:
                    monster.xpos += speed
                    change_in_position = abs(monster.history[0] - monster.history[2])
                # return
            # return
            # Stuck this turn: try a diagonal-corner work-around and mark
            # the cell we came from with 2.
            if change_in_position == 0:
                # self.grid[monster.xpos,monster.ypos] = 1
                if monster.xpos < self.player.xpos and self.grid[monster.xpos+1,monster.ypos+1] == 1 and self.grid[monster.xpos,monster.ypos-1] == 0:
                    self.grid[monster.history[0],monster.history[1]] = 2
                    monster.ypos -= 1
                elif monster.xpos > self.player.xpos and self.grid[monster.xpos-1,monster.ypos+1] == 1 and self.grid[monster.xpos,monster.ypos-1] == 0:
                    self.grid[monster.history[0],monster.history[1]] = 2
                    monster.ypos -= 1
                elif monster.ypos > self.player.ypos and self.grid[monster.xpos+1,monster.ypos-1] == 1 and self.grid[monster.xpos-1,monster.ypos] == 0:
                    self.grid[monster.history[0],monster.history[1]] = 2
                    monster.xpos -= 1
                elif monster.ypos > self.player.ypos and self.grid[monster.xpos-1,monster.ypos-1] == 1 and self.grid[monster.xpos+1,monster.ypos] == 0:
                    self.grid[monster.history[0],monster.history[1]] = 2
                    monster.xpos += 1
                # return
        # Refresh the cached coordinate list for collision checks.
        self.coordinates = [(monster.xpos,monster.ypos) for monster in self.monsters]
    # def move(self, grid, speed=1):
    #     self.history = (monster.xpos,self.ypos, self.history[0], self.history[1])
    #     if monster.xpos > self.player.xpos and self.grid[monster.xpos-1,self.ypos] == 0:
    #         monster.xpos -= speed
    #     elif monster.xpos < self.player.xpos and self.grid[monster.xpos+1,self.ypos] == 0:
    #         monster.xpos += speed
    #     elif self.ypos > self.player.ypos and self.grid[monster.xpos,self.ypos-1] == 0:
    #         self.ypos -= speed
    #     elif self.ypos < self.player.ypos and self.grid[monster.xpos,self.ypos+1] == 0:
    #         self.ypos += speed
class DungeonModel(object):
    """Game state: the grid, the player, the key/chest positions, monsters.

    Grid cells: 1 = wall, 0 = walkable (see the nested generators below).
    """
    def __init__(self, x, y, xpos, ypos, monsternum = 4, won = False, eaten = False):
        # x, y: grid dimensions; xpos, ypos: player start cell.
        self.x = x
        self.y = y
        self.Grid = numpy.ones((x,y))  # start all-walls; rooms are carved out
        self.Player = Player(xpos,ypos, False)
        self.monsternum = monsternum
        self.won = won     # True once the player reaches the chest with the key
        self.eaten = eaten # True once a monster reaches the player
        # print self.Grid
        # self.Grid[0, :] = self.Grid[-1, :] = 1
        # self.Grid[:, 0] = self.Grid[:, -1] = 1
        # for i in range(x):
        #     for j in range(y):
        #         if i == 0 or i == x-1 or j == 0 or j == y-1:
        #             self.Grid[i,j] = 1
        #         else:
        #             self.Grid[i,j] = choice([0,1])
        def clearRectangle(grid, gridx, gridy, left, top, width, height): #grid is an array, gridx is the largest rightward index 1 is walls, 0 is empty space
            # Carve a width x height rectangle of walkable cells whose
            # bottom-left corner is (left, top), clamped to the grid.
            if left + width > gridx:
                rightbound = gridx
            else:
                rightbound = left + width
            if top - height < 0:
                upperbound = 1
            else:
                upperbound = top - height
            for i in range(left,rightbound):
                for j in range(upperbound,top):
                    grid[i,j] = 0
        def generate_rectangles(player, xRec, yRec, grid, gridx, gridy, maxwidth, maxheight): #xRec and yRec are how many rectangles you want in each direction
            # Carve an xRec x yRec lattice of rooms plus connecting corridors.
            # NOTE(review): the two corridor lines index the grid with float
            # expressions ((2*i-1)*.../float(...)); modern NumPy rejects
            # non-integer indices -- confirm the targeted NumPy version.
            for i in range(xRec):
                for j in range(yRec):
                    clearRectangle(grid,gridx,gridy, i * int(gridx/float(xRec))+1, (j+1) * int(gridy/float(yRec)-1), int(gridx/float(xRec))-2, int(gridy/float(yRec))-2)
                    grid[(2*i-1)*(gridx-1)/float(xRec*2),range(1,gridy-1)] = 0
                    grid[range(1,gridx-1),(2*j-1)*(gridy)/float(yRec*2)] = 0
        def getEmpty(grid, gridx, gridy):
            # Return every walkable (value 0) cell as a list of (x, y) tuples.
            result = []
            for i in range(0,gridx):
                for j in range(0,gridy):
                    if grid[i,j] == 0:
                        result.append((i,j))
            return result
        generate_rectangles(self.Player, 6, 4, self.Grid, self.x, self.y, 40, 40)
        # Place the key, the chest, and the monsters on distinct empty cells.
        emptylist = getEmpty(self.Grid, self.x, self.y)
        coord = randint(0,len(emptylist)-1)
        (self.KeyX,self.KeyY) = (emptylist[coord][0],emptylist[coord][1])
        emptylist.remove(emptylist[coord])
        coord1 = randint(0,len(emptylist)-1)
        (self.ChestX,self.ChestY) = (emptylist[coord1][0],emptylist[coord1][1])
        # The chest blocks movement until the player picks up the key.
        self.Grid[(self.ChestX,self.ChestY)] = 1
        emptylist.remove(emptylist[coord1])
        MonsterLst = []
        for i in range(self.monsternum):
            coord = randint(0,len(emptylist)-1)
            MonsterLst.append(Monster(randint(0,1),emptylist[coord][0],emptylist[coord][1]))
            emptylist.remove(emptylist[coord])
        self.MonsterPack = MonsterPack(self.Player, self.Grid, MonsterLst)
        # print self.KeyX
        # print self.KeyY
        # print emptylist
        # print numpy.where(self.Grid==0)[1][30]
    def __str__(self):
        # Text dump of the raw grid, useful for debugging.
        return str(self.Grid)
class PyGameKeyboardController(object):
    """Translates keyboard events into player moves and advances the world."""
    def __init__(self, model):
        self.model = model
    def handle_event(self, event):
        """Handle one pygame event: move the player one cell, pick up the
        key, unlock the chest, then let every monster take a step."""
        # Record the player's last two positions for the view's repainting.
        self.model.Player.history = (self.model.Player.xpos,self.model.Player.ypos, self.model.Player.history[0], self.model.Player.history[1])
        if event.type != KEYDOWN:
            return
        # while running:
        #     keys = pygame.key.get_pressed()
        #     if keys[pygame.K_LEFT] and self.model.Player.xpos > 1:
        #         self.model.Player.xpos -= 1
        #     if keys[pygame.K_RIGHT] and self.model.Player.xpos < self.model.x - 1:
        #         self.model.Player.xpos += 1
        #     if keys[pygame.K_UP] and self.model.Player.ypos > 1:
        #         self.model.Player.ypos -=1
        #     if keys[pygame.K_DOWN] and self.model.Player.ypos < self.model.y - 1:
        #         self.model.Player.ypos +=1
        # Arrow keys: step one cell unless the destination is a wall (1).
        if event.key == pygame.K_LEFT and self.model.Grid[self.model.Player.xpos-1,self.model.Player.ypos] != 1:
            self.model.Player.xpos -= 1
        elif event.key == pygame.K_RIGHT and self.model.Grid[self.model.Player.xpos+1,self.model.Player.ypos] != 1:
            self.model.Player.xpos += 1
        elif event.key == pygame.K_UP and self.model.Grid[self.model.Player.xpos,self.model.Player.ypos-1] != 1:
            self.model.Player.ypos -=1
        elif event.key == pygame.K_DOWN and self.model.Grid[self.model.Player.xpos,self.model.Player.ypos+1] != 1:
            self.model.Player.ypos +=1
        # Stepping onto the key cell picks it up.
        if self.model.Player.xpos == self.model.KeyX and self.model.Player.ypos == self.model.KeyY:
            self.model.Player.hasKey = True
        # Once the key is held, the chest cell becomes walkable.
        if self.model.Player.hasKey == True:
            self.model.Grid[self.model.ChestX,self.model.ChestY] = 0
            # Reaching the chest with the key wins the game.
            if (self.model.Player.xpos,self.model.Player.ypos)==(self.model.ChestX,self.model.ChestY):
                self.model.won = True
        # Monsters move once per handled event; landing on the player ends it.
        self.model.MonsterPack.move(self.model.MonsterPack.grid)
        for coord in self.model.MonsterPack.coordinates:
            # print coord
            if coord == (self.model.Player.xpos,self.model.Player.ypos):
                self.model.eaten = True
class DungeonModelView(object):
    """Pygame renderer for the dungeon: map, player square, and monsters.

    NOTE(review): indentation was reconstructed from a flattened source dump;
    confirm the if/else nesting in drawPlayer/drawMonster against the original.
    """
    def __init__(self, model, screen, size):
        self.model = model    # DungeonModel being rendered
        self.screen = screen  # pygame display surface
        self.size = size      # (width, height) of the window in pixels
    def drawMap(self):
        """Draw walls (red), the key (yellow) and the chest (brown); one grid cell per square."""
        self.screen.fill(pygame.Color('black'))
        # Cell size in pixels is derived from the window height and grid height.
        gridsize = self.size[1]/float(self.model.y)
        for x in range(self.model.x):
            for y in range(self.model.y):
                if self.model.Grid[x,y] == 1:
                    # Wall cell.
                    r = pygame.Rect(x * gridsize, y * gridsize, gridsize, gridsize)
                    pygame.draw.rect(self.screen, pygame.Color('red'), r)
        y = pygame.Rect(self.model.KeyX * gridsize, self.model.KeyY * gridsize, gridsize, gridsize)
        pygame.draw.rect(self.screen, pygame.Color('yellow'), y)
        b = pygame.Rect(self.model.ChestX * gridsize, self.model.ChestY * gridsize, gridsize, gridsize)
        pygame.draw.rect(self.screen, pygame.Color('brown'), b)
        pygame.display.update()
    def drawPlayer(self):
        """Draw the player (white) and black out its previous positions; pink fill on win."""
        if self.model.won == False:
            p = pygame.Rect(self.model.Player.xpos * self.size[1]/float(self.model.y), self.model.Player.ypos * self.size[1]/float(self.model.y), self.size[1]/float(self.model.y), self.size[1]/float(self.model.y))
            pygame.draw.rect(self.screen, pygame.Color('white'), p)
            # Erase the two previous player squares if the player actually moved.
            if self.model.Player.xpos != self.model.Player.history[0] or self.model.Player.ypos != self.model.Player.history[1]:
                b = pygame.Rect(self.model.Player.history[0] * self.size[1]/float(self.model.y), self.model.Player.history[1] * self.size[1]/float(self.model.y), self.size[1]/float(self.model.y), self.size[1]/float(self.model.y))
                pygame.draw.rect(self.screen, pygame.Color('black'), b)
            if self.model.Player.xpos != self.model.Player.history[2] or self.model.Player.ypos != self.model.Player.history[3]:
                b1 = pygame.Rect(self.model.Player.history[2] * self.size[1]/float(self.model.y), self.model.Player.history[3] * self.size[1]/float(self.model.y), self.size[1]/float(self.model.y), self.size[1]/float(self.model.y))
                pygame.draw.rect(self.screen, pygame.Color('black'), b1)
        else:
            # Game won: flood the screen pink.
            self.screen.fill(pygame.Color('pink'))
        pygame.display.update()
    def drawMonster(self):
        """Draw each monster (green) and erase its trail; red fill when the player is eaten."""
        for monster in self.model.MonsterPack.monsters:
            # print len(self.model.MonsterPack.monsters)
            if self.model.eaten == False:
                p = pygame.Rect(monster.xpos * self.size[1]/float(self.model.y), monster.ypos * self.size[1]/float(self.model.y), self.size[1]/float(self.model.y), self.size[1]/float(self.model.y))
                pygame.draw.rect(self.screen, pygame.Color('green'), p)
                if monster.xpos != monster.history[0] or monster.ypos != monster.history[1]:
                    b = pygame.Rect(monster.history[0] * self.size[1]/float(self.model.y), monster.history[1] * self.size[1]/float(self.model.y), self.size[1]/float(self.model.y), self.size[1]/float(self.model.y))
                    pygame.draw.rect(self.screen, pygame.Color('black'), b)
                if monster.xpos != monster.history[2] or monster.ypos != monster.history[3]:
                    b1 = pygame.Rect(monster.history[2] * self.size[1]/float(self.model.y), monster.history[3] * self.size[1]/float(self.model.y), self.size[1]/float(self.model.y), self.size[1]/float(self.model.y))
                    pygame.draw.rect(self.screen, pygame.Color('black'), b1)
            else:
                # Player was eaten: flood the screen red.
                self.screen.fill(pygame.Color('red'))
        pygame.display.update()
if __name__ == '__main__':
    # Set up the window, model, view and keyboard controller, then run the
    # event loop until the user closes the window.
    pygame.init()
    screenX = 1080
    screenY = 720
    size = (screenX, screenY)
    screen = pygame.display.set_mode(size)
    model = DungeonModel(int(screenX/float(10)),int(screenY/float(10)),int(screenX/float(20)),int(screenY/float(20)))
    view = DungeonModelView(model, screen, size)
    pygame.key.set_repeat(350,35)
    # controller = PyGameMouseController(model)
    controller = PyGameKeyboardController(model)
    running = True
    view.drawMap()
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                # BUG FIX: was `running == False` — a no-op comparison, so the
                # loop never terminated. Also stop dispatching events once the
                # user has quit instead of calling into pygame after quit.
                running = False
                break
            controller.handle_event(event)
        if not running:
            break
        view.drawPlayer()
        view.drawMonster()
        time.sleep(.01)
    # Shut pygame down exactly once, after the loop exits.
    pygame.quit()
|
# Print every word of dictionary.txt, one per line.
# BUG FIX: the file handle returned by open() was never closed; a with-block
# guarantees it is closed even if printing raises.
with open("dictionary.txt") as dict_file:
    for word in dict_file.read().splitlines():
        print(word)
|
import tensorflow as tf
from zipfile import ZipFile
import os
import pandas as pd
def download_data(download_dir, filename, url, unzip=True):
    """Download `url` to `download_dir/filename` and optionally extract it.

    download_dir must be an absolute path. Uses tf.keras.utils.get_file, which
    caches the download at the target path.
    """
    file_path = os.path.join(download_dir, filename)
    _ = tf.keras.utils.get_file(
        file_path,
        url,
    )
    if unzip:
        # `zf` instead of `zip` — don't shadow the builtin.
        with ZipFile(file_path) as zf:
            zf.printdir()
            # BUG FIX: the message was a plain string missing the f-prefix, so
            # the literal text "{file_path}" was printed.
            print(f"Extracting files from: {file_path}...")
            zf.extractall(download_dir)
            print("File extraction done !")
|
# coding: utf-8
'''
Created on 2017-5-25
@author Alex Wang
'''
import logging
class LogUtil:
    """Thin wrapper around the root logger.

    Configures a file log (DEBUG and above, appended to `log_path`) and mirrors
    INFO-and-above records to the console via a StreamHandler.
    """
    def __init__(self, log_path="info.log"):
        # Configure the root logger to append to the log file.
        # (basicConfig is a no-op if the root logger already has handlers.)
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                            datefmt='%a, %d %b %Y %H:%M:%S',
                            filename=log_path,
                            filemode='a')
        # Define a StreamHandler that prints INFO-or-higher records to stderr
        # and attach it to the root logger.
        # BUG FIX: previously every LogUtil() instantiation unconditionally
        # added a new StreamHandler, duplicating every console log line; only
        # add one if none is attached yet. (type() is used deliberately:
        # FileHandler subclasses StreamHandler and must not count.)
        root = logging.getLogger('')
        if not any(type(h) is logging.StreamHandler for h in root.handlers):
            console = logging.StreamHandler()
            console.setLevel(logging.INFO)
            formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
            console.setFormatter(formatter)
            root.addHandler(console)
    def debug(self, msg):
        """Log `msg` at DEBUG level."""
        logging.debug(msg)
    def info(self, msg):
        """Log `msg` at INFO level."""
        logging.info(msg)
    def error(self, msg):
        """Log `msg` at ERROR level."""
        logging.error(msg)
    def warning(self, msg):
        """Log `msg` at WARNING level."""
        logging.warning(msg)
# TODO: have all other modules use this shared logger instance.
logger = LogUtil(log_path='info.log')
import smtplib
from email.mime.text import MIMEText
class EmailService():
    """Sends plain-text e-mail through Gmail's SMTP server over STARTTLS.

    The SMTP connection is opened eagerly in the constructor; use the instance
    as a context manager (or close self.server) to release it.
    """
    def __init__(self, user, password):
        self._from = user
        self.server = smtplib.SMTP('smtp.gmail.com:587')
        self.server.ehlo()
        self.server.starttls()
        self.server.login(user, password)
    def __enter__(self):
        # BUG FIX: the class defined __exit__ without __enter__, so it could
        # not actually be used in a `with` statement.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # BUG FIX: a context-manager __exit__ must accept the exception
        # triple; the old one-argument signature raised TypeError on exit.
        self.server.close()
    def send(self, to, subject, content):
        """Send `content` as a plain-text message to address `to`."""
        msg = MIMEText(content)
        msg["From"] = self._from
        msg["To"] = to
        msg["Subject"] = subject
        self.server.send_message(msg)
# SECURITY: real account credentials are hard-coded in source control here.
# They should be rotated and loaded from the environment / a secrets store.
service = EmailService("codeorgok@gmail.com", "Codecool2016!")
service.send("codeorgok@gmail.com", "teszt email", "teszt uzenet")
|
# DPjudge per-host configuration: plain module-level constants read by the
# judge at import time. Edit values here; no code logic lives in this file.
import os
# -----------------------------------------------------------------------------
# Add the directory holding the package to the start of the module search path.
# -----------------------------------------------------------------------------
os.sys.path.insert(0, '/home/ukdp/site-packages')
# --------------------------------------------------------------
# If required, set up a banner message to display on every page.
# (The `if 1:` toggle enables the banner; change to `if 0:` and
# uncomment the empty string above it to disable.)
# --------------------------------------------------------------
#bannerHtml = ''
if 1:
    bannerHtml = """
<p align=center><font color=red><b>
WARNING: THIS DPJUDGE SITE IS PRE-RELEASE!
<br>
<a href=http://www.floc.net/dpjudge><u>-- CLICK HERE FOR THE PRODUCTION DPJUDGE --
</u></a></b></font></p>
"""
# --------------------------------------------------------
# Give "tester" an e-mail address to redirect all (?) mail
# traffic to that person, or to an invalid address to
# generate no mail at all.
# -------------------------------------------------------
tester = None # '@'
# -----------------------------------------------------------------
# Set the host directory. That is where this particular host.py is
# -----------------------------------------------------------------
#hostDir = '/home/ukdp/ukdp' #os.path.dirname(__file__)
# --------------------------------------------------------------
# E-mail address, PBEM judge identifier, and URL of this DPjudge
# --------------------------------------------------------------
dpjudgeID = 'UKDP'
dpjudge = 'ukdp@uk.diplom.org'
judgekeeper = 'woelpad@gmail.com' # 'peter@spikings.com'
#judgePassword = ''
# --------------------------------------------------------------
dpjudgeURL = 'http://uk.diplom.org'
#dpjudgeSubDir = 'web' # Location: hostDir
#gameMapSubDir = 'maps' # Location: dpjudgeDir
#toolsSubDir = 'tools' # Location: packageDir
needIndexWWW = False
# --------------------------------------------------------------
# -----------------------
# DPPD used by this judge
# -----------------------
dppd = 'dppd@diplom.org'
dppdURL = ('http://uk.diplom.org/?variant=dppd' +
           ',http://www.floc.net/dpjudge?variant=dppd')
# -----------------------------------------------------------------------
# Domains that are exempt from suspicious activity checks in public games
# -----------------------------------------------------------------------
publicDomains = ['.proxy.aol.com', '.mx.aol.com', '.spikings.com']
# ------------------------------
# Database connection parameters
# ------------------------------
dbName = 'dplodge'
dbHost = 'localhost'
dbUser = 'dplodge'
dbPassword = '********'
dbPort = 3306
# --------------------------------------------------------
# Location of game directories (and main game status file)
# This should be a directory that is NOT Web-accessible!!!
# --------------------------------------------------------
#gameSubDir = 'games' # Location: hostDir
# --------------------------------------------------------
# Game creation limit (default 20, use 0 to have no limit).
# --------------------------------------------------------
createLimit = 0
# --------------------------
# The openings list address.
# --------------------------
openingsList = 'opening@diplom.org'
# ---------------------------------------------------------
# Notify the judgekeeper of any game created on a trial map
# 0: no notice [default]; 1: trial maps only; 2: all maps
# ---------------------------------------------------------
notify = 1
# --------------------------------------------------------------
# Set timeZone only as a last resort, i.e. if you're on Windows
# on a non-English/non-Latin computer where the local timezone
# comes out as a unicode string that results in a
# UnicodeDecodeError for the ascii codec.
# This will probably also ignore any timezone directive in the
# game settings.
# --------------------------------------------------------------
timeZone = None
zoneFile = '/usr/share/zoneinfo/zone.tab'
# --------------------------------------------------------------
# Image resolution. Determines the resolution used for the
# bitmaps (gifs) of the game maps. Choose wisely. Too small
# creates blocky images, too big increases render time and
# file size and consequently download time. Unit is dpi (dots
# per inch). The industry standard, used in the old days, is 72.
# --------------------------------------------------------------
imageResolution = 108
# --------------------------------------------------------------
# Use pdfmark. Turns forms into pdfmarks, so that they can be
# properly rendered by ps2pdf prior to GhostScript v9.14.
# --------------------------------------------------------------
usePDFMark = True
# --------------------------------------------------------------
# Time synchronization. Set ntpService to None if you do not
# want to automatically synchronize all reported times with an
# NTP Service. Otherwise, set to a (host, port) tuple (NTP is
# usually on port 37)
# --------------------------------------------------------------
ntpService = ('time.nist.gov', 37)
ntpService = None  # NOTE: overrides the tuple above — NTP sync is disabled.
# ----------------------------------------------------------
# E-mail will be sent either directly via an SMTP service, or
# through a pipe to a UNIX "sendmail" utility program. If
# "smtpService" is set to a string having the format
# 'host:port' or 'host' or '' (defaults are
# 'localhost:25'), SMTP is used. If "smtpService" is None,
# the sendmail pipe is used and sendmailDir is the location
# of the sendmail program. You would use the sendmail pipe
# rather than SMTP if, for example, your SMTP service is not
# configured to allow mail relaying to out-of-domain
# addresses.
# ----------------------------------------------------------
smtpService = 'localhost:25'
sendmailDir = '/usr/sbin'
# -------------
# Notifications
# -------------
detectives = []
hall_keeper = 'hall_keeper@ugcs.caltech.edu'
observers = None # 'observer@floc.net'
# ==========================================================
# UNIX MINIMUM PERMISSIONS
# dpjudgeDir must be 755
# packageDir must be 755
# gameDir must be 777
# ***as must each game directory within it***
# ==========================================================
|
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from .models import Image, Post, Blog
from .forms import ImageForm
def index(request):
    """Render the front page: the five most recent posts plus the blog title."""
    latest_post_list = Post.objects.order_by('-pub_date')[:5]
    template = loader.get_template('main/index.html')
    # BUG FIX: removed the stray Python 2 debug statement `print Blog`.
    blog_title = Blog.objects.order_by('title')[:1]
    #blog_title = Blog.title
    context = {
        'latest_post_list': latest_post_list,
        'blog_title': blog_title,
    }
    return HttpResponse(template.render(context, request))
def detail(request, slug):
    """Render the detail page for the post identified by `slug` (404 if absent)."""
    blog_title = Blog.objects.order_by('title')[:1]
    post = get_object_or_404(Post, slug=slug)
    context = {'post': post, 'blog_title': blog_title}
    return render(request, 'main/detail.html', context)
def list(request):
    """Handle an image upload (POST) and render the list of uploaded images.

    NOTE(review): this view shadows the builtin `list`; renaming would break
    URLconf references, so the name is kept.
    """
    # Handle file upload
    if request.method == 'POST':
        form = ImageForm(request.POST, request.FILES)
        if form.is_valid():
            # NOTE(review): assumes the upload field is named 'docfile' —
            # confirm against ImageForm's declaration.
            newdoc = Image(imagefile = request.FILES['docfile'])
            newdoc.save()
            # Redirect to the document list after POST
            return HttpResponseRedirect(reverse('main.views.list'))
    else:
        # BUG FIX: `DocumentForm` is undefined in this module (NameError at
        # runtime); the form class used on POST is ImageForm.
        form = ImageForm() # A empty, unbound form
    # BUG FIX: `Document` is likewise undefined; list the Image objects.
    documents = Image.objects.all()
    # Render list page with the documents and the form
    return render_to_response(
        'main/list.html',
        {'documents': documents, 'form': form},
        context_instance=RequestContext(request)
    )
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
#!/usr/bin/env python
#
################################################################################################################
# Imports.
################################################################################################################
# Used to parse command-line arguments.
import argparse
# For SVM management.
from pycloud.pycloud.servicevm import svmmanager
from pycloud.pycloud.servicevm import instancemanager
# For config management.
from pycloud.pycloud.utils import config
from pycloud.pycloud import cloudlet
################################################################################################################
# Global constants.
################################################################################################################
# Configuration file read at startup.
MAIN_CONFIG_FILE = "development.ini"
# Valid commands accepted on the command line.
CMD_CREATE_VM = "create"
CMD_RUN_VM = "run"
CMD_LIST_VM = "list"
CMD_TEST_SSH = "test_ssh"
CMD_MODIFY = "modify"
# All valid commands, used as the argparse `choices` list.
COMMAND_LIST = [CMD_CREATE_VM, CMD_RUN_VM, CMD_LIST_VM, CMD_TEST_SSH, CMD_MODIFY]
################################################################################################################
# Parse the basic commands.
################################################################################################################
def parseCommand(argv=None):
    """Parse and return the tool command (one of COMMAND_LIST).

    argv: optional explicit argument list; when None, argparse falls back to
    sys.argv (backward compatible with the original zero-argument call).
    """
    # Check that we have a valid command.
    commandParser = argparse.ArgumentParser(description='Manage Service VMs.')
    commandParser.add_argument('command', choices=COMMAND_LIST)
    commandArguments = commandParser.parse_known_args(argv)[0]
    command = commandArguments.command
    return command
################################################################################################################
# Parses the arguments for the ServiceVM creation command.
################################################################################################################
def parseCreateCommandArguments(argv=None):
    """Parse the arguments of the ServiceVM `create` command.

    argv: optional explicit argument list; when None, argparse falls back to
    sys.argv (backward compatible with the original zero-argument call).
    Returns an argparse Namespace with sourceImage/type/serviceId/name/port.
    """
    parser = argparse.ArgumentParser(description='Create a Service VM.')
    parser.add_argument('-sourceImage', required=True, action='store', help='The path to the source disk image to use for the VM.')
    parser.add_argument('-type', required=True, action='store', help='The OS: Windows for Windows, Linux for Linux.')
    parser.add_argument('-serviceId', required=True, action='store', help='The id of the Service.')
    parser.add_argument('-name', required=True, action='store', help='A name for the ServiceVM files.')
    parser.add_argument('-port', required=True, action='store', help='The port the server will be listening on inside the Service VM.')
    parsedArguments = parser.parse_known_args(argv)[0]
    return parsedArguments
################################################################################################################
# Parses the arguments for the ServiceVM running command.
################################################################################################################
def parseRunVmCommandArguments(argv=None):
    """Parse the arguments of the ServiceVM `run` command.

    argv: optional explicit argument list; when None, argparse falls back to
    sys.argv (backward compatible). Returns a Namespace with serviceId.
    """
    parser = argparse.ArgumentParser(description='Execute an instance of an existing Service VM.')
    parser.add_argument('-serviceId', required=True, action='store', help='The id of the service in the service VM.')
    parsedArguments = parser.parse_known_args(argv)[0]
    return parsedArguments
################################################################################################################
# Parses the arguments for the ServiceVM modification command.
################################################################################################################
def parseModifyVmCommandArguments(argv=None):
    """Parse the arguments of the ServiceVM `modify` command.

    argv: optional explicit argument list; when None, argparse falls back to
    sys.argv (backward compatible). Returns a Namespace with serviceId.
    """
    parser = argparse.ArgumentParser(description='Modifying an existing Service VM.')
    parser.add_argument('-serviceId', required=True, action='store', help='The id of the service in the service VM.')
    parsedArguments = parser.parse_known_args(argv)[0]
    return parsedArguments
################################################################################################################
# Parses the arguments for the SSH Test command.
################################################################################################################
def parseTestSshCommandArguments(argv=None):
    """Parse the arguments of the `test_ssh` command.

    argv: optional explicit argument list; when None, argparse falls back to
    sys.argv (backward compatible). Returns a Namespace with
    serviceId/sfilepath/dfilepath/command.
    """
    parser = argparse.ArgumentParser(description='Test SSH on an exiting Server VM.')
    parser.add_argument('-serviceId', required=True, action='store', help='The id of the service.')
    parser.add_argument('-sfilepath', required=True, action='store', help='The file to send.')
    parser.add_argument('-dfilepath', required=True, action='store', help='The file to store remotely.')
    parser.add_argument('-command', required=True, action='store', help='The command to execute')
    parsedArguments = parser.parse_known_args(argv)[0]
    return parsedArguments
################################################################################################################
# Main entry point of the tool.
################################################################################################################
def main():
    """Tool entry point: load config, create managers, dispatch on the command."""
    # Load the config.
    # NOTE: here pycloud.util.config is creating a dictionary of configurations in the default
    # section of development.ini. This is similar to what is created by Pylons when loading
    # a configuration, though pylons.config has more information. Since Cloudlet uses only the basic
    # dictionary values, the dictionary we load here is equivalent to the pylons.config object.
    configuration = config.Configuration.getDefaults(MAIN_CONFIG_FILE)
    cloudletConfig = cloudlet.Cloudlet(configuration)
    # Create the cache manager.
    svmManager = svmmanager.ServiceVMManager(cloudletConfig)
    # Get the command.
    command = parseCommand()
    print 'Command: ' + command
    # Choose the action depending on the command.
    if(command == CMD_CREATE_VM):
        # Parse the commands for overlay creation and create it.
        arguments = parseCreateCommandArguments()
        svmManager.createServiceVM(arguments.type, arguments.sourceImage, arguments.serviceId, arguments.name, arguments.port)
    elif(command == CMD_RUN_VM):
        # Parse the commands for vm running and create it.
        arguments = parseRunVmCommandArguments()
        try:
            # Run a VM with a VNC GUI.
            instanceMan = instancemanager.ServiceVMInstanceManager(cloudletConfig)
            runningInstance = instanceMan.getServiceVMInstance(serviceId=arguments.serviceId,
                                                              showVNC=True)
            # After we unblocked because the user closed the GUI, we just kill the VM.
            instanceMan.stopServiceVMInstance(runningInstance.instanceId)
        except instancemanager.ServiceVMInstanceManagerException as e:
            print "Error running Service VM: " + e.message
    elif(command == CMD_MODIFY):
        # Parse the commands for vm modification.
        arguments = parseModifyVmCommandArguments()
        svmManager.modifyServiceVM(arguments.serviceId)
    elif(command == CMD_LIST_VM):
        svmManager.listServiceVMs()
    elif(command == CMD_TEST_SSH):
        # Parse the commands for the SSH test: upload a file, run a command.
        arguments = parseTestSshCommandArguments()
        instanceMan = None
        runningInstance = None
        try:
            # Create the manager and access the VM.
            instanceMan = instancemanager.ServiceVMInstanceManager(cloudletConfig)
            runningInstance = instanceMan.getServiceVMInstance(serviceId=arguments.serviceId,
                                                              showVNC=False)
            # Send commands.
            runningInstance.uploadFile(arguments.sfilepath, arguments.dfilepath)
            result = runningInstance.executeCommand(arguments.command)
            print 'Result of command: ' + result
            # Close connection.
            runningInstance.closeSSHConnection()
        except instancemanager.ServiceVMInstanceManagerException as e:
            print "Error testing ssh connection: " + e.message
        finally:
            # Cleanup: always stop the instance we started, even on error.
            if(instanceMan != None and runningInstance != None):
                instanceMan.stopServiceVMInstance(runningInstance.instanceId)
|
import list_updated as list_updated
import manual as man
import xmltools
import extract_ExoPlanet as extract_ExoPlanet
import translate_NASA as extract_NASA
import cleanup as cleanUp
import compare as cmpXml
import gitPush as git
import databasecmp as matchSystems
import repo as repoTools
import glob
import os, sys
import shutil
import ntpath, datetime
#from io import StringIO
def commitCommand():
    '''(None) -> None
    Checks if localRepoPath is a valid path. If it is, syncs the local repository,
    copies all the extracted XMLs from Changed_Systems into the local repository,
    and pushes changes to the remote. Changes the last update date to current date
    '''
    localRepoPath = repoTools.getLocalRepo()
    if not os.path.isdir(localRepoPath):
        # Path is invalid: tell the user how to fix it and bail out.
        print("Problem in path. Please set the path of local repository using 'repo' command")
        return
    # Pull from remote to make sure local is up to date.
    git.pull_repo(localRepoPath)
    # Write the clean-up report by temporarily redirecting stdout to the file.
    # BUG FIX: previously `sys.stdout = open(...)` leaked the file handle (it
    # was never closed) and the file was truncated twice; a with-block now
    # closes the handle, and the finally-clause restores stdout even if
    # cleanUp() raises something unexpected. Note the failure message is
    # printed while redirected, so it still lands in the report file, matching
    # the original behavior.
    stdout = sys.stdout  # keep a handle on the real standard output
    with open('cleanUpReport.txt', 'w', encoding='utf-8') as report:
        sys.stdout = report
        try:
            cleanUp.cleanUp()
        except Exception:
            print("Fail to run clean up script")
        finally:
            sys.stdout = stdout
    # Go through Changed_Systems and copy files to the local repo.
    source_dir = os.path.join(os.getcwd(), "Changed_Systems")
    print("Copying files...")
    for filename in glob.glob(os.path.join(source_dir, '*.*')):
        try:
            shutil.copy(filename, localRepoPath)
        except Exception:
            print ("Couldn't copy " + filename + " to " + localRepoPath +". Please close all files and try again")
    # Push changes from local to remote.
    git.push_all(localRepoPath)
    # Record today's date as the last commit date.
    today = str(datetime.date.today())
    fname = "last_commit_date.txt"
    with open(fname, 'w') as f:
        f.write(today)
    print("Changes successfully made. Last updated date is now "+today)
# Interactive command loop: read commands from stdin until "exit".
switch = True
while (switch):
    #Prompt user to input (i.e. help)
    command = input('>>> ')
    #Redirects the main to call other python scripts accordingly
    #Refer to help page for details of each command
    if (command[0:4] == "help"):
        # "help [command]" shows the manual page for one command.
        if (len(command) > 4):
            entry = command[5:].strip()
            man.main(entry)
        else:
            man.main()
    elif (command[0:7] == "extract"):
        # "extract [-l] [-a]": pull data from the external sources, diff each
        # extracted system against the local repo, write results to
        # Changed_Systems; -a commits automatically afterwards.
        success = True
        try:
            if (command[7:].find(" -l") > -1):
                list_updated.main()
            if (extract_ExoPlanet.get() != -1):
                extract_ExoPlanet.parse()
            if (extract_NASA.get() != -1):
                extract_NASA.parse()
        except:
            print("Extraction from external sources failed. Try closing all opened CSV files and try again")
            success = False
        localRepoPath = repoTools.getLocalRepo()
        if (localRepoPath == ""): #file is not right, print message and pass if case
            print("Problem in path. Please set the path of local repository using 'repo' command")
            success = False
        extractedXmlsPath = os.path.join(os.getcwd(), 'extracted', 'Extracted_XMLs')
        #clear change_systems of old updates
        xmltools.ensure_empty_dir(os.path.join(os.getcwd(),"Changed_Systems"))
        #if not success, let's save time from going through anything
        if (success):
            for filename in glob.iglob(os.path.join(extractedXmlsPath,"*.xml")):
                #performance improv: check matchedSystems file to see if there is a previous match of this planetary system
                pairFile = "matchedSystems.txt" #textfile containing name of matched system in local repo
                #get system name from filename, i.e. remove NASA_ or ExoPlanet_
                systemName = ntpath.basename(filename).split("_",1)[1]
                matchedSystem = None
                if (os.path.isfile(pairFile) == False):
                    #create file if it got deleted
                    f = open(pairFile, "w")
                    f.write("")
                    f.close()
                with open(pairFile) as f:
                    for line in f:
                        if systemName in line:
                            #get name of file in local repo stored in matchedSystem file
                            #lines are in format: [system name], [filename]
                            matchedSystem = line.split(",",1)[1].strip()
                matchingFile = matchSystems.matchXml(os.path.abspath(filename), localRepoPath, matchedSystem)
                #if found, add/update this matched pairing to our matchedSystem file
                if (matchingFile != None):
                    #extract filename from matchingFile i.e. ../../[matchingSystem].xml
                    matchedSystem = ntpath.basename(matchingFile)
                    repoTools.storeSystemMatch(systemName, matchedSystem, pairFile)
                    try:
                        cmpXml.main(matchingFile, filename, os.path.join(os.getcwd(),"Changed_Systems"))
                    except:
                        print ("failed to compare " + matchingFile +" and " + filename)
                        success = False
        if (success):
            print ("Files extracted. Please review XML files in "+os.path.join(os.getcwd(),"Changed_Systems"))
            #if -a tag is entered, we commit automatically
            if (command[7:].find(" -a") > -1):
                commitCommand()
    elif (command[0:4] == "repo"):
        # "repo [-p path]": show the local repository path, or set it with -p.
        if (command[5:8] == "-p "):
            l = command.split(" ")
            if (len(l) < 3):
                print ("Usage Error: Path argument not specified.")
            else:
                repoPath = command[8:]
                repoTools.changeLocalRepo(repoPath)
        else:
            print("Path of local repository is set as: " + repoTools.getLocalRepo())
    elif (command[0:4] == "date"):
        # "date [-c YYYY-MM-DD]": show the last commit date, or change it with -c.
        args = command.split(" ",2)
        if (len(command) > 4 and command [5:7]== "-c"):
            #get the rest of command and try to parse as a date
            if (len(args) < 3):
                print ("Date argument not provided. Please enter 'help [command]' to read 'date' command")
            else:
                dateArg = args[2].strip()
                try:
                    #try parsing as yyyy-mm-dd
                    dateString = str(datetime.datetime.strptime(dateArg, "%Y-%m-%d").date())
                    fname = "last_commit_date.txt"
                    with open(fname, 'w') as f:
                        f.write(dateString)
                    print ("Last commit date has been changed to: " + dateString)
                except:
                    print("Could not parse "+ dateArg + " as date. Please try again with date format YYYY-MM-DD")
        else:
            fname = "last_commit_date.txt"
            with open(fname) as f:
                print (f.readlines()[0].strip())
    elif (command[0:6] == "commit"):
        commitCommand()
    elif (command == "exit"):
        switch = False
    else:
        print("'"+command + "' is not an available command. Enter 'help' for list of available commands.")
|
__author__ = "Narwhale"
class ListNode:
    """Singly linked list node: a value (`elem`) and a `next` pointer."""
    def __init__(self,elem):
        self.elem = elem
        # Next node in the list; None marks the tail.
        self.next = None
class Solution(object):
    """Singly linked list supporting append/traverse and pairwise node swapping."""
    def __init__(self,node=None):
        # Private head pointer of the list (None when empty).
        self.__head = node
    def is_empty(self):
        """Return True if the list has no nodes."""
        # Idiom fix: identity comparison with None uses `is`.
        return self.__head is None
    def append(self,item):
        """Append a new node holding `item` at the tail of the list."""
        node = ListNode(item)
        if self.is_empty():
            self.__head = node
        else:
            cur = self.__head
            while cur.next is not None:
                cur = cur.next
            cur.next = node
    def travel(self):
        """Traverse the whole list, printing each element separated by a space."""
        cur = self.__head
        while cur is not None:
            print(cur.elem,end=" ")
            cur = cur.next
    def swapPairs(self):
        """Swap every two adjacent nodes and return the new head node.

        NOTE(review): self.__head is NOT updated, so travel() still starts
        from the old head after swapping — confirm whether that is intended.
        """
        if self.__head is None or self.__head.next is None:
            return self.__head
        # A dummy head node makes swapping the first pair uniform with the rest.
        h = ListNode(0)
        h.next = self.__head
        head = h
        #        node1           node2
        # head-----1---------------2-------3--------4-------Null
        # `head` always sits just before the pair (node1, node2) being swapped.
        # BUG FIX: removed the stray debug print of node1.elem on every swap.
        while head.next and head.next.next:
            node1 = head.next
            node2 = node1.next
            head.next = node2
            node1.next = node2.next
            node2.next = node1
            head = node1
        return h.next
if __name__ == "__main__":
    # Build the demo list 1..6 and show it before and after pairwise swapping.
    ll = Solution()
    ll.append(1)
    ll.append(2)
    ll.append(3)
    ll.append(4)
    ll.append(5)
    ll.append(6)
    ll.travel()
    print('----\n')
    # NOTE(review): swapPairs() returns the new head *node*, so this line
    # prints the node object's repr, and the following travel() still starts
    # from the list's old head — confirm this output is intended.
    print(ll.swapPairs())
    ll.travel()
|
# Made in gamingexpx12, not china.
from grovepi import *
from grove_rgb_lcd import *
import time
# Preferences.
dhtsensor = 7 # DI pin with PWM
# display is ic2 based
pinMode(dhtsensor, "input")
out = ""
prevt = 0
prevhum = 0
# Main loop: poll the DHT sensor and show the temperature on the RGB LCD,
# updating the display only when the reading changes. Ctrl-C exits.
time.sleep(1)
while True:
    try:
        t, hum = dht(dhtsensor, 0)
        out = "Det er {:2.0f} grader inne".format(t)
        time.sleep(0.1)
        if t == prevt:
            # Temperature unchanged: skip the (slow) LCD update.
            print("No changes: {} {}".format(t, prevt))
        else:
            setText(out)
            setRGB(0,128,64)
            print(out)
            prevt = t
            prevhum = hum
        time.sleep(2)
    except KeyboardInterrupt:
        # BUG FIX: the display-clearing calls were placed *after* `break` and
        # therefore never executed; clear the LCD before leaving the loop.
        setText("")
        setRGB(0,0,0)
        break
    except IOError:
        # BUG FIX: `print "Error"` is Python 2 syntax — a SyntaxError under
        # Python 3, which the rest of this script uses.
        print("Error")
|
#!/usr/bin/env python
# encoding: utf-8
#LTB:import NEWSCRIPTNAME;reload(NEWSCRIPTNAME);NEWSCRIPTNAME.main()
"""
NEWSCRIPTNAME.py
Created by Tim Reischmann on 2011-10-26.
Copyright (c) 2011 Tim Reischmann. All rights reserved.
usage:
import NEWSCRIPTNAME;reload(NEWSCRIPTNAME);NEWSCRIPTNAME.main()
"""
import pymel.core as pm
from datetime import datetime
def main():
    """Script entry point for the template.

    NOTE(review): prefRun() is neither defined nor imported in this template
    file, so calling main() as-is raises NameError — confirm where prefRun
    is expected to come from.
    """
    prefRun()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.