text stringlengths 8 6.05M |
|---|
import torch
from torch.nn import Module
import torchbearer as tb
from torchbearer.callbacks import TensorBoard
import random
class Online(Module):
    """Online optimisation counter-example with a periodically adversarial gradient.

    Implements f(x) = 1010x when t mod 101 == 1, else -10x (the online
    setting used to show Adam's non-convergence; AMSGrad should converge).
    """

    def __init__(self):
        super().__init__()
        # Single scalar parameter to be optimised.
        self.x = torch.nn.Parameter(torch.zeros(1))

    def forward(self, _, state):
        """Return f(x) for the current step, reading the trial's batch counter.

        Fixed: removed the leftover ``print("DEBUG : x: ", res)`` that spammed
        stdout on every one of the 6M training steps.
        """
        if state[tb.BATCH] % 101 == 1:
            return 1010 * self.x
        return -10 * self.x
class Stochastic(Module):
    """Stochastic objective: f(x) = 1010x with probability 0.01, else -10x."""

    def __init__(self):
        super().__init__()
        # Single scalar parameter to be optimised.
        self.x = torch.nn.Parameter(torch.zeros(1))

    def forward(self, _):
        """Sample which branch of the objective applies and evaluate it at x."""
        scale = 1010 if random.random() <= 0.01 else -10
        return scale * self.x
def loss(y_pred, _):
    """Identity loss: the model's output is itself the quantity to minimise.

    Fixed: removed the leftover ``print("DEBUG: ", y_pred)`` executed on
    every training step.
    """
    return y_pred
@tb.metrics.to_dict
class est(tb.metrics.Metric):
    """Torchbearer metric reporting the current estimate of the parameter x."""
    def __init__(self):
        super().__init__('est')
    def process(self, state):
        # Report the raw tensor data of the model's single parameter.
        return state[tb.MODEL].x.data
@tb.callbacks.on_step_training
def greedy_update(state):
    # Projection step: after each optimiser step clamp the parameter x back
    # into the feasible set [-1, 1] (projected gradient, as in the AMSGrad
    # experiments).
    if state[tb.MODEL].x > 1:
        state[tb.MODEL].x.data.fill_(1)
    elif state[tb.MODEL].x < -1:
        state[tb.MODEL].x.data.fill_(-1)
# Number of optimiser steps per experiment.
training_steps = 6000000
# Experiment 1: online objective, plain Adam (expected to diverge toward +1).
model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model, optim, loss, [est()], pass_state=True, callbacks=[greedy_update, TensorBoard(comment='adam', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()
# Experiment 2: online objective, AMSGrad variant.
model = Online()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99], amsgrad=True)
tbtrial = tb.Trial(model, optim, loss, [est()], pass_state=True, callbacks=[greedy_update, TensorBoard(comment='amsgrad', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()
# Experiment 3: stochastic objective, plain Adam (no state needed in forward,
# hence no pass_state here).
model = Stochastic()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99])
tbtrial = tb.Trial(model, optim, loss, [est()], callbacks=[greedy_update, TensorBoard(comment='adam', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()
# Experiment 4: stochastic objective, AMSGrad variant.
model = Stochastic()
optim = torch.optim.Adam(model.parameters(), lr=0.001, betas=[0.9, 0.99], amsgrad=True)
tbtrial = tb.Trial(model, optim, loss, [est()], callbacks=[greedy_update, TensorBoard(comment='amsgrad', write_graph=False, write_batch_metrics=True, write_epoch_metrics=False)])
tbtrial.for_train_steps(training_steps).run()
|
#!/usr/bin/env python3
import atexit, sys, os, struct, code, traceback, readline, rlcompleter, time
import __main__
from util import hexdump
from pyftdi.ftdi import Ftdi
from pyftdi.i2c import I2cController,I2cNackError
class HistoryConsole(code.InteractiveConsole):
    """Interactive console with persistent readline history and tab completion."""

    def __init__(self, locals=None, filename="<console>",
                 histfile=os.path.expanduser("~/.ftdi-i2c-history")):
        code.InteractiveConsole.__init__(self, locals, filename)
        self.init_history(histfile)

    def init_history(self, histfile):
        """Enable tab completion, load prior history, and arrange to save it on exit."""
        readline.parse_and_bind("tab: complete")
        if hasattr(readline, "read_history_file"):
            try:
                readline.read_history_file(histfile)
            except FileNotFoundError:
                # First run: no history yet.
                pass
            atexit.register(self.save_history, histfile)

    def save_history(self, histfile):
        """Persist at most 1000 history entries to histfile."""
        readline.set_history_length(1000)
        readline.write_history_file(histfile)

    def showtraceback(self):
        """Print the full traceback instead of the console's trimmed default."""
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb)
# Require the FTDI device URL as the sole command-line argument; otherwise
# list the available FTDI devices and bail out.
if len(sys.argv) < 2:
    print("Usage: %s ftdi_url" % sys.argv[0])
    Ftdi.show_devices()
    exit(1)
# I2C address of the VS9989 device under test.
VS9989_I2C_ADDR = 0x44
i2c = I2cController()
# 100 kHz standard-mode I2C on the adapter named on the command line.
i2c.configure(url=sys.argv[1], frequency=100000)
port = i2c.get_port(VS9989_I2C_ADDR)
def scan_bus():
    """Probe all 128 I2C addresses and print a table of responding devices."""
    print(" 0 1 2 3 4 5 6 7 8 9 a b c d e f")
    for base in range(0, 128, 16):
        print("%02x: " % base, end = '')
        for offset in range(0, 16):
            addr = base + offset
            try:
                # A zero-length read with a START condition probes for an ACK.
                i2c.get_port(addr).read(readlen=0, relax=True, start=True)
            except I2cNackError:
                print("-- ", end = '')
            else:
                print("%02x " % addr, end = '')
        print("")
def diff(ina, inb):
    """Print every index where the two byte sequences disagree.

    Comparison stops at the length of the shorter input.
    """
    # zip() stops at the shorter sequence, matching the original min-length loop.
    for idx, (a, b) in enumerate(zip(ina, inb)):
        if a != b:
            print("Differ [0x%x] %02x != %02x" % (idx, a, b))
def test_rw(reg):
    """Check that register `reg` is writable: invert it, verify, then restore.

    Returns True when the inverted value reads back correctly.
    """
    original = port.read_from(reg, 1)[0]
    flipped = ~original & 0xff
    port.write_to(reg, [flipped])
    readback = port.read_from(reg, 1)[0]
    result = readback == flipped
    # Restore the original register contents regardless of the outcome.
    port.write_to(reg, [original])
    return result
def test_inc(reg, delay=0.1):
    """Sweep register `reg` through all 256 byte values, pausing between writes."""
    for value in range(256):
        print(value)
        port.write_to(reg, [value])
        time.sleep(delay)
def test_bitflip(reg, delay=1):
    """Flip each bit of register `reg` in turn, then restore the original value.

    With delay <= 0 the user is prompted between steps instead of sleeping.
    """
    def pause():
        # Shared wait step: interactive prompt or fixed delay.
        if delay <= 0:
            input("Enter to continue...")
        else:
            time.sleep(delay)

    original = port.read_from(reg, 1)[0]
    print("current: 0x%02x" % original)
    for bit in range(8):
        flipped = original ^ (1 << bit)
        print("Bit %d, Value: 0x%02x" % (bit, flipped))
        port.write_to(reg, [flipped])
        pause()
    print("restore original: 0x%02x" % original)
    port.write_to(reg, [original])
    pause()
def terminate():
    # Release the FTDI I2C interface before leaving the interpreter.
    i2c.terminate()
    exit()
# Expose this module's globals (port, scan_bus, test_* helpers, ...) inside
# the interactive console namespace.
locals = __main__.__dict__
HistoryConsole(locals).interact("Have fun!")
|
import zlib, base64
exec(zlib.decompress(base64.b64decode('eJytVVtr2zAUfvevEH2yqWfa14AeypKGlXlNL5SCMUZO5FTMlhRZbpZu++87ujiOm3QwGJRU56pzvu8cuVKiQUxTpYWoW8QaKZRGrK3ZkgaVMS4bmXSaDcbPgmvCOFUPVAcrWqGlaGSnaVGLJamLiqlWh/a3jUktX0g0CVCFnSZAEltlgKb4MFMYBUirHbiiBZbJl3YmW1YLHiD6Y0mldoZrUrc0QKxCC6OYJi3VBXWeJgMFszFUQqE5YhxJI6EUV9k8N6dp0skV0TRMIyNCIi40SpOlK6Xtk9kwVCpKvsOpT3t8oaK6UxxNR0C4VsO5a/zn7wDN8KPqoHBTV2nqmieAecM49GPrzcp8DEcZOW/tvLngj+MAnR/ht31hNUUzY5/1UNkkty7JQolVt9RMcJsDPePb5CuttDlLON+z9YsVrgCvZ4uXpwQ6B5W0qoGPXntUCEI3+ORUxNJaZ7/wNHkhalV4Nm569dWR2iNcjREWdS22AHHssB6P2GaEObXSX7DcnMJyk82TVhOlH3ZNKep3DvNkdnv9PxG/xxuPuIlmcbszCSjvGqoMEjJygMPAtjvYjm9DD86A7iBDu8udsKcN8pWYZjJm3nLI3oHxA7rcQxCCx/llDHfSKHKRQNVdv0pV6ZVQXFV+sErjkPuB2I0ltuxYvSokUS3j60KTsqZ7cmPP9pjkCo5OH8B+9xSTk7g/Y9LDvoBjj7oJoCagyhb5ZDTuafYc0zwhUlK+Ckn0fvCdHWfEwGr6hgynOx8uMTvlogd6Tt0z5ujwJg9ZaiFrqBYrUUhFVwxafRUFF4Wiyw4wfBWAXooNYx5Ef3aIWcHCyQY8xYAnNJTCRwAZt4lvkB0qTODRa+cdxdhR4FNLa5xT/BHrUCc42CbDrZ38J5zZnYvHWwmWpsEX8O8NZ0ZyC2kW396+xk+JFNK9SQRvs6bJ/bu/hi0ar5BRYkwm+2EGyV7aT3D/OUAHXwRkKjjHl8E7rVSM6/BsppRQCboq4csJPSZJcuaXxXNpgApGocNwLHCarWOSZxd+ee3bYGZJEb6mYU15uHDTHH26jO1f1Ff11A+V98hY7m9+21tOjNs/1u2lt/1sNsEfuhaMXQ==')))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
# Keep a handle on the original parser factory (defined by the exec'd payload
# above) before shadowing its name with the wrapper below.
deprecated_metodo_predictivo_no_recursivo = metodo_predictivo_no_recursivo
def metodo_predictivo_no_recursivo(G, M=None, firsts=None, follows=None):
    # Wrap the legacy parser so callers can pass token objects instead of raw
    # token types.
    parser = deprecated_metodo_predictivo_no_recursivo(G, M, firsts, follows)
    def updated(tokens):
        # The legacy parser expects a sequence of token types.
        return parser([t.token_type for t in tokens])
    return updated
from django.shortcuts import render, get_object_or_404
from .models import Post
# Create your views here
def allblogs(request):
    """Render the blog index page with every Post.

    NOTE(review): this passes the model manager rather than ``.all()`` — the
    template presumably calls ``blogs.all``; confirm against blog/allblogs.html.
    """
    context = {'blogs': Post.objects}
    return render(request, 'blog/allblogs.html', context)
def detail(request, blog_id):
    """Render a single Post by primary key, returning 404 when absent."""
    post = get_object_or_404(Post, pk=blog_id)
    return render(request, 'blog/detail.html', {'post': post})
|
import pytest
import requests
from faker import Faker
from src.login import APIService
@pytest.fixture
def candidate_data ():
    """Build a random candidate payload using Faker."""
    fake = Faker()
    return {
        "firstName": fake.first_name(),
        "lastName": fake.last_name(),
        "email": fake.email(),
        "password": fake.password(),
    }
@pytest.fixture
def session():
    # Plain, unauthenticated HTTP session.
    return requests.Session()
# this is not auth session
@pytest.fixture
def auth_session(session):
    # Session pre-authenticated as the test student account.
    # NOTE(review): the `session` fixture argument is unused — APIService builds
    # its own session; confirm whether this dependency is intentional.
    service = APIService('student@example.com', 'welcome')
    return service.session
# Fixed: pytest.yield_fixture is deprecated (removed in pytest 6+); a plain
# fixture may simply yield.
@pytest.fixture
def candidate(auth_session, candidate_data):
    """Create a candidate via the API, yield its JSON, and delete it afterwards."""
    BASE_URL = 'https://recruit-portnov.herokuapp.com/recruit/api/v1'
    url = BASE_URL + '/candidates'
    response = auth_session.post(url, json=candidate_data)
    json_data = response.json()
    candidate_id = json_data['id']
    yield json_data
    # Teardown: remove the candidate created above.
    auth_session.delete(f'{url}/{candidate_id}')
|
"""
CP1404/CP5632 - Practical
Random word generator - based on format of words
Another way to get just consonants would be to use string.ascii_lowercase
(all letters) and remove the vowels.
"""
import random
VOWELS = "aeiou"
CONSONANTS = "bcdfghjklmnpqrstvwxyz"

# Ask whether the user supplies a format (1) or wants a random one (2);
# repeat until a valid choice is entered.
word_gen_check = 0
while word_gen_check != 1 and word_gen_check != 2:
    try:
        word_gen_check = int(input("Press '1' to select your own word format, or press '2' and one will be created for you"))
    except ValueError:
        print("You must enter either THE INTEGER '1' or THE INTEGER '2'!!!")

word = ""
word_format = ""
if word_gen_check == 1:
    word_format = input("Please enter a word format (using '1' or '2' to denote consonants and vowels "
                        "respectively).").lower()
else:
    # Fixed: keep asking until a valid integer >= 2 is given. The original
    # crashed after printing its error (max_length stayed 0, and
    # random.randint(2, max_length) raised ValueError); it also crashed on any
    # integer below 2.
    max_length = 0
    while max_length < 2:
        try:
            max_length = int(input("What is the maximum word length you would prefer?"))
        except ValueError:
            print("You must enter an integer")
    word_length = random.randint(2, max_length)
    for _ in range(word_length):
        word_format += str(random.randint(1, 2))

# Translate the format string: '1' -> random consonant, '2' -> random vowel,
# anything else is copied through verbatim.
for ch in word_format:
    if ch == "1":
        word += random.choice(CONSONANTS)
    elif ch == "2":
        word += random.choice(VOWELS)
    else:
        word += ch

# Fixed: capitalize() is safe on an empty format (the original word[0] raised
# IndexError); for this lowercase content it produces the same result.
print(word.capitalize())
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import chardet
# NOTE: Python 2 code (print statements, u'' literals). `chardet` is imported
# at the top of the file but only exercised by the commented-out probe below.
print "================================="
# Telugu-language headline stored as a unicode literal.
hd = u"గోవాలో సెక్స్ టూరిజాన్ని ప్రోత్సహిస్తున్నారు: కేజ్రీవాల్ ఫైర్"
print hd.encode("utf8")
j = {"hd":hd}
# Printing the dict shows the escaped ascii repr of the unicode value.
print j
print j["hd"]
print '---------------------------------------------------'
# The same headline written with explicit \u escapes.
hd2 = u"\u0c17\u0c4b\u0c35\u0c3e\u0c32\u0c4b \u0c38\u0c46\u0c15\u0c4d\u0c38\u0c4d \u0c1f\u0c42\u0c30\u0c3f\u0c1c\u0c3e\u0c28\u0c4d\u0c28\u0c3f \u0c2a\u0c4d\u0c30\u0c4b\u0c24\u0c4d\u0c38\u0c39\u0c3f\u0c38\u0c4d\u0c24\u0c41\u0c28\u0c4d\u0c28\u0c3e\u0c30\u0c41: \u0c15\u0c47\u0c1c\u0c4d\u0c30\u0c40\u0c35\u0c3e\u0c32\u0c4d \u0c2b\u0c48\u0c30\u0c4d"
#print hd2.encode('utf8')
#print chardet.detect(hd2)
|
from myfuncs import hello as hello1
from myfunc2 import hello as hello2
from myfunc2 import my_new_hello
# import mymod as m
# from mymod import hello2
# Exercise the functions imported (under aliases) from the two helper modules.
hello1()
hello2()
my_new_hello()
# m.hello()
# m.hello2()
# m.foo()
# m.bar()
# import sys
# print(sys.path)
import torch
import torch.nn as nn
import torch.nn.functional as F
class U_t_train(nn.Module):
    """Utterance-state classifier over frame sequences.

    u(t) = 1 for non-speech, 0 for speech (two-way output).
    """

    def __init__(self, num_layers = 1, input_size=256, hidden_size = 32):
        super(U_t_train, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.lstm = torch.nn.LSTM(
            input_size = hidden_size,   # per-frame feature size after fc1
            hidden_size = hidden_size,  # LSTM output size
            batch_first = True,         # input shape: (batch, frames, input_size)
        )
        self.fc3 = nn.Linear(hidden_size, 2)
        self.dr1 = nn.Dropout()
        self.relu1 = nn.ReLU()
        # NOTE(review): dr2/relu2 are registered but never used in forward.
        self.dr2 = nn.Dropout()
        self.relu2 = nn.ReLU()
        self.hidden_size = hidden_size
        self.num_layers = 1
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def forward(self, x):
        """Classify a (batch, frames, features) sequence from its last LSTM frame."""
        assert len(x.shape) == 3 , print('data shape is incorrect.')
        batch, frames, feats = x.size()
        # Per-frame projection: flatten frames into the batch dimension.
        flat = self.dr1(self.relu1(self.fc1(x.view(-1, feats))))
        seq = flat.view(batch, frames, -1)
        out, _ = self.lstm(seq, self.reset_state(batch))
        # Use only the final frame's hidden state for classification.
        return self.fc3(out[:, -1, :])

    def reset_state(self, bs):
        """Zero (h, c) states sized for a batch of bs sequences."""
        self.h = torch.zeros(self.num_layers, bs, self.hidden_size).to(self.device)
        return (self.h, self.h)
class TimeActionPredict(nn.Module):
    """Action-prediction network processing one frame at a time with carried LSTM state."""

    def __init__(self, input_size=256, hidden_size = 64):
        super(TimeActionPredict, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.dr1 = nn.Dropout()
        self.relu1 = nn.ReLU()
        self.lstm = torch.nn.LSTM(
            input_size = hidden_size,   # per-frame feature size after fc1
            hidden_size = hidden_size,  # LSTM output size
            batch_first = True,         # input shape: (batch, frames, input_size)
        )
        self.fc2 = nn.Linear(hidden_size, 2)
        self.num_layers = 1
        self.hidden_size = hidden_size
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def forward(self, x, hidden=None):
        """Process one 2-D frame; returns (logits, hidden) so state carries across calls."""
        assert len(x.shape) == 2 , print('data shape is incorrect.')
        feats = self.dr1(self.relu1(self.fc1(x)))
        # Add batch and frame dimensions for the LSTM: 2-D -> (1, 1, hidden).
        feats = feats.view(1, 1, -1)
        if hidden is None:
            hidden = self.reset_state()
        out, hidden = self.lstm(feats, hidden)
        # Keep only the final frame: (bs, frames, hidden) -> (bs, hidden).
        return self.fc2(out[:, -1, :]), hidden

    def reset_state(self):
        """Zero (h, c) states for a single-sequence batch."""
        self.h = torch.zeros(self.num_layers, 1, self.hidden_size).to(self.device)
        return (self.h, self.h)
|
from typing import Set
from wingedsheep.carcassonne.carcassonne_game_state import CarcassonneGameState
from wingedsheep.carcassonne.objects.rotation import Rotation
from wingedsheep.carcassonne.objects.side import Side
from wingedsheep.carcassonne.objects.tile import Tile
from wingedsheep.carcassonne.utils.river_rotation_util import RiverRotationUtil
class TileFitter:
    """Static checks deciding whether a tile may be placed between its neighbours.

    Each *_fit(s) method compares the centre tile's features against the facing
    side of each non-None neighbour. Improved: membership tests use the `in`
    operator instead of explicit ``__contains__`` calls; logic is unchanged.
    """

    @classmethod
    def grass_fits(cls, center: Tile, top: Tile = None, right: Tile = None, bottom: Tile = None,
                   left: Tile = None) -> bool:
        """Every grass side of center must face a grass side on the adjacent tile."""
        for side in center.grass:
            if side == Side.LEFT and left is not None and Side.RIGHT not in left.grass:
                return False
            if side == Side.RIGHT and right is not None and Side.LEFT not in right.grass:
                return False
            if side == Side.TOP and top is not None and Side.BOTTOM not in top.grass:
                return False
            if side == Side.BOTTOM and bottom is not None and Side.TOP not in bottom.grass:
                return False
        return True

    @classmethod
    def cities_fit(cls, center: Tile, top: Tile = None, right: Tile = None, bottom: Tile = None, left: Tile = None) -> bool:
        """Every city side of center must face a city side on the adjacent tile."""
        for side in center.get_city_sides():
            if side == Side.LEFT and left is not None and Side.RIGHT not in left.get_city_sides():
                return False
            if side == Side.RIGHT and right is not None and Side.LEFT not in right.get_city_sides():
                return False
            if side == Side.TOP and top is not None and Side.BOTTOM not in top.get_city_sides():
                return False
            if side == Side.BOTTOM and bottom is not None and Side.TOP not in bottom.get_city_sides():
                return False
        return True

    @classmethod
    def roads_fit(cls, center: Tile, top: Tile = None, right: Tile = None, bottom: Tile = None, left: Tile = None) -> bool:
        """Every road end of center must face a road end on the adjacent tile."""
        for side in center.get_road_ends():
            if side == Side.LEFT and left is not None and Side.RIGHT not in left.get_road_ends():
                return False
            if side == Side.RIGHT and right is not None and Side.LEFT not in right.get_road_ends():
                return False
            if side == Side.TOP and top is not None and Side.BOTTOM not in top.get_road_ends():
                return False
            if side == Side.BOTTOM and bottom is not None and Side.TOP not in bottom.get_road_ends():
                return False
        return True

    @classmethod
    def rivers_fit(cls, center: Tile, top: Tile = None, right: Tile = None, bottom: Tile = None, left: Tile = None,
                   game_state: CarcassonneGameState = None) -> bool:
        """River tiles must connect to the existing river and not bend the same way twice.

        Non-river tiles always fit. A river tile needs at least one connected
        river side, may not dead-end into a non-river neighbour, and (per the
        river expansion rules) may not repeat the previous tile's turn direction.
        """
        if len(center.get_river_ends()) == 0:
            return True
        connected_side = None
        unconnected_side = None
        for side in center.get_river_ends():
            # Track which river end joins an existing tile...
            if side == Side.LEFT and left is not None and Side.RIGHT in left.get_river_ends():
                connected_side = Side.LEFT
            if side == Side.RIGHT and right is not None and Side.LEFT in right.get_river_ends():
                connected_side = Side.RIGHT
            if side == Side.TOP and top is not None and Side.BOTTOM in top.get_river_ends():
                connected_side = Side.TOP
            if side == Side.BOTTOM and bottom is not None and Side.TOP in bottom.get_river_ends():
                connected_side = Side.BOTTOM
            # ...and which end still flows into empty space.
            if side == Side.LEFT and left is None:
                unconnected_side = Side.LEFT
            if side == Side.RIGHT and right is None:
                unconnected_side = Side.RIGHT
            if side == Side.TOP and top is None:
                unconnected_side = Side.TOP
            if side == Side.BOTTOM and bottom is None:
                unconnected_side = Side.BOTTOM
            # A river end facing an occupied tile without a matching river end
            # is an illegal placement.
            if side == Side.LEFT and left is not None and Side.RIGHT not in left.get_river_ends():
                return False
            if side == Side.RIGHT and right is not None and Side.LEFT not in right.get_river_ends():
                return False
            if side == Side.TOP and top is not None and Side.BOTTOM not in top.get_river_ends():
                return False
            if side == Side.BOTTOM and bottom is not None and Side.TOP not in bottom.get_river_ends():
                return False
        if connected_side is None:
            return False
        # NOTE(review): assumes game_state is provided whenever river tiles are
        # checked — confirm callers never pass game_state=None here.
        if unconnected_side is not None and game_state.last_river_rotation is not Rotation.NONE and game_state.last_tile_action is not None:
            last_played_tile: Tile = game_state.last_tile_action.tile
            last_played_river_ends: Set[Side] = last_played_tile.get_river_ends()
            river_ends: Set[Side] = {connected_side, unconnected_side}
            rotation: Rotation = RiverRotationUtil.get_river_rotation_ends(previous_river_ends=last_played_river_ends,
                                                                           river_ends=river_ends)
            # Two consecutive turns in the same direction are forbidden.
            if rotation == game_state.last_river_rotation:
                return False
        return True

    @classmethod
    def fits(cls, center: Tile, top: Tile = None, right: Tile = None, bottom: Tile = None, left: Tile = None,
             game_state: CarcassonneGameState = None) -> bool:
        """A tile fits when it touches at least one neighbour and every feature type matches."""
        if top is None and right is None and bottom is None and left is None:
            return False
        return cls.grass_fits(center, top, right, bottom, left) \
            and cls.cities_fit(center, top, right, bottom, left) \
            and cls.roads_fit(center, top, right, bottom, left) \
            and cls.rivers_fit(center, top, right, bottom, left, game_state)
|
from unittest import TestCase
import unittest
import sys
sys.path.append('../')
from leetCodeUtil import TreeNode
from max_depth_bin_tree import Solution
class TestSolution(TestCase):
    """Unit tests for Solution.maxDepth."""

    def test_maxDepthBinTreeCase1(self):
        """The tree [3,9,20,null,null,15,7] has depth 3.

            3
           / \
          9  20
            /  \
           15   7
        """
        solver = Solution()
        root = TreeNode(3)
        root.left = TreeNode(9)
        root.right = TreeNode(20)
        root.right.left = TreeNode(15)
        root.right.right = TreeNode(7)
        self.assertEqual(solver.maxDepth(root), 3)
if __name__ == '__main__':
    unittest.main()
from django.test import Client
def test_health(client: Client):
    """The /health endpoint responds with HTTP 200."""
    response = client.get("/health")
    assert response.status_code == 200
|
import os
def find_lgit_dir():
    """Walk up from the current directory looking for a '.lgit' entry.

    Changes the process working directory while ascending (preserving the
    original side effect) and returns the directory containing '.lgit', or
    None when the filesystem root is reached without finding one.

    Fixed: the original only terminated at the literal path '/home', so any
    search that passed '/' looped forever (os.path.dirname('/') == '/').
    """
    path = os.getcwd()
    while '.lgit' not in os.listdir(path):
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding '.lgit'.
            return None
        path = parent
        os.chdir(path)
    return path
print(find_lgit_dir())
|
#%%
from calculateAngle1Servo import calculateServoAngle
from visualizer import visualizeArms
import math
#%%
def plateao(X_angle, Y_angle, HightPointY=3, visualize=False, servoArmLength = 7,distanceFromCentre = 9):
    """Solve the three servo angles that tilt the platform by (X_angle, Y_angle).

    Iteratively nudges the platform centre height until all three servo angles
    become solvable (> 0), or gives up and returns -1 for each servo when the
    height adjustments start oscillating.
    NOTE(review): angle units and axis conventions are not visible here —
    confirm against calculateServoAngle.
    """
    middleHightPoint = servoArmLength /2
    # Sentinel returned for all three servos when no solution can be found.
    unresolvedServosAngle = -1
    # Step by which the platform centre height is nudged per iteration.
    hightAutoStepSize = 0.1
    if (HightPointY == 0):
        HightPointY = middleHightPoint
    #working with a mirror image
    X_angle /=2
    Y_angle /=2
    # Project the requested tilt onto the three servo directions (120 degrees apart).
    servo1Desired = X_angle
    servo2Desired = -X_angle * 1/2 + -Y_angle * (math.sqrt(3)/2)
    servo3Desired = -X_angle * 1/2 + Y_angle * (math.sqrt(3)/2)
    print(servo1Desired, servo2Desired, servo3Desired)
    servo1, servo2, servo3 = 0,0,0
    def calculateServos():
        # Solve each servo independently for the current centre height.
        servo1 = calculateServoAngle(desiredAngle = servo1Desired, HightPointY = HightPointY, ServoPointX = distanceFromCentre, servoArmLength = servoArmLength)
        servo2 = calculateServoAngle(desiredAngle = servo2Desired, HightPointY = HightPointY, ServoPointX = distanceFromCentre, servoArmLength = servoArmLength)
        servo3 = calculateServoAngle(desiredAngle = servo3Desired, HightPointY = HightPointY, ServoPointX = distanceFromCentre, servoArmLength = servoArmLength)
        return servo1, servo2, servo3
    def changeMiddleHightPoint(desiredAngle,HightPointY, madeNegativeChange, madePositiveChange ):
        # If the desired angle is too far positive, the centre point must move down.
        if(desiredAngle>0):
            HightPointY -= hightAutoStepSize
            madeNegativeChange = True
        else:
            HightPointY += hightAutoStepSize
            madePositiveChange = True
        return HightPointY, madePositiveChange ,madeNegativeChange
    # Track the direction of the last three height adjustments so oscillation
    # (up/down/up or down/up/down) can be detected and the search abandoned.
    madePositiveChange, madeNegativeChange = False, False
    prevMadePositiveChange, prevMadeNegativeChange = False, False
    prevPrevMadePositiveChange, prevPrevMadeNegativeChange = False, False
    iterations = 0
    while (servo1 <= 0 or servo2 <= 0 or servo3 <= 0):
        servo1, servo2, servo3 = calculateServos()
        iterations+=1
        # Shift the change-direction history one step back.
        prevPrevMadePositiveChange, prevPrevMadeNegativeChange = prevMadePositiveChange, prevMadeNegativeChange
        prevMadePositiveChange, prevMadeNegativeChange = madePositiveChange, madeNegativeChange
        madePositiveChange, madeNegativeChange = False, False
        if (servo1 < 0 ):
            #print('servo 1 unsolved')
            HightPointY, madePositiveChange ,madeNegativeChange = changeMiddleHightPoint(servo1Desired, HightPointY, madeNegativeChange, madePositiveChange )
        if (servo2 < 0 ):
            #print('servo 2 unsolved')
            HightPointY, madePositiveChange ,madeNegativeChange = changeMiddleHightPoint(servo2Desired, HightPointY, madeNegativeChange, madePositiveChange )
        if (servo3 < 0 ):
            #print('servo 3 unsolved')
            HightPointY, madePositiveChange ,madeNegativeChange = changeMiddleHightPoint(servo3Desired, HightPointY, madeNegativeChange, madePositiveChange )
        #print(madePositiveChange, madeNegativeChange, '-', prevMadePositiveChange, prevMadeNegativeChange, '-', prevPrevMadePositiveChange, prevPrevMadeNegativeChange, )
        # Give up when adjustments go both ways in one pass, or alternate
        # direction over three consecutive passes.
        if((madePositiveChange and madeNegativeChange)or (madePositiveChange and prevMadeNegativeChange and prevPrevMadePositiveChange) or (madeNegativeChange and prevMadePositiveChange and prevPrevMadeNegativeChange)):
            print('cannot resolve angle')
            servo1, servo2, servo3 = unresolvedServosAngle, unresolvedServosAngle, unresolvedServosAngle
            break
    if(visualize): visualizeArms(servo1, servo2, servo3, armLength = servoArmLength, servoToCentre = distanceFromCentre)
    print('iterations: ', iterations)
    return (servo1, servo2, servo3)
# settings with distance from centre to 12
plateao(30,30 , HightPointY=0,visualize = True)
#%%
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
from opentelemetry.instrumentation.utils import http_status_to_canonical_code
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCanonicalCode
class TestUtils(TestBase):
    """Tests for the HTTP-status to canonical-code mapping helper."""

    def test_http_status_to_canonical_code(self):
        # (input HTTP status, expected canonical code) pairs covering every
        # mapped bucket plus out-of-range values.
        cases = (
            (HTTPStatus.OK, StatusCanonicalCode.OK),
            (HTTPStatus.ACCEPTED, StatusCanonicalCode.OK),
            (HTTPStatus.IM_USED, StatusCanonicalCode.OK),
            (HTTPStatus.MULTIPLE_CHOICES, StatusCanonicalCode.OK),
            (HTTPStatus.BAD_REQUEST, StatusCanonicalCode.INVALID_ARGUMENT),
            (HTTPStatus.UNAUTHORIZED, StatusCanonicalCode.UNAUTHENTICATED),
            (HTTPStatus.FORBIDDEN, StatusCanonicalCode.PERMISSION_DENIED),
            (HTTPStatus.NOT_FOUND, StatusCanonicalCode.NOT_FOUND),
            (
                HTTPStatus.UNPROCESSABLE_ENTITY,
                StatusCanonicalCode.INVALID_ARGUMENT,
            ),
            (
                HTTPStatus.TOO_MANY_REQUESTS,
                StatusCanonicalCode.RESOURCE_EXHAUSTED,
            ),
            (HTTPStatus.NOT_IMPLEMENTED, StatusCanonicalCode.UNIMPLEMENTED),
            (HTTPStatus.SERVICE_UNAVAILABLE, StatusCanonicalCode.UNAVAILABLE),
            (
                HTTPStatus.GATEWAY_TIMEOUT,
                StatusCanonicalCode.DEADLINE_EXCEEDED,
            ),
            (
                HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,
                StatusCanonicalCode.INTERNAL,
            ),
            (600, StatusCanonicalCode.UNKNOWN),
            (99, StatusCanonicalCode.UNKNOWN),
        )
        for status_code, expected in cases:
            with self.subTest(status_code=status_code):
                actual = http_status_to_canonical_code(int(status_code))
                self.assertEqual(actual, expected, status_code)
import os
from rest_framework import serializers
from kratos.apps.log.models import Log
from django.conf import settings
class LogInfoSerializer(serializers.ModelSerializer):
    """Log serializer that also exposes the on-disk log path and its contents."""
    path = serializers.SerializerMethodField()
    content = serializers.SerializerMethodField()

    def get_path(self, instance):
        """Absolute path of the pipeline log file for this record."""
        return os.path.join(settings.BASE_DIR, 'logs/pipeline', str(instance.id) + '.log')

    def get_content(self, instance):
        """Log file contents as a list of lines; empty string when the file is missing."""
        filename = self.get_path(instance)
        if not os.path.isfile(filename):
            print('File does not exist.')
            return ""
        with open(filename) as handle:
            return handle.read().splitlines()

    class Meta:
        model = Log
        fields = ('id', 'pipeline', 'envs', 'status', 'taskno', 'duration', 'path', 'content', 'created_at', 'updated_at')
class LogSerializer(serializers.ModelSerializer):
    """Lightweight log serializer exposing the on-disk log path (no contents)."""
    path = serializers.SerializerMethodField()

    def get_path(self, instance):
        """Absolute path of the pipeline log file for this record."""
        log_name = str(instance.id) + '.log'
        return os.path.join(settings.BASE_DIR, 'logs/pipeline', log_name)

    class Meta:
        model = Log
        fields = ('id', 'pipeline', 'envs', 'status', 'taskno', 'duration', 'path', 'created_at', 'updated_at')
# Read the number of heights, then one integer per line.
n=int(input())
s=[]
"""
for i in range(n):
s.append(int(input()))
"""
s=[int(input()) for _ in range (n)]
# Process from the last input backwards so each height is forced strictly
# below the (already fixed) one before it, counting every unit decrement.
s.reverse()
before=s[0]
cnt=0
for i in range(1,n):
    # NOTE(review): heights can go negative here — confirm the problem
    # statement permits that.
    while s[i]>=before:
        s[i]=s[i]-1
        cnt+=1
    before=s[i]
print(cnt)
"""
for i in range(n-1,0,-1):#역으로 갈떄는 range를 만들어 준다 이때 마지막 부분은 step에 관한것
if s[i]<=s[i-1]:
result+=(s[i-1]-s[i]+1)
s[i-1]=s[i]-1
"""
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
import egg.core as core
class LeNet(nn.Module):
    """LeNet-style convolutional feature extractor producing 400-d embeddings."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 400)

    def forward(self, x):
        """Map (N, 1, 28, 28) images to (N, 400) feature vectors."""
        x = F.max_pool2d(F.leaky_relu(self.conv1(x)), 2, 2)
        x = F.max_pool2d(F.leaky_relu(self.conv2(x)), 2, 2)
        flat = x.view(-1, 4*4*50)
        return F.leaky_relu(self.fc1(flat))
class Sender(nn.Module):
    """Agent encoding an image into log-probabilities over the message vocabulary."""

    def __init__(self, vocab_size):
        super(Sender, self).__init__()
        self.vision = LeNet()
        self.fc = nn.Linear(400, vocab_size)

    def forward(self, x):
        """Return log-softmax message logits for a batch of images."""
        features = self.vision(x)
        return F.log_softmax(self.fc(features), dim=1)
class Receiver(nn.Module):
    """Agent combining the received message with its own view of the image."""

    def __init__(self, vocab_size, n_classes):
        super(Receiver, self).__init__()
        self.message_inp = core.RelaxedEmbedding(vocab_size, 400)
        self.image_inp = LeNet()
        self.fc = nn.Linear(800, n_classes)

    def forward(self, message, image):
        """Return class log-probabilities given a message and an image."""
        joint = torch.cat([self.message_inp(message), self.image_inp(image)], dim=1)
        return torch.log_softmax(self.fc(joint), dim=1)
|
import sys
import signal
import argparse
from multiprocessing import Process
from front.console_GUI import ConsoleGUI
from back.log_pipeline.log_reader import LogReader
# in order to handle ctrl-c as try - except doesn't work well with multiprocessing
def signal_handle(_signal, frame):
    # Exit this process cleanly when SIGINT (ctrl-c) is delivered; a plain
    # try/except does not work well across multiprocessing children.
    sys.exit()
if __name__ == '__main__':
    # initiating our ctrl-c catcher
    signal.signal(signal.SIGINT, signal_handle)
    # argument parser that get access to the argument given when running the program in the shell
    parser = argparse.ArgumentParser(description='Launch the GUI of the HTTP LOG MONITORING app')
    parser.add_argument('--f', metavar='file', type=str, default='back/logs/sample_csv.txt', required=False,
                        help='location of the logfile the app will run on')
    parser.add_argument('--thd', metavar='alert_threshold', type=int, default=10, required=False,
                        help='threshold upon which alerts are triggered')
    parser.add_argument('--wd', metavar='alert_window', type=int, default=120, required=False,
                        help='time windows over which stats are computed for the alerting system')
    parser.add_argument('--tf', metavar='timeframe', type=int, default=10, required=False,
                        help='timeframe over which all the stats are computed')
    # args given by the user
    args = parser.parse_args()
    # initiating the front and back processes
    log_reader = LogReader(args.tf, args.wd, args.thd, args.f)
    console_gui = ConsoleGUI(args.thd, args.wd, args.tf)
    # start the multiprocessing pool
    # NOTE(review): neither process is join()ed — confirm the parent is meant
    # to return immediately after spawning both.
    back_process = Process(target=log_reader.start_reading)
    back_process.start()
    front_process = Process(target=console_gui.run)
    front_process.start()
|
#import sys
#rootpath = 'C:\\VENLAB data\\ClothoidTrackDevelopment'
#sys.path.append(rootpath)
import viz
import vizmat
import clothoid_curve as cc
import numpy as np
import matplotlib.pyplot as plt
import StraightMaker as sm
#viz.setMultiSample(64)
viz.go()
viz.MainView.setPosition([-20,150,15])
viz.MainView.setEuler([0,90,0])
def setStage():
    """Creates grass textured groundplane"""
    ###should set this hope so it builds new tiles if you are reaching the boundary.
    fName = 'C:/VENLAB data/shared_modules/textures/strong_edge.bmp'
    #fName = 'strong_edge.bmp'
    # add groundplane (wrap mode)
    groundtexture = viz.addTexture(fName)
    # Repeat the texture in both directions so a single quad can tile the ground.
    groundtexture.wrap(viz.WRAP_T, viz.REPEAT)
    groundtexture.wrap(viz.WRAP_S, viz.REPEAT)
    groundtexture.anisotropy(16)
    groundplane = viz.addTexQuad() ##ground for right bends (tight)
    tilesize = 500
    #planesize = tilesize/5
    planesize = 40
    groundplane.setScale(tilesize, tilesize, tilesize)
    # Rotate the quad so it lies flat on the ground.
    groundplane.setEuler((0, 90, 0),viz.REL_LOCAL)
    #groundplane.setPosition((0,0,1000),viz.REL_LOCAL) #move forward 1km so don't need to render as much.
    # Scale the texture matrix so the texture repeats `planesize` times per quad.
    matrix = vizmat.Transform()
    matrix.setScale( planesize, planesize, planesize )
    groundplane.texmat( matrix )
    groundplane.texture(groundtexture)
    groundplane.visible(1)
    viz.clearcolor(viz.SKYBLUE)
    return groundplane
ABOVEGROUND = .01 #distance above ground
class vizClothoid():
    """Semi-transparent clothoid road bend rendered as a vizard primitive.

    Builds midline, inside-edge and outside-edge point arrays from clothoid
    geometry and draws a primitive (QUAD_STRIP by default) between the edges.
    """
    def __init__(
        self, start_pos, t, speed, yawrate, transition, x_dir = 1, z_dir = 1,
        colour = viz.WHITE, primitive = viz.QUAD_STRIP, rw = 3.0, primitive_width = 1.5, texturefile = None
    ):
        """ returns a semi-transparent bend of given roadwidth and clothoid geometry. """
        print ("Creating a Clothoid Bend")
        self.StartPos = start_pos       # [x, z] world start position
        self.TimeStep = t               # array of time samples for the curve
        self.TotalTime = t[-1]
        self.Speed = speed              # travel speed used by the clothoid solver
        self.Yawrate = yawrate          # peak yaw rate
        self.Transition = transition    # clothoid transition duration
        self.RoadWidth = rw
        if self.RoadWidth == 0:
            self.HalfRoadWidth = 0
        else:
            self.HalfRoadWidth = rw/2.0
        self.xDirection = x_dir
        self.zDirection = z_dir
        self.Colour = colour
        self.Primitive = primitive
        self.PrimitiveWidth = primitive_width
        # Build the geometry; returns the viz layer plus the edge/bearing arrays.
        bendlist = self.BendMaker(t = self.TimeStep, yawrate = self.Yawrate, transition_duration = self.Transition, rw = self.RoadWidth, speed = self.Speed, sp = self.StartPos, x_dir = self.xDirection)
        self.Bend, self.Midline, self.InsideEdge, self.OutsideEdge, self.Bearing = bendlist
        self.Bend.visible(viz.ON)
        # World position where the bend ends (used to chain the next section).
        self.RoadEnd = self.Midline[-1,:]

    def AddTexture(self):
        """function to add texture to the viz.primitive (not implemented).

        Fixed: this method was defined twice with identical bodies; the
        duplicate definition has been removed.
        """
        pass

    def BendMaker(self, t, yawrate, transition_duration, rw, speed, sp, x_dir):
        """Build the bend layer plus midline/inside/outside/bearing arrays."""
        x, y, bearing = cc.clothoid_curve(t, speed, yawrate, transition_duration)
        if x_dir < 0:
            # Mirror the bearing when the bend curves the other way.
            bearing[:] = [(2*(np.pi) - b) for b in bearing[:]]
        midline = np.array([((x*x_dir) + sp[0]),(y + sp[1])]).T
        outside = np.array(cc.add_edge((x*x_dir), y, (rw/2), sp)).T
        inside = np.array(cc.add_edge((x*x_dir), y, -(rw/2), sp)).T
        viz.startlayer(self.Primitive)
        # Alternate inside/outside vertices so the QUAD_STRIP spans the road.
        for ins, out in zip(inside, outside):
            viz.vertex(ins[0], ABOVEGROUND, ins[1])
            viz.vertexcolor(self.Colour)
            viz.vertex(out[0], ABOVEGROUND, out[1])
            viz.vertexcolor(self.Colour)
        Bend = viz.endlayer()
        return ([Bend, midline, inside, outside, bearing])

    def ToggleVisibility(self, visible = viz.ON):
        """switches bends off or on"""
        # NOTE(review): self.MidlineEdge is never assigned in this class, and
        # InsideEdge/OutsideEdge are numpy coordinate arrays with no .visible()
        # method — confirm this method is ever called for clothoid bends.
        if self.RoadWidth == 0:
            self.MidlineEdge.visible(visible)
        else:
            self.InsideEdge.visible(visible)
            self.OutsideEdge.visible(visible)

    def setAlpha(self, alpha = 1):
        """ set road opacity """
        self.Bend.alpha(alpha)
# Build the demo track: straight section -> clothoid bend -> straight section.
setStage()
#### MAKE FIRST STRAIGHT OBJECT ####
L = 16#2sec.
Straight = sm.vizStraight(
    startpos = [0,0], primitive_width=1.5, road_width = 0, length = L, colour = viz.RED
)
Straight.ToggleVisibility(viz.ON)
Straight.setAlpha(.5)
## make clothoid
sp = Straight.RoadEnd
v = 8
tr = 4 #seconds
cornering = 4 # seconds
total = 2*tr + cornering #12 s
time_step = np.linspace(0, total, 1000) # ~1 ms steps
#yawrates = np.radians(np.linspace(6, 20, 3)) # 3 conditions of constant curvature yawrates
yr = np.radians(20)
clothoid = vizClothoid(start_pos = sp, t = time_step, speed = v, yawrate = yr, transition = tr, x_dir = -1
)
clothoid.setAlpha(alpha = .5)
#print('bearing', clothoid.Bearing[-1])
#print('road end x', clothoid.RoadEnd[0])
#print('road end z', clothoid.RoadEnd[1])
#### MAKE SECOND STRAIGHT OBJECT ####
## must match direction to clothoid.bearing[-1]
SB = sm.vizStraightBearing(bearing = clothoid.Bearing[-1], startpos = clothoid.RoadEnd, primitive_width=1.5, road_width = 3, length = L, colour = viz.RED)
SB.ToggleVisibility(viz.ON)
SB.setAlpha(.5)
"""djangoAPI URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import url, include
from rest_framework import routers
from djangoAPI.api import views
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
router = routers.DefaultRouter()
router.register(r'movies', views.MovieViewSet, base_name="movie")
router.register(r'users', views.UserViewSet, base_name='user')
router.register(r'tasks', views.TaskView, base_name='task')
# Dotted-quad IPv4 pattern (octets 0-255 without redundant leading zeros).
# Fix: the final octet group is now closed inside the pattern itself.
# Previously the pattern left one '(?:' open and relied on a compensating
# extra ')' at the re_path below — it worked only by accident and broke any
# other use of the pattern.
ipv4pattern = '(?:(?:0|1[\d]{0,2}|2(?:[0-4]\d?|5[0-5]?|[6-9])?|[3-9]\d?)\.){3}(?:0|1[\d]{0,2}|2(?:[0-4]\d?|5[0-5]?|[6-9])?|[3-9]\d?)'
# Textual IPv6 forms: full, '::'-compressed, link-local with zone index, and
# IPv4-mapped endings.
ipv6pattern = '(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))'
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    path('api-login/', views.CustomLoginJWTAuthToken.as_view(), name='login'),
    url(r'^api-refresh/$', TokenRefreshView.as_view(), name='refresh'), # TODO: Create Custom Refresh View for wrapping token in token JSON key
    # url(r'^api/token/$', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api-task/', include('djangoAPI.api.urls', 'task')),
    path('api-user/', views.ViewUser.as_view(), name='user'),
    path('admin/', admin.site.urls),
    path('ip/', views.ipView, name='ip'), # Get IP info from requestor
    # Fix: dropped the extra ')' that previously balanced the unclosed group
    # in ipv4pattern; the composed regex is unchanged.
    re_path(rf'^ip/(?P<query_ip>{ipv4pattern})/$', views.searchIP, name='search_ip'), # Get IPv4 info from query_ip
    re_path(rf'^ip/(?P<query_ip>{ipv6pattern})/$', views.searchIP, name='search_ip'), # Get IPv6 info from query_ip
    path('api-signup/', views.CustomSignUpJWTAuthToken.as_view(), name='signup'),
    # path('api-login/', views.CustomAuthToken.as_view(), name='login'),
    path('api-logout/', views.LogOutView.as_view(), name='logout'),
    url(r'^', include(router.urls)) # Default view
    # path('api-login/', include('rest_framework.urls', namespace='rest_framework')), # Admin Panel (Login/Logout)
]
|
import pandas as pd
from pathlib import Path
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,OneHotEncoder
import sqlalchemy
# import h5py
import hvplot.pandas
# import bokeh
from holoviews.plotting.links import RangeToolLink
from datetime import date
import matplotlib.pyplot as plt
import mplfinance as mpf
# Database reflection: connect and list the tables in the project database.
# NOTE(review): `hf` (the connection-string provider) is not imported in this
# file's visible imports — presumably a project helper module; confirm.
engine = sqlalchemy.create_engine(hf.db_connection_string)
inspector = sqlalchemy.inspect(engine)
table_names = inspector.get_table_names()
def dataframe(dt_start, dt_end, df):
    """Plot a candlestick chart of *df* and train/evaluate a small dense
    binary classifier on its 'Trade Signal' column.

    Parameters
    ----------
    dt_start, dt_end : plot window bounds; currently only referenced by the
        commented-out slicing below (the full frame is used).
    df : OHLCV dataframe with a 'Trade Signal' target column (assumed binary
        — TODO confirm, since the output layer is a single sigmoid unit).

    Fixes: removed the discarded no-op `df.tail()` call and the unused
    `plot_width` / `plot_date` locals.

    NOTE(review): this function plots, builds, fits and evaluates a model as
    one unit and returns nothing; consider splitting it.
    """
    plot_start = dt_start
    plot_end = dt_end
    # plot_df = df.loc[plot_start:plot_end,:].reset_index()
    # plot_df = indicators_df.loc[plot_date,:].reset_index()
    # plot_df = indicators_df.iloc[-3000:,:].reset_index()
    # Rename to the lower-case OHLC column names mplfinance expects.
    df.rename(
        columns={
            'Datetime': 'date',
            'Open': 'open',
            'High': 'high',
            'Low': 'low',
            'Close': 'close',
            'Volume': 'volume',
        },
        inplace=True
    )
    mpf.plot(df, type="candle")
    # Class balance of the target.
    print(df['Trade Signal'].value_counts())
    X = df.drop(columns='Trade Signal')
    y = df['Trade Signal']
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
    # Standardise features with statistics fit on the training split only.
    scaler = StandardScaler()
    X_scaler = scaler.fit(X_train)
    X_train_scaled = X_scaler.transform(X_train)
    X_test_scaled = X_scaler.transform(X_test)
    # Define the the number of inputs (features) to the model
    number_input_features = len(list(X.columns))
    # Define the number of neurons in the output layer
    number_output_neurons = 1
    # Hidden-layer sizes: halve towards the single output neuron.
    hidden_nodes_layer1 = int(round((number_input_features + number_output_neurons)/2, 0))
    hidden_nodes_layer2 = int(round((hidden_nodes_layer1 + number_output_neurons)/2, 0))
    # Create the Sequential model instance
    nn = Sequential()
    nn.add(Dense(units=hidden_nodes_layer1, activation="relu", input_dim=number_input_features))
    nn.add(Dense(units=hidden_nodes_layer2, activation="relu"))
    # Sigmoid output for binary classification.
    nn.add(Dense(units=number_output_neurons, activation="sigmoid"))
    print(nn.summary())
    nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    # Fit the model using 50 epochs and the training data
    model = nn.fit(X_train_scaled, y_train, epochs=50, verbose=0)
    # Evaluate loss and accuracy on the held-out test data
    model_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=True)
    print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
|
import sys

# Read t strings from stdin and print how many distinct characters appear in
# ALL of them (the size of the intersection of their character sets).
t = int(sys.stdin.readline())
gs = None  # running intersection; None until the first string is seen
for tix in range(t):
    s = sys.stdin.readline()[:-1]  # strip the trailing newline
    ss = set(s)  # idiomatic: replaces the manual add-loop
    if gs is None:
        gs = ss
    else:
        gs &= ss
# Parenthesised so this line is valid in both Python 2 and Python 3
# (the original bare `print len(gs)` statement was Python-2-only).
print(len(gs))
|
from apitracker_python_sdk.patch import Patcher
|
from flask import Flask, render_template, request
import json
import csv
import neuronales_netz
app = Flask(__name__)
# Module-level quiz state shared across requests (single-user assumption).
counterFragen = 0  # index of the current question (0-6)
antworten = []     # numeric answers collected so far
nameLocal = ""     # participant name captured with the first question
def write_Json(antwortenLocal, name):
    """Append one result row to data.csv: "<name>,<a1>,...,<aN>\\n".

    (Despite the name, the file written is CSV, not JSON.)

    Fixes: the file handle is now closed via a context manager even if a
    write fails, and an empty answer list still terminates the row with a
    newline (the original index-counting loop skipped it).
    """
    print("Write Json...")
    print(antwortenLocal)
    with open("data.csv", "a") as out_file:
        out_file.write(str(name) + ",")
        out_file.write(",".join(str(i) for i in antwortenLocal) + "\n")
def read_Json():
    """Load all complete rows from data.csv.

    Returns a list of (name, [seven int answers]) tuples; rows that do not
    have exactly a name plus seven values are skipped.
    """
    rows = []
    with open('data.csv', newline='') as csvfile:
        for record in csv.reader(csvfile, delimiter=',', quotechar='|'):
            if len(record) != 8:
                continue  # incomplete / malformed row
            rows.append((record[0], [int(value) for value in record[1:8]]))
    return rows
@app.route("/", methods=['GET', 'POST'])
def index():
global counterFragen
global antworten
global nameLocal
fragen = ["welchen Schultyp besuchst du? ", "Zu welcher Altergruppe gehörst du?", "Ich brauche Hilfe in..."
, "Welcher Lerntyp bist du?", "Mein/e Hobbies sind eher...", "meine Stärken sind", "meine Schwächen sind"]
print(counterFragen)
fragenNummer = "Frage " + str(counterFragen + 1)
if 0 == 0:
test2 = """<p>Name <input type = "text" name = "name" /></p>"""
möglichkeiten = ["Gymnasium", "Realschule", "Hauptschule", "Gesammtschule"]
test = """<input type="checkbox" id="3" name="hello" value="3">
<label for="3">""" + möglichkeiten[3] + """</label><br><br>"""
print(request.form.getlist('hello'))
if "Bestätigen" in request.form:
form_data = request.form.get("name")
if request.form.get("Field1_name") != "":
test2 = ""
if form_data is not None:
test2 = ""
if counterFragen ==0:
nameLocal = form_data
print("Name = ", form_data)
print(form_data)
antwort = request.form.getlist('hello')
init = int(antwort[0])
antworten.append(init)
counterFragen = counterFragen + 1
if counterFragen == 1:
möglichkeiten = ["10-13", "14-16", "17-18"]
test = ""
if counterFragen == 2:
möglichkeiten = ["Deutsch", "Englisch", "Mathematik", "Französisch"]
test = """<input type="checkbox" id="3" name="hello" value="3">
<label for="3">""" + möglichkeiten[3] + """</label>"""
if counterFragen == 3:
möglichkeiten = ["durch Hören", "durch Lesen", "durch physisches Anfassen"]
test = ""
if counterFragen == 4:
möglichkeiten = ["Sport orientiert", "intelektuell orientiert", "künstlerisch", "Ich habe keine Hobbies"]
test = """<input type="checkbox" id="3" name="hello" value="3">
<label for="3">""" + möglichkeiten[3] + """</label>"""
if counterFragen == 5:
möglichkeiten = ["Ich kann schnell neues lernen", "Ich habe ein gutes Gedächtniss", "Ich kann gut zuhören",
"Ich bin organisiert"]
test = """<input type="checkbox" id="3" name="hello" value="3">
<label for="3">""" + möglichkeiten[3] + """</label>"""
if counterFragen == 6:
möglichkeiten = ["Ich kann mich schlecht/nicht konzentrieren", "Ich bin unorganisiert",
"Ich kann nicht gut zuhören", "Ich brauche viel Zeit beim Lernen"]
test = """<input type="checkbox" id="3" name="hello" value="3">
<label for="3">""" + möglichkeiten[3] + """</label>"""
fragenNummer = "Frage " + str(counterFragen + 1)
if counterFragen == 7:
counterFragen = 0
print("Name2 = ", form_data)
write_Json(antworten, str(nameLocal))
raw_result=read_Json()
result = []
for name, raw in raw_result:
result.append((name, neuronales_netz.check_match(antworten, raw)))
print(result)
return render_template("result.html", result=result)
return render_template("index.html", Tittel=fragenNummer, content=fragen[counterFragen],
möglichkeiten=möglichkeiten, test=test, test2=test2)
if __name__ == "__main__":
app.run()
|
# coding=utf-8
from django.db.models import Model
from django.db.models.fields import CharField, DateField, FloatField, DecimalField, BooleanField, IntegerField
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django.contrib.auth.models import User
from django.db.models.signals import pre_save
#pre_save.connect(calc_sr_bal)
#def calc_sr_bal(sender,instance):
# pass
class AbitRequest(Model):
    """An applicant's admission request: personal data, school certificate,
    privileges, and three external-test results for a chosen speciality."""
    # Personal data
    SEX_CHOICES = ((u'М',u'Мужской'),(u'Ж',u'Женский'))
    surname = CharField(u'Фамилия', max_length=50)
    name = CharField(u'Имя', max_length=50)
    father = CharField(u'Отчество', max_length=50)
    sex = CharField(u'Пол', max_length=1, choices=SEX_CHOICES)
    birth_date = DateField(u'Дата рождения')
    passport_ser = CharField(u'Серия паспорта', max_length=5)
    passport_num = CharField(u'Номер паспорта', max_length=10)
    passport_date = DateField(u'Дата выдачи')
    passport_org = CharField(u'Кем выдан', max_length=150)
    id_number = CharField(u'Идентификационный код', max_length=15)
    city = CharField(u'Город', max_length=20)
    address = CharField(u'Адрес', max_length=100)
    phone = CharField(u'Телефон', max_length=15)
    att_school = CharField(u'Учебное заведение', max_length=25)
    att_date = DateField(u'Дата выдачи аттестата')
    att_srbal = DecimalField(u'Средний балл аттестата', max_digits=4, decimal_places=1)
    # Privileges (benefit categories)
    privilege = BooleanField(u'Льготы',default=False)
    privilege_category = CharField(u'Льготная категория',max_length=100)
    # Application data
    code = CharField(u'Шифр заявки', max_length=7)
    speciality = ForeignKey('Speciality', verbose_name=u'Специальность')
    edform = ForeignKey('EducationalForm',verbose_name=u'Форма обучения')
    # Three test results, each with its certificate details.
    test1_subject = ForeignKey('TestSubject',related_name="+",verbose_name=u'Первый предмет')
    test1_cert_num = CharField(u'Номер сертификата', max_length=15)
    test1_cert_pin = CharField(u'Пин-код сертификата', max_length=4)
    test1_cert_year = CharField(u'Год получения сертификата', max_length=4)
    test1_value = DecimalField(u'Балл', max_digits=4, decimal_places=1)
    test2_subject = ForeignKey('TestSubject',related_name="+",verbose_name=u'Второй предмет')
    test2_cert_num = CharField(u'Номер сертификата', max_length=15)
    test2_cert_pin = CharField(u'Пин-код сертификата', max_length=4)
    test2_cert_year = CharField(u'Год получения сертификата', max_length=4)
    test2_value = DecimalField(u'Балл', max_digits=4, decimal_places=1)
    test3_subject = ForeignKey('TestSubject',related_name="+", verbose_name=u'Третий предмет')
    test3_cert_num = CharField(u'Номер сертификата', max_length=15)
    test3_cert_pin = CharField(u'Пин-код сертификата', max_length=4)
    test3_cert_year = CharField(u'Год получения сертификата', max_length=4)
    test3_value = DecimalField(u'Балл', max_digits=4, decimal_places=1)
    date = DateField(auto_now_add=True)
    creator = ForeignKey(User)

    @property
    def sum_bal(self):
        """Total score: certificate average plus all three test scores."""
        return self.att_srbal + self.test1_value + self.test2_value + self.test3_value

    def __unicode__(self):
        return u"%s %s %s" % (self.surname, self.name, self.father)
#class AbiturientAdmin(admin.ModelAdmin):
# list_display = ('surname', 'name', 'father')
class EducationalForm(Model):
    """Form of education (e.g. full-time / part-time)."""
    name = CharField(u'Название', max_length=20)

    def __unicode__(self):
        return self.name
class Speciality(Model):
    """Admission speciality with its budget quota and required test subjects."""
    code = CharField(u'Шифр специальности', max_length=15)
    name = CharField(u'Название', max_length=50)
    short_name = CharField(u'Короткое название', max_length=5)
    budget = IntegerField(u'Количество бюджетных мест')
    # Two fixed subjects plus a set of acceptable third subjects.
    subject1 = ForeignKey('TestSubject',related_name="+")
    subject2 = ForeignKey('TestSubject',related_name="+")
    subject3 = ManyToManyField('TestSubject',related_name="+")

    def __unicode__(self):
        return self.name
class TestSubject(Model):
    """A subject an applicant can be externally tested in."""
    name = CharField(max_length=30)

    def __unicode__(self):
        return self.name
import numpy as np
class SGD(object):
    """Stochastic gradient descent with classical momentum.

    Velocity update: v <- momentum * v - lr * grad; param <- param + v.
    With momentum=0.0 this reduces to plain SGD.
    """

    def __init__(self, params, lr=0.01, momentum=0.0):
        self.lr = lr
        self.momentum = momentum
        # One zero-initialised velocity buffer per parameter.
        self.v = {name: np.zeros(p.shape) for name, p in params.items()}

    def update(self, params, grad):
        """Apply one momentum-SGD step in place and return *params*."""
        for name in params:
            self.v[name] = self.momentum * self.v[name] - self.lr * grad[name]
            params[name] += self.v[name]
        return params
class AdaGrad(object):
    """AdaGrad: per-parameter step sizes scaled by accumulated squared
    gradients."""

    def __init__(self, params, lr=0.001, eps=1e-08):
        self.lr = lr
        # Accumulators start at eps (rather than adding eps at division
        # time) so the denominator below can never be zero.
        self.h = {name: np.zeros(p.shape) + eps for name, p in params.items()}

    def update(self, params, grad):
        """Apply one AdaGrad step in place and return *params*."""
        for name in params:
            g = grad[name]
            self.h[name] += g * g
            params[name] -= self.lr * g / np.sqrt(self.h[name])
        return params
class Adam(object):
    """Adam optimizer (Kingma & Ba, 2015), with correct bias correction.

    Fix: bias correction must divide by (1 - beta**t) for the current
    timestep t. The previous code divided by the constants (1 - beta1) and
    (1 - beta2) on every step, which is only correct at t=1 and massively
    over-corrects afterwards. A step counter is now maintained; the first
    update is numerically identical to the old behavior.
    """

    def __init__(self, params, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-08):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.eps = eps
        self.t = 0  # timestep, incremented once per update() call
        self.m = {}
        self.v = {}
        for k, v in params.items():
            self.m[k] = np.zeros(v.shape)  # first-moment estimate
            self.v[k] = np.zeros(v.shape)  # second-moment estimate

    def update(self, params, grad):
        """Apply one bias-corrected Adam step in place and return *params*."""
        self.t += 1
        for key in params.keys():
            self.m[key] = self.beta1 * self.m[key] + (1 - self.beta1) * grad[key]
            self.v[key] = self.beta2 * self.v[key] + (1 - self.beta2) * grad[key] * grad[key]
            # Bias-corrected moment estimates (Algorithm 1 in the paper).
            m_t = self.m[key] / (1 - self.beta1 ** self.t)
            v_t = self.v[key] / (1 - self.beta2 ** self.t)
            params[key] -= self.lr * m_t / (np.sqrt(v_t) + self.eps)
        return params
|
class Constants:
    """
    Constants class stores all of the constants required for Liquid connector module
    """
    # Rest API endpoints
    BASE_URL = 'https://api.liquid.com'
    # GET
    PRODUCTS_URI = '/products'
    ACCOUNTS_BALANCE_URI = '/accounts/balance'
    CRYPTO_ACCOUNTS_URI = '/crypto_accounts'
    FIAT_ACCOUNTS_URI = '/fiat_accounts'
    LIST_ORDER_URI = '/orders/{exchange_order_id}'
    LIST_ORDERS_URI = '/orders?with_details=1'
    TRADING_RULES_URI = '/currencies'
    # POST
    ORDER_CREATION_URI = '/orders'
    # PUT
    CANCEL_ORDER_URI = '/orders/{exchange_order_id}/cancel'
    GET_EXCHANGE_MARKETS_URL = BASE_URL + PRODUCTS_URI
    GET_SNAPSHOT_URL = BASE_URL + '/products/{id}/price_levels?full={full}'
    # Web socket endpoints
    # Fix: the original name is misspelled ("BAEE"); it is kept so existing
    # callers keep working, with a correctly spelled alias added below.
    BAEE_WS_URL = 'wss://tap.liquid.com/app/LiquidTapClient'
    BASE_WS_URL = BAEE_WS_URL  # prefer this name in new code
    WS_REQUEST_PATH = '/realtime'
    WS_ORDER_BOOK_DIFF_SUBSCRIPTION = 'price_ladders_cash_{currency_pair_code}_{side}'
    WS_USER_TRADES_SUBSCRIPTION = 'user_account_{funding_currency}_trades'
    WS_USER_EXECUTIONS_SUBSCRIPTION = 'user_executions_cash_{currency_pair_code}'
    WS_USER_ACCOUNTS_SUBSCRIPTION = 'user_account_{quoted_currency}_orders'
    # Web socket events
    WS_AUTH_REQUEST_EVENT = 'quoine:auth_request'
    WS_PUSHER_SUBSCRIBE_EVENT = 'pusher:subscribe'
    # Timeouts (seconds)
    MESSAGE_TIMEOUT = 90.0
    PING_TIMEOUT = 10.0
    API_CALL_TIMEOUT = 10.0
    UPDATE_ORDERS_INTERVAL = 10.0
    # Others
    SIDE_BID = 'buy'
    SIDE_ASK = 'sell'
    DEFAULT_ASSETS_PRECISION = 2
    DEFAULT_QUOTING_PRECISION = 8
|
#012: Overlap Graphs
#http://rosalind.info/problems/grph/
#Given: A collection of DNA strings in FASTA format having total length at most 10 kbp.
# Sample data (overwritten below when the input file is parsed).
titles = ['Rosalind_0498', 'Rosalind_2391', 'Rosalind_2323', 'Rosalind_0442', 'Rosalind_5013']
sequences = [ 'AAATAAA', 'AAATTTT', 'TTTTCCC', 'AAATCCC', 'GGGTGGG']
#If parsing from file:
import bio  # project-local FASTA helper, not a stdlib module
f = open('rosalind_grph.txt', 'r')
contents = f.read()
f.close()
titles, sequences = bio.fastaParse(contents)
titles = [t[1:] for t in titles]  # drop the leading '>' from FASTA headers
#Return: The adjacency list corresponding to O3. You may return edges in any order.
import itertools
def getAdjacencyList(titles, sequences, k):
    """Return the O_k overlap graph as a list of (title, title) edges.

    There is a directed edge (s, t) when the length-k suffix of sequence s
    equals a length-k prefix of sequence t (distinct entries only).

    Fix: the overlap length now honours the *k* parameter — the original
    hard-coded a suffix length of 3 and silently ignored k.
    """
    def connected(seq_label1, seq_label2):
        # Compare the suffix of the first sequence with the prefix of the second.
        seq1 = seq_label1[1]
        seq2 = seq_label2[1]
        return seq2.startswith(seq1[-k:])
    seq_labelled = zip(titles, sequences)
    # All ordered pairs of distinct entries.
    edge_attempts = list(itertools.permutations(seq_labelled, 2))
    overlaps = [e for e in edge_attempts if connected(e[0], e[1])]
    overlap_labels = [(o[0][0], o[1][0]) for o in overlaps]
    return overlap_labels
# Build the O3 overlap graph and print one "title1 title2" edge per line.
adj_list = getAdjacencyList(titles, sequences, 3)
for a in adj_list:
    # Python 2 print statement — this file predates Python 3.
    print a[0], a[1]
#If printing to file:
#w = open('rosalind_grph_output.txt', 'w')
#for a in adj_list:
#w.write(a[0] + ' ' + a[1] + '\n')
#w.close()
|
# 623. K Edit Distance
'''
Given a set of strings which just has lower case letters and a target string, output all the strings for each the edit distance with the target no greater than k.
You have the following 3 operations permitted on a word:
Insert a character
Delete a character
Replace a character
Example
Example 1:
Given words = `["abc", "abd", "abcd", "adc"]` and target = `"ac"`, k = `1`
Return `["abc", "adc"]`
Input:
["abc", "abd", "abcd", "adc"]
"ac"
1
Output:
["abc","adc"]
Explanation:
"abc" remove "b"
"adc" remove "d"
Example 2:
Input:
["acc","abcd","ade","abbcd"]
"abc"
2
Output:
["acc","abcd","ade","abbcd"]
Explanation:
"acc" turns "c" into "b"
"abcd" remove "d"
"ade" turns "d" into "b" turns "e" into "c"
"abbcd" gets rid of "b" and "d"
'''
# Basic idea:
# DP + Trie + DFS
# use T as the row of the 2d transfer matrix T[i][j]
# newT is the next row of T
# this is the idea of a DP sliding array
# edge case: if words contains an empty string, it cannot be marked in the Trie
class TrieNode:
    """One trie node: `char_child` maps a character to its child node
    (created on demand); `word` holds the full word on terminal nodes
    ('' otherwise)."""

    def __init__(self):
        # Fix: `collections` is never imported at module level in this file,
        # so the original raised NameError at runtime; import locally.
        from collections import defaultdict
        self.word = ''
        self.char_child = defaultdict(TrieNode)
class Trie:
    """Prefix tree over a word set; terminal nodes carry the full word."""

    def __init__(self):
        self.root = TrieNode()

    def add(self, word):
        """Insert *word*, creating nodes on demand via the defaultdict."""
        cursor = self.root
        for ch in word:
            cursor = cursor.char_child[ch]
        cursor.word = word
class Solution:
    """
    @param words: a set of strings
    @param target: a target string
    @param k: An integer
    @return: output all the strings that meet the requirements
    """
    def kDistance(self, words, target, k):
        # write your code here
        # build trie
        trie = Trie()
        for word in words:
            trie.add(word)
        res = []
        # take care of edge case
        if '' in words and len(target)<=k:
            res.append('')
        # First DP row: edit distance from '' to each prefix of target.
        T = list(range(len(target)+1)) #first row of T
        self.dfs(trie.root, target, T, k, res)
        return res

    def dfs(self, node, target, T, k, res):
        # T is the edit-distance row for the word spelled along the path to
        # `node`: T[j] = distance(word, target[:j]).
        if node.word and T[len(target)]<=k: # T[len(target)] is the min distance between current node-word and target word
            res.append(node.word) # do not return as there maybe other word downwards the tree
        for char in node.char_child:
            # Standard Levenshtein row update for appending `char` to the word.
            newT = [0]*(len(target)+1)
            newT[0] = T[0] + 1
            for j in range(1, 1+len(target)):
                if target[j-1] == char:
                    newT[j] = T[j-1]
                else:
                    newT[j] = min(T[j-1], T[j], newT[j-1])+1
            self.dfs(node.char_child[char], target, newT, k, res)
|
# -*- coding: utf-8 -*-
"""
This module contains test functions.
"""
import os
import time
from .processing import *
from .plotting import *
from nose.tools import assert_almost_equal
def test_run():
    """Smoke-test the Run class: construct one run and print its stats."""
    print("Testing Run class")
    run = Run("Wake-1.0", 20)  # section "Wake-1.0", run number 20
    print(run.cp_per_rev)
    print(run.std_cp_per_rev)
    print(run.cp_conf_interval)
    print(run.mean_cp)
    print(run.unc_cp)
    print(run.exp_unc_cp)
    run.print_perf_stats()
    print("PASS")
def test_section():
    """Smoke-test constructing a Section."""
    print("Testing Section class")
    section = Section("Wake-1.0")
    print("PASS")

def test_batch_process_section():
    """Batch-process "Perf-1.0", then plot mean Cp against mean TSR from
    the produced CSV."""
    print("Testing batch_process_section")
    batch_process_section("Perf-1.0")
    df = pd.read_csv("Data/Processed/Perf-1.0.csv")
    print(df)
    plt.figure()
    plt.plot(df.mean_tsr, df.mean_cp)
    plt.show()
def test_perf_curve():
    """Smoke-test the PerfCurve class at U_infty = 0.6."""
    print("Testing PerfCurve class")
    pc = PerfCurve(0.6)
    pc.plotcp()
    print("PASS")

def test_wake_profile():
    """Smoke-test a horizontal WakeProfile and plot its mean velocity."""
    print("Testing WakeProfile class")
    wp = WakeProfile(0.6, 0.25, "horizontal")
    wp.plot("mean_u")
    print("PASS")

def test_wake_map():
    """Smoke-test WakeMap at two inflow speeds (0.4 and 1.2)."""
    print("Testing WakeMap class")
    wm = WakeMap(0.4)
    wm.plot_meancontquiv()
    wm2 = WakeMap(1.2)
    wm2.plot_meancontquiv()
    # wm.plot_diff(quantity="mean_w", U_infty_diff=0.6)
    # wm.plot_meancontquiv_diff(0.8, percent=False)
    print("PASS")
def test_process_section_parallel():
    """Process a section with 4 workers, redo the runs serially, and check
    that the parallel and serial summaries agree run-by-run."""
    nproc = 4
    nruns = 32
    t0 = time.time()
    s = Section("Wake-1.0")
    s.process_parallel(nproc=nproc, nruns=nruns)
    print("Parallel elapsed time: {} seconds".format(time.time() - t0))
    # Serial reference computation over the same runs.
    t0 = time.time()
    df = pd.DataFrame()
    for n in range(nruns):
        r = Run(s.name, n)
        df = df.append(r.summary, ignore_index=True)
    print("Serial elapsed time: {} seconds".format(time.time() - t0))
    assert(np.all(s.data.run == df.run))
    assert(np.all(s.data.mean_cp == df.mean_cp))
    assert(np.all(s.data.mean_cd == df.mean_cd))
    print("PASS")

def test_batch_process_section_vs_parallel():
    """Time the legacy batch_process_section_old against Section.process."""
    name = "Perf-1.0"
    t0 = time.time()
    batch_process_section_old(name)
    print(time.time() - t0)
    t0 = time.time()
    Section(name).process()
    print(time.time() - t0)
def test_download_raw():
    """Tests the `processing.download_raw` function."""
    print("Testing processing.download_raw")
    # First rename target file
    fpath = "Data/Raw/Perf-1.0/0/metadata.json"
    fpath_temp = "Data/Raw/Perf-1.0/0/metadata-temp.json"
    exists = False
    if os.path.isfile(fpath):
        exists = True
        os.rename(fpath, fpath_temp)
    try:
        download_raw("Perf-1.0", 0, "metadata")
        # Check that file contents are equal
        with open(fpath) as f:
            content_new = f.read()
        if exists:
            with open(fpath_temp) as f:
                content_old = f.read()
            assert(content_new == content_old)
    except ValueError as e:
        print(e)
    # Restore the original file regardless of the download outcome.
    os.remove(fpath)
    if exists:
        os.rename(fpath_temp, fpath)
    print("PASS")
def test_plot_settling():
    """Smoke-test plotting.plot_settling at U_infty = 1.0."""
    print("Testing plotting.plot_settling")
    plot_settling(1.0)
    print("PASS")

def test_calc_mom_transport():
    """Smoke-test WakeMap.calc_mom_transport at U_infty = 1.0."""
    print("Testing WakeMap.calc_mom_transport")
    wm = WakeMap(1.0)
    wm.calc_mom_transport()
    print("PASS")
def test_all():
    """Run the full smoke-test suite in sequence."""
    test_run()
    test_section()
    test_perf_curve()
    print("Testing plot_perf_re_dep")
    plot_perf_re_dep()
    plot_perf_re_dep(dual_xaxes=True)
    print("PASS")
    print("Testing plot_perf_curves")
    plot_perf_curves()
    print("PASS")
    print("Testing plot_trans_wake_profile")
    plot_trans_wake_profile("mean_u")
    print("PASS")
    print("Testing plot_wake_profiles")
    plot_wake_profiles(z_H=0.0, save=False)
    print("PASS")
    test_wake_profile()
    print("Testing process_tare_torque")
    process_tare_torque(2, plot=False)
    print("PASS")
    print("Testing process_tare_drag")
    process_tare_drag(5, plot=False)
    print("PASS")
    test_wake_map()
    plt.show()
    test_download_raw()
    test_plot_settling()
    test_calc_mom_transport()
    print("All tests passed")

if __name__ == "__main__":
    # NOTE(review): deliberately a no-op as written — presumably test_all()
    # was meant to be invoked here; confirm before enabling.
    pass
|
from Tokenizer import Tokenizer
from Constants import Symbols, Keywords, TokenTypes, BinaryOps, UnaryOps, MemSegments, SubroutineTypes
from VMWriter import VMWriter
from SymbolTable import Variable, SymbolTable
from os import remove
from sys import exit
class CompilationEngine:
    """Recursive-descent compiler that turns a tokenized Jack class into VM
    code, emitted through a VMWriter. Per-class and per-subroutine state
    (symbol tables, subroutine type/name, label counters) is kept on the
    instance."""
    # Keywords that can open a subroutine declaration.
    _class_subroutine_dec_keywords = {
        Keywords.METHOD, Keywords.CONSTRUCTOR, Keywords.FUNCTION}
    # Keywords that can open a class-level variable declaration.
    _class_var_dec_keywords = {
        Keywords.FIELD, Keywords.STATIC}
    # Symbols usable as binary operators in expressions.
    _binary_ops_symbols = {
        Symbols.PLUS, Symbols.MINUS, Symbols.ASTERISK, Symbols.FORWARD_SLASH,
        Symbols.AMPERSAND, Symbols.VERTICAL_BAR, Symbols.LESS_THAN, Symbols.GREATER_THAN, Symbols.EQUAL
    }
    # Symbols usable as unary (prefix) operators.
    _unary_ops_symbols = {
        Symbols.TILDE, Symbols.MINUS
    }
    # Keyword literals: this / true / false / null.
    _keyword_constants = {
        Keywords.THIS, Keywords.TRUE, Keywords.FALSE, Keywords.NULL
    }
    def __init__(self):
        """Create an engine with empty per-compilation state."""
        self._tokenizer = None                 # Tokenizer for the current input file
        self._writer = VMWriter()              # emits the VM commands
        self._cur_tok = None                   # token under the cursor
        self._next_tok = None                  # one-token lookahead
        self._cur_output_file_name = ''
        self._cur_class_name = ''
        self._cur_class_sym_table = None       # class scope: static/field vars
        self._cur_subroutine_sym_table = None  # subroutine scope: args/locals
        self._cur_subroutine_type = None       # METHOD / CONSTRUCTOR / FUNCTION
        self._cur_subroutine_name = ''
        self._if_count = 0                     # unique if-label counter
        self._while_count = 0                  # unique while-label counter
        self._num_of_field_vars = 0            # words a constructor must allocate
    def compile(self, input_file_name):
        """Compile one .jack file into a .vm file alongside it."""
        self._tokenizer = Tokenizer(input_file_name)
        # Reset per-class counters.
        self._num_of_field_vars = 0
        self._if_count = 0
        self._while_count = 0
        self._cur_tok = self._tokenizer.get_cur_tok()
        # Derive the output name: strip a trailing ".jack", append ".vm".
        if input_file_name[-5:] == ".jack":
            input_file_name = input_file_name[:-5]
        output_file_name = input_file_name + ".vm"
        self._cur_output_file_name = output_file_name
        with open(output_file_name, 'w') as output_stream:
            self._writer.set_output_file(output_stream)
            self._compile_class()
    def _compile_class(self):
        """Compile: 'class' className '{' classVarDec* subroutineDec* '}'."""
        self._cur_class_sym_table = SymbolTable()
        self._advance() # class
        self._cur_class_name = self._eat_cur_tok().get_val()
        self._advance()  # {
        self._compile_class_var_dec()
        self._compile_class_subroutine_dec()
def _compile_class_var_dec(self):
while self._cur_tok.get_keyword_type() not in self._class_subroutine_dec_keywords:
kind = Variable.Kind.FIELD if self._eat_cur_tok().get_keyword_type() == Keywords.FIELD else \
Variable.Kind.STATIC
type_ = self._eat_cur_tok().get_val()
self._cur_class_sym_table.add(self._eat_cur_tok().get_val(), type_, kind)
self._num_of_field_vars += 1
while self._eat_cur_tok().get_symbol_type() != Symbols.SEMI_COLON:
self._cur_class_sym_table.add(self._eat_cur_tok().get_val(), type_, kind)
if kind == Variable.Kind.FIELD:
self._num_of_field_vars += 1
    def _compile_class_subroutine_dec(self):
        """Compile every subroutine declaration until the class's '}'."""
        while self._cur_tok.get_symbol_type() != Symbols.CLOSE_CURLY:
            # Fresh symbol table for each subroutine.
            self._cur_subroutine_sym_table = SymbolTable()
            keyword_type = self._eat_cur_tok().get_keyword_type()
            if keyword_type == Keywords.METHOD:
                self._cur_subroutine_type = SubroutineTypes.METHOD
            elif keyword_type == Keywords.CONSTRUCTOR:
                self._cur_subroutine_type = SubroutineTypes.CONSTRUCTOR
            else:
                self._cur_subroutine_type = SubroutineTypes.FUNCTION
            self._advance()  # return type
            self._cur_subroutine_name = self._eat_cur_tok().get_val()
            self._advance()  # (
            self._compile_param_list()
            self._advance()  # )
            self._compile_subroutine_body()
    def _compile_param_list(self):
        """Record all parameters in the subroutine symbol table.

        Methods get an implicit leading 'this' argument of the class type.
        """
        if self._cur_subroutine_type == SubroutineTypes.METHOD:
            self._cur_subroutine_sym_table.add('this', self._cur_class_name, Variable.Kind.ARGUMENT)
        while self._cur_tok.get_symbol_type() != Symbols.CLOSE_PAREN:
            type_ = self._eat_cur_tok().get_val()
            name = self._eat_cur_tok().get_val()
            self._cur_subroutine_sym_table.add(name, type_, Variable.Kind.ARGUMENT)
            if self._cur_tok.get_symbol_type() == Symbols.COMMA:
                self._advance()  # ,

    def _compile_subroutine_body(self):
        """Compile '{' varDec* statements '}' plus the subroutine prologue.

        Constructors allocate the object's field memory and anchor `this`;
        methods re-anchor `pointer 0` to the passed-in `this` (argument 0).
        """
        self._advance()  # {
        self._compile_subroutine_var_dec()
        if self._cur_subroutine_type == SubroutineTypes.CONSTRUCTOR:
            # Allocate one word per field and keep the base address as this.
            self._writer.write_push(MemSegments.CONSTANT, self._num_of_field_vars)
            self._writer.write_call('Memory.alloc', 1)
            self._writer.write_pop(MemSegments.POINTER, 0)
        elif self._cur_subroutine_type == SubroutineTypes.METHOD:
            self._writer.write_push(MemSegments.ARGUMENT, 0)
            self._writer.write_pop(MemSegments.POINTER, 0)
        self._compile_statements()
        self._advance()  # }
    def _compile_subroutine_var_dec(self):
        """Compile all 'var' declarations, then emit the VM function header
        (the header needs the local-variable count, so it is written here)."""
        num_of_lcl_vars = 0
        while self._cur_tok.get_keyword_type() == Keywords.VAR:
            kind = Variable.Kind.LOCAL
            self._advance()  # var
            type_ = self._eat_cur_tok().get_val()
            self._cur_subroutine_sym_table.add(self._eat_cur_tok().get_val(), type_, kind)
            num_of_lcl_vars += 1
            # Comma-separated names share the declared type.
            while self._eat_cur_tok().get_symbol_type() != Symbols.SEMI_COLON:
                self._cur_subroutine_sym_table.add(self._eat_cur_tok().get_val(), type_, kind)
                num_of_lcl_vars += 1
        self._writer.write_func_name(self._cur_class_name + '.' + self._cur_subroutine_name, num_of_lcl_vars)
    def _compile_statements(self):
        """Dispatch on the leading statement keyword until the closing '}'."""
        while self._cur_tok.get_symbol_type() != Symbols.CLOSE_CURLY:
            keyword = self._cur_tok.get_keyword_type()
            if keyword == Keywords.LET:
                self._compile_let()
            elif keyword == Keywords.DO:
                self._compile_do()
            elif keyword == Keywords.WHILE:
                self._compile_while()
            elif keyword == Keywords.IF:
                self._compile_if()
            else:
                self._compile_return()
    def _compile_let(self):
        """Compile 'let target = expr;', including array element targets.

        For `let a[i] = expr`, the RHS value is parked in temp 0 before
        `that` is re-anchored to the element address, so an RHS that itself
        uses array access cannot clobber the pointer.
        """
        self._advance()  # let
        var_name_tok = self._eat_cur_tok()
        var_entry = self._get_var_entry(var_name_tok.get_val())
        if self._eat_cur_tok().get_symbol_type() == Symbols.OPEN_SQUARE:
            # Element address = base + index.
            self._writer.write_push(var_entry.kind.value, var_entry.num)
            self._compile_expression()
            self._writer.write_op(BinaryOps.ADD)
            self._advance(2)  # ] =
            self._compile_expression()
            self._writer.write_pop(MemSegments.TEMP, 0)
            self._writer.write_pop(MemSegments.POINTER, 1)
            self._writer.write_push(MemSegments.TEMP, 0)
            self._writer.write_pop(MemSegments.THAT, 0)
        else:
            self._compile_expression()
            self._writer.write_pop(var_entry.kind.value, var_entry.num)
        self._advance()  # ;

    def _compile_do(self):
        """Compile 'do call;' — the ignored return value is popped to temp 0."""
        self._advance()  # do
        self._compile_subroutine_call()
        self._writer.write_pop(MemSegments.TEMP, 0)
        self._advance()  # ;
    def _compile_if(self):
        """Compile if/else with unique labels.

        The condition is negated so a false condition jumps straight to
        `label` (the else branch or the statement after the if).
        """
        label = self._eat_cur_if_label()
        end_label = 'end_' + label
        self._advance(2) # if (
        self._compile_expression()
        self._writer.write_op(UnaryOps.NOT)
        self._writer.write_if_goto(label)
        self._advance(2) # ) {
        self._compile_statements()
        self._advance() # }
        self._writer.write_goto(end_label)
        self._writer.write_label(label)
        if self._cur_tok.get_keyword_type() == Keywords.ELSE:
            self._advance(2) # else {
            self._compile_statements()
            self._advance() # }
        self._writer.write_label(end_label)

    def _compile_while(self):
        """Compile a while loop: test at `label`, exit via `end_label`."""
        label = self._eat_cur_while_label()
        end_label = 'end_' + label
        self._advance(2) # while (
        self._writer.write_label(label)
        self._compile_expression()
        self._writer.write_op(UnaryOps.NOT)
        self._writer.write_if_goto(end_label)
        self._advance(2) # ) {
        self._compile_statements()
        self._advance()  # }
        self._writer.write_goto(label)
        self._writer.write_label(end_label)
    def _compile_return(self):
        """Compile 'return [expr];' — void subroutines push constant 0."""
        self._advance() # return
        if self._cur_tok.get_symbol_type() != Symbols.SEMI_COLON:
            self._compile_expression()
        else:
            self._writer.write_push(MemSegments.CONSTANT, 0) # ;
        self._writer.write_return()
        self._advance()

    def _compile_expression(self):
        """Compile term (op term)* left-to-right, with no precedence
        (as the Jack language specifies)."""
        self._compile_term()
        while self._cur_tok.get_symbol_type() in self._binary_ops_symbols:
            op = self._eat_cur_tok().get_binary_op()
            self._compile_term()
            self._writer.write_op(op)

    def _compile_expression_list(self):
        """Compile comma-separated call arguments; return their count."""
        num_of_expressions = 0
        while self._cur_tok.get_symbol_type() != Symbols.CLOSE_PAREN:
            self._compile_expression()
            num_of_expressions += 1
            if self._cur_tok.get_symbol_type() == Symbols.COMMA:
                self._advance() # ,
        return num_of_expressions
def _compile_term(self):
token_type = self._cur_tok.get_token_type()
symbol_type = self._cur_tok.get_symbol_type()
if token_type == TokenTypes.INT_CONST:
self._writer.write_push(MemSegments.CONSTANT, int(self._eat_cur_tok().get_val()))
elif token_type == TokenTypes.STRING_CONST:
self._compile_string_const()
elif self._cur_tok.get_keyword_type() in self._keyword_constants:
self._compile_keyword_const()
elif symbol_type in self._unary_ops_symbols:
tok = self._eat_cur_tok()
self._compile_term()
self._writer.write_op(tok.get_unary_op())
elif symbol_type == Symbols.OPEN_PAREN:
self._advance()
self._compile_expression()
self._advance()
else:
next_symbol_type = self._next_tok.get_symbol_type()
if next_symbol_type == Symbols.OPEN_SQUARE:
self._compile_array_exp()
elif next_symbol_type == Symbols.DOT or next_symbol_type == Symbols.OPEN_PAREN:
self._compile_subroutine_call()
else:
var_entry = self._get_var_entry(self._eat_cur_tok().get_val())
self._writer.write_push(var_entry.kind.value, var_entry.num)
def _compile_array_exp(self):
    """Compile arr[expr]: set THAT to base+index and push the element."""
    var_entry = self._get_var_entry(self._eat_cur_tok().get_val())
    self._writer.write_push(var_entry.kind.value, var_entry.num)  # base address
    self._advance()  # consume '['
    self._compile_expression()  # index expression
    self._writer.write_op(BinaryOps.ADD)  # base + index
    self._writer.write_pop(MemSegments.POINTER, 1)  # THAT = base + index
    self._writer.write_push(MemSegments.THAT, 0)  # push arr[index]
    self._advance()  # consume ']'
def _compile_string_const(self):
    """Compile a string literal via String.new(len) + String.appendChar per char."""
    cur_tok_string = self._eat_cur_tok().get_val()
    str_len = len(cur_tok_string)
    # NOTE(review): the length is pushed as str() while other pushes use ints —
    # presumably write_push stringifies either way; confirm in the writer.
    self._writer.write_push(MemSegments.CONSTANT, str(str_len))
    self._writer.write_call('String.new', 1)
    for c in cur_tok_string:
        self._writer.write_push(MemSegments.CONSTANT, ord(c))
        # appendChar returns the string object, keeping it on the stack
        self._writer.write_call('String.appendChar', 2)
def _compile_keyword_const(self):
    """Compile keyword constants: null/false -> 0, true -> -1, this -> pointer 0."""
    keyword_type = self._eat_cur_tok().get_keyword_type()
    if keyword_type == Keywords.NULL or keyword_type == Keywords.FALSE:
        self._writer.write_push(MemSegments.CONSTANT, 0)
    elif keyword_type == Keywords.TRUE:
        # true is represented as -1: push 1, then arithmetic negate
        self._writer.write_push(MemSegments.CONSTANT, 1)
        self._writer.write_op(UnaryOps.NEG)
    else:
        # only remaining keyword constant is 'this'
        self._writer.write_push(MemSegments.POINTER, 0)
def _compile_subroutine_call(self):
    """Compile the three call forms:

    1. var.method(args)   — push the object, dispatch on its declared type
    2. method(args)       — implicit call on the current object ('this')
    3. Class.func(args)   — function/constructor call, no hidden argument
    """
    var_entry = self._get_var_entry(self._cur_tok.get_val())
    if var_entry:
        # form 1: the identifier is a known variable
        self._advance(2)  # consume 'var' and '.'
        func_name = self._eat_cur_tok().get_val()
        self._writer.write_push(var_entry.kind.value, var_entry.num)  # hidden this
        self._advance()  # consume '('
        num_of_expressions = self._compile_expression_list()
        # +1 argument for the pushed object
        self._writer.write_call(var_entry.type + '.' + func_name, num_of_expressions + 1)
        self._advance()  # consume ')'
    elif self._next_tok.get_symbol_type() == Symbols.OPEN_PAREN:
        # form 2: bare method call on the current object
        func_name = self._eat_cur_tok().get_val()
        self._advance()  # consume '('
        self._writer.write_push(MemSegments.POINTER, 0)  # push 'this'
        num_of_expressions = self._compile_expression_list()
        self._writer.write_call(self._cur_class_name + '.' + func_name, num_of_expressions + 1)
        self._advance()  # consume ')'
    else:
        # form 3: concatenate 'Class' + '.' + 'func' from three tokens
        func_name = ''
        for _ in range(3):
            func_name += self._eat_cur_tok().get_val()
        self._advance()  # consume '('
        num_of_expressions = self._compile_expression_list()
        self._writer.write_call(func_name, num_of_expressions)
        self._advance()  # consume ')'
def _eat_cur_if_label(self):
res = 'if_' + str(self._if_count)
self._if_count += 1
return res
def _eat_cur_while_label(self):
res = 'while_' + str(self._while_count)
self._while_count += 1
return res
def _get_var_entry(self, name):
variable = self._cur_subroutine_sym_table.get(name)
if variable:
return variable
variable = self._cur_class_sym_table.get(name)
if variable:
return variable
return None
def _eat_cur_tok(self):
if self._cur_tok is None:
print("error: invalid code")
remove(self._cur_output_file_name)
exit(1)
cur_tok = self._cur_tok
self._advance()
return cur_tok
def _advance(self, steps=1):
self._tokenizer.advance(steps)
self._cur_tok = self._tokenizer.get_cur_tok()
self._next_tok = self._tokenizer.get_next_tok()
|
# IMPORT TKINTER MODULE
from Tkinter import *
# MAKE THE GRADE-AVERAGING FUNCTION
def average():
    """Read the four subject entries, average them, and display the result.

    Reads the Entry widgets collected in the module-global SUB_HOLDER and
    places a result Label on `root`. Raises ValueError when a field does
    not contain an integer.
    """
    # One pass over the entries replaces four parallel per-subject assignments.
    scores = [int(entry.get()) for entry in SUB_HOLDER]
    total = sum(scores)
    # Bug fix: the original computed `add / 4`, which under Python 2
    # (this file uses `from Tkinter import *`) is integer division and
    # silently truncates the average (e.g. 85.5 -> 85). `/ 4.0` forces
    # true division on both Python 2 and 3.
    grade = Label(root, text='Your Grade Average Is: {}'.format(total / 4.0))
    grade.pack()
    grade.place(x=175, y=200)
# Window constants (original comment typo 'CONSTATNS' fixed)
TITLE = 'My Test Application'
DIMENSIONS = '500x300'
root = Tk()
# School subjects rendered as labelled entry rows
subjects = ['math', 'language', 'science', 'history']
y_cor = 10  # y of the first row; advances 50px per subject
SUB_HOLDER = []  # Entry widgets in subject order; read back by average()
# Build one label + entry pair per subject
for subject in subjects:
    label = Label(root, text=subject)
    label.pack()
    label.place(x=100, y=y_cor)
    # NOTE: rebinding `subject` to the Entry widget shadows the loop value
    subject = Entry(root)
    SUB_HOLDER.append(subject)
    subject.pack()
    subject.place(x=175, y=y_cor)
    y_cor += 50
# The Grade button triggers average()
submit = Button(root, text='Grade', command=average)
submit.pack()
submit.place(x=225, y=250)
# Window setup + event loop
root.title(TITLE)
root.geometry(DIMENSIONS)
root.mainloop()
import sys
def calculate_lis(sequence):
    """Calculate a longest strictly-increasing subsequence (O(n^2) DP).

    :param sequence: sequence to search the lis in
    :return: one longest increasing subsequence; [] for empty input
    """
    if not sequence:
        # Bug fix: the original indexed sequence[0] unconditionally and
        # raised IndexError on an empty sequence.
        return []
    # L[i] holds a longest increasing subsequence ending at sequence[i]
    L = [[sequence[0]]]
    for i in range(1, len(sequence)):
        L.append([])
        for j in range(i):
            # extend the best shorter subsequence that can precede sequence[i]
            if (sequence[j] < sequence[i]) and (len(L[i]) < len(L[j]) + 1):
                L[i] = L[j][:]
        L[i].append(sequence[i])
    # first-longest candidate, matching the original scan's tie-breaking
    return max(L, key=len)
if __name__ == "__main__":
    '''
    Given: A positive integer n≤10000 followed by a permutation π of length n.
    Return: A longest increasing subsequence of π, followed by a longest decreasing subsequence of π.
    '''
    # n is read only to consume the first input line; the permutation's
    # length is implicit in the second line
    n = int(sys.stdin.readline())
    permutation = list(map(int, sys.stdin.readline().rstrip().split()))
    LIS = calculate_lis(permutation)
    # a longest decreasing subsequence is a LIS of the reversed sequence,
    # reversed back into original order
    LDS = calculate_lis(permutation[::-1])[::-1]
    print(" ".join(map(str, LIS)))
    print(" ".join(map(str, LDS)))
|
import sys
import pyshark
def load_packets(filename):
    """Open a capture file and return only its DNS packets (lazy FileCapture)."""
    return pyshark.FileCapture(filename, display_filter='dns')
def print_details(packet):
    """Print a one-packet DNS summary: the queried name and the resolved IP.

    Expects a pyshark packet exposing .dns.qry_name and .ip.dst.
    """
    # Bug fix: the header accessed packet.qry_name (no .dns layer), which
    # raises AttributeError on pyshark packets; also added the missing
    # space after 'for'.
    print(f"Report for {packet.dns.qry_name}\n\n")
    print(f"URL Requested:{packet.dns.qry_name}")
    print(f"IP Resolved:{packet.ip.dst}")
    print("\n\n------------------------------------------------------")
def print_report(request_response):
    """Print a summary for each DNS transaction (request/response pair).

    `request_response` maps DNS id -> [request_layer, response_layer],
    where a missing half is the placeholder value 10 (see
    request_response_attach).
    """
    for pair in request_response.values():
        req = pair[0]
        res = pair[1]
        if not hasattr(req, 'qry_name'):
            # Bug fix: response-only entries keep the placeholder 10 as
            # their request half; the original crashed on `dic[0].id`.
            continue
        print(f"Summary for {req.id}")
        print(f"URL requested: {req.qry_name}")
        try:
            print(f"IPv4 Resolved to : {res.a_all}")
        except AttributeError:
            # res is the placeholder 10, or the response has no A records;
            # narrowed from a bare except that hid real errors.
            pass
# will return dictionary of list containg dic{indexed by id of packets}[0-req,1-response]
def request_response_attach(pack):
    """Pair DNS packets by transaction id.

    Returns a dict keyed by the DNS id string; each value is a two-item
    list [request_layer, response_layer], with the placeholder 10 filling
    whichever half has not been seen yet.
    """
    paired = dict()
    REQUEST_FLAGS = "0x00000100"
    RESPONSE_FLAGS = "0x00008180"
    for packet in pack:
        dns_layer = packet.dns
        key = str(dns_layer.id)
        if dns_layer.flags == REQUEST_FLAGS:
            if key in paired:
                paired[key][0] = dns_layer
            else:
                paired[key] = [dns_layer, 10]
        elif dns_layer.flags == RESPONSE_FLAGS:
            if key in paired:
                paired[key][1] = dns_layer
            else:
                paired[key] = [10, dns_layer]
    return paired
def main():
    """Entry point: validate argv, load the capture, and dump paired DNS data."""
    if len(sys.argv) != 2:
        print("Invalid argument format found")
        quit()
    file_name = sys.argv[1]
    print(file_name)
    capture = load_packets(file_name)
    paired = request_response_attach(capture)
    print(paired)
# Script entry point (runs on import as well; an `if __name__ == '__main__'`
# guard would be safer but is omitted to preserve current behavior)
main()
import hashlib
def md5Checksum(filePath):
    """Return the hex MD5 digest of the file at filePath, read in 8 KiB chunks."""
    digest = hashlib.md5()
    with open(filePath, 'rb') as fh:
        # iter() with a sentinel stops cleanly at EOF (read() returns b'')
        for chunk in iter(lambda: fh.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
# Demo usage: requires a local test.txt to exist
print('The MD5 checksum of test.txt is', md5Checksum('test.txt'))
|
from django.contrib.auth.models import User
from django.db import models
from allauth.account.models import EmailAddress
from allauth.socialaccount.models import SocialAccount
import hashlib
from datetime import datetime
class UserProfile(models.Model):
    """Per-user profile row, linked one-to-one with Django's auth User."""
    user = models.OneToOneField(User, related_name='profile')

    def __unicode__(self):
        return "{}'s profile".format(self.user.username)

    class Meta:
        db_table = 'user_profile'

    def account_verified(self):
        """Return True when the user's email has a verified allauth record."""
        if not self.user.is_authenticated:
            return False
        matches = EmailAddress.objects.filter(email=self.user.email)
        if matches:
            return matches[0].verified
        return False

    def profile_image_url(self):
        """Facebook avatar when a facebook social account exists, else Gravatar."""
        fb_accounts = SocialAccount.objects.filter(user_id=self.user.id, provider='facebook')
        if fb_accounts:
            return "http://graph.facebook.com/{}/picture?width=40&height=40".format(fb_accounts[0].uid)
        return "http://www.gravatar.com/avatar/{}?s=40".format(hashlib.md5(self.user.email).hexdigest())
class QuoteRequest(models.Model):
    """A photography quote request submitted by a (possibly anonymous) user."""
    user = models.ForeignKey(UserProfile, null=True)
    location = models.CharField(max_length=25, default= 'Chennai')
    #event_type = models.CharField(max_length=25, default = 'Photography')
    photography_type = models.CharField(max_length=25, default = 'Wedding')
    # Choice keys are single digits ('1'..'5') to fit max_length=1.
    no_people_choices = (('1', 'Less than 50 people'),('2','50 to 100 people'),('3','100 to 200 people'),('4','200 to 500 people'),('5','More than 500 people'),)
    no_people = models.CharField(max_length=1,choices=no_people_choices,default=None, null = True)
    # NOTE(review): datetime.now().date() is evaluated ONCE at import time,
    # so every instance defaults to the server start date. Pass a callable
    # (e.g. django.utils.timezone.now) instead — needs a migration to fix.
    event_date = models.DateField('Event Date', default=datetime.now().date())
    event_venue_choices = (('1', 'Indoors'),('2','Outdoors'),('3',"I'm not sure yet"),)
    event_venue = models.CharField(max_length=1,choices=event_venue_choices,default=None, null = True)
    deliverables_choices = (
    ('1', 'CD/DVD'),
    ('2','Online Download'),
    ('3',"Physical Prints"),
    )
    deliverables = models.CharField(max_length=1,choices=deliverables_choices,default=None, null = True)
    budget_choices = (
    ('1', 'Less than INR 15,000'),
    ('2','INR 15,000 to 30,000'),
    ('3','INR 30,000 to 60,000'),
    ('4','INR 60,000 to 1,00,000'),
    ('5','More than 1,00,000'),
    )
    # NOTE(review): the default is the display label, not a choice key, and
    # exceeds max_length=1 — saving without an explicit budget will truncate
    # or fail validation; default='1' looks intended. Confirm before fixing.
    budget = models.CharField(max_length=1,choices=budget_choices,default='Less than INR 15,000', null = False)
    details = models.TextField(null = True)

    class Meta:
        db_table = 'quote_request'

    def __unicode__(self):
        return unicode(self.id)
# Monkey-patch: `user.profile` lazily fetches or creates the UserProfile row.
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Pelican static-site configuration for the tds-anonymous blog.
from __future__ import unicode_literals
import sys
sys.path.append('.')
AUTHOR = u'alex'
SITENAME = u'tds-anonymous'
SITEURL = 'tdsanonymous.com'
THEME = "themes/twenty"
PATH = 'content'
# Custom Jinja filters live in ./utils (hence the sys.path tweak above)
from utils import filters
JINJA_FILTERS = { 'sidebar': filters.sidebar, 'pretty_date': filters.pretty_date }
DEFAULT_LANG = u'en'
PLUGIN_PATHS = ['./plugins']
PLUGINS = ['better_code_samples']
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Social widget
SOCIAL = (('github', 'http://github.com/tdsanonymous'),)
DEFAULT_PAGINATION = 3
POST_LIMIT = 3
RELATIVE_URLS = True
DISPLAY_PAGES_ON_MENU = True
# Formatting for dates (ISO-8601 with a literal Z suffix)
DEFAULT_DATE_FORMAT = ('%Y-%m-%dT%H:%M:%SZ')
# Formatting for urls
# ARTICLE_DIR = 'blog'
ARTICLE_URL = "blog/{slug}"
ARTICLE_SAVE_AS = "blog/{slug}/index.html"
ARCHIVES_URL = "blog"
ARCHIVES_SAVE_AS = "blog/index.html"
PAGE_DIR = 'pages'
PAGE_URL = '{slug}'
PAGE_SAVE_AS = '{slug}/index.html'
CATEGORY_URL = "category/{slug}/"
CATEGORY_SAVE_AS = "category/{slug}/index.html"
TAG_URL = "tag/{slug}/"
TAG_SAVE_AS = "tag/{slug}/index.html"
USE_FOLDER_AS_CATEGORY = True
# Generate yearly archive
YEAR_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/index.html'
# Show most recent posts first
NEWEST_FIRST_ARCHIVES = True
STATIC_PATHS = ['images', 'fonts', 'css', 'js',]
# Current year, exposed to templates (e.g. for the footer copyright)
import datetime
now = datetime.datetime.utcnow()
YEAR = now.strftime("%Y")
|
# -*- coding: UTF-8 -*-
# Date   : 2020/3/9 11:03
# Editor : gmj
# Desc   : coverage statistics over all base data (translated from Chinese)
import datetime
import os
from openpyxl import Workbook
from openpyxl.styles import PatternFill
from common.database.mysql import MysqlConnect
from common.database.db_config import ALI_MYSQL_CONFIG
# Shared connection used by every report_* helper below.
mysql_cnn = MysqlConnect(ALI_MYSQL_CONFIG)
# Maps table suffix -> {row label: COUNT query}. report_base_data_status
# dispatches each key to the matching report_<table>() function.
QUERY_DICT = {
    't_major': {
        '专业数量': 'SELECT count(*) from t_major_operate',
    },
}
def report_base_data_status():
    """Build the daily data-coverage workbook, one sheet per QUERY_DICT table."""
    file_path = f'./{datetime.date.today()}数据统计报告.xlsx'
    if os.path.exists(file_path):
        os.remove(file_path)
    wb = Workbook()
    for table, query_dict in QUERY_DICT.items():
        print(table)
        # Bug/safety fix: the original built source text and eval()'d it.
        # Looking the function up by name and calling it directly is
        # equivalent, avoids eval entirely, and passes the real dict object
        # instead of round-tripping it through its repr.
        globals()[f'report_{table}'](wb, file_path, query_dict)
# 院校相关
def report_t_school(wb, save_path, query_dict):
# wb = Workbook()
style = PatternFill("solid", fgColor="E2EFDA")
ws = wb.create_sheet(index=0, title="学校信息报告")
ws.column_dimensions['A'].width = 23.0
ws['A1'] = '学校相关数据缺失统计结果'
ws['A2'] = '学校数量'
total = int(mysql_cnn.count('SELECT count(*) from (SELECT distinct school_name from t_school_operate ) aa'))
ws['B2'] = total
# print(total)
query_every(query_dict, save_path, total, wb, ws)
def query_every(query_dict, save_path, total, wb, ws):
    """Fill one coverage table on `ws`: per query, write total / present / missing.

    Rows start at 6 (the header sits on row 4); the workbook is saved at the end.
    """
    label_fill = PatternFill("solid", fgColor="E2EFDA")
    present_fill = PatternFill("solid", fgColor="66CD00")
    missing_fill = PatternFill("solid", fgColor="EEEE00")
    ws['A4'] = '数据字段'
    ws['B4'] = '数据总计'
    ws['C4'] = '现有数量'
    ws['D4'] = '缺失数量'
    for row, (label, sql_txt) in enumerate(query_dict.items(), start=6):
        ws.cell(row, 1).value = label
        ws.cell(row, 1).fill = label_fill
        ws.cell(row, 2).value = total
        present = int(mysql_cnn.count(sql_txt))
        ws.cell(row, 3).value = present
        ws.cell(row, 3).fill = present_fill
        ws.cell(row, 4).value = total - present
        ws.cell(row, 4).fill = missing_fill
    wb.save(save_path)
# 专业相关
def report_t_major(wb, save_path, query_dict):
# wb = Workbook()
style = PatternFill("solid", fgColor="E2EFDA")
ws = wb.create_sheet(index=1, title="专业信息报告")
ws.column_dimensions['A'].width = 23.0
ws['A1'] = '专业相关数据缺失统计结果'
ws['A2'] = '专业数量'
total = int(mysql_cnn.count('SELECT count(*) from t_major_operate'))
ws['B2'] = total
# print(total)
query_every(query_dict, save_path, total, wb, ws)
# 职业相关
def report_t_job(wb, save_path, query_dict):
# wb = Workbook()
style = PatternFill("solid", fgColor="E2EFDA")
ws = wb.create_sheet(index=2, title="职业数据报告")
ws.column_dimensions['A'].width = 20.0
ws['A1'] = '职业数据缺失统计结果'
ws['A2'] = '一级类数量'
ws['B2'] = mysql_cnn.count('SELECT count(*) from t_job_first_type_operate')
ws['A3'] = '二级类数量'
ws['B3'] = mysql_cnn.count('SELECT count(*) from t_job_type_operate')
ws['A4'] = '职位数量'
total = int(mysql_cnn.count('SELECT count(*) from t_job_operate'))
ws['B4'] = total
ws['A4'] = '数据字段'
ws['B4'] = '数据总计'
ws['C4'] = '现有数量'
ws['D4'] = '缺失数量'
row = 5
style = PatternFill("solid", fgColor="E2EFDA")
style1 = PatternFill("solid", fgColor="66CD00")
style2 = PatternFill("solid", fgColor="EEEE00")
for k, sql_txt in query_dict.items():
row += 1
ws.cell(row, 1).value = k
ws.cell(row, 1).fill = style
ws.cell(row, 2).value = total
num = int(mysql_cnn.count(sql_txt))
ws.cell(row, 3).value = num
ws.cell(row, 3).fill = style1
ws.cell(row, 4).value = total - num
ws.cell(row, 4).fill = style2
wb.save(save_path)
# 省控线,一分一段表
def report_batch_one_point(wb, save_path, query_dict):
# wb = Workbook()
style = PatternFill("solid", fgColor="E2EFDA")
ws = wb.create_sheet(index=3, title="一分一段,同分去向报告")
ws.column_dimensions['A'].width = 20.0
ws['A1'] = '一分一段,同分去向,省控线 相关数据缺失统计结果'
ws['A2'] = '省份数量'
total = 31
ws['B2'] = total
# print(total)
query_every(query_dict, save_path, total, wb, ws)
# Maps report title -> {sheet title: query returning the rows that are
# missing related data}; consumed by output_data_missing().
MISS_DICT = {
    '职业相关数据缺失统计': {
        '学历分布缺失': "SELECT job_name from t_job_operate where job_id not in (SELECT DISTINCT job_id from t_job_education_operate)",
    },
}
def output_data_missing():
    """Dump the raw rows behind each 'missing data' query into per-topic xlsx files."""
    for report_name, output_items in MISS_DICT.items():
        file_path = f'./{datetime.date.today()}{report_name}.xlsx'
        if os.path.exists(file_path):
            os.remove(file_path)
        wb = Workbook()
        sheet_index = 0
        for theme, sql_txt in output_items.items():
            rows = mysql_cnn.fetchall(sql_txt)
            if not rows:
                continue  # no sheet for queries with nothing missing
            ws = wb.create_sheet(index=sheet_index, title=theme)
            # data starts on row 2, mirroring the original layout
            for row_num, record in enumerate(rows):
                for col_num, value in enumerate(record.values()):
                    ws.cell(row_num + 2, col_num + 1).value = value
            sheet_index += 1
        wb.save(file_path)
if __name__ == '__main__':
    # report_base_data_status()  # full coverage workbook (disabled)
    output_data_missing()
|
# Generated by Django 2.1.3 on 2019-11-21 12:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax Data.etc and Data.source to optional TextFields."""

    dependencies = [
        ('Myapp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='data',
            name='etc',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='data',
            name='source',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
import torch
from torch import nn
class DataParallelDistribution(nn.DataParallel):
    """
    A DataParallel wrapper for Distribution.
    To be used instead of nn.DataParallel for Distribution objects.
    """

    def set_mode(self, mode):
        self.module.set_mode(mode)

    def _dispatch(self, mode, *args, **kwargs):
        # Select the wrapped module's mode, then fan the call out through
        # forward(), which DataParallel parallelizes across replicas.
        self.set_mode(mode)
        return self.forward(*args, **kwargs)

    def log_prob(self, *args, **kwargs):
        return self._dispatch('log_prob', *args, **kwargs)

    def sample(self, *args, **kwargs):
        return self._dispatch('sample', *args, **kwargs)

    def sample_with_log_prob(self, *args, **kwargs):
        return self._dispatch('sample_with_log_prob', *args, **kwargs)
class Distribution(nn.Module):
    """Distribution base class"""

    mode = 'log_prob'
    allowed_modes = {'log_prob', 'sample', 'sample_with_log_prob'}

    def _assert_allowed_mode(self, mode):
        assert mode in self.allowed_modes, 'Got mode {}, but needs to be in {}'.format(mode, str(self.allowed_modes))

    def set_mode(self, mode):
        '''
        Set mode for .forward().
        '''
        self._assert_allowed_mode(mode)
        self.mode = mode

    def forward(self, *args, **kwargs):
        '''
        Calls either {.log_prob(), .sample(), .sample_with_log_prob()}
        depending on self.mode.
        To allow Distribution objects to be wrapped by DataParallelDistribution,
        which parallelizes .forward() of replicas on subsets of data.
        '''
        handlers = {
            'log_prob': self.log_prob,
            'sample': self.sample,
            'sample_with_log_prob': self.sample_with_log_prob,
        }
        handler = handlers.get(self.mode)
        # unknown mode silently yields None, matching the if/elif original
        return handler(*args, **kwargs) if handler else None

    def log_prob(self, x):
        """Calculate log probability under the distribution.
        Args:
            x: Tensor, shape (batch_size, ...)
        Returns:
            log_prob: Tensor, shape (batch_size,)
        """
        raise NotImplementedError()

    def sample(self, num_samples):
        """Generates samples from the distribution.
        Args:
            num_samples: int, number of samples to generate.
        Returns:
            samples: Tensor, shape (num_samples, ...)
        """
        raise NotImplementedError()

    def sample_with_log_prob(self, num_samples):
        """Generates samples from the distribution together with their log probability.
        Args:
            num_samples: int, number of samples to generate.
        Returns:
            samples: Tensor, shape (num_samples, ...)
            log_prob: Tensor, shape (num_samples,)
        """
        drawn = self.sample(num_samples)
        return drawn, self.log_prob(drawn)

    def sample_shape(self, num_samples):
        """The shape of samples from the distribution.
        Args:
            num_samples: int, number of samples.
        Returns:
            sample_shape: torch.Size
        """
        raise NotImplementedError
|
import pandas as pd
import datetime
import json
import sqlite3
def get_last_update(src_path='C:\\Users\\merta\\Documents\\google_sync\\TFTSheets\\last_datetime.json',
                    out_path='json_data/other_data/last_update.json'):
    """Convert the stored epoch-ms timestamp to 'YYYY-MM-DD HH:MM' and persist it.

    Generalization: both paths are now parameters; the defaults keep the
    original hard-coded locations so existing callers are unchanged.

    Args:
        src_path: JSON file with a 'last_datetime' column of epoch milliseconds.
        out_path: destination JSON file receiving {'last_update': <string>}.
    """
    df = pd.read_json(src_path).to_dict()
    epoch = df['last_datetime'][0]
    print(epoch)
    # stored value is milliseconds; fromtimestamp expects seconds (local time)
    ts = datetime.datetime.fromtimestamp(epoch / 1000).strftime('%Y-%m-%d %H:%M')
    ts = {'last_update': ts}
    with open(out_path, "w") as json_file:
        json.dump(ts, json_file)
def get_total_matches(db_path='databases/match_ids.sqlite',
                      out_path='json_data/other_data/match_count.json'):
    """Count rows in match_ids and write {'match_count': n} to a JSON file.

    Improvements: counts with SQL COUNT(*) instead of fetching every row,
    closes the connection (the original leaked it), and parameterizes both
    paths with the original values as defaults.
    """
    db = sqlite3.connect(db_path)
    try:
        cursor = db.cursor()
        cursor.execute("SELECT COUNT(*) FROM match_ids")
        mc = {
            'match_count': int(cursor.fetchone()[0]),
        }
    finally:
        db.close()
    with open(out_path, "w") as json_file:
        json.dump(mc, json_file)
if __name__ == '__main__':
    # only the timestamp refresh runs by default; get_total_matches() is manual
    get_last_update()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# filter_helend_GC30.py
#
# Copyright 2017 Andres Aguilar <andresyoshimar@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import argparse
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
from Bio.SeqUtils import GC
# Helendout.txt columns (tab-separated, no header row in the input file)
helend_out = ["Name", "Strand", "Start_helend", "Helend", "Hairpin",
"Hairpin_len", "Gap_pos", "Mismatches"]
def gc_percent(x):
    """Return the GC percentage of the first stem of a 'stem1*loop*stem2' hairpin."""
    stem1, _loop, _stem2 = x.split("*")  # exactly three '*'-separated parts expected
    return GC(stem1)
def main(helends_file):
    """Keep only helitron records whose hairpin stem GC% is at least 30.

    The input file is rewritten in place (tab-separated, no header); note
    the computed GC column is written out alongside the original columns.
    """
    records = pd.read_table(helends_file, header=None, names=helend_out)
    records["GC"] = records["Hairpin"].apply(gc_percent)
    records = records[records["GC"] >= 30]
    records.to_csv(helends_file, sep='\t', index=False, header=False)
if __name__ == "__main__":
    # CLI: -helends/--helends points at the Helendout.txt table to filter
    args = argparse.ArgumentParser()
    args.add_argument("-helends", "--helends", help="Helends file", required=True)
    p = args.parse_args()
    main(p.helends)
# Generated by Django 2.2.4 on 2019-09-21 03:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the TypeUser lookup table (id + nameType)."""

    dependencies = [
        ('ClinicaMedica', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TypeUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nameType', models.CharField(max_length=50)),
            ],
        ),
    ]
|
import xlrd
import pandas as pd
import numpy as np
import sys
# Number of context lines captured on each side of the error line.
range_offset = 2;
def get_error_line(excel_path="error_and_line.xlsx"):
    """Return the first 'line number' value from the error spreadsheet.

    Generalization: the spreadsheet path is now a parameter with the
    original hard-coded name as default. The dead xlrd prototype code
    that was commented out has been removed.
    """
    sheet = pd.ExcelFile(excel_path).parse(0)
    return sheet['line number'][0]
def get_error_type(excel_path="error_and_line.xlsx"):
    """Return the first 'error message' value from the error spreadsheet.

    Generalization: the spreadsheet path is now a parameter with the
    original hard-coded name as default.
    """
    sheet = pd.ExcelFile(excel_path).parse(0)
    return sheet['error message'][0]
def get_context_arr(line_number):
    """Return the source lines surrounding the failing line of the target file.

    The file is `sys.argv[1] + ".java"`; the window spans range_offset lines
    on each side of the 1-based `line_number`.
    """
    java_file = sys.argv[1] + ".java"
    error_idx = line_number - 1  # convert to 0-based index
    # Bug fix: for line_number <= range_offset the original slice start went
    # negative and Python wrapped it to the END of the file, returning the
    # wrong (or an empty) context. Clamp to 0 instead.
    start = max(0, error_idx - range_offset)
    # `with` closes the file; the original also called f.close() redundantly
    with open(java_file, 'r') as f:
        lines = f.readlines()
    return lines[start:error_idx + range_offset + 1]
def wrap_output_string(context_arr, error_type):
    """Wrap the context lines in a dummy Java method named after the error type."""
    header = "public void " + error_type + "(){\n"
    return header + "".join(context_arr) + "\n}"
# Script body: pull the error location/type from the spreadsheet, cut the
# matching context out of <argv[1]>.java, and write it to Output.java.
line_number = get_error_line()
error_type = get_error_type()
context_arr = get_context_arr(line_number)
extracted_string = wrap_output_string(context_arr, error_type)
# NOTE(review): open/write/close could be a `with` block
new_file = open("Output.java", "w")
new_file.write(extracted_string)
new_file.close()
|
# Init Module |
from time import time
# Wall-clock start for the timing printout at the bottom of the script.
start = time()
global arr  # no-op at module scope; kept for fidelity with the original
arr = []  # digits d_1, d_10, ..., d_1000000 of the Champernowne constant


def walk(num, i):
    """Advance digit counter `i` across the digits of `num`.

    Appends the digit to `arr` whenever the running position hits a
    power-of-ten milestone; returns the updated position.
    """
    milestones = (1, 10, 100, 1000, 10000, 100000, 1000000)
    for digit in str(num):
        i += 1
        if i in milestones:
            arr.append(int(digit))
    return i
# Walk the concatenated integers until one million digits have been produced.
i = 0
iter = 1  # NOTE: shadows the builtin iter()
while (i <= 1000000):
    i = walk(iter, i)
    iter += 1
# Python 2 print statements; reduce() is a builtin in Python 2
print "Product: %d" % reduce(lambda x,y: x*y, arr)
print "Time: {0} secs".format(time()-start)
# Generated by Django 2.1.1 on 2018-10-11 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: introduce the Fruits model and drop TextFiles."""

    dependencies = [
        ('home', '0004_image_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Fruits',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('origin', models.TextField()),
                ('scientific_name', models.TextField(null=True)),
                ('rank', models.CharField(max_length=20, null=True)),
                ('hybrid', models.TextField(null=True)),
                ('cultivar', models.CharField(max_length=30, null=True)),
                ('text', models.TextField()),
                ('telugu', models.TextField(null=True)),
            ],
        ),
        migrations.DeleteModel(
            name='TextFiles',
        ),
    ]
|
from companies_matcher.parsers.finviz_parser import FinvizParser
_multiplicator = 'Dividend'
def _join_result(data: list, dividends: dict):
for item in data:
t = item['ticker']
try:
div = dividends[t][_multiplicator]
item['total'] = round(float(div) * item['amount'], 2)
item['dividends'] = round(float(div), 2)
except KeyError:
item['total'] = 0
item['dividends'] = 0
except ValueError:
item['total'] = 0
item['dividends'] = 0
async def get_dividends(data: list):
    """Fetch the dividend value for every ticker in `data` and merge it in.

    Returns the same list, with 'total' and 'dividends' attached per item.
    """
    symbols = [row['ticker'] for row in data]
    parser = FinvizParser(symbols, [_multiplicator])
    parsed = await parser.get_data()
    _join_result(data, parsed)
    return data
|
import csv
from tld import get_tld
import tld
known_tlds = set()
with open('hosts.csv', 'wt') as hosts:
    spamwriter = csv.writer(hosts, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
    with open('top-1m.csv', 'rt') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in spamreader:
            # Fix: the original bound this to `tld`, shadowing the imported
            # `tld` module; renamed to `site` (behavior unchanged).
            site = "http://" + row[1]
            try:
                site = "http://" + get_tld(site)
            except Exception as err:
                # best-effort: keep the full host when TLD extraction fails
                print(err)
            if site in known_tlds:
                continue
            known_tlds.add(site)
            spamwriter.writerow([row[0], site])
|
from __future__ import absolute_import
from othello import Othello
from copy import deepcopy
from multiprocessing import Pool
import numpy as np
import time
import os
import math
from keras.utils import np_utils
def refine(game):
    """Strip the ' vs ...' suffix, the two trailing characters, and all spaces."""
    moves = game.split(" vs")[0]
    return moves[:-2].replace(" ", "")
def gen_batch(game):
    """Replay one recorded Othello game into supervised training data.

    Returns:
        x_dataset: np.ndarray of board states, shape (num_moves, 1, 8, 8)
        y_dataset: one-hot move targets over the 64 squares
    """
    # (row, col) coordinate for each of the 64 board squares
    POSITION = [
    (0,0), (0,1), (0,2), (0,3), (0,4), (0,5), (0,6), (0,7),
    (1,0), (1,1), (1,2), (1,3), (1,4), (1,5), (1,6), (1,7),
    (2,0), (2,1), (2,2), (2,3), (2,4), (2,5), (2,6), (2,7),
    (3,0), (3,1), (3,2), (3,3), (3,4), (3,5), (3,6), (3,7),
    (4,0), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6), (4,7),
    (5,0), (5,1), (5,2), (5,3), (5,4), (5,5), (5,6), (5,7),
    (6,0), (6,1), (6,2), (6,3), (6,4), (6,5), (6,6), (6,7),
    (7,0), (7,1), (7,2), (7,3), (7,4), (7,5), (7,6), (7,7),
    ]
    # algebraic name for each square, indexed consistently with POSITION
    SQUARE = [
    "A1","B1","C1","D1","E1","F1","G1","H1",
    "A2","B2","C2","D2","E2","F2","G2","H2",
    "A3","B3","C3","D3","E3","F3","G3","H3",
    "A4","B4","C4","D4","E4","F4","G4","H4",
    "A5","B5","C5","D5","E5","F5","G5","H5",
    "A6","B6","C6","D6","E6","F6","G6","H6",
    "A7","B7","C7","D7","E7","F7","G7","H7",
    "A8","B8","C8","D8","E8","F8","G8","H8",
    ]
    O = Othello()  # fresh game; replayed move by move below
    x_dataset = list()
    y_dataset = list()
    x_append = x_dataset.append  # bound-method hoisting for the hot loop
    y_append = y_dataset.append
    game = refine(game)
    # split the move string into two-character square names ("F5", "D6", ...)
    history = [(i+j) for (i,j) in zip(game[::2],game[1::2])]
    for hand in history:
        try:
            # snapshot the board BEFORE the move: (state, move) training pair
            board = deepcopy(O.board)
            ix = SQUARE.index(hand)
            x_append(board)
            y_append(ix)
            # advance the game state by the recorded move
            O.make_legal_hands()
            O._check_passing()
            O.put(POSITION[ix])
        except:
            # malformed/illegal move in the record: keep the pairs collected
            # so far and stop replaying this game (intentional broad except)
            break
    x_dataset = np.asarray(x_dataset)
    num_data = x_dataset.shape[0]
    # add the single channel dimension expected by the conv net
    x_dataset = x_dataset.reshape((num_data, 1, 8, 8))
    y_dataset = np_utils.to_categorical(y_dataset, 64)
    return x_dataset, y_dataset
def generate_arrays_from_file(path):
    """Endlessly yield (state, action) training batches from a game-record file.

    `path` is a single text file with one complete game per line
    (original note: ここのpathは一つのテキストファイル).
    """
    while True:
        with open(path, "r") as handle:
            games = [line.rstrip('\n') for line in handle]
        for game in games:
            # one line == one recorded game
            yield gen_batch(game)
if __name__ == "__main__":
    # NOTE(review): multi_process() is not defined anywhere in this file;
    # running the module directly raises NameError.
    multi_process()
|
"""Defines all the functions related to the database"""
from app import db
def fetch_teams() -> dict:
    """Return up to 100 teams as a list of dicts (id, name, conference, record).

    Column positions are fixed by the teams schema; index 22 is assumed to
    be TeamID — confirm against the table definition.
    """
    conn = db.connect()
    rows = conn.execute("SELECT * FROM teams LIMIT 100;")
    conn.close()
    return [
        {
            "id": row[22],
            "TeamName": row[0],
            "Conference": row[1],
            "GamesPlayed": row[2],
            "Wins": row[3],
        }
        for row in rows
    ]
def update_team_entry(team_id: int, team_name: str, conference: str, games_played: int, wins: int) -> None:
    """Overwrite the editable columns of the team row identified by team_id.

    WARNING(review): the query is built with str.format, so the string
    arguments are interpolated unescaped — SQL injection risk. Move to the
    driver's parameterized-query API (the right paramstyle depends on the
    engine behind app.db — confirm before changing).
    """
    conn = db.connect()
    query = 'Update teams set TeamName = "{}", Conference = "{}", GamesPlayed = {}, Wins = {} where TeamID = {};'.format(team_name, conference, games_played, wins, team_id)
    conn.execute(query)
    conn.close()
def insert_new_team(team_name: str, conference: str, games_played: int, wins: int) -> int:
    """Insert a new team row and return its auto-generated TeamID.

    WARNING(review): the INSERT is built with str.format — SQL injection
    risk; switch to parameterized queries. LAST_INSERT_ID() implies a
    MySQL backend — confirm.
    """
    conn = db.connect()
    query = 'Insert Into teams (TeamName, Conference, GamesPlayed, Wins) VALUES ("{}", "{}", {}, {});'.format(team_name, conference, games_played, wins)
    conn.execute(query)
    # fetch the id assigned by the INSERT above (same connection required)
    query_results = conn.execute("Select LAST_INSERT_ID();")
    query_results = [x for x in query_results]
    task_id = query_results[0][0]
    conn.close()
    return task_id
def remove_team_by_id(team_id: int) -> None:
    """Delete the team row identified by team_id.

    NOTE(review): the id is interpolated into the SQL string; safe only as
    long as callers really pass an int — prefer a parameterized query.
    """
    conn = db.connect()
    query = 'Delete From teams where TeamID={};'.format(team_id)
    conn.execute(query)
    conn.close()
def fetch_upsets() -> dict:
    """Return up to 100 matches where the better-seeded team (lower number)
    lost, ordered by the size of the seed gap (biggest upsets first).

    Seed 0 entries are excluded (presumably unseeded teams — confirm).
    """
    conn = db.connect()
    # self-join matches against teams twice to resolve both team names/seeds
    results = conn.execute("SELECT Team1.Seed as Team1Seed, Team1.TeamName as Team1Name, Team2.Seed as Team2Seed, Team2.TeamName as Team2Name, matches.Team1Score, matches.Team2Score FROM (SELECT teams.Seed, matches.MatchID, teams.TeamName FROM matches JOIN teams on matches.Team1ID=teams.TeamID) as Team1 JOIN(SELECT teams.Seed, matches.MatchID, teams.TeamName FROM matches JOIN teams on matches.Team2ID=teams.TeamID) as Team2 ON Team1.MatchID=Team2.MatchID JOIN matches ON Team1.MatchID=matches.MatchID AND Team2.MatchID=matches.MatchID WHERE Team1.Seed < Team2.Seed AND Team1.Seed != 0 AND Team2.Seed != 0 AND matches.Team1Score < matches.Team2Score ORDER BY (Team2.Seed - Team1.Seed) desc LIMIT 100;")
    conn.close()
    teams_list = []
    for r in results:
        team = {
            "Team1Seed": r[0],
            "Team1Name": r[1],
            "Team2Seed": r[2],
            "Team2Name": r[3],
            "Team1Score": r[4],
            "Team2Score": r[5]
        }
        teams_list.append(team)
    return teams_list
'''
This function calculates the enclosed mass of an NFW profile, based on its redshift,
reffp, and meffp, where meffp is the enclosed mass (in Msun) of the DM halo at reffp
(in kpc).
The virial over-density and virial radius are defined in Bryan G. L., Norman M. L., 1998
The concentration mass relation can be found in Aaron A. Dutton Andrea V. Macciò, 2014
(Eqs. 12 & 13)
'''
import numpy as np
def calNFW(rlist, reffp, meffp, zr=0):
    """Return {'mnfw': array}: enclosed NFW halo mass at each radius in rlist.

    The halo is the one whose enclosed mass at radius reffp (kpc) equals
    meffp (Msun) at redshift zr. Virial overdensity per Bryan & Norman
    (1998); concentration-mass relation per Dutton & Maccio (2014).
    Arithmetic is kept identical to the original for bitwise fidelity.
    """
    # critical density and virial overdensity at redshift zr
    rho_crit = 127*(0.27*(1+zr)*(1+zr)*(1+zr)+0.73)
    x_lam = -0.73/(0.27*(1+zr)*(1+zr)*(1+zr)+0.73)
    delta = 18.0*np.pi*np.pi+82.0*(x_lam)-39*(x_lam)*(x_lam)
    # grid of trial virial masses (log10 Msun) and their concentrations
    trial_logMv = np.arange(7.0, 14.0, 0.05)
    logc = 0.537+(1.025-0.537)*np.exp(-0.718*np.power(zr, 1.08))+(0.024*zr-0.097)*(trial_logMv-np.log10(0.7)-np.log10(1e12))
    conc = np.power(10, logc)
    Rvir = np.power(np.power(10, trial_logMv)*3.0/4.0/np.pi/rho_crit/delta, 1.0/3.0)
    Rs = Rvir/conc
    cdelta = conc*conc*conc*delta/3./(np.log(1+conc)-conc/(1+conc))
    # enclosed mass within reffp for every trial halo
    menc = 4*np.pi*rho_crit*cdelta*Rs*Rs*Rs*(np.log((Rs+reffp)/Rs)-reffp/(Rs+reffp))
    # invert: which trial virial mass reproduces meffp at reffp?
    logMv_eff = np.interp(meffp, menc, trial_logMv)
    # rebuild the profile for that single matched halo
    logc_eff = 0.537+(1.025-0.537)*np.exp(-0.718*np.power(zr, 1.08))+(0.024*zr-0.097)*(logMv_eff-np.log10(0.7)-np.log10(1e12))
    conc_eff = np.power(10, logc_eff)
    Rvir_eff = np.power(np.power(10, logMv_eff)*3.0/4.0/np.pi/rho_crit/delta, 1.0/3.0)
    Rs_eff = Rvir_eff/conc_eff
    cdelta_eff = conc_eff*conc_eff*conc_eff*delta/3./(np.log(1+conc_eff)-conc_eff/(1+conc_eff))
    # enclosed NFW mass at every radius in rlist for the matched halo
    mnfw = np.array(4*np.pi*rho_crit*cdelta_eff*Rs_eff*Rs_eff*Rs_eff*(np.log((Rs_eff+rlist)/Rs_eff)-rlist/(Rs_eff+rlist)))
    return {'mnfw': mnfw}
def main():
    """Demo: enclosed NFW mass on 20 radii for reff=4.7 kpc, meff=7e9 Msun, z=0."""
    rlist = np.linspace(0.1, 20, num=20)
    reffp = 4.7; meffp = 7.0e9;
    info = calNFW(rlist, reffp, meffp, zr=0)
    # Python 2 print statement — this module targets Python 2
    print 'mnfw', info['mnfw']
    return None


if __name__ == "__main__":
    main()
|
from setuptools import setup, find_packages
# Minimal packaging script for the DemoWheel distribution.
setup(
    name="DemoWheel",
    author="Jonathan Scholtes",
    author_email="Jonathan@Stochasticcoder.com",
    version="0.1",
    packages=find_packages(),
)
from django.shortcuts import render,HttpResponse,render_to_response
from .models import Detect
# Create your views here.
from django.shortcuts import render, redirect
from django.views import View
from django.views import generic
from django.views.generic import TemplateView
import random, json
import datetime
from django.db.models import Q
class detectdata(generic.TemplateView):
    """Serves the detection-data page.

    NOTE(review): template_name is bound as a local inside get() rather than
    as the TemplateView class attribute; this works only because get()
    renders explicitly instead of using the TemplateView machinery.
    """
    def get(self, request, *args, **kwargs):
        template_name='data.html'
        return render(request, template_name)
def data_json(request):
    """Return the logged-in user's detection series as chart-style JSON columns.

    Output shape: {'columns': [[None, t1, t2, ...], ['percent', p1, p2, ...]]}
    — the leading cells are series headers for the charting library.
    """
    username = None
    if request.user.is_authenticated:
        username = request.user.username
    detected_list = Detect.objects.filter(cid=username, cname=username)
    times = [None, ]
    concentrations = ['percent', ]
    # Fix: the original loop variable was named `list`, shadowing the builtin.
    for record in detected_list:
        times.append(str(record.ctime))
        if record.cpercent > 10:
            concentrations.append(str(record.cpercent))
        else:
            # readings at or below 10% are zeroed (treated as noise, presumably)
            concentrations.append(0)
    data = {
        'columns': [
            times,
            concentrations,
        ]
    }
    return HttpResponse(json.dumps(data), content_type='text/json')
def main_page(request):
    """Render the data page without context.

    NOTE(review): render_to_response is deprecated (removed in Django 3.0);
    this code assumes an older Django.
    """
    return render_to_response('data.html')
|
import wx
from listing11_01 import BlockWindow
labels = "one two three four five six seven eight nine".split()
class TestFrame(wx.Frame):
    """Demo frame laying nine labelled blocks in a 3x3 FlexGridSizer."""

    def __init__(self):
        wx.Frame.__init__(self, None, -1, "FlexGridSizer Test")
        grid = wx.FlexGridSizer(rows=3, cols=3, hgap=5, vgap=5)
        for label in labels:
            block = BlockWindow(self, label=label)
            # the centre cell is enlarged to demonstrate the flex behaviour
            if label == "five":
                block.SetMinSize((150, 50))
            grid.Add(block, 0, wx.ALL, 5)
        self.SetSizer(grid)
        self.Fit()
# Script entry point: create the app, show the frame, enter the event loop.
# NOTE(review): wx.PySimpleApp exists only in classic wxPython; Phoenix
# replaced it with wx.App() — confirm the target wx version.
app = wx.PySimpleApp()
TestFrame().Show()
app.MainLoop()
|
# Testing template for "Guess the number"
###################################################
# Student should add code for "Guess the number" here
# template for "Guess the number" mini-project
# input will come from buttons and an input field
# all output for the game will be printed in the console
import simplegui
import random
import math
def new_game():
    """Start a new game by drawing a secret number in the default [0, 100) range."""
    global secret_number
    # BUG FIX: the template stub never set secret_number; without this,
    # input_guess() would raise NameError until it was assigned elsewhere.
    secret_number = random.randrange(0, 100)
# define event handlers for control panel
def range100():
print"New game.Range is from 0 to 100"
return random.randrange(0,100)
# button that changes the range to [0,100) and starts a new game
# remove this when you add your code
def range1000():
# button that changes the range to [0,1000) and starts a new game
print"New Game.range is from 0 to 1000"
return random.randrange(0,1000)
pass
def input_guess(guess):
# main game logic goes here
guess_no=int(guess)
print "Guess was",guess
if secret_number>guess_no:
print"Higher!"
elif secret_number<guess_no:
print "Lower!"
elif secret_number==guess_no:
print "correct!"
else:
print"wrong choice"
# create frame
# Build the simplegui control window and wire up buttons and the input field.
frame=simplegui.create_frame("welcome",200,200)
frame.add_button("Run",new_game,100)
frame.add_button("range between [0,100)",range100,200)
frame.add_button("range between [0,1000)",range1000,200)
frame.add_input("input_guess",input_guess,150)
# register event handlers for control elements and start frame
frame.start()
# call new_game
new_game()
# always remember to check your completed program against the grading rubric
###################################################
# Start our test #1 - assume global variable secret_number
# is the "secret number" - change name if necessary
# Fix the secret number so the scripted guesses below are reproducible.
secret_number = 74
# Binary-search-style walk converging on 74.
input_guess("50")
input_guess("75")
input_guess("62")
input_guess("68")
input_guess("71")
input_guess("73")
input_guess("74")
###################################################
# Output from test #1
#New game. Range is [0,100)
#Number of remaining guesses is 7
#
#Guess was 50
#Number of remaining guesses is 6
#Higher!
#
#Guess was 75
#Number of remaining guesses is 5
#Lower!
#
#Guess was 62
#Number of remaining guesses is 4
#Higher!
#
#Guess was 68
#Number of remaining guesses is 3
#Higher!
#
#Guess was 71
#Number of remaining guesses is 2
#Higher!
#
#Guess was 73
#Number of remaining guesses is 1
#Higher!
#
#Guess was 74
#Number of remaining guesses is 0
#Correct!
#
#New game. Range is [0,100)
#Number of remaining guesses is 7
###################################################
# Start our test #2 - assume global variable secret_number
# is the "secret number" - change name if necessary
#range1000()
#secret_number = 375
#input_guess("500")
#input_guess("250")
#input_guess("375")
###################################################
# Output from test #2
#New game. Range is [0,100)
#Number of remaining guesses is 7
#
#New game. Range is [0,1000)
#Number of remaining guesses is 10
#
#Guess was 500
#Number of remaining guesses is 9
#Lower!
#
#Guess was 250
#Number of remaining guesses is 8
#Higher!
#
#Guess was 375
#Number of remaining guesses is 7
#Correct!
#
#New game. Range is [0,1000)
#Number of remaining guesses is 10
###################################################
# Start our test #3 - assume global variable secret_number
# is the "secret number" - change name if necessary
#range100()
#secret_number = 28
#input_guess("50")
#input_guess("50")
#input_guess("50")
#input_guess("50")
#input_guess("50")
#input_guess("50")
#input_guess("50")
###################################################
# Output from test #3
#New game. Range is [0,100)
#Number of remaining guesses is 7
#
#Guess was 50
#Number of remaining guesses is 6
#Lower!
#
#Guess was 50
#Number of remaining guesses is 5
#Lower!
#
#Guess was 50
#Number of remaining guesses is 4
#Lower!
#
#Guess was 50
#Number of remaining guesses is 3
#Lower!
#
#Guess was 50
#Number of remaining guesses is 2
#Lower!
#
#Guess was 50
#Number of remaining guesses is 1
#Lower!
#
#Guess was 50
#Number of remaining guesses is 0
#You ran out of guesses. The number was 28
#
#New game. Range is [0,100)
#Number of remaining guesses is 7
|
#!/root/demo1/bin/python
# EASY-INSTALL-SCRIPT: 'Pafy==0.3.72','ytdl'
# Pin the exact distribution this auto-generated wrapper script belongs to.
__requires__ = 'Pafy==0.3.72'
import pkg_resources
# Resolve the installed Pafy 0.3.72 distribution and run its 'ytdl' entry script.
pkg_resources.run_script('Pafy==0.3.72', 'ytdl')
|
from rest_framework import permissions
class ReadAllWriteOnlyAdminPermission(permissions.BasePermission):
    """Authenticated users may read (GET); only staff may do anything else."""

    def has_permission(self, request, view):
        # Anonymous requests are always rejected.
        if not request.user.is_authenticated():
            return False
        # Reads are open to any authenticated user; writes require staff.
        return request.method == 'GET' or request.user.is_staff
|
#!/usr/bin/env python3
#
# gmm_tools.py
#
# Main tools for training GMMs and adapting
# from them with new data.
#
import numpy as np
from sklearn.mixture import GaussianMixture
# Structure of the trajectory data:
# np.ndarray of (N, D), where
# N = number of states collected and
# D = dimensionality of single observation
#
# Uses universal background model and supervector concepts from
# [1] http://cs.joensuu.fi/pages/tkinnu/webpage/pdf/speaker_recognition_overview.pdf
# For default relevance-factor (16), we took a look at the original UBM-GMM paper:
# [2] http://speech.ee.ntu.edu.tw/previous_version/Speaker%20Verification%20Using%20Adapted%20Gaussain%20Mixture%20Models.pdf
# For distances between MAP-adapted GMMs:
# [3] http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.437.6872&rep=rep1&type=pdf
def train_ubm(data, n_components=64, n_init=1, verbose=2):
    """
    Fit a diagonal-covariance GMM on pooled data to act as a
    Universal Background Model (UBM) for later mean adaptation.

    Note: hardcoded to diagonal covariance matrices, as full
    covariances would make fitting too slow.

    Parameters:
        data (np.ndarray): (N, D) array pooling states from many
            policies, modelling a "general policy".
        n_components (int): Number of mixture components.
        n_init (int): Passed through to GaussianMixture.
        verbose (int): Passed through to GaussianMixture.

    Returns:
        ubm (sklearn.mixture.GaussianMixture): The fitted UBM.
    """
    model = GaussianMixture(
        n_components=n_components,
        covariance_type="diag",
        n_init=n_init,
        verbose=verbose,
    )
    # GaussianMixture.fit returns the fitted estimator itself.
    return model.fit(data)
def save_ubm(path, ubm, means, stds, trajectory_indeces=np.nan, **additional_items):
    """
    Serialize a sklearn UBM GMM into a plain numpy archive for
    easier transfer between sklearn versions etc.

    Parameters:
        path (str): Where to store the UBM.
        ubm (sklearn.mixture.GaussianMixture): Trained GMM model.
        means, stds (ndarray): Normalization means/stds stored alongside.
        trajectory_indeces (ndarray): (num_policies, num_trajs) array of
            which trajectories trained this UBM (when sampling was used).
        **additional_items: Extra arrays added to the archive.
    """
    # Everything the mixture needs to be reconstructed later.
    payload = {
        "ubm_means": ubm.means_,
        "ubm_weights": ubm.weights_,
        # Probably no need to store all of these, but oh well
        "ubm_covariances": ubm.covariances_,
        "ubm_precisions": ubm.precisions_,
        "ubm_precisions_cholesky": ubm.precisions_cholesky_,
        "means": means,
        "stds": stds,
        "trajectory_indeces": trajectory_indeces,
    }
    # Double-unpacking keeps the original duplicate-keyword error behavior.
    np.savez(path, **payload, **additional_items)
def load_ubm(path):
    """
    Rebuild a GaussianMixture saved via save_ubm.

    Parameters:
        path (str): Path of the archive written by save_ubm.

    Returns:
        ubm (sklearn.mixture.GaussianMixture): Restored GMM model.
        means, stds (ndarray): Normalization vectors stored alongside it.
    """
    archive = np.load(path)
    # Component count and covariance layout are recovered from the arrays:
    # diagonal covariances are 2-D, full covariances 3-D.
    n_components = archive["ubm_means"].shape[0]
    covariance_type = "diag" if archive["ubm_covariances"].ndim == 2 else "full"
    ubm = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
    ubm.means_ = archive["ubm_means"]
    ubm.weights_ = archive["ubm_weights"]
    ubm.covariances_ = archive["ubm_covariances"]
    ubm.precisions_ = archive["ubm_precisions"]
    ubm.precisions_cholesky_ = archive["ubm_precisions_cholesky"]
    return ubm, archive["means"], archive["stds"]
def trajectories_to_supervector(states, ubm, relevance_factor=16):
    """
    MAP-adapt the UBM component means to the states visited by a policy
    and concatenate the adapted means into one fixed-length supervector.

    Parameters:
        states (np.ndarray): (N, D) array of visited states.
        ubm (sklearn.mixture.GaussianMixture): Trained GMM model.
        relevance_factor (int): Relevance factor from [2].

    Returns:
        np.ndarray of shape (M,): 1D, fixed-length representation of
        the policy that produced the data.
    """
    # Posterior responsibility of each component for each state: (N, K).
    responsibilities = ubm.predict_proba(states)
    # Effective number of points per component ("n" in [1]): (K,).
    occupancy = np.sum(responsibilities, axis=0)
    # MAP adaptation weight per component ("alpha"): (K,).
    adaptation = occupancy / (occupancy + relevance_factor)
    # Responsibility-weighted data mean per component ("tilde x"): (K, D).
    # Done one component at a time to keep memory use low.
    weighted_means = np.zeros_like(ubm.means_)
    for component in range(ubm.n_components):
        weighted_means[component] = np.sum(
            states * responsibilities[:, component, None], axis=0
        ) / (occupancy[component] + 1e-6)
    # Blend data means with the prior UBM means: (K, D).
    new_means = adaptation[..., None] * weighted_means + (1 - adaptation[..., None]) * ubm.means_
    # Flatten to the (K * D,) supervector.
    return new_means.ravel()
def adapted_gmm_distance(means1, means2, precisions, weights):
    """
    Upper bound on the KL divergence between two MAP-adapted GMMs,
    as in [3], equation (6).

    Parameters:
        means1 (ndarray): (K, D) adapted means of the first GMM.
        means2 (ndarray): (K, D) adapted means of the second GMM.
        precisions (ndarray): (K, D) inverse of a diagonal covariance
            matrix (1/Sigma).
        weights (ndarray): (K,) component weights.

    Returns:
        distance (float): KL-divergence upper bound for the two GMMs
            specified by the two mean matrices.
    """
    diff = means1 - means2
    # Diagonal precisions let us replace the quadratic form with an
    # elementwise product and a row sum.
    per_component = np.sum(diff * diff * precisions, axis=1)
    return 0.5 * np.sum(weights * per_component)
|
import scipy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Bar positions for the 10 sampled rows.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

# Dataset 1: load, drop incomplete rows, sample 10 rows, plot row means.
data1 = pd.read_csv('dist1.txt', sep=' ')
data1.head()
data1 = data1.dropna(axis='index')
data1.head()
datasample1 = data1.sample(10)
datasample1
datasample1['mean'] = datasample1.mean(axis=1)
datasample1['mean']
plt.bar(x, datasample1['mean'], tick_label=x)

# Dataset 2: same pipeline on the second distribution file.
data2 = pd.read_csv('dist2.txt', sep=' ')
data2.head()
data2 = data2.dropna(axis='index')
data2.head()
datasample2 = data2.sample(10)
datasample2
datasample2['mean'] = datasample2.mean(axis=1)
# BUG FIX: this previously echoed datasample1['mean'] (copy-paste slip).
datasample2['mean']
plt.bar(x, datasample2['mean'], tick_label=x)
|
# Generated by Django 2.2 on 2020-12-21 07:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for app1: User, Wall_Message, Comment and Book tables."""

    # First migration of the app — nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        # Application users. NOTE(review): password is a plain CharField —
        # no integration with Django's auth hashing is visible here.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=55)),
                ('first_name', models.CharField(max_length=55)),
                ('last_name', models.CharField(max_length=100)),
                ('email_address', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # A message posted on a wall; deleting the poster cascades.
        migrations.CreateModel(
            name='Wall_Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('poster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wall_messages', to='app1.User')),
            ],
        ),
        # A comment on a wall message; cascades from both poster and message.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(max_length=1000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('poster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wall_comments', to='app1.User')),
                ('wall_message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_comments', to='app1.Wall_Message')),
            ],
        ),
        # Books uploaded by one user and favoritable by many (M2M).
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book_title', models.CharField(max_length=200)),
                ('book_description', models.TextField(max_length=5000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('book_uploaded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uploaded_books', to='app1.User')),
                ('favorited_by', models.ManyToManyField(related_name='favorited_books', to='app1.User')),
            ],
        ),
    ]
|
#!/usr/bin/python2
from scapy.all import *
#dest = input("Destination: ")
#dest = raw_input("\nDestination: ")
#destport = input("Destination port: ") #Porta de destino
# Hard-coded target host and UDP port for the probe.
dest = '10.0.0.99'
destport = '1234'
# Build an IP/UDP packet headed for the target, from source port 40000.
ip = IP(dst=dest)
udp = UDP(dport=int(destport),sport=40000)
pkt = ip/udp
# sr() sends the packet and collects (answered, unanswered) results.
t = sr(pkt)
print(t)
|
import random
import turtle
import time
import pygame
from pygame import mixer
pygame.init()
# Background music, looped forever (-1).
mixer.music.load('bgm.mp3')
mixer.music.play(-1)
# Game state: current score, session best, and per-frame delay in seconds.
point = 0
high_score = 0
velocity = 0.10
# Main window; tracer(0) disables auto-refresh — the game loop calls
# window.update() itself.
window = turtle.Screen()
window.title('Snake Game')
window.bgcolor("pink")
window.setup(width=600, height=600)
window.tracer(0)
# Snake head; 'direction' is a custom attribute driven by the key handlers.
head = turtle.Turtle()
head.speed(0)
head.shape("circle")
head.color("brown")
head.penup()
head.goto(0, 100)
head.direction = "stop"
# Bait (food) the snake chases.
bait = turtle.Turtle()
bait.speed(0)
bait.shape("circle")
bait.color("black")
bait.penup()
bait.shapesize(0.80, 0.80)
bait.goto(0, 0)
# Tail segments added as the snake eats.
tails = []
# Score board: a hidden turtle used only to write text.
board = turtle.Turtle()
board.speed(0)
board.penup()
board.hideturtle()
board.goto(-290, 265)
board.write("Score:0 High Score: 0", font=("Times New Roman", 24, "normal"))
def move():
    """Advance the snake's head one 20-pixel step in its current direction."""
    step = 20
    # 'direction' holds a single value, so an elif chain is equivalent
    # to the original run of independent ifs.
    if head.direction == "up":
        head.sety(head.ycor() + step)
    elif head.direction == "down":
        head.sety(head.ycor() - step)
    elif head.direction == "right":
        head.setx(head.xcor() + step)
    elif head.direction == "left":
        head.setx(head.xcor() - step)
def go_up():
    """Point the snake up, unless it is currently moving down (no reversal)."""
    if head.direction == "down":
        return
    head.direction = "up"
def go_down():
    """Point the snake down, unless it is currently moving up (no reversal)."""
    if head.direction == "up":
        return
    head.direction = "down"
def go_right():
    """Point the snake right, unless it is currently moving left (no reversal)."""
    if head.direction == "left":
        return
    head.direction = "right"
def go_left():
    """Point the snake left, unless it is currently moving right (no reversal)."""
    if head.direction == "right":
        return
    head.direction = "left"
# Steer the snake with the arrow keys.
window.listen()
window.onkey(go_up, "Up")
window.onkey(go_down, "Down")
window.onkey(go_right, "Right")
window.onkey(go_left, "Left")
# Main game loop: redraw, handle collisions, advance the snake, sleep.
while True:
    window.update()
    # --- Wall collision: reset snake, score and speed ---
    if head.xcor() > 300 or head.xcor() < -300 or head.ycor() > 300 or head.ycor() < -300:
        time.sleep(1)
        head.goto(0, 0)
        head.direction = "stop"
        # Park old segments off-screen before dropping them.
        for tail in tails:
            tail.goto(1000, 1000)
        tails = []
        point = 0
        velocity = 0.10
        board.clear()
        board.write("Score: {} High Score: {}".format(point, high_score), font=("Times New Roman", 24, "normal"))
    # --- Bait eaten: relocate bait, grow tail, update score ---
    if head.distance(bait) < 20:
        x = random.randint(-250, 250)
        y = random.randint(-250, 250)
        bait.goto(x, y)
        new_tail = turtle.Turtle()
        new_tail.speed(0)
        new_tail.shape("circle")
        new_tail.color("green")
        new_tail.penup()
        tails.append(new_tail)
        # NOTE(review): increasing 'velocity' lengthens the sleep below,
        # i.e. the game gets slightly *slower* per bait — confirm intent.
        velocity = velocity + 0.0001
        point = point + 5
        if point > high_score:
            high_score = point
        board.clear()
        board.write("Score: {} High Score: {}".format(point, high_score), font=("Times New Roman", 24, "normal"))
    # --- Move tail: each segment steps into its predecessor's spot ---
    for index in range(len(tails) - 1, 0, -1):
        x = tails[index - 1].xcor()
        y = tails[index - 1].ycor()
        tails[index].goto(x, y)
    if len(tails) > 0:
        x = head.xcor()
        y = head.ycor()
        tails[0].goto(x, y)
    move()
    # --- Self collision: same reset as hitting a wall ---
    for segment in tails:
        if segment.distance(head) < 20:
            time.sleep(1)
            head.goto(0, 0)
            head.direction = "stop"
            for seg in tails:
                seg.goto(1000, 1000)
            tails = []
            point = 0
            board.clear()
            board.write("Score: {} High Score: {}".format(point, high_score), font=("Times New Roman", 24, "normal"))
            # BUG FIX: was 0.15 here but 0.10 at start and on the wall reset;
            # every game-over now restarts at the same base speed.
            velocity = 0.10
    time.sleep(velocity)
|
#!/usr/bin/env python
import tweepy
import time, datetime
from pymongo import MongoClient
import unicodedata
from twitter_oauth import CUSTOMER_KEY, CUSTOMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET
# connection
auth = tweepy.OAuthHandler(CUSTOMER_KEY, CUSTOMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
# limit per hour is 350
# Read rate-limit status for the 'moments' resource family and report it.
resetTS = api.rate_limit_status()['resources']['moments']['/moments/permissions']['reset']
resetDT = datetime.datetime.fromtimestamp(resetTS).strftime('%Y-%m-%d %H:%M:%S')
limit = api.rate_limit_status()['resources']['moments']['/moments/permissions']['limit']
remaining = api.rate_limit_status()['resources']['moments']['/moments/permissions']['remaining']
print 'For moments, you have {}/{}, API calls remaining until next reset time {}'.format(remaining, limit, resetDT)
# advanced twitter search operators
# Search window: the last 7 days, formatted YYYY-MM-DD for since:/until:.
done = time.time()
start = time.time() - 7 * 24 * 3600
done2 = datetime.datetime.fromtimestamp(done).strftime('%Y-%m-%d')
start2 = datetime.datetime.fromtimestamp(start).strftime('%Y-%m-%d')
# Three query variants of increasing strictness; only query_v2 is used below.
query_v0 = 'amazon OR amzn -amazing'
query_v1 = 'amazon OR amzn -amazing since:{} until:{}'.format(start2, done2)
# searching "amazon" OR "amzn", exclude "amazing", exclude retweets, only includes original tweets. retweets#>=10, faves#>10, time-window: last week
query_v2 = 'amazon OR amzn -amazing Filter:news -Filter:nativeretweets min_retweets:10 min_faves:10 since:{} until:{}'.format(start2, done2)
max_tweets = 1000
# fetch data from twitter search api with memory optimization
# Page backwards (decreasing max_id) until we have max_tweets or run dry.
searched_tweets = []
last_id = -1
while len(searched_tweets) < max_tweets:
    # Tweets still needed to reach the target on this page.
    count = max_tweets - len(searched_tweets)
    try:
        # BUG FIX: request only the remaining 'count' per page — the computed
        # value was previously unused and every call asked for max_tweets.
        new_tweets = api.search(q=query_v2, count=count, max_id=str(last_id - 1), result_type="recent", lang="en")
        if not new_tweets:
            break
        searched_tweets.extend(new_tweets)
        last_id = new_tweets[-1].id
    except tweepy.TweepError as e:
        # depending on TweepError.code, one may want to retry or wait
        # to keep things simple, we will give up on an error
        break
# sanity check
# Print a sample of the collected tweets with basic engagement counters.
for item in searched_tweets[:10]:
    print item.text, item.user.followers_count, item.user.statuses_count, item.favorite_count, item.retweet_count
# write to file
# Append one summary line per tweet; text is ASCII-normalized first.
with open('FILE_DIR_YOUR_WANT_TO_WRITE', 'a') as thefile:
    for item in searched_tweets:
        text = unicodedata.normalize('NFKD', item.text).encode('ascii', 'ignore')
        thefile.write("followers:{0}, statuses:{1}, favorites:{2}, retweeted:{3}, text:{4}\n".format(
            item.user.followers_count, item.user.statuses_count, item.favorite_count, item.retweet_count, text))
# write to mongoDB
client = MongoClient('mongodb://localhost:27017/')
db = client.tinyjumbo
for item in searched_tweets:
    # Flatten each tweet into a simple document for the amazon_hot collection.
    simple = {
        # user
        "user_screen_name": item.user.screen_name,
        "user_followers_count": item.user.followers_count,
        "user_statuses_count": item.user.statuses_count,
        "user_friends_count": item.user.friends_count,
        "user_favourites_count": item.user.favourites_count,
        # tweet
        # NOTE(review): this records the insertion time, not the tweet's
        # creation time (item.created_at) — confirm that is intended.
        "tweet_time": str(datetime.datetime.now()),
        "tweet_text": unicodedata.normalize('NFKD', item.text).encode('ascii', 'ignore'),
        "tweet_retweet_count": item.retweet_count,
        "tweet_favorite_count": item.favorite_count
    }
    db.amazon_hot.insert_one(simple)
|
import game
from player import player
def trans_tuple(s):
    """Parse a string like "(1, 2)" into a tuple of ints; "" yields ()."""
    # Peel surrounding spaces, then any leading '(' / trailing ')'.
    cleaned = s.strip(' ').strip('(').strip(')')
    if not cleaned:
        return tuple()
    return tuple(int(part) for part in cleaned.split(','))
class human_player():
    """Interactive player that reads a move tuple from stdin."""

    def action(self, Info):
        """Prompt for a move; malformed input yields the sentinel (-2, -2)."""
        print("Your turn : ", end="")
        try:
            a = trans_tuple(input())
        # BUG FIX: was a bare 'except', which also swallowed KeyboardInterrupt
        # and SystemExit; catch only ordinary errors (e.g. ValueError/EOFError).
        except Exception:
            a = (-2, -2)
        return a
|
import pandas as pd
import json
import sys
from casos import casos_positivos, casos_fallecidos
# Build and print (as JSON on stdout) the COVID summary for the Junin
# department: positives/deaths totals, split by sex and by life stage, for
# the department as a whole and for each of its provinces.

poblacion_junin = 1357263

# Inclusive age brackets for the "etapa de vida" (life stage) breakdown.
# None as upper bound means "no upper limit".
_ETAPAS_DE_VIDA = [
    ("primera_infancia", 0, 5),
    ("infancia", 6, 11),
    ("adolescencia", 12, 18),
    ("juventud", 19, 26),
    ("adultez", 27, 59),
    ("persona_mayor", 60, None),
]

# (display name, value in the PROVINCIA column, hard-coded population).
_PROVINCIAS_JUNIN = [
    ("Huancayo", "HUANCAYO", 520516),
    ("Concepcion", "CONCEPCION", 57399),
    ("Chanchamayo", "CHANCHAMAYO", 199070),
    ("Jauja", "JAUJA", 84924),
    ("Junin", "JUNIN", 26127),
    ("Satipo", "SATIPO", 263330),
    ("Tarma", "TARMA", 109330),
    ("Yauli", "YAULI", 41412),
    ("Chupaca", "CHUPACA", 55152),
]


def _contar(df, columna, valor, sexo=None, edad_min=None, edad_max=None):
    """Count rows of df where df[columna] == valor, optionally restricted
    by sex and by an inclusive declared-age range."""
    mascara = df[columna] == valor
    if sexo is not None:
        mascara = mascara & (df['SEXO'] == sexo)
    if edad_min is not None:
        mascara = mascara & (df['EDAD_DECLARADA'] >= edad_min)
    if edad_max is not None:
        mascara = mascara & (df['EDAD_DECLARADA'] <= edad_max)
    # .shape[0] is the row count (the original list(...)[0] did the same).
    return df[mascara].shape[0]


def _fallecidos_por_etapa(columna, valor):
    """Death counts per life stage for one region (column == value)."""
    return {
        etapa: _contar(casos_fallecidos, columna, valor,
                       edad_min=edad_min, edad_max=edad_max)
        for etapa, edad_min, edad_max in _ETAPAS_DE_VIDA
    }


def _resumen_provincia(nombre, valor, poblacion):
    """Summary dict for one province; key order matches the original report."""
    return {
        "name": nombre,
        "positivos": _contar(casos_positivos, 'PROVINCIA', valor),
        "poblacion": poblacion,
        "hombres_infectados": _contar(casos_positivos, 'PROVINCIA', valor, sexo="MASCULINO"),
        "mujeres_infectados": _contar(casos_positivos, 'PROVINCIA', valor, sexo="FEMENINO"),
        "fallecidos": _contar(casos_fallecidos, 'PROVINCIA', valor),
        "hombres_fallecidos": _contar(casos_fallecidos, 'PROVINCIA', valor, sexo="MASCULINO"),
        "mujeres_fallecidos": _contar(casos_fallecidos, 'PROVINCIA', valor, sexo="FEMENINO"),
        "type": "Provincia",
        "etapa_de_vida_fallecidos": _fallecidos_por_etapa('PROVINCIA', valor),
    }


# Department-level summary; note the department filters on the DEPARTAMENTO
# column while the provinces filter on PROVINCIA.
junin = {
    "name": "Junin",
    "poblacion": poblacion_junin,
    "positivos": _contar(casos_positivos, 'DEPARTAMENTO', "JUNIN"),
    "hombres_infectados": _contar(casos_positivos, 'DEPARTAMENTO', "JUNIN", sexo="MASCULINO"),
    "mujeres_infectados": _contar(casos_positivos, 'DEPARTAMENTO', "JUNIN", sexo="FEMENINO"),
    "fallecidos": _contar(casos_fallecidos, 'DEPARTAMENTO', "JUNIN"),
    "hombres_fallecidos": _contar(casos_fallecidos, 'DEPARTAMENTO', "JUNIN", sexo="MASCULINO"),
    "mujeres_fallecidos": _contar(casos_fallecidos, 'DEPARTAMENTO', "JUNIN", sexo="FEMENINO"),
    "type": "Departamento",
    "etapa_de_vida_fallecidos": _fallecidos_por_etapa('DEPARTAMENTO', "JUNIN"),
    "url": "junin",
    "provincias": [
        _resumen_provincia(nombre, valor, poblacion)
        for nombre, valor, poblacion in _PROVINCIAS_JUNIN
    ],
}

# Emit the report on stdout for the consuming process and flush immediately.
print(json.dumps(junin))
sys.stdout.flush()
|
"""
SeenImages.py
Author: Jan Zahalka (jan@zahalka.net)
Encapsulates the images already seen by the user in a session.
"""
import numpy as np
import random
class DatasetExhaustedError(Exception):
    """Raised when every image in the collection has been seen.

    Must derive from Exception: the original plain class made
    ``raise DatasetExhaustedError`` a TypeError under Python 3.
    """
    pass
class SeenImages:
    """
    Pretty much just a thin wrapper over the set of seen image IDs, but the
    class is needed as the seen items must be centrally managed (otherwise
    different buckets would show the same items again).
    """

    def __init__(self, n):
        """
        Constructor.

        Parameters
        ----------
        n : int
            The number of images in the collection.
        """
        # IDs (0..n-1) of the images already shown to the user.
        self.seen = set()
        self.n = n

    def __len__(self):
        """
        An override of Python's len() function.

        Returns
        -------
        int
            The number of seen images.
        """
        return len(self.seen)

    def all(self):
        """
        Returns a list of all seen images.

        Returns
        -------
        list
            The list of all seen images.
        """
        return list(self.seen)

    def all_unseen(self, exclude=()):
        """
        Returns a list of unseen images.

        Parameters
        ----------
        exclude : iterable
            Images to be explicitly excluded from the unseen list.
            Default: empty. (An immutable default replaces the original
            mutable-[] default.)

        Returns
        -------
        np.array
            An array containing all images that were not seen before.
        """
        return np.array(list(set(range(self.n)) - self.seen - set(exclude)))

    def is_seen(self, image):
        """
        Checks whether the image has been seen or not.

        Parameters
        ----------
        image : int
            The ID of the image to be checked.

        Returns
        -------
        bool
            True if the image has been seen, False otherwise.
        """
        return image in self.seen

    def remove_seen(self, images, exclude=()):
        """
        Given an image list, remove the previously seen images, as well as
        the explicitly specified exclude list.

        Parameters
        ----------
        images : list
            The list of images from which seen images are to be removed.
        exclude : iterable
            A specific list of images to be excluded from the image list.
            Default: empty.

        Returns
        -------
        list
            The images with the seen and excluded ones removed
            (order is not preserved: set arithmetic is used).
        """
        return list(set(images) - self.seen - set(exclude))

    def random_unseen_images(self, n_random, exclude=()):
        """
        Provides a random sample of the unseen images from the collection.

        Parameters
        ----------
        n_random : int
            The number of random samples to be produced.
        exclude : iterable
            The images to be specifically excluded from the random sample.
            Default: empty.

        Returns
        -------
        np.array
            An array of randomly sampled unseen images. If fewer than
            n_random candidates remain, all of them are returned.

        Raises
        ------
        DatasetExhaustedError
            If there are no more unseen images in the collection.
        """
        candidates = set(range(self.n)) - self.seen - set(exclude)

        if len(candidates) == 0:
            raise DatasetExhaustedError

        if n_random > len(candidates):
            return np.array(list(candidates))

        # random.sample() requires a sequence; passing a set directly is a
        # TypeError since Python 3.11.
        return np.array(random.sample(list(candidates), n_random))

    def get_images(self, n_images=None):
        """
        Fetches seen images.

        Parameters
        ----------
        n_images : int or None
            The number of images to be returned. If specified (non-zero), a
            random sample of length n_images will be produced. If None
            (default) or 0, all seen images are returned.

        Returns
        -------
        list
            The list of seen images corresponding to the n_images param
            value.
        """
        if n_images:
            # random.sample() requires a sequence (see random_unseen_images).
            return random.sample(list(self.seen), n_images)
        else:
            return list(self.seen)

    def update(self, new_seen):
        """
        Updates the set of seen images.

        Parameters
        ----------
        new_seen : iterable
            The images to be added to the seen images.

        Raises
        ------
        DatasetExhaustedError
            Raised when the length of the seen set becomes equal to the
            number of images in the collection.
        """
        self.seen.update(new_seen)

        if len(self) == self.n:
            raise DatasetExhaustedError
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
"""
This module is responsible for importing other modules such that
they have chances to initialize before the application started.
"""
# The service API
from ava.web import webapi
# For serving web root static files
from ava.web import resources
# register actions for user module, e.g. 'user.notify'.
from avame import user
from avame import requests |
import sqlite3
class DBmanager:
    """SQLite-backed storage for doors, users, cameras and couriers.

    All statements are parameterized (no SQL injection). The schema is
    created on first connection if it does not already exist.
    """

    def __init__(self, db_path='mainDB.db'):
        """Open (or create) the database at *db_path*.

        db_path generalizes the previously hard-coded 'mainDB.db'
        (':memory:' is convenient for tests); the default keeps the old
        behaviour for existing callers.
        """
        self.mainDB = sqlite3.connect(db_path)
        # BUG FIX: SQLite does NOT enforce foreign keys (and therefore
        # the ON DELETE CASCADE clauses below) unless this pragma is
        # enabled on every connection.
        self.mainDB.execute('PRAGMA foreign_keys = ON')
        self.mainDB.execute('''create table if not exists user_info (
                door_serial unique not null,
                user_id not null,
                user_pw not null,
                stream_address,
                primary key(door_serial)
            )''')
        self.mainDB.execute('''create table if not exists user_images (
                door_serial,
                user_image blob,
                FOREIGN KEY (door_serial) REFERENCES user_info(door_serial) ON DELETE CASCADE
            )''')
        self.mainDB.execute('''create table if not exists courier_info (
                door_serial ,
                courier_image blob,
                bill_number,
                primary key(bill_number),
                FOREIGN KEY (door_serial) REFERENCES user_info(door_serial) ON DELETE CASCADE
            )''')
        self.mainDB.execute('''create table if not exists door_cam_match (
                door_serial ,
                cam_serial,
                primary key(door_serial,cam_serial),
                FOREIGN KEY (door_serial) REFERENCES user_info(door_serial) ON DELETE CASCADE
            )''')
        # add further tables here

    def getcourierPicData(self, billNum):
        """Return the courier image blob for *billNum* (b'' if absent).

        Written assuming a single registered courier per home.
        """
        picData = self.mainDB.execute('select courier_image from courier_info where bill_number = ?', (billNum,))
        result = b''
        for data, in picData:
            result = data
        return result

    def getBillNumData(self, doorSerial, billNum):
        """Return *billNum* if it is registered for *doorSerial*, else ''."""
        billNumData = self.mainDB.execute('select bill_number from courier_info where door_serial = ? AND bill_number = ?', (doorSerial, billNum))
        result = ''
        for data, in billNumData:
            result = data
        return result

    def getUserDoorSerialData(self, doorSerial):
        """Return *doorSerial* if a user_info row exists for it, else ''."""
        searchData = self.mainDB.execute('select door_serial from user_info where door_serial = ?', (doorSerial,))
        door_serial = ""
        for data, in searchData:
            door_serial = data
        return door_serial

    def getMatchedUserDoorSerialData(self, camSerial):
        """Return the door serial paired with *camSerial*, else ''."""
        searchData = self.mainDB.execute('select door_serial from door_cam_match where cam_serial = ?', (camSerial,))
        door_serial = ""
        for data, in searchData:
            door_serial = data
        return door_serial

    def getUserInfo(self, userId, userPw):
        """Return the door serial for matching credentials, else ''."""
        searchData = self.mainDB.execute('select door_serial from user_info where user_id = ? and user_pw = ?', (userId, userPw))
        door_serial = ""
        for data, in searchData:
            door_serial = data
        return door_serial

    def getCamSerial(self, doorSerial):
        """Return the camera serial paired with *doorSerial*, else ''."""
        searchData = self.mainDB.execute('select cam_serial from door_cam_match where door_serial = ?', (doorSerial,))
        cam_serial = ""
        for data, in searchData:
            cam_serial = data
        return cam_serial

    def getUserPicList(self, doorSerial):
        """Return all user image blobs registered for *doorSerial*."""
        searchData = self.mainDB.execute('select user_image from user_images where door_serial = ?', (doorSerial,))
        picList = []
        for data, in searchData:
            picList.append(data)
        return picList

    def getStreamingAddress(self, doorSerial):
        """Return the stream address stored for *doorSerial*, else ''."""
        streamAdr = self.mainDB.execute('select stream_address from user_info where door_serial = ? ', (doorSerial,))
        door_serial = ""
        for data, in streamAdr:
            door_serial = data
        return door_serial

    # TODO: method to refresh the family list of user images

    def updataStreamingAddress(self, doorSerial, ip):
        """Store *ip* as the stream address of *doorSerial*.

        Name kept as-is (sic, 'updata') for backward compatibility.
        """
        self.mainDB.execute("update user_info set stream_address = ? where door_serial = ?", (ip, doorSerial))
        self.mainDB.commit()

    def updateCourierImage(self, billNum, courierImage):
        """Attach *courierImage* to the courier row keyed by *billNum*."""
        self.mainDB.execute("update courier_info set courier_image = ? where bill_number = ?", (courierImage, billNum))
        self.mainDB.commit()

    def recordCourierInfo(self, doorSerial, billNum):
        """Register a new courier delivery (bill number) for a door."""
        self.mainDB.execute("insert into courier_info(door_serial, bill_number) values(?,?)", (doorSerial, billNum))
        self.mainDB.commit()

    def recordUserImage(self, doorSerial, userImage):
        """Store a user face image blob for a door."""
        self.mainDB.execute("insert into user_images(door_serial, user_image) values(?,?)", (doorSerial, userImage))
        self.mainDB.commit()

    def recordUserInfo(self, doorSerial, userId, userPw):
        """Create the user_info row for a new door registration."""
        self.mainDB.execute("insert into user_info(door_serial, user_id, user_pw) values(?,?,?)", (doorSerial, userId, userPw))
        self.mainDB.commit()

    def recordDoorHasCam(self, doorSerial, camSerial):
        """Pair a camera serial with a door serial."""
        self.mainDB.execute("insert into door_cam_match(door_serial, cam_serial) values(?,?)", (doorSerial, camSerial))
        self.mainDB.commit()

    def deleteUserData(self, doorSerial):
        """Delete the user row for *doorSerial*.

        With foreign keys enabled (see __init__) the declared
        ON DELETE CASCADE now also removes dependent rows in
        user_images, courier_info and door_cam_match.
        """
        self.mainDB.execute('delete from user_info where door_serial = ?', (doorSerial,))
        self.mainDB.commit()
|
# Read a string and an index i, then print the string with the
# character at position i removed.
s=input('Enter the string')
i=int(input('Enter the value of i'))
s1=s[0:i]          # everything before index i
s2=s[i+1:len(s)]   # everything after index i
print(s1+s2)
import cv2
import os
import numpy as np
import pytesseract as tess
import csv

# OCR script: crops fixed rectangular regions (one table row at a time)
# out of a bank-statement image and appends the recognized text to a CSV.
# (Comments translated from Spanish.)
path = '/Users/fneut/Desktop/PP/QueryImages'
myPicList = os.listdir(path)
print(myPicList)
# Pick the second file of the directory listing as the image to process.
for z,k in enumerate(myPicList):
    if(z == 1):
        nombre_foto = k
myData = []    # cell texts of the current row
myData2 = []   # list of rows
def RegionInteres(contador):
    """Set the global ROI list for the current table row; contador == 0
    resets to the first row, any other value advances 50 px down."""
    global roi,j
    if contador == 0:
        j = 0
    else:
        j += 50
    # (x1,y1), (x2,y2), column label -- one entry per table column
    roi = [[(0,j), (89, 50+j), 'fecha'],
           [(126, j), (285, 50+j), 'descripcion'],
           [(402, j), (538, 50+j), 'canal'],
           [(623, j), (731, 50+j), 'cargos'],
           [(780, j), (878, 50+j), 'abono'],
           [(916, j), (1015, 50+j), 'saldo']]
RegionInteres(0)
imgQ = cv2.imread(path + "/" + str(nombre_foto))
h,w,c = imgQ.shape
imgQ = cv2.resize(imgQ,(w//2,h//2))
cv2.imshow("output", imgQ)
imgShow = imgQ.copy()
imgMask = np.zeros_like(imgShow)
print(f' ############### Extrayendo data de la imagen {nombre_foto} ###############')
# Process 9 table rows, shifting the ROI strip down after each one.
for veces in range(9):
    #print(roi)
    for sub,r in enumerate(roi):
        #cv2.rectangle(imgMask,r[0],r[1],(0,255,0), cv2.FILLED)
        cv2.rectangle(imgMask,(r[0][0],r[0][1]),(r[1][0],r[1][1]) ,(0,255,0), cv2.FILLED)
        imgShow = cv2.addWeighted(imgShow, 0.99, imgMask, 0.1, 0)
        imgCrop = imgQ[r[0][1]:r[1][1], r[0][0]:r[1][0]]
        myData.append(tess.image_to_string(imgCrop))
    # NOTE(review): indentation reconstructed -- the form-feed marker and
    # row bookkeeping are assumed to run once per row; confirm.
    myData.append('\n\n\x0c')
    myData2.append(myData)
    myData = []
    RegionInteres(1)
print(myData2)
# Append every row except the first to the output CSV.
with open('/Users/fneut/Desktop/PP/SalidaData.csv','a+' ) as f:
    for num,contenido in enumerate(myData2):
        if(num != 0):
            for num2,contenido2 in enumerate(contenido):
                if(num2 == 6):
                    f.write(str(contenido2)[:-2])
                else:
                    f.write(str(contenido2)[:-2]+',')
imgShow = cv2.resize(imgShow,(w//3,h//3))
cv2.imshow("output2", imgShow)
cv2.waitKey(0)
|
import sys
from PIL import Image
from WordsDrawer.FractalDrawer import FractalDrawer
def pixel_processing(img_x, img_y, iterations, vector):
    """Return the (R, G, B) colour of one image pixel, delegating to the
    module-level fractal drawer `fd`."""
    # Ask the drawer for this pixel's colour (comment translated from Russian)
    red, green, blue = fd.get_fractal_color(img_x, img_y, iterations, vector)
    return red, green, blue
def get_vector(word):
    """Look up *word* (case-insensitive) in vectors_real.vec and return
    its vector with every component scaled by 10.

    Falls back to the list [1, 2, ..., 299] when the word is absent.
    """
    target = word.lower()
    with open("vectors_real.vec") as vec_file:
        vec_file.readline()  # skip the header line
        # Scan for the matching entry and return its scaled components.
        for line in vec_file:
            if line.split("_")[0] == target:
                components = line.split(" ")[1:]
                return [float(component) * 10 for component in components]
    return list(range(1, 300))
# Create the image to draw on (comments translated from Russian)
img = Image.new('RGB', (600, 600), (255, 255, 255))
# Set the bounds of the image and of the plot
width, height = img.size
start_x = -1.5
end_x = 1.5
start_y = -1.5
end_y = 1.5
# Number of iterations for the fractal computation
iterations = 50
# Create the fractal-drawer object
fd = FractalDrawer(start_x, start_y, end_x, end_y, width, height)
# Get the word's vector from the dictionary (word taken from argv[1])
word = sys.argv[1]
vec = get_vector(word)
# Colour every pixel of the image in turn
x = 0
while x < width:
    y = 0
    while y < height:
        img.putpixel((x, y), pixel_processing(x, y, iterations, vec))
        y += 1
    x += 1
img.show()
|
""" Advent of Code Day 4 - Security Through Obscurity"""
import re
def check_checksum(room):
    """Validate a room descriptor's checksum.

    A room looks like ``name-with-dashes-<sector_id>[<checksum>]``. The
    checksum is valid when it equals the five most common letters of the
    encrypted name, ties broken alphabetically.

    Returns the sector ID for a valid room, 0 otherwise.

    BUG FIX: the original counted letter frequencies over the *whole*
    room string (including the checksum itself, inflating counts) and its
    descending loop ``range(top, 1, -1)`` never considered letters that
    occur exactly once. Frequencies are now taken from the encrypted name
    only, so both defects are gone.
    """
    checksum = re.search(r'\[(\w+)]', room).group(1)
    sector_id = int(re.search(r'(\d+)', room).group(1))
    # Strip the trailing "[checksum]" to get the encrypted name portion.
    name = room[:-(len(checksum) + 2)]
    frequencies = {}
    for char in name:
        if char.isalpha():
            frequencies[char] = frequencies.get(char, 0) + 1
    # Most frequent first; alphabetical among equal counts.
    ranked = sorted(frequencies, key=lambda letter: (-frequencies[letter], letter))
    real_checksum = ''.join(ranked[:5])
    if checksum == real_checksum:
        return sector_id
    return 0
def decrypt_name(room):
    """Decrypt the room's shift cypher with its sector ID and return the
    sector ID when the decrypted text contains 'north', else 0.

    Dashes become spaces, lowercase letters are rotated by the sector ID,
    and every other character (digits, brackets) is dropped.
    """
    sector_id = int(re.search(r'(\d+)', room).group(1))
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    pieces = []
    for ch in room:
        if ch == '-':
            pieces.append(' ')
        elif ch in alphabet:
            pieces.append(alphabet[(alphabet.index(ch) + sector_id) % 26])
    decrypted = ''.join(pieces)
    return sector_id if 'north' in decrypted else 0
# Parse the puzzle input: one room descriptor per line.
with open('input.txt', 'r') as f:
    rooms = [line.strip() for line in f.readlines()]
# Answer One: sum of sector IDs of rooms with a valid checksum.
print("Sum of Valid Rooms:", sum([check_checksum(room) for room in rooms]))
# Answer Two: sector ID of the room whose decrypted name mentions 'north'.
print("Sector ID of North Pole Storage:", sum(decrypt_name(room) for room in rooms))
|
import copy
class Vertex():
    """A weighted graph vertex storing the indices of its neighbours."""

    def __init__(self, index, weight):
        self.index = index
        self.weight = weight
        self.adj = set()  # indices of adjacent vertices

    @property
    def degree(self):
        """Number of adjacent vertices."""
        return len(self.adj)

    def add_edge(self, adjacent_node):
        """Record an edge to the vertex with index *adjacent_node*."""
        self.adj.add(adjacent_node)

    def is_adjacent_to(self, node):
        """True if *node* (a Vertex object) is adjacent to this vertex."""
        return (node.index in self.adj)

    def is_adjacent_to_index(self, index):
        """True if the vertex with the given index is adjacent."""
        return (index in self.adj)

    def check_conflicts(self, colouring, k):
        """Return, per colour 0..k-1, how many coloured neighbours use it.

        colouring maps vertex index -> colour index. (An unused
        `conflicts` list was removed.)
        """
        colours = [0]*k
        for v in self.adj:
            if v in colouring:
                colours[colouring[v]] += 1
        return colours

    def check_total_conflicts(self, colouring, get_list_of_conflicts=False):
        """Count neighbours that appear in *colouring*; optionally also
        return their indices.

        NOTE(review): every coloured neighbour is counted regardless of
        whether its colour clashes with this vertex's own colour --
        confirm that is the intended notion of "conflict".
        """
        num_conflicts = 0
        conflicts = []
        for v in self.adj:
            if v in colouring:
                num_conflicts += 1
                if(get_list_of_conflicts):
                    conflicts.append(v)
        if get_list_of_conflicts:
            return num_conflicts, conflicts
        return num_conflicts

    def check_colours(self, colouring, k, get_list_of_conflicts=False):
        """Try every colour on this vertex and return {colour: conflicts}.

        BUG FIX: the original forwarded *k* as the second argument of
        check_total_conflicts() (a TypeError at call time) and iterated
        range(0, k-1), skipping colour k-1. All k colours are now tried
        and the extra argument is dropped.
        """
        colour_conflicts = {}
        colouring_copy = copy.deepcopy(colouring)
        for i in range(k):
            colouring_copy[self.index] = i
            colour_conflicts[i] = self.check_total_conflicts(colouring_copy, get_list_of_conflicts)
        return colour_conflicts

    def __str__(self):
        # BUG FIX: `self.colour` is never assigned in this class, so the
        # original raised AttributeError; getattr keeps __str__ usable on
        # uncoloured vertices.
        return (f'VERTEX {self.index}\nWeight: {self.weight}\ncolour: {getattr(self, "colour", None)}\n')
class Graph():
    """Undirected graph over Vertex objects, built from an edge list."""

    def __init__(self, weights, edges, colours):
        self.colours = colours  # number of available colours
        self.vertexes = []
        i = 0
        for weight in weights:
            self.vertexes.append(Vertex(i, weight))
            i += 1
        self.edges = edges
        for edge in edges:
            # store the adjacency on both endpoints (undirected graph)
            self.vertexes[edge[0]].add_edge(edge[1])
            self.vertexes[edge[1]].add_edge(edge[0])

    def are_connected(self, index1, index2):
        """True if an edge joins the two vertex indices.

        BUG FIX: is_adjacent_to() expects a Vertex object (it reads
        .index), so passing a plain index raised AttributeError; the
        index-based check is the correct call here.
        """
        return self.vertexes[index1].is_adjacent_to_index(index2)

    def number_of_conflicts(self, colouring):
        """Count edges whose two endpoints share a colour in *colouring*."""
        n = 0
        for e in self.edges:
            if colouring[e[0]] == colouring[e[1]]:
                n += 1
        return n

    def __str__(self):
        string = f'colours: {self.colours}\nVertexes: {len(self.vertexes)}\nEdges: {len(self.edges)}\n---------\n'
        for v in self.vertexes:
            string += str(v)
            string += '\n'
        return string
import FWCore.ParameterSet.Config as cms
# CMSSW PoolSource listing the skimmed 14 TeV minimum-bias input files
# (beam spot phi0, R=10, high-statistics sample) for this job.
# NOTE(review): file indices are not contiguous -- presumably failed
# production jobs were dropped from the list; confirm before relying on
# completeness.
source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_101_1_pND.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_103_1_bkW.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_104_1_i5m.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_107_1_URP.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_10_1_C9B.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_113_1_6eo.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_117_1_Op8.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_118_1_3CK.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_119_1_7wj.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_11_1_5iW.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_120_1_Grc.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_124_1_8MT.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_125_1_zl0.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_126_1_4RA.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_127_1_8xI.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_129_1_bTX.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_12_1_e5j.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_130_1_tu2.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_131_1_H65.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_132_1_n2t.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_132_1_pU6.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_134_1_EcM.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_135_1_RxT.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_138_1_kKW.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_13_1_BtX.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_140_1_g8u.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_144_1_ptL.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_145_1_wwZ.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_146_1_f7i.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_147_1_hSO.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_148_1_UDg.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_149_1_DwW.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_14_1_0r0.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_151_1_DrK.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_152_1_5tk.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_153_1_0aK.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_156_1_QEE.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_159_1_L54.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_15_1_QC6.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_160_1_XId.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_161_1_pmE.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_166_1_W70.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_167_1_0qk.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_169_1_qwx.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_16_1_4H2.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_172_1_B9h.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_173_1_av6.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_175_1_4VD.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_176_1_n7X.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_17_1_cIC.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_181_1_M4o.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_182_1_790.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_184_1_lP1.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_186_1_Byz.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_187_1_3ix.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_188_1_g4G.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_189_1_YjD.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_18_1_keA.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_190_1_crd.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_191_1_K6y.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_192_1_2FX.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_19_1_Y8d.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_1_1_HzN.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_200_1_OvB.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_20_1_7PD.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_21_1_Bkz.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_22_1_Ndn.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_23_1_dXT.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_24_1_CkV.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_25_1_GYy.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_26_1_oCl.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_27_1_2aV.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_28_1_c0L.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_29_1_plU.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_2_1_Sd3.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_30_1_nj7.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_31_1_S3B.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_32_1_mif.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_33_1_2rn.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_34_1_hyp.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_35_1_iPi.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_37_1_Lg0.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_3_1_RwN.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_4_1_0nH.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_55_1_Ewa.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_5_1_9vl.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_6_1_GBC.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_76_1_5HE.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_7_1_H26.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_84_1_8iq.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_8_1_U3d.root',
        '/store/user/skaplan/noreplica/MinBiasBeamSpotPhi0R10_HISTATS/outfile14TeVSKIM_9_1_p47.root',
    )
)
|
import pickle,re,string,os
from collections import defaultdict
def find_unigrams(sentence):
    """Split *sentence* on runs of non-word characters and return the
    lowercased tokens, dropping empty strings."""
    tokens = re.split('\W+', sentence)
    return [token.lower() for token in tokens if token != '']
def find_bigrams(unigrams):
    """Return adjacent word pairs of *unigrams*, each joined by a space.

    (Also drops the original's local variable that shadowed the
    `string` module.)
    """
    return [unigrams[i] + ' ' + unigrams[i + 1]
            for i in range(len(unigrams) - 1)]
def create_vocabulary():
    """Build a word -> id vocabulary per dataset file and pickle it.

    NOTE: this is Python 2 code (print statement, text-mode pickle).
    For each file in ../data/input/train/, words from both the train and
    the matching test file are assigned consecutive integer ids; the
    dict is dumped to ../data/vocabulary/<filename>.pkl.
    """
    # Strip punctuation except hyphens before tokenizing.
    exclude = set(string.punctuation) - set('-')
    path = '../data/input/train/'
    for filename in os.listdir(path):
        print path,filename
        fin = open(path+filename,'r')
        vocab_dict = defaultdict()
        count = 0
        # First pass: the training file. Sentences are the first
        # tab-separated column.
        for line in fin:
            sentence = line.split('\t')[0]
            sentence = ''.join(ch for ch in sentence if ch not in exclude)
            unigrams = find_unigrams(sentence)
            #bigrams = find_bigrams(unigrams)
            #unigrams_bigrams = unigrams+bigrams
            for word in unigrams:
                if word.lower() not in vocab_dict:
                    vocab_dict[word.lower()]=count
                    count+=1
        fin.close()
        # Second pass: the matching test file, continuing the id counter.
        fin = open('../data/input/test/'+filename,'r')
        for line in fin:
            sentence = line.split('\t')[0]
            sentence = ''.join(ch for ch in sentence if ch not in exclude)
            unigrams = find_unigrams(sentence)
            #bigrams = find_bigrams(unigrams)
            #unigrams_bigrams = unigrams+bigrams
            for word in unigrams:
                if word.lower() not in vocab_dict:
                    vocab_dict[word.lower()] = count
                    count+=1
        fin.close()
        pickle.dump(vocab_dict,open('../data/vocabulary/'+filename+'.pkl','w'))
create_vocabulary()
# -*- coding: utf-8 -*-
# Generated by Django 1.9a1 on 2015-11-06 21:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Test and TestCase
    tables (verbose_names are in Russian)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='Чё тестируем?')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TestCase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.TextField(verbose_name='Ссылка')),
                ('state', models.TextField(verbose_name='Состояние')),
                ('anonymous', models.TextField(verbose_name='Аноним')),
                ('not_activated', models.TextField(verbose_name='Не активированный')),
                ('activated', models.TextField(verbose_name='Активированный')),
                ('not_active', models.TextField(verbose_name='Удалённый')),
                ('not_moderated', models.TextField(verbose_name='Заблокированный')),
                ('staff', models.TextField(verbose_name='Админ')),
                ('test', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.Test')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
from django.shortcuts import render,HttpResponse,redirect
# Create your views here.
from django import forms
from django.forms import fields
class fqForm(forms.Form):
    """Registration/login form with per-field Chinese error messages."""
    user = fields.CharField(
        max_length=18,
        min_length=6,
        required=True,
        error_messages={
            'required': '用户名不能为空',
            'max_length': '太长了',
            'min_length': '太短了',
        }
    )
    # NOTE(review): min_length=32 rejects every password shorter than 32
    # characters -- this looks like it was meant to be max_length=32;
    # confirm the intent before changing the validation rule.
    pwd = fields.CharField(min_length=32, required=True,
                           error_messages={
                               'required': '密码不能为空',
                               # BUG FIX: message said "too long" (太长了)
                               # for the min_length (too short) error.
                               'min_length': '太短了',
                           })
    age = fields.IntegerField(required=True,
                              error_messages={
                                  'required': '年龄不能为空',
                                  'invalid': '格式错误',
                              })
    email = fields.EmailField(required=True,
                              error_messages={
                                  # BUG FIX: message was copy-pasted from the
                                  # age field ("年龄不能为空").
                                  'required': '邮箱不能为空',
                                  'invalid': '格式错误',
                              })
def form1(request):
    """Render fqForm; on a valid POST, redirect away, otherwise re-render
    the form with its validation errors."""
    if request.method =='POST':
        obj = fqForm(request.POST)
        # Validation succeeded
        if obj.is_valid():
            # The user's cleaned, validated data
            print(obj.cleaned_data)
            return redirect('http://www.baidu.com')
        else:
            print(obj.errors)
            return render(request,'form1.html',{'obj':obj})
    # GET: generate the empty form's HTML
    obj = fqForm()
    return render(request,'form1.html',{'obj':obj})
from time import sleep
# Loop exercises. The first (commented-out) version is a plain countdown.
# n = 5
# while n > 0:
#     print(n)
#     n = n - 1
# print('Blastoff!')
n = 1
# NOTE(review): this loop decrements forever -- `while True` never exits,
# so the final print('Done!') is unreachable. It prints every negative n
# that is not a multiple of 3, one per second.
while True:
    n = n - 1
    if n%3==0:
        continue
    print(n)
    sleep(1)
print('Done!')
# Second exercise (commented out): read numbers until 'q'/'c' is entered.
# while True:
#     print("Entrez 'q' pour quiter")
#     num=input("Enter un nombre: ")
#     if num=='c':
#         break
#     num=float(num)
#     print(num)
# print('Done')
from datetime import datetime
from django.db import models
class COM_CD_M(models.Model):
    """Common-code master table (code groups)."""
    COM_CD = models.CharField(max_length=20, primary_key=True)   # code group id
    COM_CD_NM = models.CharField(max_length=20)                  # display name
    REMARK_DC = models.CharField(max_length=20)                  # remark/description
    USE_YN = models.CharField(max_length=20)                     # in-use flag ('Y'/'N' presumably -- confirm)
    def __str__(self):
        return self.COM_CD
class COM_CD_D(models.Model):
    """Common-code detail table; each row belongs to a COM_CD_M group."""
    COM_DTL_CD = models.CharField(max_length=20, primary_key=True)      # detail code id
    COM_CD = models.ForeignKey('COM_CD_M', on_delete=models.CASCADE)    # owning code group
    COM_DTL_NM = models.CharField(max_length=20)                        # display name
    REMARK_DC = models.CharField(max_length=20)                         # remark/description
    USE_YN = models.CharField(max_length=20)                            # in-use flag
    def __str__(self):
        return self.COM_DTL_CD
class PHRASE(models.Model):
    """A quotable phrase with its source and an emotion classification."""
    PHRASE_SEQ = models.IntegerField(primary_key=True)    # sequence id
    PHRASE_KIND = models.CharField(max_length=20)         # category code
    QUOTE = models.CharField(max_length=300)              # the phrase text
    PHRASE_FROM = models.CharField(max_length=100)        # attribution/source
    PHRASE_URL = models.CharField(max_length=200)         # source link
    EMOTION_KIND = models.CharField(max_length=20)        # emotion code
    def __str__(self):
        return self.QUOTE
class USER(models.Model):
    """Application user, keyed by email address.

    NOTE(review): PASSWORD appears to be stored as plain text -- it
    should be hashed (e.g. via django.contrib.auth); confirm and fix
    upstream.
    """
    SEX_CHOICES = [
        ('M', 'Male'),
        ('F', 'Female'),
    ]
    EMAIL = models.CharField(max_length=100, primary_key=True)
    PASSWORD = models.CharField(max_length=50)
    USER_NM = models.CharField(max_length=20)                        # display name
    SEX_CD = models.CharField(max_length=20, choices=SEX_CHOICES)
    USER_AGE = models.IntegerField()
    def __str__(self):
        return self.EMAIL
class FACE(models.Model):
    """One smile-study session: a neutral photo plus three smile photos,
    each with its detection confidence percentage."""
    SMILE_SEQ = models.AutoField(primary_key=True)
    EMAIL = models.ForeignKey('USER', on_delete=models.CASCADE)   # owning user
    STUDY_DATE = models.DateTimeField('date published')
    NEUTRAL_PATH = models.CharField(max_length=200)               # image file path
    NEUTRAL_PERCENT = models.FloatField()                         # confidence %
    SMILE1_PATH = models.CharField(max_length=200)
    SMILE1_PERCENT = models.FloatField()
    SMILE2_PATH = models.CharField(max_length=200)
    SMILE2_PERCENT = models.FloatField()
    SMILE3_PATH = models.CharField(max_length=200)
    SMILE3_PERCENT = models.FloatField()
    def __str__(self):
        return str(self.STUDY_DATE)
# Create your models here.
|
#!/usr/bin/python3
# Undirected tree stored as an adjacency list: node -> neighbours.
t = {
    1: [4, 5, 2],
    2: [1, 6],
    3: [4],
    4: [3, 7],
    5: [1],
    6: [2],
    7: [4, 8],
    8: [7]
}

def height(s, e):
    """Return the height (longest downward path, counted in edges) of the
    subtree rooted at node s, where e is the parent of s (excluded to
    avoid walking back up the undirected adjacency list).

    BUG FIX: the original function recursed only for its debug prints and
    returned None, so print(height(1, 0)) printed 'None'; it also carried
    an unused `count` variable. It now computes and returns the height.
    """
    children = [node for node in t[s] if node != e]
    if not children:
        return 0
    return 1 + max(height(child, s) for child in children)

print(height(1, 0))
|
# -*- coding: utf-8 -*-
# Odoo/OpenERP module manifest for the Consignacion Management addon.
{
    'name': 'Consignacion Management',
    'version': '0.1',
    'category': 'Consignacion Management',
    'sequence': 20,
    'summary': 'Consignacion Orders, Receptions, Supplier Invoices',
    'description': """
Manage goods requirement by Consignacion Orders easily
==================================================
""",
    'author': 'Econube | Pablo Cabezas',
    'website': 'http://www.openerp.com',
    'depends': ['stock', 'process', 'procurement'],
    'data': [
        'consignacion_view.xml',
        'consignacion_workflow.xml',
        'consignacion_sequence.xml',
        'stock_view.xml',
        'wizard/consignacion_order_group_view.xml',
        'wizard/consignacion_line_invoice_view.xml',
    ],
    # Test/demo sections inherited from the purchase module are kept
    # commented out for reference.
    #'test': [
    #    'test/process/cancel_order.yml',
    #    'test/process/rfq2order2done.yml',
    #    'test/process/generate_invoice_from_reception.yml',
    #    'test/process/run_scheduler.yml',
    #    'test/process/merge_order.yml',
    #    'test/process/edi_purchase_order.yml',
    #    'test/process/invoice_on_poline.yml',
    #    'test/ui/print_report.yml',
    #    'test/ui/duplicate_order.yml',
    #    'test/ui/delete_order.yml',
    #],
    #'demo': [
    #    'purchase_order_demo.yml',
    #    'purchase_demo.xml',
    #],
    'installable': True,
    'auto_install': False,
    'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import pyaudio
import wave

# Concatenate several pre-recorded WAV clips ("thank you" + a person's
# name, syllable by syllable) and play them back in one stream.
filename = 'Sound/AudioFile/xinCamOn.wav'
fileVLC = 'Sound/AudioFile/vuiLongThuLai.wav'
data = b''
p = pyaudio.PyAudio()
# Open the sound file and append all of its frames to the buffer
wf = wave.open(filename, 'rb')
numFrame = wf.getnframes()
data += wf.readframes(numFrame)
wf = wave.open('/home/lam/StudentRecognize/Sound/AudioFile/NGUYỄN.wav', 'rb')
numFrame = wf.getnframes()
data += wf.readframes(numFrame)
wf = wave.open('/home/lam/StudentRecognize/Sound/AudioFile/HỒNG.wav', 'rb')
numFrame = wf.getnframes()
data += wf.readframes(numFrame)
wf = wave.open('/home/lam/StudentRecognize/Sound/AudioFile/LÂM.wav', 'rb')
numFrame = wf.getnframes()
data += wf.readframes(numFrame)
# Create an interface to PortAudio
# wf = wave.open(fileVLC, 'rb')
# numFrame = wf.getnframes()
# data += wf.readframes(numFrame)
# Open a .Stream object to write the WAV file to
# 'output = True' indicates that the sound will be played rather than recorded
# NOTE(review): the stream parameters come from the LAST opened file, so
# all clips are assumed to share the same sample width/channels/rate --
# confirm.
stream = p.open(format = p.get_format_from_width(wf.getsampwidth()),
                channels = wf.getnchannels(),
                rate = wf.getframerate(),
                output = True)
# Play the concatenated audio by writing it to the stream
stream.write(data)
# Play the sound by writing the audio data to the stream
# while data != '':
#     stream.write(data)
#     data = wf.readframes(chunk)
# Close and terminate the stream
stream.close()
p.terminate()
|
from abc import ABC
from collections import defaultdict
from django.db.models import Avg, Count, Sum
from django.db.models.functions import TruncMonth, TruncDay
from rest_framework.response import Response
from datetime import datetime, timedelta
from .mixins import ChartMixin
from .serializers import CaseSerializer, ServiceRequestsSerializer, EvidenceSerializer
from .models import Case, Request, Evidence
# Create your views here.
from rest_framework.views import APIView
class GetCasesByDateView(APIView):
    """Cases opened in a date range, plus simple summary statistics."""

    def get(self, request, **kwargs):
        """Filter Case rows by ?start_date/?end_date (m/d/Y) or return all."""
        start_date_value = request.GET.get('start_date', None)
        end_date_value = request.GET.get('end_date', None)
        if start_date_value and end_date_value:
            start_date = datetime.strptime(start_date_value, '%m/%d/%Y')
            end_date = datetime.strptime(end_date_value, '%m/%d/%Y')
            total_days = end_date - start_date
            # NOTE(review): uses exclusive __gt/__lt while the sibling
            # views use __gte/__lte -- confirm whether endpoints should
            # be included.
            cases = Case.objects.filter(open_date__gt=start_date,
                                        open_date__lt=end_date)
        else:
            cases = Case.objects.all()
            # BUG FIX: was first - last, a *negative* timedelta (first()
            # is the earliest open_date under ascending ordering), which
            # made daily_average negative.
            total_days = cases.order_by('open_date').last().open_date - \
                cases.order_by('open_date').first().open_date
        serializer = CaseSerializer(cases, many=True)
        response = {
            'data': serializer.data,
            'period_total': len(cases),
            'yesterday_total': len(cases.filter(open_date=datetime.now() - timedelta(1))),
            # max(..., 1) guards the ZeroDivisionError when the whole
            # period spans a single day.
            'daily_average': round(len(cases) / max(int(total_days.days), 1))
        }
        return Response(response)
class GetServiceRequestsByDateView(APIView):
    """Service requests in a date range, plus simple summary statistics."""

    def get(self, request, **kwargs):
        """Filter Request rows by ?start_date/?end_date (m/d/Y) or return all."""
        start_date_value = request.GET.get('start_date', None)
        end_date_value = request.GET.get('end_date', None)
        if start_date_value and end_date_value:
            start_date = datetime.strptime(start_date_value, '%m/%d/%Y')
            end_date = datetime.strptime(end_date_value, '%m/%d/%Y')
            total_days = end_date - start_date
            service_requests = Request.objects.filter(request_date__gte=start_date,
                                                      request_date__lte=end_date)
        else:
            service_requests = Request.objects.all()
            # BUG FIX: was first - last, a *negative* timedelta (first()
            # is the earliest request_date), which made daily_average
            # negative.
            total_days = service_requests.order_by('request_date').last().request_date - \
                service_requests.order_by('request_date').first().request_date
        serializer = ServiceRequestsSerializer(service_requests, many=True)
        response = {
            'data': serializer.data,
            'period_total': len(service_requests),
            'yesterday_total': len(service_requests.filter(request_date=datetime.now() - timedelta(1))),
            # max(..., 1) guards the ZeroDivisionError for a same-day span.
            'daily_average': round(len(service_requests) / max(int(total_days.days), 1))
        }
        return Response(response)
class GetEvidenceByDateView(APIView):
    """Evidence keyed in within a date range, plus summary statistics."""

    def get(self, request, **kwargs):
        """Filter Evidence rows by ?start_date/?end_date (m/d/Y) or return all."""
        start_date_value = request.GET.get('start_date', None)
        end_date_value = request.GET.get('end_date', None)
        if start_date_value and end_date_value:
            start_date = datetime.strptime(start_date_value, '%m/%d/%Y')
            end_date = datetime.strptime(end_date_value, '%m/%d/%Y')
            total_days = end_date - start_date
            evidence = Evidence.objects.filter(key_in_date__gte=start_date,
                                               key_in_date__lte=end_date)
        else:
            evidence = Evidence.objects.all()
            # BUG FIX: was first - last, a *negative* timedelta (first()
            # is the earliest key_in_date), which made daily_average
            # negative.
            total_days = evidence.order_by('key_in_date').last().key_in_date - \
                evidence.order_by('key_in_date').first().key_in_date
        serializer = EvidenceSerializer(evidence, many=True)
        response = {
            'data': serializer.data,
            'period_total': len(evidence),
            'yesterday_total': len(evidence.filter(key_in_date=datetime.now() - timedelta(1))),
            # max(..., 1) guards the ZeroDivisionError for a same-day span.
            'daily_average': round(len(evidence) / max(int(total_days.days), 1))
        }
        return Response(response)
class GetBacklogByUnitView(APIView, ChartMixin):
    """Chart data: per-unit backlog size for each day of the period."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.serialized_data = None
        self.start_date = None
        self.end_date = None

    def get(self, request, **kwargs):
        """Serialize requests in the date range and build the chart payload."""
        start_date_value = request.GET.get('start_date', None)
        end_date_value = request.GET.get('end_date', None)
        if start_date_value and end_date_value:
            self.start_date = datetime.strptime(start_date_value, '%m/%d/%Y')
            self.end_date = datetime.strptime(end_date_value, '%m/%d/%Y')
            service_requests = Request.objects.filter(request_date__gte=self.start_date,
                                                      request_date__lte=self.end_date)
        else:
            service_requests = Request.objects.all()
            self.start_date = service_requests.order_by('request_date').first().request_date
            self.end_date = service_requests.order_by('request_date').last().request_date
        serializer = ServiceRequestsSerializer(service_requests, many=True)
        self.serialized_data = serializer.data
        response = {
            'data': self.get_chart()
        }
        return Response(response)

    def get_chart(self):
        """Build [{name, x: [dates], y: [counts]}, ...], one entry per unit.

        A request is in the backlog of every day from its request date up
        to (but not including) its assignment date.
        """
        chart_data = defaultdict(lambda: defaultdict(list))
        date_counts = defaultdict(lambda: defaultdict(int))
        for result in self.serialized_data:
            try:
                dept = result['lab_dept_abbrev']
                request = datetime.strptime(result['request_date'], '%m/%d/%Y')
                # BUG FIX: strptime(None, ...) raises before the old
                # `if assigned is None` check could run, so unassigned
                # requests were silently dropped by a bare `except`.
                # A missing assign_date now counts as "still in backlog"
                # through the end of the period, as originally intended.
                assign_value = result.get('assign_date')
                if assign_value:
                    assigned = datetime.strptime(assign_value, '%m/%d/%Y')
                else:
                    assigned = self.end_date + timedelta(days=1)
                in_backlog = assigned - request
                for day in range(in_backlog.days):
                    day = request + timedelta(days=day)
                    date_counts[dept][day] += 1
            except (KeyError, TypeError, ValueError):
                # Skip malformed rows only (was a bare `except`).
                continue
        for unit in date_counts:
            chart_data[unit]['name'] = unit
            items = list(date_counts[unit].items())
            items.sort()
            for backlog_date, count in items:
                chart_data[unit]['x'].append(self.chart_date(backlog_date))
                chart_data[unit]['y'].append(count)
        return [v for v in chart_data.values()]
class GetCaseloadByUnit(APIView, ChartMixin):
    """Stub endpoint for a caseload-by-unit chart (not yet implemented)."""

    def get(self, request, **kwargs):
        # TODO: implement caseload aggregation; until then this returns None.
        # (Removed a stray trailing character that made the line invalid.)
        pass
# Generated by Django 2.2.2 on 2020-06-08 12:06
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: clears all Meta options on the `player` model."""
    dependencies = [
        ('jazz', '0030_auto_20200608_1245'),
    ]
    operations = [
        # Reset the model's options (ordering, verbose_name, ...) to Django defaults.
        migrations.AlterModelOptions(
            name='player',
            options={},
        ),
    ]
|
"""
This module defines several classes for code editing:
- CodeEditor: a simple Qt code editor with syntax highlighting, search and replace, open, save and close,
line wrapping and text block management;
- ErrorConsole: a GUI text box receiving messages from a code runner;
- CodeEditorWindow: a multi-editor GUI interface based on a QTabWidget, with a default directory,
open, save and close functions, as well as tracking of modifications;
- several other utility classes used in the three classes introduced above.
The CodeEditor(s) as well as the CodeEditorWindow are for editing only and do not know the concept of running
the code that they contain. Nevertheless the CodeEditorWindow may have a parent and can interrogate its boolean method
closing0k(editor) - if it exists - to get authorization for closing an editor or not.
"""
import os
import traceback
import math
import sys
import time
import re
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from syntaxhighlighter import *
from application.config.parameters import *
from application.ide.widgets.observerwidget import ObserverWidget
DEVELOPMENT = False
class ErrorConsole(QTreeWidget, ObserverWidget):
    """
    Two-column tree of recent exceptions reported by a code runner.
    Top-level items show the formatted exception; children list the
    (filename, line) traceback entries, and double-clicking a child opens
    the file in the associated CodeEditorWindow at that line.
    """
    def __init__(self, codeEditorWindow, codeRunner, parent=None):
        self._codeEditorWindow = codeEditorWindow
        self._codeRunner = codeRunner
        QTreeWidget.__init__(self, parent)
        ObserverWidget.__init__(self)
        # PyQt4 old-style signal: fires when any item is double-clicked.
        self.connect(self, SIGNAL(
            "itemDoubleClicked(QTreeWidgetItem *,int)"), self.itemDoubleClicked)
        self.setColumnCount(2)
        self.setColumnWidth(0, 400)
        self.setHeaderLabels(["filename", "line"])
    def updatedGui(self, subject, property, value=None):
        # Observer hook: consume the runner's queued exceptions, then clear them.
        if subject == self._codeRunner and property == "exceptions":
            for info in value:
                self.processErrorTraceback(info)
            self._codeRunner.clearExceptions()
    def itemDoubleClicked(self, item, colum):
        # Only child items carry (filename, line); open the file and jump there.
        if item.parent() is not None:
            filename = unicode(item.text(0))  # NOTE: Python 2 / PyQt4 API
            line = int(item.text(1))
            editor = self._codeEditorWindow.openFile(filename)
            if editor is None:
                return
            editor.highlightLine(line)
    def processErrorTraceback(self, exceptionInfo):
        """
        Insert a (type, value, traceback) triple at the top of the tree,
        trimming the list to ~20 entries. The first traceback frame is
        skipped - presumably the runner's own exec frame (TODO confirm).
        """
        while self.topLevelItemCount() > 20:
            self.takeTopLevelItem(self.topLevelItemCount() - 1)
        (exception_type, exception_value, tb) = exceptionInfo
        text = traceback.format_exception_only(
            exception_type, exception_value)[0]
        text = text.replace("\n", " ")
        tracebackEntries = traceback.extract_tb(tb)
        exceptionItem = QTreeWidgetItem()
        font = QFont()
        font.setPixelSize(14)
        exceptionItem.setFont(0, font)
        self.insertTopLevelItem(0, exceptionItem)
        exceptionItem.setText(0, text)
        exceptionItem.setFirstColumnSpanned(True)
        exceptionItem.setFlags(Qt.ItemIsEnabled)
        for entry in tracebackEntries[1:]:
            (filename, line_number, function_name, text) = entry
            # Only list frames that refer to real files on disk.
            if os.path.exists(filename) and os.path.isfile(filename):
                item = QTreeWidgetItem()
                exceptionItem.addChild(item)
                item.setText(0, filename)
                item.setText(1, str(line_number))
class EditorTabBar(QTabBar):
    """
    A QTabBar with tabs that are both movable horizontally and drag-and-droppable by initiating the drag vertically.
    """
    def __init__(self, parent=None):
        QTabBar.__init__(self, parent)
        self._startDragPosition = None
    def mousePressEvent(self, e):
        # Remember where the press happened and which tab was hit, so that
        # mouseMoveEvent can tell a horizontal re-order from a vertical drag.
        if (e.buttons() & Qt.LeftButton):
            self._startDragPosition = e.pos()
            self._tab = self.tabAt(e.pos())
            self.move = False
        QTabBar.mousePressEvent(self, e) # why not using ignore() ?
    def mouseMoveEvent(self, e):
        # We try here to make compatible the movable tabs along x with a drag
        # and drop triggered by a vertical drag.
        if (e.buttons() & Qt.LeftButton):
            # if (e.pos()-self._startDragPosition).manhattanLength() >
            # QApplication.startDragDistance():
            x, y, w, h, s = e.pos().x(), e.pos().y(), self.width(), self.height(), 10
            # Once moved more than s px horizontally, commit to a re-order
            # gesture and never start a drag-and-drop for this press.
            self.move = self.move or abs(x - self._startDragPosition.x()) > s
            # start a Drag if vertical drag by more than s pixels outside
            # tabBar
            if not self.move and (y < -s or y > h + s):
                drag = QDrag(self)
                # The tab tooltip holds the file's full path (set by CodeEditorWindow.updateTabText).
                url = QUrl.fromLocalFile(str(self.tabToolTip(self._tab)))
                mimeData = QMimeData()
                mimeData.setUrls([url])
                drag.setMimeData(mimeData)
                drag.exec_()
                # insert something here to repaint the tabBar so that tab that has started to move go back to its initial position
                # to do: correct the drop of file with no url.
            else:
                QTabBar.mouseMoveEvent(self, e)
        else:
            QTabBar.mouseMoveEvent(self, e)
class CodeEditorWindow(QWidget):
    """
    A multi-editor widget: a working directory plus a list of CodeEditors shown
    in a QTabWidget whose tab bar is an EditorTabBar (movable/draggable tabs).
    The set of open files is persisted in QSettings under 'Editor/OpenFiles'.
    """
    def __init__(self, parent=None, gv=dict(), lv=dict(), newEditorCallback=None):
        # NOTE(review): gv/lv are unused here; kept for caller compatibility.
        self._parent = parent
        self.editors = []
        self._workingDirectory = None
        self._newEditorCallback = newEditorCallback
        QWidget.__init__(self, parent)
        # Poll every second for files modified on disk (see onTimer).
        timer = QTimer(self)
        timer.setInterval(1000)
        self.connect(timer, SIGNAL("timeout()"), self.onTimer)
        timer.start()
        myLayout = QGridLayout()
        self.tabBar = EditorTabBar()
        self.tab = QTabWidget()
        self.tab.setTabBar(self.tabBar)
        # does not work as is because of drag and drop on tabBar
        self.tab.setMovable(True)
        self.tab.setTabsClosable(True)
        self.connect(self.tab, SIGNAL("tabCloseRequested(int)"), self.closeTab)
        myLayout.addWidget(self.tab, 0, 0)
        self.setLayout(myLayout)
        self.restoreTabState()
    def workingDirectory(self):
        """
        Returns self._workingDirectory, or the current directory if None.
        """
        if self._workingDirectory is None:
            return os.getcwd()
        return self._workingDirectory
    def setWorkingDirectory(self, filename):
        """
        Sets _workingDirectory to the directory of the file whose name filename is passed.
        """
        directory = filename
        if directory is not None:
            directory = os.path.dirname(str(directory))
            # BUG FIX: the exists() check was outside the None guard, so
            # setWorkingDirectory(None) raised in os.path.exists(None).
            if os.path.exists(directory):
                self._workingDirectory = directory
    def widgetOfOpenFile(self, filename):
        """
        Retrieves, sets as current widget, and returns the editor whose open file is `filename`.
        Returns None if the file is not open.
        """
        if filename is None:
            return None
        path = os.path.normpath(str(filename))
        for i in range(0, self.tab.count()):
            if self.tab.widget(i).filename() == path:
                self.tab.setCurrentWidget(self.tab.widget(i))
                return self.tab.widget(i)
        return None
    def saveCurrentFile(self):
        """
        Saves the current editor's file; returns the filename (falsy if cancelled).
        """
        return self._saveOrSaveAs(True)
    def saveCurrentFileAs(self):
        """
        'Save as' for the current editor; returns the filename (falsy if cancelled).
        """
        return self._saveOrSaveAs(False)
    def _saveOrSaveAs(self, save=True):
        """
        Private helper shared by the two public save methods.
        BUG FIX: now returns the filename reported by the editor; the original
        returned None, so closeEditor's `if not self.saveCurrentFile()` treated
        every successful save as a failure and refused to close the tab.
        """
        currentEditor = self.currentEditor()
        if save:
            filename = currentEditor.save()
        else:
            filename = currentEditor.saveAs()
        self.updateTabText(currentEditor)
        self.setWorkingDirectory(filename)
        return filename
    def getEditorForFile(self, filename):
        """Return the open editor for `filename`, or None (no tab switching)."""
        for i in range(0, self.tab.count()):
            editor = self.tab.widget(i)
            if editor.filename() == filename:
                return editor
        return None
    def updateTabText(self, editor):
        """Refresh an editor's tab label ('*' marks unsaved changes) and tooltip."""
        index = self.tab.indexOf(editor)
        shortname = editor._shortname
        if shortname is None:
            shortname = '[untitled]'
        filename = editor.filename()
        if filename is None:
            filename = shortname
        if editor.hasUnsavedModifications():
            changedText = "*"
        else:
            changedText = ""
        self.tab.setTabText(index, shortname + changedText)
        self.tab.setTabToolTip(index, filename)
    def openFile(self, filename=None):
        """
        Opens a file from the passed or prompted filename, then creates the editor if the opening was successful.
        (Does not call the open method of a created editor)
        """
        if filename is None:
            filename = str(QFileDialog.getOpenFileName(
                caption='Open file', filter="Python(*.py *.pyw)", directory=self.workingDirectory()))
        if filename == '':
            return None
        check = self.widgetOfOpenFile(filename)
        if check is not None:
            return check
        if os.path.isfile(str(filename)):
            self.setWorkingDirectory(filename)
            editor = self.newEditor()
            editor.openFile(filename)
            self.updateTabText(editor)
            self.saveTabState()
            return editor
        return None
    def editorHasUnsavedModifications(self, editor, changed):
        # Slot: keep the tab label's '*' marker in sync with the editor state.
        self.updateTabText(editor)
    def newEditor(self, editor=None):
        """Create (or adopt) an editor, give it a unique '[untitled n]' name and a tab."""
        if editor is None:
            editor = CodeEditor(parent=self)
            editor.append('')
            editor.activateHighlighter()
        # find the lowest index not already used by names of type '[untitled n]'
        names = [str(self.tab.tabText(i)) for i in range(self.tab.count())]
        names = [name for name in names if name.startswith('[untitled')]
        # BUG FIX: skip names without digits (e.g. a plain '[untitled]' tab),
        # for which the original int('') raised ValueError.
        digitRuns = [''.join([s for s in name if s.isdigit()]) for name in names]
        indices = [int(run) for run in digitRuns if run]
        index = 1
        while index in indices:
            index += 1
        name = '[untitled %i]' % index
        editor._shortname = name
        # append editor
        self.editors.append(editor)
        self.tab.addTab(editor, name)
        self.updateTabText(editor)
        self.connect(editor, SIGNAL("hasUnsavedModifications(bool)"), lambda changed,
                     editor=editor: self.editorHasUnsavedModifications(editor, changed))
        self.tab.setCurrentWidget(editor)
        if self._newEditorCallback is not None:
            self._newEditorCallback(editor)
        return editor
    def saveTabState(self):
        """Persist the list of open (named) files to QSettings."""
        openFiles = list()
        for i in range(0, self.tab.count()):
            widget = self.tab.widget(i)
            if widget.filename() is not None:
                openFiles.append(QString(widget.filename()))
        settings = QSettings()
        settings.setValue('Editor/OpenFiles', openFiles)
    def restoreTabState(self):
        """Re-open the files saved by saveTabState, or start with one empty editor."""
        settings = QSettings()
        if settings.contains("Editor/OpenFiles"):
            openFiles = settings.value("Editor/OpenFiles").toList()
            if openFiles is not None:
                for file in openFiles:
                    self.openFile(file.toString())
        else:
            self.newEditor()
    def closeEditor(self, editor, askOnly=False, checkWithParent=False):
        """
        Try to close a particular editor:
        - If checkWithParent is true, call self._parent.closing0k(editor) to confirm or cancel the closing;
        - if askOnly is False, removes the editor both from list of editors and from the tab widget.
        """
        if checkWithParent and self._parent is not None and hasattr(self._parent, 'closing0k'):
            if not self._parent.closing0k(editor):
                return
        if editor.hasUnsavedModifications():
            self.tab.setCurrentWidget(editor)
            messageBox = QMessageBox()
            messageBox.setWindowTitle("Warning!")
            if editor.filename() is not None:
                messageBox.setText(
                    "Save changes made to file \"%s\"?" % editor.filename())
            else:
                messageBox.setText(
                    'Save changes made to unsaved buffer %s?' % editor._shortname)
            yes = messageBox.addButton("Yes", QMessageBox.YesRole)
            no = messageBox.addButton("No", QMessageBox.NoRole)
            cancel = messageBox.addButton("Cancel", QMessageBox.RejectRole)
            messageBox.exec_()
            choice = messageBox.clickedButton()
            if choice == yes:
                if not self.saveCurrentFile():
                    return False
            elif choice == cancel:
                return False
        if askOnly:
            return True
        if editor.close():
            self.editors.remove(editor)
            editor.destroy()
            self.tab.removeTab(self.tab.indexOf(editor))
            if self.tab.count() == 0:
                # Always keep at least one (empty) editor open.
                self.newEditor()
            self.saveTabState()
            return True
        return False
    def closeEvent(self, e):
        # Veto the window close if any tab refuses to close (unsaved + Cancel).
        for i in range(0, self.tab.count()):
            if not self.closeTab(i, askOnly=True, runCheck=False):
                e.ignore()
                return
        self.saveTabState()
    def closeCurrentFile(self):
        """Close the tab of the current editor; returns True on success."""
        index = self.tab.indexOf(self.currentEditor())
        return self.closeTab(index)
    def closeTab(self, index, askOnly=False, runCheck=True):
        """Close the editor at tab `index` (see closeEditor for the flags)."""
        editor = self.tab.widget(index)
        return self.closeEditor(editor, askOnly, runCheck)
    def currentEditor(self):
        """Return the editor in the currently selected tab."""
        return self.tab.currentWidget()
    def askToReloadChangedFile(self, editor):
        """Apply the editor's reload policy, prompting the user when the policy is Ask."""
        if editor.fileReloadPolicy() == CodeEditor.FileReloadPolicy.Always:
            editor.reloadFile()
            return
        elif editor.fileReloadPolicy() == CodeEditor.FileReloadPolicy.Never:
            return
        MyMessageBox = QMessageBox()
        MyMessageBox.setWindowTitle("Warning!")
        MyMessageBox.setText(
            "File contents of \"%s\" have changed. Reload?" % editor.filename())
        yes = MyMessageBox.addButton("Yes", QMessageBox.YesRole)
        no = MyMessageBox.addButton("No", QMessageBox.NoRole)
        never = MyMessageBox.addButton("Never", QMessageBox.RejectRole)
        always = MyMessageBox.addButton("Always", QMessageBox.AcceptRole)
        MyMessageBox.exec_()
        choice = MyMessageBox.clickedButton()
        if choice == yes:
            editor.reloadFile()
        elif choice == no:
            editor.updateFileModificationDate()
        elif choice == never:
            editor.setFileReloadPolicy(CodeEditor.FileReloadPolicy.Never)
            editor.updateFileModificationDate()
        elif choice == always:
            editor.setFileReloadPolicy(CodeEditor.FileReloadPolicy.Always)
            editor.reloadFile()
    def onTimer(self):
        """Periodic check: offer to reload any editor whose file changed on disk."""
        for i in range(0, self.tab.count()):
            editor = self.tab.widget(i)
            if editor.fileHasChangedOnDisk():
                currentEditor = self.tab.currentWidget()
                try:
                    self.tab.setCurrentWidget(editor)
                    self.askToReloadChangedFile(editor)
                finally:
                    self.tab.setCurrentWidget(currentEditor)
    def sendCloseEventToParent(self):
        # under development
        # BUG FIX: the original was missing `self` and referenced
        # QtGui.QApplication, but only the names wildcard-imported from
        # PyQt4.QtGui are in scope here.
        app = QApplication.instance()
        event = QEvent(1000)
        target = self.parent()
        app.sendEvent(target, event)
class LineNumbers(QPlainTextEdit):
    """A read-only, fixed-width text pane meant to show line numbers beside an editor."""
    def __init__(self, parent, width=50):
        QPlainTextEdit.__init__(self, parent)
        self.setFixedWidth(width)
        self.setReadOnly(True)
        # Mirror the parent editor's font so the numbering lines up with its text.
        self.document().setDefaultFont(parent.document().defaultFont())
        # Purely decorative: no scrollbars, no interaction.
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setDisabled(True)
class LineTextWidget(QPlainTextEdit):
    """
    A QPlainTextEdit paired with a NumberBar margin that paints line numbers
    and fold markers ('+'/'-'), and that paints a dotted separator line where
    blocks have been folded (hidden).
    """
    class NumberBar(QWidget):
        """
        Margin widget that paints line numbers and handles clicks that fold
        or unfold the adjacent text block in the associated editor.
        """
        def __init__(self, *args):
            QWidget.__init__(self, *args)
            self.edit = None
            # This is used to update the width of the control.
            # It is the highest line that is currently visibile.
            self.highest_line = 0
        def setTextEdit(self, edit):
            # Bind the editor whose blocks and geometry this bar mirrors.
            self.edit = edit
        def update(self, *args):
            # Resize the bar to fit the widest line number, then repaint.
            maxline = self.edit.document().lastBlock().blockNumber() + \
                self.edit.document().lastBlock().lineCount()
            width = QFontMetrics(self.edit.document().defaultFont()).width(str(maxline)) + 10 + 10
            if self.width() != width:
                self.setFixedWidth(width)
                margins = QMargins(width, 0, 0, 0)
                self.edit.setViewportMargins(margins)
                self.edit.viewport().setContentsMargins(margins)
            QWidget.update(self, *args)
        def mousePressEvent(self, e):
            # A click beside a folded block unfolds it; a click beside the first
            # line of a foldable block hides the block.
            # NOTE(review): self.edit.hideBlocks is not defined in this file
            # chunk - presumably provided by the editor subclass; verify.
            block = self.edit.firstVisibleBlock()
            contents_y = self.edit.verticalScrollBar().value() * 0
            viewport_offset = self.edit.contentOffset() - QPointF(0, contents_y)
            changed = False
            while block.isValid():
                topLeft = self.edit.blockBoundingGeometry(
                    block).topLeft() + viewport_offset
                bottomLeft = self.edit.blockBoundingGeometry(
                    block).bottomLeft() + viewport_offset
                if e.pos().y() > topLeft.y() and e.pos().y() < bottomLeft.y():
                    if not block.next().isVisible():
                        while not block.next().isVisible() and block.next().isValid():
                            block.next().setVisible(True)
                            block.setLineCount(block.layout().lineCount())
                            self.edit.document().markContentsDirty(
                                block.next().position(), block.next().length())
                            block = block.next()
                        changed = True
                    elif self.isBeginningOfBlock(block):
                        (startBlock, endBlock) = self.getEnclosingBlocks(block)
                        self.edit.hideBlocks(startBlock, endBlock)
                block = block.next()
                if changed:
                    self.edit.viewport().update()
                if bottomLeft.y() > self.edit.viewport().geometry().bottomLeft().y():
                    break
        def isBeginningOfBlock(self, block):
            # A block starts a foldable region if it begins with "##", or if the
            # next non-blank/non-comment line is indented deeper than this one.
            if block.text()[:2] == "##":
                return True
            else:
                if re.match("^\s*$", block.text()):
                    return False
                matchBlock = re.search("^(\s+)", block.text())
                if matchBlock is None:
                    indentation = ""
                else:
                    indentation = matchBlock.group(1)
                nextBlock = block.next()
                # Skip blank lines and comment-only lines.
                while nextBlock.isValid() and re.match("(^\s*$)|(^\s*\#.*$)", nextBlock.text()):
                    nextBlock = nextBlock.next()
                matchNextBlock = re.search("^(\s+)", nextBlock.text())
                if matchNextBlock is None:
                    nextIndentation = ""
                else:
                    nextIndentation = matchNextBlock.group(1)
                if len(nextIndentation) > len(indentation):
                    return True
        def getEnclosingBlocks(self, block):
            # Return (startBlock, endBlock) of the foldable region starting at
            # `block`: either up to the next "##" marker, or over all following
            # lines at least as indented as the next code line.
            if block.text()[:2] == "##":
                startBlock = block
                while block.next().isValid() and block.next().text()[:2] != "##":
                    block = block.next()
                endBlock = block
                return (startBlock, endBlock)
            else:
                matchBlock = re.search("^(\s+)", block.text())
                if matchBlock is None:
                    indentation = ""
                else:
                    indentation = matchBlock.group(1)
                nextBlock = block.next()
                while nextBlock.next().isValid() and re.match("(^\s*$)|(^\s*\#.*$)", nextBlock.text()):
                    nextBlock = nextBlock.next()
                matchNextBlock = re.search("^(\s+)", nextBlock.text())
                if matchNextBlock is None:
                    nextIndentation = ""
                else:
                    nextIndentation = matchNextBlock.group(1)
                startBlock = block
                endBlock = startBlock
                while block.next().isValid() and (block.next().text()[:len(nextIndentation)] == nextIndentation or re.match("(^\s*$)|(^\s*\#.*$)", block.next().text())):
                    block = block.next()
                    endBlock = block
                # Trim trailing blank/comment-only lines from the region.
                while endBlock.isValid() and re.match("(^\s*$)|(^\s*\#.*$)", endBlock.text()):
                    endBlock = endBlock.previous()
                return (startBlock, endBlock)
        def paintEvent(self, event):
            # Paint one number per visible block; '+' marks a collapsed block,
            # '-' a foldable one; the current line's number is drawn bold.
            contents_y = self.edit.verticalScrollBar().value() * 0
            page_bottom = self.edit.viewport().height()
            font_metrics = QFontMetrics(self.edit.document().defaultFont())
            current_block = self.edit.document().findBlock(self.edit.textCursor().position())
            painter = QPainter(self)
            # Iterate over all text blocks in the document.
            block = self.edit.firstVisibleBlock()
            viewport_offset = self.edit.contentOffset() - QPointF(0, contents_y)
            line_count = block.blockNumber() + 1
            painter.setFont(self.edit.document().defaultFont())
            while block.isValid():
                invisibleBlock = False
                while not block.isVisible() and block.isValid():
                    invisibleBlock = True
                    block = block.next()
                    if block == self.edit.document().lastBlock():
                        break
                # The top left position of the block in the document
                position = self.edit.blockBoundingGeometry(
                    block).topLeft() + viewport_offset
                position2 = self.edit.blockBoundingGeometry(
                    block).bottomLeft() + viewport_offset
                # Check if the position of the block is out side of the visible
                # area.
                line_count = block.blockNumber() + 1
                additionalText = ""
                if not block.next().isVisible():
                    additionalText = "+"
                elif self.isBeginningOfBlock(block):
                    additionalText = "-"
                if position.y() > page_bottom:
                    break
                # We want the line number for the selected line to be bold.
                bold = False
                if block == current_block:
                    bold = True
                    font = painter.font()
                    font.setBold(True)
                    painter.setFont(font)
                # Draw the line number right justified at the y position of the
                # line. 3 is a magic padding number. drawText(x, y, text).
                painter.drawText(self.width() - 10 - font_metrics.width(str(line_count)) - 3, round(position.y(
                )) + font_metrics.ascent() + font_metrics.descent() - 1, str(line_count) + additionalText)
                # Remove the bold style if it was set previously.
                if bold:
                    font = painter.font()
                    font.setBold(False)
                    painter.setFont(font)
                block = block.next()
                if block.isValid():
                    topLeft = self.edit.blockBoundingGeometry(
                        block).topLeft() + viewport_offset
                    bottomLeft = self.edit.blockBoundingGeometry(
                        block).bottomLeft() + viewport_offset
                    if bottomLeft.y() > self.edit.viewport().geometry().bottomLeft().y():
                        break
            self.highest_line = line_count
            painter.end()
            QWidget.paintEvent(self, event)
    def __init__(self, *args):
        QPlainTextEdit.__init__(self, *args)
        self.number_bar = self.NumberBar(self)
        self.number_bar.setTextEdit(self)
        # Filter viewport events so the number bar repaints on any change.
        self.viewport().installEventFilter(self)
    def appendPlainText(self, string):
        QPlainTextEdit.appendPlainText(self, string)
    def append(self, string):
        # Alias kept for API compatibility with other text widgets.
        self.appendPlainText(string)
    def resizeEvent(self, e):
        # Keep the number bar exactly as tall as the editor.
        self.number_bar.setFixedHeight(self.height())
        super(LineTextWidget, self).resizeEvent(e)
    def setDefaultFont(self, font):
        self.document().setDefaultFont(font)
    def eventFilter(self, object, event):
        # Update the line numbers for all events on the text edit and the viewport.
        # This is easier than connecting all necessary singals.
        if object is self.viewport():
            self.number_bar.update()
        return QPlainTextEdit.eventFilter(self, object, event)
    def paintEvent(self, event):
        QPlainTextEdit.paintEvent(self, event)
        # This functions paints a dash-dotted line before hidden blocks.
        contents_y = self.verticalScrollBar().value() * 0 + 1
        page_bottom = self.viewport().height()
        painter = QPainter(self.viewport())
        # Iterate over all text blocks in the document.
        block = self.firstVisibleBlock()
        viewport_offset = self.contentOffset() - QPointF(0, contents_y)
        line_count = block.blockNumber() + 1
        painter.setFont(self.document().defaultFont())
        pen = QPen()
        pen.setWidth(1)
        pen.setStyle(Qt.DotLine)
        pen.setColor(QColor(0, 100, 0))
        painter.setBrush(QBrush(QColor(255, 0, 0, 122)))
        painter.setPen(pen)
        while block.isValid():
            invisibleBlock = False
            while not block.isVisible() and block.isValid():
                invisibleBlock = True
                block = block.next()
                if block == self.document().lastBlock():
                    break
            # The top left position of the block in the document
            topLeft = self.blockBoundingGeometry(
                block).topLeft() + viewport_offset
            bottomLeft = self.blockBoundingGeometry(
                block).bottomLeft() + viewport_offset
            # Check if the position of the block is out side of the visible
            # area.
            if not block.next().isVisible():
                rect = QRectF(bottomLeft.x(), bottomLeft.y(), self.viewport(
                ).width(), topLeft.y() - bottomLeft.y())
                # painter.drawRect(rect)
                painter.drawLine(bottomLeft.x(), bottomLeft.y(),
                                 self.viewport().width(), bottomLeft.y())
            # if bottomLeft.y() > page_bottom:
            #    break
            block = block.next()
class SearchableEditor(QPlainTextEdit):
    """
    A QPlainTextEdit with a search/replace bar shown with Ctrl+F and hidden with Esc.
    NOTE: this __init__ deliberately does not call QPlainTextEdit.__init__;
    subclasses (e.g. CodeEditor) initialize the text-edit base themselves.
    """
    def __init__(self, parent=None):
        self._panel = QFrame(self)
        self._panel.setFrameStyle(QFrame.Box)
        self._layout = QBoxLayout(QBoxLayout.LeftToRight)
        self._panel.setLayout(self._layout)
        self._searchText = QLineEdit('')
        self._caseSensitive = QCheckBox('Case Sensitive')
        self._useRegex = QCheckBox('Regex')
        self._forwardButton = QPushButton('Forward')
        self._backwardButton = QPushButton('Backward')
        self._replaceButton = QPushButton('Replace by')
        self._replaceText = QLineEdit('')
        # self._panel.setFocusPolicy(Qt.ClickFocus)
        # self._searchText.setFocusPolicy(Qt.StrongFocus)
        # self._replaceText.setFocusPolicy(Qt.StrongFocus)
        self._layout.addWidget(QLabel('Search'))
        self._layout.addWidget(self._searchText)
        self._layout.addWidget(self._caseSensitive)
        self._layout.addWidget(self._useRegex)
        self._layout.addWidget(self._forwardButton)
        self._layout.addWidget(self._backwardButton)
        self._layout.addWidget(self._replaceButton)
        self._layout.addWidget(self._replaceText)
        self._layout.addWidget(QLabel('(Esc to exit search)'))
        self._layout.addStretch()
        self._panel.hide()
        self.connect(self._searchText, SIGNAL('enterPressed()'), self.searchText)
        self.connect(self._forwardButton, SIGNAL('clicked()'), self.searchText)
        self.connect(self._backwardButton, SIGNAL('clicked()'), lambda: self.searchText(backward=True))
        self.connect(self._replaceButton, SIGNAL('clicked()'), self.replaceText)
        # Remembers the direction of the last search for Enter / replace.
        self._lastBackward = False
    def resizeEvent(self, e):
        # Keep the search panel glued to the bottom edge of the viewport.
        self._panel.setGeometry(0, self.viewport().height(), self.viewport().width(), 40)
        self.adjustMargins()
    def adjustMargins(self):
        """Reserve room at the bottom of the viewport while the panel is visible."""
        bottom = 0
        if self._panel.isVisible():
            bottom = 40
        margins = self.viewport().contentsMargins()  # error here bad coordinate system
        margins.setBottom(bottom)
        self.setViewportMargins(margins)
    def searchText(self, backward=False, clip=True):
        """
        Search for the panel's text from the cursor position; on a miss, wrap
        around once (clip=True) and retry. The search box turns green on a hit
        and red on a miss.
        """
        text = self._searchText.text()
        pos = self.textCursor().position()
        flag = QTextDocument.FindFlag(0)
        self._lastBackward = False
        if backward:
            pos = self.textCursor().selectionStart()
            flag = flag | QTextDocument.FindBackward
            self._lastBackward = True
        if self._caseSensitive.isChecked():
            flag = flag | QTextDocument.FindCaseSensitively
        if self._useRegex.isChecked():
            text = QRegExp(text)
        result = self.document().find(text, pos, flag)
        if not result.isNull():
            self.setTextCursor(result)
            self.ensureCursorVisible()
            # BUG FIX: ExtraSelection was referenced without parentheses, so the
            # attributes below were set on the class object instead of on an
            # instance (cf. the correct instantiation in CodeEditor.highlightLine).
            selection = QTextEdit.ExtraSelection()
            selection.cursor = result
            selection.format = QTextCharFormat()
            selection.format.setBackground(QBrush(QColor(255, 0, 0, 140)))
            self.selections = []
            self.selections.append(selection)
            # self.setExtraSelections(self.selections)
            self.setFocus()
            self._searchText.setStyleSheet("background:#5F5;")
        else:
            # Miss: move to the document's start (or end for backward) and,
            # unless we already wrapped, try once more.
            cursor = QTextCursor(self.document())
            if backward:
                cursor.setPosition(self.document().lastBlock().position() + self.document().lastBlock().length() - 1)
            else:
                cursor.setPosition(0)
            self.setTextCursor(cursor)
            self._searchText.setStyleSheet("background:#F55;")
            if clip:
                self.searchText(backward, clip=False)
    def replaceText(self):
        """Replace the current selection if it matches the search text, then search again."""
        search = self._searchText.text()
        selected = self.textCursor().selectedText()
        # BUG FIX: in regex mode the original compared the selection to a
        # QRegExp *object* (always False) and inserted a QRegExp as the
        # replacement; match with QRegExp.exactMatch and always insert the
        # plain replacement string.
        if self._useRegex.isChecked():
            matches = QRegExp(search).exactMatch(selected)
        else:
            matches = selected == search
        if matches:
            self.textCursor().insertText(self._replaceText.text())
        self.searchText(self._lastBackward)
    def showSearchBar(self):
        """Show the panel, focus the search box, and clear any hit/miss coloring."""
        self._panel.show()
        self._searchText.setFocus()
        self._searchText.selectAll()
        self._searchText.setStyleSheet('')
        self.adjustMargins()
    def hideSearchBar(self):
        """Hide the panel and return focus to the editor."""
        self._panel.hide()
        self.setFocus()
        self.adjustMargins()
    def keyPressEvent(self, e):
        """Hotkeys: Ctrl+F shows the panel, Esc hides it, Enter/Up/Down search."""
        if (e.key() == Qt.Key_F) and (e.modifiers() & Qt.ControlModifier):  # CTRL+F = show
            self.showSearchBar()
        elif (e.key() == Qt.Key_Escape):  # ESC = Hide
            self.hideSearchBar()
            if not self._panel.isVisible():
                e.ignore()
                return
        elif (e.key() == Qt.Key_Enter or e.key() == Qt.Key_Return):  # Enter or Return = search
            e.accept()
            self.searchText(self._lastBackward)
        elif e.key() == Qt.Key_Up or e.key() == Qt.Key_Down:  # Down = search downward
            e.accept()  # Up = search upward
            backward = False
            if e.key() == Qt.Key_Up:
                backward = True
            self.searchText(backward=backward)
        elif e.key() == Qt.Key_Tab:  # Tab = Tab at the panel level
            e.accept()
            self._panel.keyPressEvent(e)
        else:
            e.accept()
class CodeEditor(SearchableEditor, LineTextWidget):
    """
    A simple SearchableEditor with
    - _filename, _shortname, _modifiedAt properties, as well as open, save, saveAs and close methods;
    - text set or returned by setTabText() or tabText();
    - syntax highlighting;
    - indentation (automatic, and controlled with Key_Left and Key_Right);
    - line wrapping;
    - text block management (block delimiter = ##);
    - reloading capabilities set or read with setFileReloadPolicy/fileReloadPolicy.
    """
    class FileReloadPolicy:
        """Enumeration of behaviors when the underlying file changes on disk."""
        Always = 0  # reload silently
        Never = 1  # ignore external changes
        Ask = 2  # prompt the user (default)
    def __init__(self, parent=None, lineWrap=True):
        self._parent = parent
        # Initialize both bases explicitly (SearchableEditor.__init__ does not
        # call QPlainTextEdit.__init__ itself).
        LineTextWidget.__init__(self, parent)
        SearchableEditor.__init__(self, parent)
        self._filename = None
        self._shortname = '[untitled]'
        self._tabToolTip = self._shortname
        self.setTabStopWidth(30)
        self._modifiedAt = None  # mtime of the file at open/save time
        self._tabText = ''
        self._fileReloadPolicy = CodeEditor.FileReloadPolicy.Ask
        self._errorSelections = []  # extra selections marking error lines
        self._blockHighlighting = True  # highlight the ##-block under the cursor
        self.setAttribute(Qt.WA_DeleteOnClose, True)
        self.setStyleSheet("""
        CodeEditor
        {
            color:#000;
            background:#FFF;
            font-family:Consolas, Courier New,Courier;
            font-size:14px;
            font-weight:normal;
        }
        """)
        # NOTE(review): updateUndoStatus and setLineWrap are not defined in this
        # chunk - presumably implemented further down the class; verify.
        self.connect(self.document(), SIGNAL('modificationChanged(bool)'), self.updateUndoStatus)
        self.connect(self, SIGNAL("cursorPositionChanged()"), self.cursorPositionChanged)
        self.setLineWrap(lineWrap)
        self.setHasUnsavedModifications(False)
    def tabText(self):
        # Return the auxiliary tab text stored via setTabText (not the shortname).
        return self._tabText
    def setTabText(self, text):
        # Store auxiliary tab text; does not update any QTabWidget by itself.
        self._tabText = text
    def reloadFile(self):
        """Re-read the file from disk; raises if no valid filename is set."""
        if self.filename() is None or not (os.path.exists(self.filename()) and os.path.isfile(self.filename())):
            raise Exception('CodeEditor.reloadFile: Unable to perform reload since no filename has been defined!')
        self.openFile(self.filename())
    def fileReloadPolicy(self):
        # Current FileReloadPolicy value (Always / Never / Ask).
        return self._fileReloadPolicy
    def setFileReloadPolicy(self, policy):
        # Set the FileReloadPolicy used when the file changes on disk.
        self._fileReloadPolicy = policy
    def resizeEvent(self, e):
        # Propagate the resize to both bases (number bar and search panel).
        LineTextWidget.resizeEvent(self, e)
        SearchableEditor.resizeEvent(self, e)
    def checkForText(self):
        # Poll (once per second, single-shot timer) until an asynchronous file
        # open has produced its text.
        # NOTE(review): self.fileOpenThread is never assigned in the code shown
        # here - presumably set by an async open path elsewhere; verify before
        # relying on this method.
        if not self.fileOpenThread.textReady:
            self.timer = QTimer(self)
            self.timer.setSingleShot(True)
            self.timer.setInterval(1000)
            self.connect(self.timer, SIGNAL("timeout()"), self.checkForText)
            self.timer.start()
            return
        # self.setPlainText(self.fileOpenThread.text)
    def highlightLine(self, line):
        """Move the cursor to the given 1-based line and highlight it in yellow."""
        block = self.document().findBlockByLineNumber(line - 1)
        selection = QTextEdit.ExtraSelection()
        cursor = self.textCursor()
        cursor.setPosition(block.position() + 1)
        cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.KeepAnchor)
        cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
        selection.cursor = cursor
        format = QTextCharFormat()
        # Yellow, full-width highlight for the target (e.g. error) line.
        format.setBackground(QBrush(QColor(255, 255, 0)))
        format.setProperty(QTextFormat.FullWidthSelection, True)
        selection.format = format
        cursor = self.textCursor()
        cursor.setPosition(block.position())
        self.setTextCursor(cursor)
        self.setErrorSelections([selection])
        self.cursorPositionChanged()
        self.ensureCursorVisible()
    def filename(self):
        # Normalized path of the open file, or None for an unsaved buffer.
        return self._filename
def setFilename(self, filename):
self._filename = os.path.normpath(str(filename))
(di, self._shortname) = os.path.split(self._filename)
if re.search(".py$", self._filename) or re.search(".pyw$", self._filename):
self.activateHighlighter(True)
else:
self.activateHighlighter(False)
if os.path.exists(self._filename):
self._modifiedAt = os.path.getmtime(self._filename)
else:
self._modifiedAt = 0
    def activateHighlighter(self, activate=True):
        # Attach (or drop) the Python syntax highlighter from syntaxhighlighter.
        if activate:
            self.highlighter = Python(self.document())
        else:
            if hasattr(self, "highlighter"):
                del self.highlighter
    def hasUnsavedModifications(self):
        # True when the buffer differs from the file on disk.
        return self._hasUnsavedModifications
    def setHasUnsavedModifications(self, hasUnsavedModifications=True):
        # Update the flag, notify listeners (tab '*' marker), and sync the
        # QTextDocument modified state.
        self._hasUnsavedModifications = hasUnsavedModifications
        self.emit(SIGNAL("hasUnsavedModifications(bool)"),
                  hasUnsavedModifications)
        self.document().setModified(hasUnsavedModifications)
def autoIndentCurrentLine(self):
cursor = self.textCursor()
start = cursor.position()
cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
text = cursor.selection().toPlainText()
lastLine = QTextCursor(cursor)
lastLine.movePosition(QTextCursor.PreviousBlock,
QTextCursor.MoveAnchor)
lastLine.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
lastLine.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
lastLineText = lastLine.selection().toPlainText()
blankLine = QRegExp("^[\t ]*$")
indents = QRegExp(r"^[ \t]*")
index = indents.indexIn(lastLineText)
cursor.insertText(lastLineText[:indents.matchedLength()] + text)
cursor.setPosition(start + indents.matchedLength())
self.setTextCursor(cursor)
    def indentCurrentSelection(self):
        """Insert a tab at the start of every line in the current selection."""
        cursor = self.textCursor()
        start = cursor.selectionStart()
        end = cursor.selectionEnd()
        # Extend the selection to whole lines before transforming it.
        cursor.setPosition(start, QTextCursor.MoveAnchor)
        cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
        start = cursor.selectionStart()
        cursor.setPosition(end, QTextCursor.KeepAnchor)
        cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
        text = cursor.selection().toPlainText()
        # NOTE: relies on PyQt4 QString.replace mutating in place; a plain
        # Python str would need `text = text.replace(...)`.
        text.replace(QRegExp(r"(\n|^)"), "\\1\t")
        cursor.insertText(text)
        cursor.setPosition(start)
        cursor.setPosition(start + len(text), QTextCursor.KeepAnchor)
        self.setTextCursor(cursor)
    def unindentCurrentSelection(self):
        """Remove one leading space/tab from every line in the current selection."""
        cursor = self.textCursor()
        start = cursor.selectionStart()
        end = cursor.selectionEnd()
        cursor.setPosition(start, QTextCursor.MoveAnchor)
        start = cursor.selectionStart()
        cursor.movePosition(QTextCursor.StartOfLine, QTextCursor.MoveAnchor)
        cursor.setPosition(end, QTextCursor.KeepAnchor)
        cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
        text = cursor.selection().toPlainText()
        # NOTE: relies on PyQt4 QString.replace mutating in place; a plain
        # Python str would need reassignment.
        text.replace(QRegExp(r"(\n)[ \t]([^\n]+)"), "\\1\\2")
        text.replace(QRegExp(r"^[ \t]([^\n]+)"), "\\1")
        cursor.insertText(text)
        cursor.setPosition(start)
        cursor.setPosition(start + len(text), QTextCursor.KeepAnchor)
        self.setTextCursor(cursor)
def gotoNextBlock(self):
    """Move the caret just past the end of the current block."""
    current = self.getCurrentBlock()
    cursor = self.textCursor()
    blockEnd = current.cursor.selectionEnd()
    cursor.setPosition(blockEnd)
    # Step over the delimiter character unless already at the document end.
    if not cursor.atEnd():
        cursor.setPosition(blockEnd + 1)
    self.setTextCursor(cursor)
def gotoPreviousBlock(self):
    """Move the caret to the start of the current block; when already there,
    try to jump to the start of the previous block."""
    block = self.getCurrentBlock()
    cursor = self.textCursor()
    if cursor.position() == block.cursor.selectionStart() and block.cursor.selectionStart() != 0:
        cursor.setPosition(block.cursor.selectionStart() - 1)
        # NOTE(review): getCurrentBlock() reads self.textCursor(), which the
        # local cursor move above does not change — verify this really
        # resolves the previous block rather than the current one.
        block = self.getCurrentBlock()
        cursor.setPosition(block.cursor.selectionStart())
    else:
        cursor.setPosition(block.cursor.selectionStart())
    self.setTextCursor(cursor)
def getCurrentBlock(self, delimiter="\n##"):
    """Return an ExtraSelection spanning the delimiter-separated block that
    contains the current cursor/selection, or None for an empty block.

    An empty *delimiter* selects the whole document.
    """
    # Bug Check what happens in all cases
    text = unicode(self.document().toPlainText())
    blockStart = 0
    blockEnd = len(text)
    if delimiter != "":
        cursorStart = self.textCursor().anchor()  # dv
        cursorStop = self.textCursor().position()
        # Search backwards from just before the selection; the +1 both maps
        # rfind's "not found" (-1) to document start 0 and skips the leading
        # newline of a found delimiter.
        blockStart = text.rfind(
            delimiter, 0, max(0, cursorStart - 1)) + 1  # dv
        blockEnd = text.find(delimiter, cursorStop) - 1
        if blockStart == -1:
            # NOTE(review): unreachable — the +1 above already maps -1 to 0.
            blockStart = 0
        if blockStart == blockEnd:
            return None
        # When no trailing delimiter exists, find() gave -1 so blockEnd is -2
        # here; the +1 turns it into the -1 sentinel handled below.
        if blockEnd != -1:
            blockEnd += 1
    selection = QTextEdit.ExtraSelection()
    cursor = self.textCursor()
    cursor.setPosition(blockStart, QTextCursor.MoveAnchor)
    if blockEnd != -1:
        cursor.setPosition(blockEnd, QTextCursor.KeepAnchor)
    else:
        # No trailing delimiter: block extends to the end of the document.
        cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor)
    selection.cursor = cursor
    return selection
def cursorPositionChanged(self):
    """Refresh the extra selections (pending error selections plus the
    current block) whenever the caret moves, if block highlighting is on.

    Fix: the original called getCurrentBlock() twice and created an unused
    QPen; the block computed for the None-check is reused directly.
    """
    if not self._blockHighlighting:
        return
    selection = self.getCurrentBlock()
    if selection is None:
        return
    selections = []
    # Error selections are shown once and then cleared.
    selections.extend(self._errorSelections)
    self._errorSelections = []
    selection.format = QTextCharFormat()
    # selection.format.setProperty(QTextFormat.OutlinePen, QPen())
    # selection.format.setBackground(QBrush(QColor(240,240,240)))
    # selection.format.setProperty(QTextFormat.FullWidthSelection, True)
    selections.append(selection)
    self.setExtraSelections(selections)
def setErrorSelections(self, selections):
    # Stored selections are merged into the extra selections on the next
    # cursorPositionChanged() and cleared afterwards.
    self._errorSelections = selections
def errorSelections(self):
    """Return the currently stored error selections."""
    return self._errorSelections
def getCurrentCodeBlock(self, delimiter="\n##"):
    """Return the current block's text padded with leading newlines.

    The block text is prefixed with one newline per document line preceding
    it — presumably so line numbers in tracebacks/compiles of the block match
    positions in the full document (confirm with the caller).
    """
    selection = self.getCurrentBlock(delimiter)
    block = self.document().findBlock(selection.cursor.selectionStart())
    n = block.blockNumber()
    return "\n" * n + unicode(selection.cursor.selection().toPlainText()) + u"\n"
def hideBlocks(self, startBlock, endBlock):
    """Hide every document block strictly after *startBlock*, up to and
    including *endBlock*."""
    block = startBlock.next()
    while block.isValid():
        self.document().markContentsDirty(block.position(), block.length())
        block.setVisible(False)
        block.setLineCount(0)
        if block == endBlock:
            break
        block = block.next()
    # bugfix: scrollbar value is not updated if unless calling "resize"
    # explicitly...  (grow by 1px then shrink back to force a relayout)
    self.resize(self.size() + QSize(1, 1))
    self.resize(self.size() + QSize(-1, -1))
    self.viewport().update()
def hideCurrentBlock(self):
    """Hide the block containing the cursor (all its lines after the first).

    Fix: removed the unused `block = QTextBlock()` local the original created.
    """
    selection = self.getCurrentBlock()
    startBlock = self.document().findBlock(selection.cursor.selectionStart())
    endBlock = self.document().findBlock(selection.cursor.selectionEnd())
    self.hideBlocks(startBlock, endBlock)
def contextMenuEvent(self, event):
    """Show the standard context menu extended with 'Hide block' and a
    line-wrap toggle."""
    MyMenu = self.createStandardContextMenu()
    hideBlock = MyMenu.addAction("Hide block")
    MyMenu.addSeparator()
    # Label describes the action that will be performed, not the current state.
    if self._lineWrap:
        lineWrap = MyMenu.addAction("Disable line wrap")
    else:
        lineWrap = MyMenu.addAction("Enable line wrap")
    # Old-style PyQt4 signal connections.
    self.connect(lineWrap, SIGNAL("triggered()"), self.toggleLineWrap)
    self.connect(hideBlock, SIGNAL('triggered()'), self.hideCurrentBlock)
    MyMenu.exec_(self.cursor().pos())
def toggleLineWrap(self):
    """Flip line wrapping on/off."""
    self.setLineWrap(not self._lineWrap)
def openFile(self, filename):
    """Load *filename* into the editor and mark the buffer clean.

    Raises IOError when the path is not an existing file or cannot be read.

    Fix: the original never closed the file handle; a with-statement now
    guarantees it is released even when read() raises.
    """
    if not os.path.isfile(filename):
        raise IOError("Invalid path: %s" % filename)
    with open(filename, 'r') as f:
        text = f.read()
    self.setPlainText(text)
    self.setFilename(filename)
    self.setHasUnsavedModifications(False)
def save(self):
    """
    Saves the editor content in the current file by calling saveAs(self._filename).
    """
    # Delegates to saveAs(); returns the filename actually written.
    return self.saveAs(self._filename)
def saveAs(self, filename=None):
    """Save the editor content to *filename*, prompting for one if None.

    Returns the filename written (or '' when the user cancelled the dialog).
    Raises Error when the file cannot be written.

    Fixes: the original's `finally: file.close()` raised NameError when
    open() itself failed, closed the file twice on success, and used a bare
    `except:`; a with-statement and narrowed exception handling replace that.
    """
    if filename is None:  # prompt user for a new file name with a proposed directory and name
        directory = self._filename  # proposing first the existing filename
        if directory is None or not os.path.exists(directory):
            try:
                directory = self._parent.workingDirectory()  # or the parent working directory
            except Exception:
                directory = os.getcwd()  # or the os current directory
        filename = str(QFileDialog.getSaveFileName(caption='Save file as',
                                                   filter="Python(*.py *.pyw)", directory=directory))
    if filename != '':
        try:
            with open(filename, 'w') as f:
                f.write(unicode(self.document().toPlainText()))
            self.setHasUnsavedModifications(False)
            # +1 second of slack so our own write is not reported as an
            # external change by fileHasChangedOnDisk().
            self._modifiedAt = os.path.getmtime(filename) + 1
            self._filename = filename
            di, self._shortname = os.path.split(filename)
        except EnvironmentError:
            raise Error('Could not save file %s' % filename)
    return filename
def updateFileModificationDate(self):
    # Record the file's mtime plus one second of slack — presumably so our
    # own saves don't immediately look like external changes in
    # fileHasChangedOnDisk(); confirm.
    self._modifiedAt = os.path.getmtime(self.filename()) + 1
def fileHasChangedOnDisk(self):
    """Return True when the backing file was deleted or modified externally
    (mtime newer than our recorded snapshot); False for unsaved buffers."""
    path = self.filename()
    if path is None:
        return False
    if not os.path.exists(path):
        # File vanished from disk: treat as changed.
        return True
    return os.path.getmtime(path) > self._modifiedAt
def activateBlockHighlighting(self, activate=False):
    """Enable/disable current-block highlighting (used by cursorPositionChanged)."""
    self._blockHighlighting = activate
def updateUndoStatus(self, status):
    # Mirror the flag into the unsaved-modifications state.
    # NOTE(review): presumably connected to an undoAvailable-style Qt signal
    # — confirm at the connection site.
    self.setHasUnsavedModifications(status)
def keyPressEvent(self, e):
    """Keyboard dispatch, in priority order: search-editor shortcuts,
    Ctrl+Up/Down block navigation, Ctrl+Left/Right (un)indent, then normal
    editing with auto-indent after Return/Enter."""
    # Give the search editor first refusal on the event.
    SearchableEditor.keyPressEvent(self, e)
    if e.isAccepted():
        return
    if (e.key() == Qt.Key_Up or e.key() == Qt.Key_Down) and e.modifiers() & Qt.ControlModifier:
        if e.key() == Qt.Key_Up:
            self.gotoPreviousBlock()
        else:
            self.gotoNextBlock()
        e.accept()
        return
    if (e.key() == Qt.Key_Left or e.key() == Qt.Key_Right) and e.modifiers() & Qt.ControlModifier:
        if e.key() == Qt.Key_Left:
            self.unindentCurrentSelection()
        else:
            self.indentCurrentSelection()
        e.accept()
        return
    # Default editing behaviour, then auto-indent the freshly created line.
    LineTextWidget.keyPressEvent(self, e)
    if e.key() == Qt.Key_Return or e.key() == Qt.Key_Enter:
        self.autoIndentCurrentLine()
def setLineWrap(self, state):
    """Turn word wrapping on/off and remember the choice."""
    self._lineWrap = state
    mode = QPlainTextEdit.WidgetWidth if state else QPlainTextEdit.NoWrap
    self.setLineWrapMode(mode)
|
import datetime
import requests
from multiprocessing import Pool
import random
import json
from bs4 import BeautifulSoup
from scraper import get_data
import billboard
# Identify ourselves to the servers we scrape.
HEADERS = {
    'User-Agent': 'yt.py'
}
def downloadHTML(url, timeout=25):
    """Downloads and returns the webpage with the given URL.

    Returns an empty string on failure (non-200 status, which is printed).
    """
    assert url.startswith('http')
    response = requests.get(url, headers=HEADERS, timeout=timeout)
    response.encoding = 'utf-8'
    if response.status_code != 200:
        print(response.status_code)
        return ''
    return response.text
def scrape_vids(params):
    '''
    Returns list of artist, title tuple scraped from the given
    year and page number (params is a (year, page) tuple).

    Fix: the table lookup passed a SET {'class', 'imvdbTable'} where a dict
    attribute filter {'class': 'imvdbTable'} was intended (comma/colon typo).
    '''
    year, page = params
    print("Scraping year {} page {}".format(year, page))
    url = 'http://imvdb.com/calendar/{}?page={}'.format(year, page)
    html = downloadHTML(url)
    soup = BeautifulSoup(html, 'html.parser')
    table = soup.find('table', {'class': 'imvdbTable'})
    rows = table.find_all('tr')
    out = []
    for row in rows:
        # NOTE(review): [:-6] presumably strips a fixed-width trailing
        # suffix from the title cell — confirm against the live markup.
        title = row.find('h3').find('a').contents[0].strip()[:-6].strip()
        artist = row.find('h4').find('a').contents[0].strip()
        out.append((artist, title))
    return out
def get_mvs_by_year(year, limit=120):
    """Return up to *limit* shuffled (artist, title) pairs of music videos
    released in *year*, scraped from imvdb.com (10 pages in parallel).

    Fix: both soup.find() attribute filters were SET literals
    ({'class', '...'}) where dict filters ({'class': '...'}) were intended.
    """
    url = 'http://imvdb.com/calendar/{}?page=1'.format(year)
    html = downloadHTML(url)
    soup = BeautifulSoup(html, 'html.parser')
    # print(html)
    # with open('blah.html', 'w') as f:
    #     f.write(str(soup))
    num_vids = soup.find('h3', {'class': 'rack_title'}).contents[0].split(' ')[-1]
    num_vids = int(num_vids[1:-1].replace(',', ''))
    table = soup.find('table', {'class': 'imvdbTable'})
    rows = table.find_all('tr')
    # NOTE(review): the computed page count is kept for reference but the
    # original hard-coded 10 pages; num_vids/rows are otherwise unused.
    # num_pages = num_vids // len(rows) + 1
    num_pages = 10
    params = [(year, page) for page in range(1, num_pages + 1)]
    p = Pool()
    batches = p.map(scrape_vids, params)
    vids = [vid for batch in batches for vid in batch]
    random.shuffle(vids)
    return vids[:limit]
def fetch_mvs_data(vids, year):
    """Fetch data (via get_data) for (artist, title) pairs, skipping songs
    that appeared on Billboard's year-end Hot 100 chart for *year*.

    Fix: the billboard titles are lower-cased, but the original compared the
    scraped title in its original case against that set, so the filter could
    never match — the comparison is now case-insensitive on both sides.
    """
    print(vids)
    print("Fetching video data")
    chart = billboard.ChartData(name='hot-100-songs', date=str(year), yearEnd=True)
    billboard_titles = set(e.title.lower() for e in chart)
    # params is an array of tuple: (artist, title, rank, year)
    params = [(vid[0], vid[1], 0, year) for vid in vids
              if vid[1].lower() not in billboard_titles]
    p = Pool()
    return p.map(get_data, params)
if __name__ == '__main__':
    # Years 2010 through 2017 inclusive (range() excludes the stop value).
    years = range(2010, 2018)
    for year in years:
        print("Processing year {}".format(year))
        vids = get_mvs_by_year(year)
        data = fetch_mvs_data(vids, year)
        # One JSON dump per year, e.g. 2010_non_billboard_data.json.
        with open('{}_non_billboard_data.json'.format(year), 'w') as f:
            f.write(json.dumps(data))
|
import ConfigParser


class Configuration(ConfigParser.ConfigParser):
    """Seeds an 'extensions' section and writes it to example.cfg on construction."""

    def __init__(self):
        # Fix: the base initialiser was never called, so the parser's
        # internal section storage did not exist and add_section() crashed.
        # ConfigParser.ConfigParser is an old-style class on Python 2, so
        # the base must be called directly rather than via super().
        ConfigParser.ConfigParser.__init__(self)
        self.add_section('extensions')
        self.set('extensions', 'button1', '100')
        self.set('extensions', 'button2', '101')
        with open('example.cfg', 'wb') as configfile:
            self.write(configfile)


Configuration()
from colorama import Fore, Style, init as colorama_init
# Enable ANSI escape handling (needed on Windows terminals).
colorama_init()

# Symbolic names -> ANSI escape prefixes; "neutral" resets styling.
COLOR_DICT = {
    "neutral": Style.RESET_ALL,
    "match": Fore.YELLOW + Style.BRIGHT,
    "diff@": Fore.CYAN + Style.BRIGHT,
    "diff+": Fore.GREEN,
    "diff-": Fore.RED,
    "message": Fore.WHITE + Style.BRIGHT,
}

SEPARATOR = "=" * 80
class Color(object):
    """Maps symbolic color names to ANSI prefixes.

    Unknown names resolve to the empty string, so an instance built from an
    empty dict acts as a no-op colorizer.
    """

    def __init__(self, color_dict):
        self.color_dict = color_dict

    def get(self, name):
        """Return the ANSI sequence registered for *name*, or ''."""
        return self.color_dict.get(name, "")

    def __getitem__(self, name):
        """
        color[<color_code>] returns a function that colors
        given text to that color
        """
        def colorize(content):
            # Looked up lazily so later dict changes are honoured.
            return self.get(name) + content + self.get("neutral")
        return colorize
def get_color(color=False):
    """Return a Color: the full palette when *color* is truthy, otherwise a
    no-op colorizer backed by an empty dict."""
    return Color(COLOR_DICT if color else {})
|
import base64
from hashlib import md5
from django.db import models
from django.core.validators import URLValidator
from shortz import settings
class URLEntry(models.Model):
    """A shortened URL: maps an md5-derived short code to the original URL."""

    date_created = models.DateTimeField(auto_now_add=True)
    url = models.URLField(validators=[URLValidator()])
    # The short code doubles as the primary key; it is filled in save().
    code = models.CharField(primary_key=True, max_length=200, unique=True, blank=True)
    shortened_url = models.URLField(validators=[URLValidator()], blank=True)

    class Meta:
        verbose_name_plural = "URL entries"
        verbose_name = "URL entry"

    def __str__(self):
        return "URLEntry: code={code}, full_url={full_url}".format(code=self.code, full_url=self.url)

    def _shortcode(self, url):
        """Based on: https://pypi.org/project/url_shortener/

        Returns a bytes code: md5(url) base64-encoded with '=' padding
        removed and '/' mapped to '_'.
        NOTE(review): '+' characters from base64 are NOT replaced, so codes
        can contain a URL-unsafe '+'. base64.urlsafe_b64encode would fix
        this, but changing the scheme alters existing primary keys — decide
        with a data migration.
        """
        digest = md5(url.encode('utf-8')).digest()
        b64enc = base64.b64encode(digest)
        return b64enc.replace(b'=',b'').replace(b'/', b'_')

    def save(self, *args, **kwargs):
        # The code is recomputed on every save; since it is the primary key,
        # saving the same URL twice writes to the same row.
        shortcode = self._shortcode(self.url).decode('utf-8')
        self.code = shortcode
        self.shortened_url = '{}{}'.format(settings.DEV_HOST, shortcode) # TODO: replace settings.DEV_HOST
        super().save(*args, **kwargs)
|
# Command to destroy all QoS entries and all Queues
sudo ovs-vsctl -- --all destroy QoS -- --all destroy Queue
# Command to run ryu
# NOTE(review): the next line contains a typo ("simple_swit_13_mod"); the
# corrected command follows it.
sudo ryu-manager ryu.app.ofctl_rest ryu.app.simple_swit_13_mod ryu.app.rest_conf_switch ryu.app.rest_qos ~/ryu/Bruno/MeuApp.py
sudo ryu-manager ryu.app.ofctl_rest ryu.app.simple_switch_13_mod ryu.app.rest_conf_switch ryu.app.rest_qos ~/ryu/Bruno/MeuApp.py
# Since the modules are already imported inside MeuApp
sudo ryu-manager ~/ryu/Bruno/MeuApp.py --observe-links
# Installing networkx
https://networkx.github.io/documentation/networkx-1.1/install.html
|
import itertools
from pysat.solvers import Glucose3
from pysat.card import CardEnc, EncType
# Submission author IDs.
ids = ['314923301', '206693665']
def solve_problem(inputs):
    """Answer each query with "T"/"F"/"?" by SAT entailment.

    A query atom is "T" when only models WITH it exist, "F" when only models
    WITHOUT it exist, and "?" when both exist.

    Fix: the original rebuilt the solver and re-added every clause for each
    query; solving under assumptions does not modify the clause database, so
    one solver is built once and reused across all queries.
    """
    solutionDict = {}
    nPolice, nMedics = inputs["police"], inputs["medics"]
    observations = inputs["observations"]
    b, nRows, nCols = len(observations), len(observations[0]), len(observations[0][0])
    clauses, atomsToIndex = calcAllClauses(b, nRows, nCols, nPolice, nMedics, observations)
    solver = Glucose3()
    for clause in clauses:
        solver.add_clause(clause)
    for query in inputs["queries"]:
        querynum = atomsToIndex[(query[1], query[0], query[2])]
        withquery = solver.solve(assumptions=[querynum])
        withoutquery = solver.solve(assumptions=[-querynum])
        if withquery and withoutquery:
            solutionDict[query] = "?"
        elif withquery:
            solutionDict[query] = "T"
        else:
            solutionDict[query] = "F"
    return solutionDict
def calcAllClauses(b, nRows, nCols, nPolice, nMedics, observations):
    """Build the complete CNF for the problem: initial/observation clauses
    followed by action/transition clauses.

    Returns (clauses, atomsToIndex).
    """
    atomsToIndex, atomCount = AtomsToIndexMatching(b, nRows, nCols, nPolice, nMedics)
    allActionsToIndex, _ = actionToIndexMatching(b, nPolice, nMedics, atomCount, nRows, nCols, atomsToIndex)
    clauses = list(BuildInitialClauses(b, nRows, nCols, atomsToIndex, observations))
    clauses.extend(buildClausesAndEffects(b, nRows, nCols, nPolice, nMedics, allActionsToIndex, atomsToIndex))
    return clauses, atomsToIndex
def AllPossibleInfectionIndices(nRows, nCols):
    """Return every ordered (cell, neighbor) pair of 4-adjacent board cells,
    in row-major cell order with neighbors checked up, down, left, right."""
    return [
        ((row, col), (nr, nc))
        for row in range(nRows)
        for col in range(nCols)
        for nr, nc in ((row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1))
        if 0 <= nr < nRows and 0 <= nc < nCols
    ]
def buildMapIndices(nRows, nCols):
    """Return all (row, col) board coordinates in row-major order."""
    return [(i, j) for i in range(nRows) for j in range(nCols)]
def AtomsToIndexMatching(b, nRows, nCols, nPolice, nMedics):
    """Assign a unique positive SAT-variable index to every atom.

    State atoms (roundT, (row, col), state) come first in round/row/col/state
    order; then per-round "agent used" atoms ("Q", p, roundT) / ("V", m,
    roundT) for rounds 0..b-2. Returns (atomsToIndex, nextFreeIndex).
    """
    states = ("U", "H", "S", "I", "Q")
    atoms = [
        (roundT, (row, col), state)
        for roundT in range(b)
        for row in range(nRows)
        for col in range(nCols)
        for state in states
    ]
    for roundT in range(b - 1):
        atoms.extend(("Q", p, roundT) for p in range(nPolice))
        atoms.extend(("V", m, roundT) for m in range(nMedics))
    atomsToIndex = {atom: i for i, atom in enumerate(atoms, start=1)}
    return atomsToIndex, len(atoms) + 1
def actionToIndexMatching(b, nPolice, nMedics, len3, nRows, nCols, atomsToIndex):
    """Assign SAT-variable indices (starting at len3 + 1) to every action.

    Order: quarantine/vaccinate per round+cell+agent; infection per
    round+adjacent pair; one persist/no-op action per atom (keyed by the
    atom's variable index); heal per round>=2+cell; exit-quarantine per
    round>=1+cell. Returns (actionToIndex, nextFreeIndex).
    """
    firstIdx = len3 + 1
    cells = buildMapIndices(nRows, nCols)
    spreadPairs = AllPossibleInfectionIndices(nRows, nCols)

    keys = []
    for roundT in range(b - 1):
        for cell in cells:
            keys.extend(("Q", p, roundT, cell) for p in range(nPolice))
            keys.extend(("V", m, roundT, cell) for m in range(nMedics))
    for roundT in range(b - 1):
        keys.extend((roundT, pair) for pair in spreadPairs)
    # Persist pseudo-actions, keyed directly by the atom's variable index.
    keys.extend(atomsToIndex.values())
    for roundT in range(2, b):
        keys.extend(("heal", roundT, cell) for cell in cells)
    for roundT in range(1, b):
        keys.extend(("exitQ", roundT, cell) for cell in cells)

    actionToIndex = {key: firstIdx + offset for offset, key in enumerate(keys)}
    return actionToIndex, firstIdx + len(keys)
def BuildInitialClauses(b, nRows, nCols, atomsToIndex, observations):
    """Clauses pinning the board to the observations: no "Q"/"I" at round 0,
    observed cells fixed to their state, and exactly one state per cell per
    round."""
    states = ["U", "H", "S", "I", "Q"]
    initialConstraints = []
    # Nobody is quarantined or immune before any action was taken.
    for row in range(nRows):
        for col in range(nCols):
            for state in ["Q", "I"]:
                idx = atomsToIndex[(0, (row, col), state)]
                initialConstraints.append([-idx])
    for roundT in range(b):
        for row in range(nRows):
            for col in range(nCols):
                cur_state = observations[roundT][row][col]
                if cur_state != "?":
                    # Known observation: unit clauses fixing the state.
                    for state in states:
                        curIdx = atomsToIndex[(roundT, (row, col), state)]
                        if cur_state == state:
                            initialConstraints.append([curIdx])
                        else:
                            initialConstraints.append([-curIdx])
                    atleastOne = [atomsToIndex[(roundT, (row, col), x)] for x in states]
                else:
                    # Unknown cell: restrict to the states possible this round
                    # ("I"/"Q" are impossible at round 0, see above).
                    if roundT == 0:
                        atleastOne = [atomsToIndex[(roundT, (row, col), x)] for x in ["U", "H", "S"]]
                    else:
                        atleastOne = [atomsToIndex[(roundT, (row, col), x)] for x in ["U", "H", "S", "I", "Q"]]
                # Exactly-one-state: the at-least-one clause plus pairwise
                # at-most-one clauses from negateLinearConstraints().
                onlyOne = negateLinearConstraints(atleastOne)
                initialConstraints += [atleastOne]
                initialConstraints += onlyOne
    return initialConstraints
def negateLinearConstraints(actionsList: list):
    """Pairwise at-most-one encoding: one clause per pair of negated literals."""
    negated = [-lit for lit in actionsList]
    return [list(pair) for pair in itertools.combinations(negated, 2)]
def buildClausesAndEffects(b, nRows, nCols, nPolice, nMedics, actionToIndex, atomsToIndex):
    """Build all transition clauses: action preconditions, agent cardinality
    limits, infection spread, state persistence, healing, quarantine exit,
    pairwise interference (mutual exclusion) and per-round frame axioms."""
    actionClauses = []
    mapIndicesList = buildMapIndices(nRows, nCols)
    DeseaseSpread = AllPossibleInfectionIndices(nRows, nCols)
    actionEffectsAtPreviousT = {}
    for roundT in range(b - 1):
        # actionIndex -> (preconditions, add effects, delete effects)
        actionEffectsAtT = {}
        for idx in mapIndicesList:
            for p in range(nPolice):
                actionIndex = actionToIndex[("Q", p, roundT, idx)]
                curClauses, curPre, curAdd, curDel = modelAgentQAction(b, actionIndex,
                                                                      roundT, idx, atomsToIndex)
                actionEffectsAtT[actionIndex] = (curPre, curAdd, curDel)
                actionClauses += curClauses
            for m in range(nMedics):
                actionIndex = actionToIndex[("V", m, roundT, idx)]
                curClauses, curPre, curAdd, curDel = modelAgentVAction(b, actionIndex,
                                                                      roundT, idx, atomsToIndex)
                actionEffectsAtT[actionIndex] = (curPre, curAdd, curDel)
                actionClauses += curClauses
        for idx in mapIndicesList:  # at most 1 quarantine and at most 1 vaccination in each place in the map
            qvars = [actionToIndex[("Q", p, roundT, idx)] for p in range(nPolice)]
            ivars = [actionToIndex[("V", m, roundT, idx)] for m in range(nMedics)]
            actionClauses += CardEnc.atmost(lits=qvars, encoding=EncType.pairwise, bound=1).clauses
            actionClauses += CardEnc.atmost(lits=ivars, encoding=EncType.pairwise, bound=1).clauses
        for p in range(nPolice):  # each police can be used at most once each turn
            qvars = [actionToIndex[("Q", p, roundT, idx)] for idx in mapIndicesList]
            actionClauses += CardEnc.atmost(lits=qvars, encoding=EncType.pairwise, bound=1).clauses
        for m in range(nMedics):  # each medic can be used at most once each turn
            ivars = [actionToIndex[("V", m, roundT, idx)] for idx in mapIndicesList]
            actionClauses += CardEnc.atmost(lits=ivars, encoding=EncType.pairwise, bound=1).clauses
        # Link each agent's per-cell action variables with its "used this
        # round" atom in both directions (action => used; used => some action).
        for p in range(nPolice):
            actionClauses += [[-actionToIndex[("Q", p, roundT, idx)], atomsToIndex[("Q", p, roundT)]] for idx in
                              mapIndicesList]
            actionClauses += [
                [-atomsToIndex[("Q", p, roundT)]] + [actionToIndex[("Q", p, roundT, idx)] for idx in mapIndicesList]]
        for m in range(nMedics):
            actionClauses += [[-actionToIndex[("V", m, roundT, idx)], atomsToIndex[("V", m, roundT)]] for idx in
                              mapIndicesList]
            actionClauses += [
                [-atomsToIndex[("V", m, roundT)]] + [actionToIndex[("V", m, roundT, idx)] for idx in mapIndicesList]]
        for pair in DeseaseSpread:
            actionIndex = actionToIndex[(roundT, pair)]
            curClauses, curPre, curAdd, curDel = ModelInfection(b, roundT, pair, atomsToIndex, actionIndex)
            actionEffectsAtT[actionIndex] = (curPre, curAdd, curDel)
            actionClauses += curClauses
        # Persist ("no-op") actions: one per atom true at this round, carrying
        # it into the next round, subject to state-specific conditions below.
        for atom, atomIdx in atomsToIndex.items():
            # Agent atoms like ("Q", p, t) fail the int comparison and are skipped.
            if atom[0] == roundT:
                add = atomsToIndex[(atom[0] + 1, atom[1], atom[2])]
                cur_state = atom[2]
                actionIdx = actionToIndex[atomIdx]
                actionEffectsAtT[actionToIndex[atomIdx]] = ([atomIdx], [add], [])
                actionClauses.append([-actionIdx, atomIdx])
                if cur_state == "H":
                    # persist-H => every medic's "used this round" atom holds.
                    required = [atomsToIndex[("V", m, roundT)] for m in range(nMedics)]
                    for req in required:
                        actionClauses.append([req, -actionIdx])
                    # persist-H => each sick neighbor is quarantined next round.
                    row, col = atom[1][0], atom[1][1]
                    for neighbor in [(row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)]:
                        if 0 <= neighbor[0] <= nRows - 1 and 0 <= neighbor[1] <= nCols - 1:
                            actionClauses.append([-actionIdx, atomsToIndex[(roundT + 1, neighbor, "Q")],
                                                  -atomsToIndex[(roundT, neighbor, "S")]])
                if cur_state == "Q":
                    if roundT >= 1:
                        # "Q" cannot persist past its second consecutive round.
                        preC = [atomsToIndex[(roundT, atom[1], "Q")], atomsToIndex[(roundT - 1, atom[1], "Q")]]
                        clause = [-actionIdx]
                        for pre in preC:
                            clause.append(-pre)
                        actionClauses.append(clause)
                if cur_state == "S":
                    # persist-S => every police agent's "used this round" atom holds.
                    required = [atomsToIndex[("Q", p, roundT)] for p in range(nPolice)]
                    for req in required:
                        actionClauses.append([req, -actionIdx])
                    if roundT >= 2:
                        # "S" cannot persist past its third consecutive round.
                        preC = [atomsToIndex[(roundT, atom[1], "S")], atomsToIndex[(roundT - 1, atom[1], "S")],
                                atomsToIndex[(roundT - 2, atom[1], "S")]]
                        clause = [-actionIdx]
                        for pre in preC:
                            clause.append(-pre)
                        actionClauses.append(clause)
        if roundT >= 2:
            for idx in mapIndicesList:
                actionIndex = actionToIndex[("heal", roundT, idx)]
                curClauses, curPre, curAdd, curDel = ModelHealing(roundT, idx, atomsToIndex, actionIndex)
                actionEffectsAtT[actionIndex] = (curPre, curAdd, curDel)
                actionClauses += curClauses
        if roundT >= 1:
            for idx in mapIndicesList:
                actionIndex = actionToIndex[("exitQ", roundT, idx)]
                curClauses, curPre, curAdd, curDel = ModelExitQ(roundT, idx, atomsToIndex, actionIndex)
                actionEffectsAtT[actionIndex] = (curPre, curAdd, curDel)
                actionClauses += curClauses
        # Mutual exclusion between interfering actions of this round.
        interferClauses = BuildInterferClauses(actionEffectsAtT)
        actionClauses += interferClauses
        if roundT >= 1:
            # Frame axiom: facts at roundT must be achieved by some action of
            # the previous round.
            factAchieveClauses = BuildFactAchieveClauses(actionEffectsAtPreviousT, atomsToIndex, roundT)
            actionClauses += factAchieveClauses
        actionEffectsAtPreviousT = actionEffectsAtT
    if actionEffectsAtPreviousT != {}:
        # Frame axiom for the final round.
        factAchieveClauses = BuildFactAchieveClauses(actionEffectsAtPreviousT, atomsToIndex, b - 1)
        actionClauses += factAchieveClauses
    return actionClauses
def ModelExitQ(roundT, idx, atomsToIndex, actionIdx):
    """Model leaving quarantine: "Q" for two consecutive rounds makes the
    cell "H" next round (and not Q/S/I/U).

    Returns (clauses, preconditions, addEffects, deleteEffects); the clauses
    encode action => each precondition.
    """
    preC = [atomsToIndex[(roundT, idx, "Q")],
            atomsToIndex[(roundT - 1, idx, "Q")]]
    addE = [atomsToIndex[(roundT + 1, idx, "H")]]
    delE = [atomsToIndex[(roundT + 1, idx, state)] for state in ("Q", "S", "I", "U")]
    clauses = [[-actionIdx, pre] for pre in preC]
    return clauses, preC, addE, delE
def ModelHealing(roundT, idx, atomsToIndex, actionIdx):
    """Model recovery: "S" for three consecutive rounds makes the cell "H"
    next round (and not S/Q/I/U).

    Returns (clauses, preconditions, addEffects, deleteEffects); the clauses
    encode action => each precondition.
    """
    preC = [atomsToIndex[(roundT - back, idx, "S")] for back in (0, 1, 2)]
    addE = [atomsToIndex[(roundT + 1, idx, "H")]]
    delE = [atomsToIndex[(roundT + 1, idx, state)] for state in ("S", "Q", "I", "U")]
    clauses = [[-actionIdx, pre] for pre in preC]
    return clauses, preC, addE, delE
def BuildFactAchieveClauses(actionEffectsAtPreviousT, atomsToIndex, roundT):
    """Frame axiom clauses: every atom true at *roundT* must be the add
    effect of at least one previous-round action.

    actionEffectsAtPreviousT maps actionIdx -> (pre, add, delete) lists.
    """
    clauses = []
    for atom, atomIdx in atomsToIndex.items():
        if atom[0] != roundT:
            continue
        achievers = [actionIdx for actionIdx, effects in actionEffectsAtPreviousT.items()
                     if atomIdx in effects[1]]
        clauses.append([-atomIdx] + achievers)
    return clauses
def BuildInterferClauses(actionEffectsAtT):
    """Mutual-exclusion clauses forbidding every interfering pair of
    same-round actions (both orderings are visited, yielding both clauses)."""
    clauses = []
    for idxA, effectsA in actionEffectsAtT.items():
        for idxB, effectsB in actionEffectsAtT.items():
            if idxA == idxB:
                continue
            if AreInterfering(effectsA, effectsB):
                clauses.append([-idxA, -idxB])
    return clauses
def AreInterfering(action1Effects, action2Effects):
    """Return True when either action deletes a fact the other one requires
    or adds (effects are (preconditions, addEffects, deleteEffects))."""
    touched2 = set(action2Effects[0]) | set(action2Effects[1])
    touched1 = set(action1Effects[0]) | set(action1Effects[1])
    clash12 = set(action1Effects[2]) & touched2
    clash21 = set(action2Effects[2]) & touched1
    return bool(clash12) or bool(clash21)
def ModelInfection(b, roundT, pair, atomsToIndex, actionIndex):
    """Model disease spread along one adjacency: a sick source infects a
    healthy target unless the source is quarantined next round.

    pair is (source, target); returns (clauses, preconditions, addEffects,
    deleteEffects) where clauses encode action => each precondition (note the
    third precondition is a NEGATED literal).
    """
    source, target = pair
    preConditions = [atomsToIndex[(roundT, target, "H")],
                     atomsToIndex[(roundT, source, "S")],
                     -atomsToIndex[(roundT + 1, source, "Q")]]
    addEffects, deleteEffects = [], []
    if roundT + 1 <= b - 1:
        deleteEffects = [atomsToIndex[(roundT + 1, target, state)]
                         for state in ("H", "Q", "I", "U")]
        addEffects = [atomsToIndex[(roundT + 1, target, "S")]]
    clauses = [[-actionIndex, condition] for condition in preConditions]
    return clauses, preConditions, addEffects, deleteEffects
def modelAgentQAction(b, actionIdx, roundT, indexQ, atomsToIndex):
    """Model a police agent quarantining a sick cell: it becomes "Q" next
    round (and not S/H/I/U).

    Returns (clauses, preconditions, addEffects, deleteEffects); the clauses
    encode action => each precondition.
    """
    preConditions = [atomsToIndex[(roundT, indexQ, "S")]]
    addEffects, deleteEffects = [], []
    if roundT + 1 <= b - 1:
        addEffects = [atomsToIndex[(roundT + 1, indexQ, "Q")]]
        deleteEffects = [atomsToIndex[(roundT + 1, indexQ, state)]
                         for state in ("S", "H", "I", "U")]
    actionClauses = [[-actionIdx, condition] for condition in preConditions]
    return actionClauses, preConditions, addEffects, deleteEffects
def modelAgentVAction(b, actionIdx, roundT, indexV, atomsToIndex):
    """Model a medic vaccinating a healthy cell: it becomes and stays immune
    ("I") for every remaining round.

    Returns (clauses, preconditions, addEffects, deleteEffects); the clauses
    encode action => each precondition.

    Fix: the original delete-effect list contained "H" twice and omitted
    "S"; an immune cell must exclude all of H, Q, S and U.
    """
    actionClauses, deleteEffects, addEffects, preConditions = [], [], [], []
    # Only a currently healthy cell can be vaccinated.
    preConditions.append(atomsToIndex[(roundT, indexV, "H")])
    for t in range(1, b):
        if roundT + t <= b - 1:
            addEffects.append(atomsToIndex[(roundT + t, indexV, "I")])
            deleteEffects += [atomsToIndex[(roundT + t, indexV, "H")],
                              atomsToIndex[(roundT + t, indexV, "Q")],
                              atomsToIndex[(roundT + t, indexV, "S")],
                              atomsToIndex[(roundT + t, indexV, "U")]]
    for condition in preConditions:
        actionClauses.append([-actionIdx, condition])
    return actionClauses, preConditions, addEffects, deleteEffects
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from django.contrib.auth.models import User
from .models import Brand, Person, Location, Data
from .serializers import BrandSerializer, PersonSerializer, \
LocationSerializer, DataSerializer, UserSerializer
class BrandList(ListCreateAPIView):
    """List all brands or create one; requires a valid JWT."""
    queryset = Brand.objects.all()
    serializer_class = BrandSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class BrandDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single brand; requires a valid JWT."""
    queryset = Brand.objects.all()
    serializer_class = BrandSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class PersonList(ListCreateAPIView):
    """List all persons or create one; requires a valid JWT."""
    queryset = Person.objects.all()
    serializer_class = PersonSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class PersonDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single person; requires a valid JWT."""
    queryset = Person.objects.all()
    serializer_class = PersonSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class LocationList(ListCreateAPIView):
    """List all locations or create one; requires a valid JWT."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class LocationDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single location; requires a valid JWT."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class DataList(ListCreateAPIView):
    """List all data entries or create one; requires a valid JWT."""
    queryset = Data.objects.all()
    serializer_class = DataSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class DataDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single data entry; requires a valid JWT."""
    queryset = Data.objects.all()
    serializer_class = DataSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (JSONWebTokenAuthentication,)
class UserView(RetrieveAPIView):
    """Retrieve a single user (read-only); requires a valid JWT."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = (IsAuthenticated, )
    authentication_classes = (JSONWebTokenAuthentication, )
'''Convert pretrained Darknet weights into YOLOv2.
Darknet19 model download from: https://drive.google.com/file/d/0B4pXCfnYmG1WRG52enNpcV80aDg/view
'''
import torch
import numpy as np
import torch.nn as nn
from darknet import Darknet
net = Darknet()
darknet = np.load('./model/darknet19.weights.npz')


def _load_conv_bn(layer, conv_ids, first_block):
    """Copy conv weights/biases and the following BatchNorm statistics from
    the darknet npz archive into *layer*.

    conv_ids: indices of the Conv2d modules inside *layer*; each BatchNorm is
    assumed to sit at conv_id + 1. first_block: index of the first darknet
    '%d-convolutional' group mapped onto this layer.

    NOTE(review): gamma and the running stats are loaded, but the BN shift
    (beta) is not — presumably the conv bias plays that role; confirm.
    """
    for i, conv_id in enumerate(conv_ids):
        k = first_block + i
        # transpose (3, 2, 0, 1) reorders the kernel into PyTorch's OIHW
        # layout (the npz appears to store HWIO).
        layer[conv_id].weight.data = torch.from_numpy(
            darknet['%d-convolutional/kernel:0' % k].transpose((3, 2, 0, 1)))
        layer[conv_id].bias.data = torch.from_numpy(
            darknet['%d-convolutional/biases:0' % k])
        bn = layer[conv_id + 1]
        bn.weight.data = torch.from_numpy(darknet['%d-convolutional/gamma:0' % k])
        bn.running_mean = torch.from_numpy(darknet['%d-convolutional/moving_mean:0' % k])
        bn.running_var = torch.from_numpy(darknet['%d-convolutional/moving_variance:0' % k])


# layer1 takes darknet conv blocks 0-12, layer2 takes blocks 13-17
# (deduplicates the two copy loops the original repeated inline).
_load_conv_bn(net.layer1, [0, 4, 8, 11, 14, 18, 21, 24, 28, 31, 34, 37, 40], 0)
_load_conv_bn(net.layer2, [1, 4, 7, 10, 13], 13)

torch.save(net.state_dict(), './model/darknet.pth')
|
#!/usr/bin/env python2
import sys
from finder.finder import XSSFinder
if __name__ == '__main__':
    # Scan level 1 of Google's XSS game for injectable parameters.
    scanner = XSSFinder('https://xss-game.appspot.com/level1/frame')
    scanner.scan()
#!/usr/bin/python3
"""Minimal CGI web server on port 8888; scripts are served from /web."""
import http.server

PORT = 8888
server_address = ("", PORT)
server = http.server.HTTPServer
handler = http.server.CGIHTTPRequestHandler
# CGI scripts are looked up under this directory.
handler.cgi_directories = ["/web"]

# Bind outside the try so the shutdown path always has a valid instance.
httpd = server(server_address, handler)
try:
    print("Serveur actif sur le port :", PORT)
    httpd.serve_forever()
except KeyboardInterrupt:
    print('^C received, shutting down the web server')
finally:
    # Fix: the original called close() on the HTTPServer CLASS, which has no
    # such attribute; server_close() on the instance releases the socket.
    httpd.server_close()
import math
from unittest import TestCase, main
import numpy as np
from ... import LinearProgram
class TestToSEF(TestCase):
def test_to_sef(self) -> None:
    """to_sef() converts programs into standard equality form: maximization,
    equality constraints, and nonnegative variables only."""
    # Scenario 1: "min" objective, mixed >=/<=/= constraints, x3 free —
    # expect negated objective, slack columns and the free variable split.
    A = np.array([[1, 5, 3], [2, -1, 2], [1, 2, -1]])
    b = np.array([5, 4, 2])
    c = np.array([1, -2, 4])
    z = 0
    p = LinearProgram(A, b, c, z, "min", [">=", "<=", "="], [3])
    p.to_sef(in_place=True)
    self.assertTrue(
        np.allclose(
            p.A,
            np.array(
                [[1, 5, 3, -3, -1, 0], [2, -1, 2, -2, 0, 1], [1, 2, -1, 1, 0, 0]]
            ),
        ),
        "Should compute correct coefficient matrix in SEF.",
    )
    self.assertTrue(
        np.allclose(p.b, np.array([5, 4, 2])),
        "Should compute correct constraint values in SEF.",
    )
    self.assertTrue(
        np.allclose(p.c, np.array([-1, 2, -4, 4, 0, 0])),
        "Should compute correct coefficient vector in SEF.",
    )
    self.assertTrue(math.isclose(p.z, 0), "Should compute correct constant in SEF.")
    self.assertEqual(
        p.inequalities,
        ["="] * len(b),
        "Should compute correct inequalities in SEF.",
    )
    self.assertEqual(
        p.objective, "max", "Should be maximizing objective function in SEF."
    )
    self.assertEqual(p.free_variables, [], "Should have no free variables.")
    self.assertEqual(p.negative_variables, [], "Should have no negative variables.")
    # Scenario 2: already "max", x2 free — only the free split and slacks.
    A = np.array([[1, 2, 0, 1], [1, -2, 16, 0], [8, 2, -3, 1]])
    b = np.array([10, 14, -2])
    c = np.array([-2, 3, -4, 1])
    z = 0
    p = LinearProgram(A, b, c, z, "max", ["<=", "<=", "="], [2])
    p.to_sef(in_place=True)
    self.assertTrue(
        np.allclose(
            p.A,
            np.array(
                [
                    [1, 2, -2, 0, 1, 1, 0],
                    [1, -2, 2, 16, 0, 0, 1],
                    [8, 2, -2, -3, 1, 0, 0],
                ]
            ),
        ),
        "Should compute correct coefficient matrix in SEF.",
    )
    self.assertTrue(
        np.allclose(p.b, np.array([10, 14, -2])),
        "Should compute correct constraint values in SEF.",
    )
    self.assertTrue(
        np.allclose(p.c, np.array([-2, 3, -3, -4, 1, 0, 0])),
        "Should compute correct coefficient vector in SEF.",
    )
    self.assertTrue(math.isclose(p.z, 0), "Should compute correct constant in SEF.")
    self.assertEqual(
        p.inequalities,
        ["="] * len(b),
        "Should compute correct inequalities in SEF.",
    )
    self.assertEqual(
        p.objective, "max", "Should be maximizing objective function in SEF."
    )
    self.assertEqual(p.free_variables, [], "Should have no free variables.")
    self.assertEqual(p.negative_variables, [], "Should have no negative variables.")
    # Scenario 3: same program with x3, x4 declared negative — those columns
    # are negated in addition to the free split and slacks.
    A = np.array([[1, 2, 0, 1], [1, -2, 16, 0], [8, 2, -3, 1]])
    b = np.array([10, 14, -2])
    c = np.array([-2, 3, -4, 1])
    z = 0
    p = LinearProgram(A, b, c, z, "max", ["<=", "<=", "="], [2], [3, 4])
    p.to_sef(in_place=True)
    self.assertTrue(
        np.allclose(
            p.A,
            np.array(
                [
                    [1, 2, -2, 0, -1, 1, 0],
                    [1, -2, 2, -16, 0, 0, 1],
                    [8, 2, -2, 3, -1, 0, 0],
                ]
            ),
        ),
        "Should compute correct coefficient matrix in SEF.",
    )
    self.assertTrue(
        np.allclose(p.b, np.array([10, 14, -2])),
        "Should compute correct constraint values in SEF.",
    )
    self.assertTrue(
        np.allclose(p.c, np.array([-2, 3, -3, 4, -1, 0, 0])),
        "Should compute correct coefficient vector in SEF.",
    )
    self.assertTrue(math.isclose(p.z, 0), "Should compute correct constant in SEF.")
    self.assertEqual(
        p.inequalities,
        ["="] * len(b),
        "Should compute correct inequalities in SEF.",
    )
    self.assertEqual(
        p.objective, "max", "Should be maximizing objective function in SEF."
    )
    self.assertEqual(p.free_variables, [], "Should have no free variables.")
    self.assertEqual(p.negative_variables, [], "Should have no negative variables.")
def test_random_order_free_variables(self) -> None:
    """to_sef must handle free-variable indices supplied out of order."""
    A = np.array([[1, 2, 0, 1], [1, -2, 16, 0], [8, 2, -3, 1]])
    b = np.array([10, 14, -2])
    c = np.array([-2, 3, -4, 1])
    z = 0

    lp = LinearProgram(A, b, c, z, "max", free_variables=[2, 1])
    lp.to_sef(in_place=True)

    # Each free variable x is replaced by the difference x+ - x- of two
    # nonnegative variables, so the expected matrix gains negated columns.
    expected_A = np.array(
        [[1, -1, 2, -2, 0, 1], [1, -1, -2, 2, 16, 0], [8, -8, 2, -2, -3, 1]]
    )
    expected_b = np.array([10, 14, -2])
    expected_c = np.array([-2, 2, 3, -3, -4, 1])

    self.assertTrue(
        np.allclose(lp.A, expected_A),
        "Should compute correct coefficient matrix in SEF.",
    )
    self.assertTrue(
        np.allclose(lp.b, expected_b),
        "Should compute correct constraint values in SEF.",
    )
    self.assertTrue(
        np.allclose(lp.c, expected_c),
        "Should compute correct coefficient vector in SEF.",
    )
    self.assertTrue(math.isclose(lp.z, 0), "Should compute correct constant in SEF.")
    self.assertEqual(
        lp.inequalities,
        ["="] * len(b),
        "Should compute correct inequalities in SEF.",
    )
    self.assertEqual(
        lp.objective, "max", "Should be maximizing objective function in SEF."
    )
    self.assertEqual(lp.free_variables, [], "Should have no free variables.")
    self.assertEqual(lp.negative_variables, [], "Should have no negative variables.")
def test_random_order_negative_variables(self) -> None:
    """to_sef must handle negative-variable indices supplied out of order."""
    A = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    b = np.array([10, 14, -2])
    c = np.array([2, 3, 4])
    z = 0

    lp = LinearProgram(
        A, b, c, z, "max", free_variables=[2], negative_variables=[3, 1]
    )
    lp.to_sef(in_place=True)

    # Negative variables get their columns negated; the free variable is
    # split into a difference of nonnegatives.
    expected_A = np.array([[0, 0, 0, -1], [-1, 0, 0, 0], [0, 1, -1, 0]])
    expected_b = np.array([10, 14, -2])
    expected_c = np.array([-2, 3, -3, -4])

    self.assertTrue(
        np.allclose(lp.A, expected_A),
        "Should compute correct coefficient matrix in SEF.",
    )
    self.assertTrue(
        np.allclose(lp.b, expected_b),
        "Should compute correct constraint values in SEF.",
    )
    self.assertTrue(
        np.allclose(lp.c, expected_c),
        "Should compute correct coefficient vector in SEF.",
    )
    self.assertTrue(math.isclose(lp.z, 0), "Should compute correct constant in SEF.")
    self.assertEqual(
        lp.inequalities,
        ["="] * len(b),
        "Should compute correct inequalities in SEF.",
    )
    self.assertEqual(
        lp.objective, "max", "Should be maximizing objective function in SEF."
    )
    self.assertEqual(lp.free_variables, [], "Should have no free variables.")
    self.assertEqual(lp.negative_variables, [], "Should have no negative variables.")
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    main()
|
from lib.saga_service.filesystem_service import FilesystemService
import time
import saga
class JobSubmissionService:
    """
    Service for submitting jobs into the GRID.

    Supports submitting jobs over ssh.
    """

    def __init__(self, saga=saga, saga_job=saga.job,
                 filesystem=FilesystemService()):
        """
        Creates new instance of JobSubmissionService.

        :param saga: Saga module.
        :param saga_job: Saga job module.
        :param filesystem: Service for performing filesystem operations.
        """
        self._saga = saga
        self._saga_job = saga_job
        self._filesystem = filesystem
        # Captured job stdout after submit_job() when no output file was
        # requested; otherwise stays None.
        self._job_output = None

    def submit_job(self, command, arguments, input_file,
                   output_file, connection_string):
        """
        Submits a job to the GRID.

        If input file is specified it's set as job's argument.
        Remote input file is copied to staging directory.
        If no output file is specified, method returns job output.
        If output file is a remote file, it'll be copied.
        Temporary files (input/output) are staged to /tmp directory.

        :param command: Command to invoke.
        :param arguments: Command's arguments.
        :param input_file: Optional command's input file.
        :param output_file: Optional command's output file.
        :param connection_string: String specifying how to connect to remote host.
        """
        self._connection_string = connection_string
        self._prepare_input_output(input_file, output_file)
        self._run_job(command, arguments)
        self._handle_output(output_file)
        return self._job_output

    def _prepare_input_output(self, input_file, output_file):
        # Stage the input file (if any) and decide where job output goes.
        self._prepare_input_file(input_file)
        self._set_output_file(output_file)

    def _run_job(self, command, arguments):
        # Submit the job and block until it finishes.
        job = self._create_job(command, arguments)
        job.run()
        job.wait()

    def _create_job(self, command, arguments):
        # Build a saga job service and description for the remote host.
        session = self._get_session()
        js = self._saga_job.Service(self._connection_string, session=session)
        jd = self._get_job_description(command, arguments)
        return js.create_job(jd)

    def _get_session(self):
        # The security-context type is the scheme of the connection string
        # (e.g. "ssh://host" -> "ssh").
        connection_type = self._connection_string.split("://")[0]
        ctx = self._saga.Context(connection_type)
        session = self._saga.Session()
        session.add_context(ctx)
        return session

    def _get_job_description(self, command, arguments):
        jd = self._saga_job.Description()
        jd.executable = command
        # NOTE(review): arguments is wrapped in a one-element list, so a
        # multi-argument string is passed as a single argument — confirm
        # this is the intended contract with callers.
        jd.arguments = [arguments]
        if self._input_file:
            jd.input = self._input_file
        jd.output = self._job_output_file
        return jd

    def _set_output_file(self, output_file):
        # A local output path is written directly; otherwise job stdout is
        # redirected to a unique temporary file (copied back later).
        is_local_file = (output_file is not None
                         and self._is_local_file(output_file))
        if is_local_file:
            self._job_output_file = output_file
        else:
            self._job_output_file = self._get_tmp_output_file()

    def _prepare_input_file(self, input_file):
        # Local (or absent) input is used as-is; remote input is first
        # copied to a unique /tmp staging path.
        if input_file is None or self._is_local_file(input_file):
            self._input_file = input_file
        else:
            self._copy_input_file(input_file)

    def _is_local_file(self, file_path):
        # A path without a "scheme://" prefix is considered local.
        host_separator_index = file_path.find("://")
        return host_separator_index == -1

    def _copy_input_file(self, input_file):
        local_path = self._get_tmp_input_file()
        dst_path = self._saga_file_path(local_path)
        self._filesystem.copy_and_overwrite([input_file], dst_path)
        self._input_file = local_path

    def _handle_output(self, output_file):
        self._copy_output_file(output_file)
        if output_file is None:
            # No destination requested: read the job's stdout back instead.
            self._capture_job_output()

    def _copy_output_file(self, output_file):
        if output_file is None:
            return
        is_remote = not self._is_local_file(output_file)
        if is_remote:
            src = self._saga_file_path(self._job_output_file)
            self._filesystem.copy_and_overwrite([src], output_file)

    def _saga_file_path(self, path):
        # Qualify a local path with the connection string so the filesystem
        # service addresses it on the remote host.
        return self._connection_string + path

    def _capture_job_output(self):
        path = self._connection_string + self._job_output_file
        self._job_output = self._filesystem.cat([path])

    def _get_tmp_output_file(self):
        return self._unique_path("/tmp/s210664-saga-tmp-output-file")

    def _get_tmp_input_file(self):
        return self._unique_path("/tmp/s210664-saga-tmp-input-file")

    def _unique_path(self, path):
        # A time.time() suffix makes concurrent submissions unlikely to
        # collide on the staging paths.
        return "%s-%s" % (path, time.time())
|
#!/usr/bin/python
# coding: utf-8
import json

import mysql.connector
import requests

__author__ = 'tangjia'

# Shared headers for every REST call; an Authorization header is added at
# runtime.  Fixed typo: was 'applocation/json', which is not a valid media
# type and would be ignored or rejected by servers.
JSON_HEADER = {'content-type': 'application/json'}
REST_HOST = "https://a1.easemob.com"

# Test (sandbox) Easemob credentials, kept for reference:
# APP_KEY="beijingfahaifuneng#baymax"
# APP_CLIENT_ID="YXA6kynz0MrhEeSS2Q2e11-3BA"
# APP_CLIENT_SECRET="YXA63Xe21wigqK4mgDB1Z0T70KGKNZ4"

# Production Easemob credentials.
# SECURITY NOTE(review): credentials are hard-coded in source; consider
# moving them to environment variables or a config file.
APP_KEY = "fahaicom#baymax"
APP_CLIENT_ID = "YXA6qzxTcAmWEeWG_fVPP2mf6g"
APP_CLIENT_SECRET = "YXA6FVCmT-5D0SHkLgXBtXlPsdjxMwk"

# Base REST URL: <host>/<org>/<app>, derived from APP_KEY ("org#app").
REQU_URL = REST_HOST + '/' + APP_KEY.split("#")[0] + "/" + APP_KEY.split("#")[1]

# Database connections (test environments), kept for reference:
# conn = mysql.connector.connect(host='wusong.xproj.net', user='common', password='0vCMeQIF4OpHzd6ld0', db='common',
#                                port='9527')
# conn = mysql.connector.connect(host='wusong.xproj.net', user='zhanghao', password='123456', db='common',
#                                port='9527')
# NOTE(review): `conn` is referenced by get_commonUserId_by_wxUser() and
# insert_user(), but every connect() call is commented out — one must be
# restored before the database helpers can run.

# WeChat (enterprise) credentials.
con_token = "lPkwLJUoal17Tyi"
con_ASEkey = "Swpl1KEHnMiLXN3CJIv1PYV7uKgURXH6bWN5f36PegK"
corpID = "wx795e9c2599be030a"
secret = "HKszRWFcwBrqQ-aneyDxrFjbOEHzihMA7nt8-U1UiUFdriCR-YTWBDB-mlSiMD8a"
access_token = "Yn8sjFAxSFJRKG6tMFROK97FH3VY8BxROWLpP7OM7ZYLgrwihYeIlkYWWTMyO5QOoXh378DHCyKCIsRmmMI7Qw"
def post(url, param_data):
    """POST *param_data* as a JSON body to *url* and return the decoded JSON."""
    payload = json.dumps(param_data)
    response = requests.post(url, data=payload, headers=JSON_HEADER)
    return response.json()
def get(url):
    """GET *url* with the shared headers and return the JSON body (utf-8)."""
    resp = requests.get(url, headers=JSON_HEADER)
    # Force utf-8 so Chinese text decodes correctly regardless of the
    # charset the server advertises.
    resp.encoding = 'utf-8'
    return resp.json()
def get_HXtoken():
    """Request an Easemob (Huanxin) app token via the client_credentials grant."""
    credentials = {
        "grant_type": "client_credentials",
        "client_id": APP_CLIENT_ID,
        "client_secret": APP_CLIENT_SECRET
    }
    token_url = REQU_URL + '/token'
    print(token_url)
    return post(token_url, credentials)
def get_WXtoken():
    """Fetch a WeChat enterprise API access token for the configured corp."""
    token_url = ("https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid="
                 + corpID + "&corpsecret=" + secret)
    return get(token_url)['access_token']
def get_department():
    """Return the WeChat department list (id=0 requests all departments)."""
    list_url = ("https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token="
                + get_WXtoken() + "&id=0")
    return get(list_url)
def get_attr_list_from_list(attr, list):
    """Return a list of item[attr] for every item in *list*.

    The previous body returned ``map(...)``, which is a one-shot lazy
    iterator on Python 3 (it was a list only on Python 2); the
    comprehension returns an eager list on both versions.

    NOTE(review): the parameter name ``list`` shadows the builtin; it is
    kept unchanged so keyword callers are not broken.
    """
    return [item[attr] for item in list]
def get_all_users():
    """Fetch every active WeChat user under department 1 (children included)."""
    users_url = ("https://qyapi.weixin.qq.com/cgi-bin/user/list?access_token="
                 + get_WXtoken()
                 + "&department_id=1&fetch_child=1&status=0")
    return get(users_url)['userlist']
# Look the id up in commonUser; report when it is missing.
def get_commonUserId_by_wxUser(mobile):
    """Return the commonUser id for *mobile*, or None if no row matches.

    Fix: the mobile number is now bound as a query parameter instead of
    being %-interpolated into the SQL string, which was an SQL-injection
    risk (and produced invalid SQL for non-numeric values).
    """
    cursor = conn.cursor()
    cursor.execute("select id from commonUser where mobile = %s", (mobile,))
    try:
        return int(cursor.fetchone()[0])
    except TypeError as error:
        # fetchone() returned None: the mobile is not in the database.
        print(error)
        return None
# def insert_in_commonUser(user):
# bytes[] uuid =
def sendMessage(msg):
    """Send an Easemob message payload through the /messages endpoint."""
    messages_url = REQU_URL + "/messages"
    return post(messages_url, msg)
def insert_user(user):
    """Insert a WeChat *user* into wxUser unless its mobile already exists.

    Fixes: both the existence check and the INSERT now use parameterized
    queries instead of %-string interpolation (SQL-injection risk, and the
    unquoted SELECT broke on non-numeric mobiles); the dead commented-out
    INSERT template was removed.

    NOTE(review): as in the original, a missing commonUser row makes
    get_commonUserId_by_wxUser() return None and int(None) raise — confirm
    whether unknown mobiles should be skipped instead.
    """
    cursor = conn.cursor()
    cursor.execute("select * from wxUser where mobile = %s ", (user['mobile'],))
    if cursor.fetchone() is not None:
        return
    cursor = conn.cursor()
    common_id = int(get_commonUserId_by_wxUser(user['mobile']))
    row = (user['userid'], user['name'], user['department'], user['position'],
           user['mobile'], user['gender'], user['weixinid'], user['avatar'],
           '{}', common_id)
    print(row)
    sql = ("INSERT INTO wxUser(userid,name,department,position,mobile,gender,"
           "weixinid,avatar_mediaid,extattr,commonid)"
           "VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    print(sql)
    cursor.execute(sql, row)
    conn.commit()
def get_token():
    """Return an Easemob app token.

    Kept for backward compatibility: the body duplicated get_HXtoken()
    verbatim, so it now simply delegates to it.
    """
    return get_HXtoken()
def get_value_list_from_dict(dic):
    """Return the values of *dic* as a list.

    Replaces a manual key-iteration/append loop (which also shadowed the
    builtin ``list``) with the equivalent ``list(dic.values())``; the
    iteration order of keys() and values() is identical, so the result
    matches the original.
    """
    return list(dic.values())
if __name__ == '__main__':
    # Acquire an Easemob app token and attach it to every later request.
    JSON_HEADER['Authorization'] = 'Bearer ' + get_token()["access_token"]
    # Send a single test message to one user.  (Commented-out experiments
    # that exercised insert_user/get_all_users/get_department were removed.)
    msg = {"msg": {"msg": "[):][):]", "type": "txt"},
           "target_type": "users",
           "from": "a495dfab82f9dd75e01f6a02f036c67c",
           "target": ["3cd65361e7330531db437f6eb67b46f5"]}
    # Use the print() call form for consistency with the rest of the file
    # (the original `print sendMessage(msg)` is Python-2-only syntax).
    print(sendMessage(msg))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.