text stringlengths 8 6.05M |
|---|
# 3. Use `functools.wraps` to preserve the function attributes
# including the docstring that you wrote.
# 1. Write a function decorator that can be used to measure
# the run time of a functions. Use `timeit.default_timer` to get time stamps.
import functools
from timeit import default_timer as timer
def runtime1(func):
    """Decorator: print the wall-clock run time of every call to *func*.

    Time stamps come from ``timeit.default_timer``; ``functools.wraps``
    preserves the wrapped function's name and docstring.
    """
    @functools.wraps(func)
    def _runtime(*args, **kwargs):
        started = timer()
        result = func(*args, **kwargs)
        finished = timer()
        print("Run Time is {}".format(finished - started))
        return result
    return _runtime
# Demo: every call to `add` prints its run time; the return value is unchanged.
@runtime1
def add(a, b):
    return a + b
print(add(322, 3))
# 2. Make the decorator parameterized. It should take an integer
# that specifies how often the function has to be run.
# Make sure you divide the resulting run time by this number.
def check(*argtypes):
    '''Function argument type checker.

    ``@check(int, str)`` makes the decorated function raise TypeError unless
    it is called with exactly one int followed by one str (positional only).
    '''
    def _check(func):
        '''Takes the function.'''
        @functools.wraps(func)
        def __check(*args):
            '''Takes the arguments'''
            if len(args) != len(argtypes):
                raise TypeError('Expected %d but got %d arguments' % (
                    len(argtypes), len(args)))
            for value, expected in zip(args, argtypes):
                if not isinstance(value, expected):
                    raise TypeError('Expected %s but got %s' % (
                        argtypes, tuple(type(arg) for arg in args)))
            return func(*args)
        return __check
    return _check
@check(int)
def runtime2(times):
    """Parameterized timing decorator: run the wrapped function `times` times,
    print the average run time, and return the list of all results."""
    def _runtime(func):
        @functools.wraps(func)
        def __runtime(*args, **kwargs):
            x = []  # one result per repetition
            start = timer()
            for _ in range(times):
                x.append(func(*args, **kwargs))
            end = timer()
            # total elapsed time divided by the repetition count
            print('Average runtime is {}'.format((end - start) / times))
            return x
        return __runtime
    return _runtime
# Demo: power(322, 3) runs 5 times; the call returns a list of 5 results.
@runtime2(5)
def power(a, b):
    return pow(a, b)
print(power(322, 3))
# 4. Make the time measurement optional by using a global switch in the
# module that can be set to True or False to turn time measurement on or off.
# Global switch: set True to enable the average-runtime printout.
TIME_LOG = False
@check(int)
def runtime3(times):
    """Timing decorator like runtime2, but printing is gated by the
    module-level TIME_LOG switch (checked at call time).

    Fix: the original duplicated the measurement loop in both branches of
    the TIME_LOG test; the loop now exists once and only the print is gated.
    """
    def _runtime3(func):
        @functools.wraps(func)
        def __runtime3(*args, **kwargs):
            results = []
            start = timer()
            for _ in range(times):
                results.append(func(*args, **kwargs))
            end = timer()
            if TIME_LOG:
                print('Average runtime is {}'.format((end - start) / times))
            return results
        return __runtime3
    return _runtime3
# Demo: the first call is silent; flipping TIME_LOG at module level enables
# the average-runtime printout for subsequent calls.
@runtime3(5)
def power(a, b):
    return pow(a, b)
print(power(4, 12))
TIME_LOG = True
print(power(4, 12))
# 5. Write another decorator that can be used with a class and registers
# every class that it decorates in a dictionary.
# Maps each decorated class to the list of attribute names that were wrapped.
registry = {}
def decor_all_methods(decorator):
    """Class decorator factory: wrap every callable attribute of the class
    (except __init__) with *decorator* and record the wrapped attribute
    names in the module-level `registry`, keyed by the class object.

    Fix: removed a dead, commented-out duplicate implementation that was
    kept as a stray triple-quoted string inside the function body.
    """
    def decorate(cls):
        callable_attributes = {name: attr for name, attr in cls.__dict__.items()
                               if callable(attr) and name != '__init__'}
        for name, func in callable_attributes.items():
            setattr(cls, name, decorator(func))
            registry.setdefault(cls, []).append(name)
        return cls
    return decorate
# Demo: every Math method except __init__ is wrapped by runtime3(2), so each
# call runs twice and returns a list of both results.
@decor_all_methods(runtime3(2))
class Math():
    '''All methods of Math (except __init__) are decorated with runtime3,
    which runs each method 2 times and returns the list of results.'''
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def power(self):
        return pow(self.a, self.b)
m = Math(34, 12)
print(m.power())
print(registry)
|
# Generated by Django 3.1.1 on 2020-09-15 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the `gender` lookup model and relaxes
    # `face.emp_id` to an unbounded TextField.
    dependencies = [
        ('attend', '0005_face_emp_id'),
    ]
    operations = [
        migrations.CreateModel(
            name='gender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
            ],
        ),
        migrations.AlterField(
            model_name='face',
            name='emp_id',
            field=models.TextField(),
        ),
    ]
|
# Read a sentence and report how many times the letter "A" appears plus the
# 1-based positions of its first and last occurrence.
frase = str(input('Digite uma fase: ')).upper().strip()
ocorrencias = frase.count('A')
print('A letra "A" aparece {} vezes na frase'.format(ocorrencias))
# Fix: str.find/rfind return -1 when the letter is absent, which made the
# original print "posição 0"; only report positions when 'A' actually occurs.
if ocorrencias > 0:
    print('A primeira letra "A" aparece na posição {}'.format(frase.find('A')+1))
    print('A última letra "A" apareceu na posição {}'.format(frase.rfind('A')+1))
# Generated by Django 2.1.5 on 2019-01-29 21:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes `articles.image` optional and sets its upload path.
    dependencies = [
        ('telecomNews', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='articles',
            name='image',
            field=models.ImageField(blank=True, upload_to='telecomNews/static/telecomNews/images'),
        ),
    ]
|
"""
- numpy is not used here because it's not necessary and
it's simpler using list for this question
"""
import matplotlib.pyplot as plt
class population_growth(object):
    """Discrete-time population model.

    Each simulated year the population grows by ``alpha_fun(p)``, where the
    default growth function is ``(1 + alpha) * p``.
    """
    alpha = 0.1        # default growth rate used by the default alpha_fun
    alpha_fun = None   # optional custom growth function of the current size
    def __init__(self, p0, **kwargs):
        """Start from population *p0*; kwargs may override class attributes
        (unknown names only produce a warning)."""
        self.p = [p0]
        for name, value in kwargs.items():
            if hasattr(self, name):
                setattr(self, name, value)
            else:
                print("Warning: parameter name {} not found!".format(name))
        if self.alpha_fun is None:
            self.alpha_fun = lambda x: (1 + self.alpha) * x
    def grow(self, num_year):
        """Advance the simulation by *num_year* years, appending to self.p."""
        for _ in range(num_year):
            current = self.p[-1]
            self.p.append(current + self.alpha_fun(current))
    def plot(self, label):
        """Plot the population history on the current matplotlib axes."""
        years = range(len(self.p))
        if label is None:
            plt.plot(years, self.p)
        else:
            plt.plot(years, self.p, label=label)
        plt.xlabel("years $n$")
        plt.ylabel("population size $p$")
    def show(self, label=None):
        """Plot and display the figure; a legend is added only when labelled."""
        self.plot(label)
        if label is not None:
            plt.legend()
        plt.show()
    def savefig(self, pathname, label=None):
        """Plot and write the figure to *pathname*."""
        self.plot(label)
        if label is not None:
            plt.legend()
        plt.savefig(pathname)
|
# Import modules
from flask import Flask  # fix: Flask was used below but never imported (NameError)
from flask_sqlalchemy import SQLAlchemy
import pymysql
# Create the Flask application object
app = Flask(__name__)
# SQLALCHEMY_DATABASE_URI tells SQLAlchemy which database to connect to
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://username:password@hostname/database"
# Intended to auto-commit DB changes after each request.
# NOTE(review): this key is misspelled and has no effect; the correct
# SQLALCHEMY_COMMIT_ON_TEARDOWN is set two lines below.
app.config['SQLALCHEMY_COMMIT_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# The SQLAlchemy instance used to access the database from here on
db = SQLAlchemy(app)
import numpy as np
import cv2
CHOOSEM_CONTOURS_NUM = 400
CARDS_ALPHA = 0.2
def preprocess_threshhold_background_noise(img, thresh_size):
    """Binarize *img* while suppressing background noise.

    Pipeline on the HSV saturation channel: Laplacian edge response,
    elliptical dilation, box blur, then adaptive mean thresholding with
    window size *thresh_size* (must be an odd integer > 1).
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    _, saturation, _ = cv2.split(hsv)
    # Laplacian is written into the saturation buffer (dst passed explicitly)
    lap = cv2.Laplacian(
        saturation, cv2.CV_8U, saturation, ksize=3)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grayscale_img = cv2.dilate(lap, kernel, iterations=1)
    grayscale_img = cv2.blur(grayscale_img, (5, 5))
    grayscale_img = cv2.adaptiveThreshold(grayscale_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                          cv2.THRESH_BINARY, thresh_size, 2)
    return grayscale_img
def preprocess_threshhold_hsv1(img):
    """Binarize the HSV saturation channel: adaptive mean threshold
    (15px window) followed by a morphological close to fill small holes."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    _, saturation, _ = cv2.split(hsv)
    thresh = cv2.adaptiveThreshold(saturation, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 15, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    return thresh
def preprocess_threshhold_hsv2(img):
    """Binarize the HSV saturation channel: blur first, then adaptive mean
    threshold (19px window) and a morphological close with a square kernel."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    _, saturation, _ = cv2.split(hsv)
    grayscale_img = cv2.blur(saturation, (5, 5))
    thresh = cv2.adaptiveThreshold(grayscale_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 19, 2)
    kernel = np.ones((3, 3))
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    return thresh
def preprocess_threshhold(img):
    """Binarize the plain grayscale image: blur, adaptive mean threshold
    (19px window), then a morphological open to drop speckle noise."""
    grayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    grayscale_img = cv2.blur(grayscale_img, (5, 5))
    thresh = cv2.adaptiveThreshold(grayscale_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 19, 2)
    kernel = np.ones((3, 3))
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    return thresh
def find_contour_wrapper(img):
    """Detect card contours in *img* using all five thresholding strategies,
    then drop near-duplicate contours found by more than one strategy.

    Returns the filtered list of (approx_quad, contour) pairs.
    """
    contours = []
    height, width = img.shape[:2]
    imshape = (height, width)
    contours.append(find_contours(
        imshape, preprocess_threshhold_background_noise(img, 15)))
    contours.append(find_contours(
        imshape, preprocess_threshhold_background_noise(img, 11)))
    contours.append(find_contours(imshape, preprocess_threshhold_hsv1(img)))
    contours.append(find_contours(imshape, preprocess_threshhold_hsv2(img)))
    contours.append(find_contours(imshape, preprocess_threshhold(img)))
    filtered_contours = remove_duplicate_contours(contours, imshape)
    return filtered_contours
def remove_duplicate_contours(cnts, imshape):
    """Keep only contours that cover new image area.

    *cnts* is a list of contour groups (one group per thresholding method);
    each element is a (approx_quad, contour) pair. Contours are rasterized
    onto a shared map; a contour that changes the filled area by less than
    1% of the image size is considered a duplicate of an earlier one.
    """
    imsize = imshape[0] * imshape[1]
    # flatten the per-method groups into one list
    contours_flatten = []
    for contours_group in cnts:
        for contour in contours_group:
            contours_flatten.append(contour)
    filtered_contours = []
    contours_map = np.ones(imshape)
    for c in contours_flatten:
        _, contour = c
        copied_map = np.copy(contours_map)
        cv2.drawContours(copied_map, [contour], -1,
                         (0, 255, 0), thickness=cv2.FILLED)
        # if difference smaller than 1% of image size => skip contour
        # it's very probably that we get contour for the same card, but different method returned slightly different contour
        if abs(np.count_nonzero(copied_map)-np.count_nonzero(contours_map)) < 0.01 * imsize:
            continue
        contours_map = copied_map
        filtered_contours.append(c)
    return filtered_contours
def find_contours(imshape, thresholding_function):
    """Extract card-like quadrilateral contours from a thresholded image.

    *thresholding_function* is the already-thresholded image (despite the
    name). Returns a list of (approx_quad, contour) pairs for 4-vertex
    contours that are not nested inside an already accepted one.
    """
    height, width = imshape
    thresh = thresholding_function
    # detect contours
    contours_detected = cv2.Canny(thresh, 50, 250)
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4
    # returns (contours, hierarchy) — confirm the installed version.
    _, contours, _ = cv2.findContours(
        contours_detected, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # sort contours by area
    choosen_contours = sorted(
        contours, key=lambda contour: cv2.contourArea(contour), reverse=True)
    # set smallest contour's area as 1/4 of biggest one's area
    # NOTE(review): raises IndexError when no contours were found — confirm
    # callers always supply images with at least one contour.
    smallest_area = cv2.contourArea(choosen_contours[0])/4
    choosen_contours = list(
        filter(lambda c: cv2.contourArea(c) > smallest_area, choosen_contours))
    contours_map = np.ones((height, width))
    card_contours = []
    for c in choosen_contours:
        # polygon approximation tolerance: 1.5% of the perimeter
        perimeter = 0.015*cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, perimeter, True)
        # skip when it's not an rectangle
        if len(approx) != 4:
            continue
        # check if contour isn't nested in another one
        copied_map = np.copy(contours_map)
        cv2.drawContours(copied_map, [c], -1,
                         (0, 255, 0), thickness=cv2.FILLED)
        # if so, just skip it
        if np.array_equal(copied_map, contours_map):
            continue
        # if no, add to "map" of already added contours
        contours_map = copied_map
        card_contours.append((approx, c))
    return card_contours
def highlight_detected(img, cards, image):
    """Draw each detected card on *img* and write the result to ./processed/.

    *cards* is an iterable of (contour, match_label) pairs; *image* is the
    output file name. Each card gets a translucent fill, an outline, and its
    label centered on the contour's centroid.
    """
    height, width = img.shape[:2]
    # scale the font with the image, but never below 0.5
    fontsize = max(0.5, int(min(width, height)*0.00075))
    for (contour, match) in cards:
        # create transparent background
        overlay = img.copy()
        cv2.drawContours(overlay, [contour], -1, (0, 255, 0), -1)
        cv2.addWeighted(overlay, CARDS_ALPHA, img, 1 - CARDS_ALPHA, 0, img)
        # add notmal contour
        cv2.drawContours(img, [contour], -1, (0, 255, 0), 2)
        # calculate center if contour => get position for text
        # NOTE(review): divides by M["m00"], which is zero for degenerate
        # contours — confirm inputs are non-degenerate quads.
        M = cv2.moments(contour)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv2.putText(img, match, (cX-(len(match)*int(fontsize*10)), cY),
                    cv2.FONT_HERSHEY_SIMPLEX, fontsize, (255, 255, 255), int(fontsize*5))
    # cv2.imshow(image, img)
    # cv2.waitKey(0)
    cv2.imwrite('./processed/'+image, img)
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from kouzi_crawler.items import KouziCrawlerItem
class LelejiaSpider(CrawlSpider):
    """Crawls lelejia.top category pages (links matching '?fl=N') and yields
    one KouziCrawlerItem per app link found on each matched page."""
    name = 'lelejia'
    allowed_domains = ['lelejia.top']
    start_urls = ['http://lelejia.top/']
    # Follow category-listing links and hand each page to parse_item.
    rules = (
        Rule(LinkExtractor(allow=r'\?fl=\d+'), callback='parse_item', follow=True),
    )
    def parse_item(self, response):
        """Extract app name/link from each anchor in the page's "nr" section."""
        app_list = response.xpath('//section[@class="nr"]//a')
        kouzi_name = '乐乐家'
        kouzi_link = response.url
        kouzi_type = 'web'
        for item in app_list:
            app_item = KouziCrawlerItem()
            app_item['app_name'] = item.xpath('.//div[@class="list_a3"]//p[@class="p1"]/text()').extract_first().strip()
            app_item['app_link'] = item.xpath('./@href').extract_first()
            app_item['kouzi_type'] = kouzi_type
            app_item['kouzi_name'] = kouzi_name
            app_item['kouzi_link'] = kouzi_link
            yield app_item
|
# -*_ coding: utf-8 -*-
# Demo script: os/os.path basics — cwd, path splitting, file metadata,
# realpath resolution, and a simple list comprehension.
import os
import glob
print(os.getcwd())
print(os.path)
pathname = '/Users/pilgrim/diveintopython3/examples/humansize.py'
(dirname,filename) = os.path.split(pathname)
print(dirname)
print(filename)
# NOTE(review): assumes 'test2.py' exists in the working directory;
# os.stat raises FileNotFoundError otherwise.
metadata = os.stat('test2.py')
print(metadata.st_mtime)
import time
print(time.localtime(metadata.st_mtime))
print(metadata.st_size)
print("test2.py real path :",os.path.realpath('test2.py'))
a_list = [1,2,3,4,5]
# doubles every element without mutating a_list
b_list = [elem * 2 for elem in a_list]
print(a_list)
print(b_list)
|
"""A captioner implementation that batch-processes new images.
In the future, this could be switched out with a streaming implementation
that doesn't require the image recognition framework to boot up each time
for potentially sizable speed increases."""
import glob
import os
import re
import requests
import shutil
import subprocess
from urlparse import urlparse
from tii.reddit import RedditBot
class BatchRecognizer(object):
    """Python 2: runs the neuraltalk2 captioner over a batch of images.

    Images referenced by (submission, url) pairs are downloaded into
    *root_dir*, captioned in one `th eval.lua` subprocess invocation, and
    yielded back as (submission, caption) pairs.
    """
    # Parses eval.lua output: group 1 is the copied source file path,
    # group 2 is the generated caption on the following "image N:" line.
    REGEX = re.compile(r'^cp\s+"([^"]+)"\s+(?:(?:[^\/\r\n]+)(?:\/[^\/\r\n]+)*)(?:\r?\n)+image \d+:\s+?(.+)$', re.M)
    def __init__(self, root_dir):
        self._root = root_dir
        # find the model: exactly one Torch checkpoint (*.t7) expected in env/
        results = glob.glob('./env/*.t7')
        if len(results) < 1:
            raise Exception('didn\'t find model in env/')
        elif len(results) > 1:
            raise Exception('found more than one model in env/')
        self._model = os.path.abspath(results[0])
        print 'using model %s' % self._model
    def recognize(self, urls):
        """Download every URL, caption all images in one subprocess run, and
        return a generator of (submission, caption) pairs."""
        # set up a session (empty object): maps local file path -> submission
        session = {}
        # clean directory if necessary
        if os.path.exists(self._root):
            shutil.rmtree(self._root)
        os.makedirs(self._root)
        # create a reverse map of filenames urls
        has_some = False
        for submission, url in urls:
            has_some = True
            urll = urlparse(url)
            _, ext = os.path.splitext(urll.path)
            filepath = os.path.join(self._root, submission.name) + ext
            session[filepath] = submission
            headers = {'User-Agent': RedditBot.USER_AGENT}
            # stream the download straight to disk
            response = requests.get(url=url, headers=headers, stream=True)
            with file(filepath, 'wb') as out:
                response.raw.decode_content = True
                shutil.copyfileobj(response.raw, out)
        if not has_some:
            return []
        process = subprocess.Popen(['th', './eval.lua', '-model', self._model, '-image_folder', self._root, '-num_images', '-1', '-gpuid', '-1'], stdout=subprocess.PIPE, cwd='./ext/neuraltalk2')
        process.wait()
        if process.returncode != 0:
            # NOTE(review): communicate() drains the pipe and is intended to
            # be called once; it is called three times here (twice below and
            # once after the if) — confirm and capture the output a single
            # time into a local instead.
            print process.communicate()[0]
            print process.communicate()[1]
            raise Exception('problem during recognition')
        output = process.communicate()[0]
        # run recognizer
        matcher = BatchRecognizer.REGEX.finditer(output)
        # return parser generator
        # NOTE(review): `iter` shadows the builtin within this scope.
        def iter():
            for match in matcher:
                submission = session[match.group(1)]
                caption = match.group(2).strip()
                yield (submission, caption)
        return iter()
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
maliste = [1, 2, 3, 4, 5]
i = 0
# NOTE: extend() takes an iterable, so the string "END" is appended as the
# three characters 'E', 'N', 'D' (append("END") would add a single element).
maliste.extend("END")
maliste.insert(2, "2.5")
# Same traversal three ways: index-based while, plain for, and enumerate.
while i < len(maliste):
    print(maliste[i])
    i += 1
for elt in maliste:
    print(elt)
i = 0
for i, elt in enumerate(maliste):
    print("A l'indice {} se trouve {}".format(i, elt))
|
# tarot dot py is a program by socrates mcbadger (beinnisbog.tumblr.com)
# it is liscensed under the apache license 2.0
import random
major_arcana = ['0: the fool', 'I: the magician', 'II: the high preistess',
'III: the empress', 'IV: the emperor', 'V: the hierophant',
'VI: the lovers', 'VII: the chariot', 'VIII: justice',
'IX: the hermit', 'X: wheel of fortune', 'XI: strength',
'XII: the hanged man', 'XIII: death', 'XIV: temperance',
'XV: the devil', 'XVI: the tower', 'XVII: the star',
'XVIII: the moon', 'XIX: the sun', 'XX: judgment',
'XXI: the world']
wands = ['ace of wands', 'two of wands', 'three of wands', 'four of wands',
'five of wands', 'six of wands', 'seven of wands', 'eight of wands',
'nine of wands', 'ten of wands', 'page of wands', 'knight of wands',
'queen of wands', 'king of wands']
coins = ['ace of coins', 'two of coins', 'three of coins', 'four of coins',
'five of coins', 'six of coins', 'seven of coins', 'eight of coins',
'nine of coins', 'ten of coins', 'page of coins', 'knight of coins',
'queen of coins', 'king of coins']
cups = ['ace of cups', 'two of cups', 'three of cups', 'four of cups',
'five of cups', 'six of cups', 'seven of cups', 'eight of cups',
'nine of cups', 'ten of cups', 'page of cups', 'knight of cups',
'queen of cups', 'king of cups']
swords = ['ace of swords', 'two of swords', 'three of swords', 'four of swords',
'five of swords', 'six of swords', 'seven of swords', 'eight of swords',
'nine of swords', 'ten of swords', 'page of swords', 'knight of swords',
'queen of swords', 'king of swords']
deck = major_arcana + wands + cups + swords + coins
print ("Welcome to tarot dot py")
def shuffleDeck():
    """Shuffle the module-level deck in place, repeating for as long as the
    user keeps answering "y"."""
    print ("Shuffle the deck?")
    print ("Please enter y/n")
    shuf = input("==> ")
    if shuf == "y":
        while shuf == "y":
            random.shuffle(deck)
            print ("The deck has been shuffled. Do it again?")
            print ("Please enter y/n")
            shuf = input("==> ")
# Offer an initial shuffle as soon as the program starts.
shuffleDeck()
def menuList():
    """Print the main menu and return the user's raw choice string ("1"-"4").

    NOTE(review): option 4 (shuffle again) has no handler in the dispatch
    at the bottom of the script — confirm whether it should call shuffleDeck.
    """
    print ("Do you want to...")
    print ("  1: ask a specific question?")
    print ("  2: do a spread?")
    print ("  3: just draw cards?")
    print ("  4: shuffle the deck again?")
    print ("Please enter appropriate number.")
    opt = input("==> ")
    return opt
choice = menuList()
def singleQuestion():
    """Ask the user for a question, then let them draw cards one at a time.

    Each draw takes the top card of the deck with a random orientation
    (upright/reversed) and removes it so it cannot be drawn again.
    """
    print ("Please enter your question.")
    q = input("==> ")
    print ("...")
    print ("...")
    print ("...")
    print ('"' + q + '"')
    print ("Hold this question in your mind as you draw the cards.")
    print ("...")
    print ("Draw a card? y/n")
    draw = input("==> ")
    if draw == "y":
        while draw == "y":
            side = random.randint(1, 2)
            # Fix: the original indexed/popped deck[1], so the card at
            # index 0 could never be drawn. Draw the top card instead.
            if side == 1:
                print ("you drew " + deck[0] + " upright")
            elif side == 2:
                print ("you drew " + deck[0] + " reversed")
            deck.pop(0)
            print ("Draw another card? y/n")
            draw = input("==> ")
def spreadFun():
    """Run a user-defined spread: ask for a name and per-card positions,
    then deal one card (upright or reversed) into each position."""
    name = input("Enter a name for the spread: ")
    cards = int(input("How many cards are used in this spread? : "))
    poslist = []
    for i in range(0, int(cards)):
        print ("Describe the position or function of card " + str(i + 1))
        pos = input("==> ")
        poslist.append(pos)
    print ("...")
    print ("...")
    print (name)
    for i in range(0, cards):
        side = random.randint(1, 2)
        # Fixes: draw from the top of the deck (index 0, not 1 — the
        # original made the first card unreachable) and add the missing
        # spaces around the orientation word in the output.
        sCard = (poslist[i] + ": " + deck[0] + " upright") if side == 1 else (poslist[i] + ": " + deck[0] + " reversed")
        print (sCard)
        deck.pop(0)
# Dispatch on the menu choice gathered above.
if choice == "1":
    singleQuestion()
    print ("Ask another question?")
    q = input("==> ")
    if q == "y":
        singleQuestion()
    print ("Thank you for using tarot dot py")
elif choice == "2":
    spreadFun()
elif choice == "3":
    print ("Draw a card? y/n")
    draw = input("==> ")
    if draw == "y":
        while draw == "y":
            side = random.randint(1, 2)
            # Fix: draw the top card (index 0); the original used deck[1],
            # leaving the first card permanently undrawable.
            if side == 1:
                print ("you drew " + deck[0] + " upright")
            elif side == 2:
                print ("you drew " + deck[0] + " reversed")
            deck.pop(0)
            print ("Draw another card? y/n")
            draw = input("==> ")
    print ("Thank you for using tarot dot py")
print ("You may now close the program")
|
# Generated by Django 3.0.3 on 2020-03-31 13:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames `url.shortened_url` to `shortcode`.
    dependencies = [
        ('mainapp', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='url',
            old_name='shortened_url',
            new_name='shortcode',
        ),
    ]
|
# helper methods for the users app
def users_session_data(request):
    """Build the session payload for *request*: the authenticated user's id
    and username plus the current session key, nested under 'user'."""
    user = request.user
    return {
        'user': {
            'id': user.id,
            'username': user.username,
            'sessionKey': request.session.session_key,
        }
    }
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Sequence
from pants.option.option_types import StrListOption
from pants.option.subsystem import Subsystem
from pants.util.strutil import safe_shlex_join, safe_shlex_split
class PythonNativeCodeSubsystem(Subsystem):
    """Subsystem exposing CPPFLAGS/LDFLAGS overrides for native-code builds."""
    options_scope = "python-native-code"
    help = "Options for building native code using Python, e.g. when resolving distributions."
    class EnvironmentAware(Subsystem.EnvironmentAware):
        # Environment variables consulted when an option value contains the
        # corresponding `<VAR>` placeholder.
        env_vars_used_by_options = ("CPPFLAGS", "LDFLAGS")
        # TODO(#7735): move the --cpp-flags and --ld-flags to a general subprocess support subsystem.
        _cpp_flags = StrListOption(
            default=["<CPPFLAGS>"],
            help=(
                "Override the `CPPFLAGS` environment variable for any forked subprocesses. "
                "Use the value `['<CPPFLAGS>']` to inherit the value of the `CPPFLAGS` "
                "environment variable from your runtime environment target."
            ),
            advanced=True,
        )
        _ld_flags = StrListOption(
            default=["<LDFLAGS>"],
            help=(
                "Override the `LDFLAGS` environment variable for any forked subprocesses. "
                "Use the value `['<LDFLAGS>']` to inherit the value of the `LDFLAGS` environment "
                "variable from your runtime environment target."
            ),
            advanced=True,
        )
        @property
        def subprocess_env_vars(self) -> dict[str, str]:
            """Env vars to inject into forked subprocesses, with `<VAR>`
            placeholders expanded from the runtime environment."""
            return {
                "CPPFLAGS": safe_shlex_join(self._iter_values("CPPFLAGS", self._cpp_flags)),
                "LDFLAGS": safe_shlex_join(self._iter_values("LDFLAGS", self._ld_flags)),
            }
        def _iter_values(self, env_var: str, values: Sequence[str]):
            # Expand the literal `<ENV_VAR>` placeholder into the shlex-split
            # value from the environment; pass all other values through as-is.
            for value in values:
                if value == f"<{env_var}>":
                    yield from safe_shlex_split(self._options_env.get(env_var, ""))
                else:
                    yield value
|
from datetime import timedelta, datetime
from typing import Optional
from jose import jwt
# Generated using: openssl rand -hex 32
# NOTE(review): the signing secret is committed to source control — load it
# from configuration/environment in production instead.
SECRET_KEY = 'b0b8c74b7ef83e39fc9395050f68583fb8b6c643fa082d475518fe436ac6ddb5'
ALGORITHM = 'HS256'
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 7  # 1 Week
class JwtService:
    """Service to handle JWT generation and decoding
    """
    def create_access_token(self, data: dict, expires_delta: Optional[timedelta] = None):
        """Return a signed JWT carrying *data* plus an 'exp' claim.

        Expiry is now + *expires_delta*, defaulting to
        ACCESS_TOKEN_EXPIRE_MINUTES when no delta is given.
        """
        to_encode = data.copy()
        # NOTE(review): datetime.utcnow() produces a naive timestamp and is
        # deprecated in Python 3.12 — confirm whether tz-aware is preferred.
        expire = datetime.utcnow() + (expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))
        to_encode.update({'exp': expire})
        encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
        return encoded_jwt
    def decode_token(self, token):
        """Decode and verify *token*; verification errors propagate from jose."""
        return jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
|
from django.db import models
import datetime
# Create your models here.
class Notice(models.Model):
    # A dated notice with optional image and PDF attachments.
    notice = models.CharField(max_length=1000)
    images = models.FileField(upload_to='notice/images/', default='', blank=True, null=True)
    pdf = models.FileField(upload_to='notice/files/',default='', blank=True, null=True)
    date = models.DateField(default=datetime.date.today)
    def __str__(self):
        return self.notice
class Feedback(models.Model):
    # Free-text feedback from a named user, dated today by default.
    name = models.CharField(max_length=25)
    feedback = models.CharField(max_length=1000)
    date = models.DateField(default=datetime.date.today)
    def __str__(self):
        return self.feedback
class Student(models.Model):
    # Student profile; identified by email in __str__.
    name = models.CharField(max_length=25)
    father_name = models.CharField(max_length=25)
    mother_name = models.CharField(max_length=25)
    email = models.CharField(max_length=50)
    gender = models.CharField(max_length=6)
    phone_number = models.IntegerField()
    address = models.CharField(max_length=50)
    pincode = models.IntegerField()
    def __str__(self):
        return self.email
class Account(models.Model):
    # Login credentials keyed by email.
    # NOTE(review): the password is stored in plain text — confirm whether
    # Django's auth/hashing should be used instead.
    email = models.CharField(max_length=50)
    password = models.CharField(max_length=15)
    def __str__(self):
        return self.email
class Admin(models.Model):
    # Administrator credentials keyed by name (plain-text password; see Account).
    name = models.CharField(max_length=15)
    password = models.CharField(max_length=15)
    def __str__(self):
        return self.name
from django.contrib import admin
from principal.models import Actor, Pelicula
# Register your models here.
@admin.register(Actor)
class ActorAdmin(admin.ModelAdmin):
    # Default admin interface for Actor; no customization needed.
    pass
@admin.register(Pelicula)
class PeliculaAdmin(admin.ModelAdmin):
    # Default admin interface for Pelicula; no customization needed.
    pass
|
import numpy as np
from PIL import Image
import torch
import os
import torch.utils.data as data
from glob import glob
from .common import BaseDataset
class davis2017(BaseDataset):
    """DAVIS 2017 video-object-segmentation dataset loader.

    Frames are read from `<base_dir>/JPEGImages/480p` and per-object masks
    from `<base_dir>/Annotations/480p`. Each sample bundles the current
    frame, its mask, and the previous frame's mask for the same objects.
    """
    def __init__(self, base_dir, split, transforms=None, to_tensor=None):
        super(davis2017, self).__init__(base_dir)
        self.split = split
        # annotations.txt lists one "<class>;<mask path>" line per frame
        self.annFile = f'{base_dir}/annotations.txt'
        self.transforms = transforms
        self.to_tensor = to_tensor
        self.base_dir = base_dir
    def __len__(self):
        """Number of lines in the annotation file.

        NOTE(review): mode 'rU' is deprecated (removed in Python 3.11) and
        the file handle is never closed — confirm before upgrading Python.
        """
        count = 0
        for _,_ in enumerate(open(self.annFile,'rU')):
            count = count + 1
        return count
    def catId(self, item):
        """Return the 1-based class id for *item* ('<class>/<frame>.jpg'),
        where the id is the class's position in the sorted annotation dirs."""
        cat_dir = self.base_dir + '/Annotations/480p'
        class_list = os.listdir(cat_dir)
        class_list = sorted(class_list)
        class_id = item.split('/')[0]
        id_dic = class_list.index(class_id) + 1
        return id_dic
    def load_anno(self,item):
        # Not implemented for this dataset; annotations are loaded in __getitem__.
        pass
    def get_Imgids(self,class_id):
        """Return the sorted '<class>/<frame>' image ids for *class_id*."""
        base_dir = self.base_dir + '/Annotations/480p'
        class_list = sorted(os.listdir(base_dir))
        class_name = class_list[class_id - 1]
        image_dir = self.base_dir + f'/JPEGImages/480p/{class_name}'
        img_ids = []
        for _, _, img_id in os.walk(image_dir):
            for way in img_id:
                way = class_name + '/' + way
                way = way.strip('\n')
                img_ids.append(way)
        return sorted(img_ids)  # img_ids is the name list of a certain class
    def P2msks(self, Img, obj_ids):
        """Split a palette mask into one binary (0/255) 'L' mask per object
        id in *obj_ids*; also return the source image shape."""
        img = np.array(Img)
        Imgs = []
        for idx in obj_ids:
            label = Image.fromarray((img == idx) * 255.0).convert('L')
            Imgs.append(np.array(label))
        return Imgs,img.shape
    def msks2P(self, msks, obj_ids, img_size):
        """Merge per-object binary masks back into a single label map where
        object k's pixels get value k+1 (0 stays background)."""
        if len(msks) != len(obj_ids):
            print('error, len(msks) != len(objs_ids)')
        # obj_ids == [-1] is the sentinel for "no objects in this frame"
        if obj_ids[0] != -1:
            P = np.zeros(msks[0].shape)
        elif obj_ids[0] == -1:
            P = np.zeros(img_size)
        else:
            print("error")
        for idx, msk in enumerate(msks):
            ids = np.nonzero(msk)
            if len(obj_ids) > 0:
                for i in range(len(ids[0])):
                    P[ids[0][i], ids[1][i]] = idx + 1
                    # distinguish different objects by idx+1
        return P
    def __getitem__(self, item):
        """Return the sample dict for frame *item* ('<class>/<frame>.jpg'):
        image, current and previous label maps, object ids, and image size."""
        # Open Image
        image = Image.open(f'{self.base_dir}/JPEGImages/480p/{item}')
        if image.mode == 'L':
            image = image.convert('RGB')
        # Process masks include current mask and pre mask
        mask_item = item.split('.')[0]+'.png'
        cla_id = self.catId(item)
        semantic_mask = Image.open(f'{self.base_dir}/Annotations/480p/{mask_item}')
        # distinct palette values present = object ids in this frame
        obj_ids = list(set(np.asarray(semantic_mask).reshape(-1)))
        obj_ids.sort()
        obj_ids = obj_ids[1:]  # to filter the background mask
        if len(obj_ids) == 0:
            obj_ids = [-1,]
        # round-trip through per-object masks to renumber objects 1..N
        semantic_mask, img_size = self.P2msks(semantic_mask, obj_ids)
        semantic_mask = self.msks2P(semantic_mask, obj_ids, img_size)
        semantic_mask = Image.fromarray(semantic_mask)
        semantic_masks = {cla_id:semantic_mask}
        # previous frame index: same frame for 00000, otherwise frame - 1
        pre_base_dir = item.split('/')[1]
        pre_base_dir = pre_base_dir.split('.')[0]
        pre_num = int(pre_base_dir)
        if pre_num ==0:
            pre_num = pre_num
        else:
            pre_num = pre_num - 1
        pre_num = str(pre_num)
        pre_num = pre_num.rjust(5,'0') + '.png'
        pre_num = item.split('/')[0] + '/' + pre_num
        pre_base_dir = f'{self.base_dir}/Annotations/480p/{pre_num}'
        pre_semantic_mask = Image.open(pre_base_dir)
        pre_semantic_mask, _= self.P2msks(pre_semantic_mask, obj_ids)
        pre_semantic_mask = self.msks2P(pre_semantic_mask, obj_ids, img_size)
        pre_semantic_mask = Image.fromarray(pre_semantic_mask)
        pre_semantic_masks = {cla_id: pre_semantic_mask}
        for i in range(len(obj_ids)):
            obj_ids[i] = int(obj_ids[i])
        sample = {'image':image,
                  'pre_label':pre_semantic_masks,
                  'label':semantic_masks,
                  'obj_ids':obj_ids,  # list of objects id
                  'img_size':img_size,
                  'label_t':semantic_masks}
        # Image-level transformation
        if self.transforms is not None:
            sample = self.transforms(sample)
        # Save the original image (without mean subtraction/normalization)
        image_t = torch.from_numpy(np.array(sample['image']).transpose(2, 0, 1))
        # Transform to tensor
        if self.to_tensor is not None:
            sample = self.to_tensor(sample)
        sample['id'] = item
        sample['image_t'] = image_t
        # Add auxiliary attributes
        for key_prefix in self.aux_attrib:
            # Process the data sample, create new attributes and save them in a dictionary
            aux_attrib_val = self.aux_attrib[key_prefix](sample, **self.aux_attrib_args[key_prefix])
            for key_suffix in aux_attrib_val:
                # one function may create multiple attributes, so we need suffix to distinguish them
                sample[key_prefix + '_' + key_suffix] = aux_attrib_val[key_suffix]
                a = key_prefix + '_' + key_suffix
        return sample
    # to generate Davis annotations
    def generate_ann_list(self, base_dir=None):
        """Write '<class>;DAVIS/Annotations/480p/<class>/<frame>' lines for
        every mask file to annotations.txt.

        NOTE(review): the *base_dir* parameter is ignored — the paths below
        are hard-coded to D:/Dataset/DAVIS; confirm before reuse.
        """
        base_dir = 'D:/Dataset/DAVIS/Annotations/480p'
        class_list = os.listdir(base_dir)
        a = 'DAVIS/Annotations/480p'
        fw = open('D:/Dataset/DAVIS/annotations.txt',mode = 'w')
        for clist in class_list:
            dir = base_dir + '/' + clist
            # a is root;b is folder name; c is file name
            for _, _, c in os.walk(dir):
                for name in c:
                    ann_name = a + '/' + clist + '/' + name
                    ann_name = clist + ';' + ann_name
                    fw.write(ann_name +'\n')
        fw.close()
        return
if __name__ == '__main__':
    base_dir = 'D:/Dataset/DAVIS/Annotations/480p'
    # Fix: generate_ann_list is a method of davis2017, not a module-level
    # function, so the bare call raised NameError. It never reads self, so
    # call it unbound with a dummy instance argument.
    davis2017.generate_ann_list(None, base_dir)
from tkinter import *
from Connect import *
import sqlite3
import LoginPage
from tkinter.messagebox import *
class Register(object):
    """Tkinter registration window: collects name/email/password/phone/address
    and inserts a new row into the `user` table."""
    def __init__(self, master=None):
        self.root = master
        self.root.geometry('400x350')
        self.root.resizable(width=False, height=False)
        # form field variables bound to the Entry widgets below
        self.Name = StringVar()
        self.email = StringVar()
        self.password = StringVar()
        self.telnum = StringVar()
        self.address = StringVar()
        self.CreatePage()
    def CreatePage(self):
        """Lay out the labelled entry grid plus the Save/Return buttons."""
        self.page = Frame(self.root)
        self.page.pack()
        Label(self.page).grid()
        Label(self.page, text = '姓 名:',bg="#a1dbcd").grid(row=1, stick=W, pady=10)
        Entry(self.page, textvariable=self.Name).grid(row=1, column=1, stick=E)
        Label(self.page, text = '邮 件:',bg="#a1dbcd").grid(row=2, stick=W, pady=10)
        Entry(self.page, textvariable=self.email).grid(row=2, column=1, stick=E)
        Label(self.page, text = '密 码:',bg="#a1dbcd").grid(row=3, stick=W, pady=10)
        Entry(self.page, textvariable=self.password).grid(row=3, column=1, stick=E)
        Label(self.page, text = '电 话:',bg="#a1dbcd").grid(row=4, stick=W, pady=10)
        Entry(self.page, textvariable=self.telnum).grid(row=4, column=1, stick=E)
        Label(self.page, text = '地 址:',bg="#a1dbcd").grid(row=5, stick=W, pady=10)
        Entry(self.page, textvariable=self.address).grid(row=5, column=1, stick=E)
        Button(self.page, text = '保 存',command=self.Save,width=20,relief=GROOVE,bg='#1e90ff',activebackground='#00bfff').grid(row=7,column=1,pady=10)
        Button(self.page, text = '返 回',command=self.Return,width=20,relief=GROOVE,activebackground='#a9a9a9').grid(row=8,column=1)
    def Save(self):
        """Validate required fields (name, password, phone) and insert the
        user; on success switch to the login page, otherwise warn."""
        if self.Name.get() =='' or self.password.get()=='' or self.telnum.get()=='':
            showinfo(title='警告', message='姓名,密码,电话为必填项')
        else:
            # parameterized insert — values are bound, not string-formatted
            save_sql = '''INSERT INTO user values (?, ?, ?, ?, ?)'''
            data = [(self.Name.get(), self.email.get(), self.password.get(), self.telnum.get(), self.address.get())]
            conn = get_conn()
            EX = save(conn, save_sql, data)
            if EX:
                self.page.destroy()
                LoginPage.LoginPage(self.root)
            else:
                showinfo(title='警告', message='帐号已存在')
    def Return(self):
        """Discard the form and go back to the login page."""
        self.page.destroy()
        LoginPage.LoginPage(self.root)
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
import factory
from .models import TodoTask
class TodoTaskFactory(factory.django.DjangoModelFactory):
    # Builds TodoTask rows with a Faker-generated 'text' title.
    class Meta:
        model = TodoTask
    title = factory.Faker('text')
class TodoTaskTest(TestCase):
    """Test TodoTask API"""
    def setUp(self):
        # Three persisted tasks for the list endpoint to return.
        self.client = APIClient()
        self.tasks = [
            TodoTaskFactory(),
            TodoTaskFactory(),
            TodoTaskFactory(),
        ]
    def test_get_task_list(self):
        # GET on the list/create endpoint returns 200 and all three tasks.
        resp = self.client.get(reverse('task_list_create'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.json()), 3)
|
"""
Example: /home/mcbs913_2018/shared/homoeologs_assembly/experiments_lambda/diploid/alf/div_05pct/illumina$ ls ref_contig_??.vcf | python3 /home/mcbs913_2018/shared/homoeologs_assembly/homoeolog_assembly/run_compare_vcf_on_contigs.py -A ../homolog5-1.vcf -a ../homolog5-1prime.vcf -B ../homolog5-2.vcf -b ../homolog5-2prime.vcf 2> bounce.log > delta.log
Combine resulting .tsvs into one file for analysis. Example: cat poly*tsv > polymorphism_lengths_alf_05.tsv
"""
import os
import sys
import argparse
def read_contigs():
    """Return an iterator over stdin lines with trailing whitespace stripped."""
    return (line.rstrip() for line in sys.stdin)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-A")
    parser.add_argument("-a")
    parser.add_argument("-B")
    parser.add_argument("-b")
    args = parser.parse_args()
    print(args, file=sys.stderr, flush=True)
    # One pipeline stage per VCF: (vcf path, output tag, relationship).
    # Order matters: each compare_vcf.py stage pipes into the next.
    stages = [
        (args.A, "04", "homologous"),
        (args.a, "02", "homologous"),
        (args.B, "01", "homoeologous"),
        (args.b, "03", "homoeologous"),
    ]
    # enumerate() over the generator replaces the original
    # while True / next() / StopIteration loop and manual counter.
    for i, contig in enumerate(read_contigs()):
        command = "cat {0}".format(contig)
        for vcf, tag, relationship in stages:
            command += " | python3 /home/mcbs913_2018/shared/homoeologs_assembly/homoeolog_assembly/compare_vcf.py"
            command += " {0} -o polymorphism_lengths_haplotype_{1}_{2}.tsv".format(vcf, tag, i)
            command += " --relationship {0} ".format(relationship)
        print(command, file=sys.stderr, flush=True)
        os.system(command)
    exit(0)
#!/usr/bin/python
import argparse
import collections
import logging
import os
import random
import sys
logging.getLogger("scapy").setLevel(1)
from scapy.all import *
parser = argparse.ArgumentParser(description="Test packet generator")
# NOTE(review): parse_args() runs at import time, and --out-dir is accepted
# but never read below -- confirm whether it is still needed.
parser.add_argument('--out-dir', help="Output path", type=str, action='store', default=os.getcwd())
args = parser.parse_args()
# name -> packet / PacketList; insertion order is fixed so packet.mk and the
# generated .pcap files come out in a stable order.
all_pkts = collections.OrderedDict()
def gen_udp_pkts():
    """Populate the module-level all_pkts dict with UDP test packets.

    Keys are test names consumed by packet.mk; values are single packets or
    PacketLists. Payload sizes such as 22/87/215/471/983/1475 pad the full
    frame up to the 64b/128b/... totals named in the keys.
    """

    def eth(src="34:17:eb:96:bf:1b"):
        # Fresh Ether header per packet so every entry is independent.
        return Ether(src=src, dst="34:17:eb:96:bf:1c")

    def ipv4(src="10.0.0.1", dst="10.0.0.2"):
        return IP(src=src, dst=dst)

    def udp(dport=20000):
        return UDP(sport=6000, dport=dport)

    def payload(nbytes):
        # bytes(...) -- NOT str(...) -- so the payload is the raw random
        # bytes on both Python 2 and 3. Under Python 3, str(bytearray(...))
        # yielded the textual repr ("bytearray(b'...')") instead of the data.
        return Raw(bytes(bytearray(os.urandom(nbytes))))

    def sized_pkt(nbytes):
        # Single fixed-payload packet wrapped in a PacketList.
        pkts = PacketList()
        pkts.append(eth("00:00:00:00:00:01") / ipv4() / udp() / payload(nbytes))
        return pkts

    # ETH|VLAN|VLAN|IP|UDP
    all_pkts['vlan2-udp'] = eth() / Dot1Q(vlan=3393) / Dot1Q(vlan=2000) / ipv4() / udp(dport=6639)
    # ETH|VLAN|IP|UDP
    all_pkts['vlan-udp'] = eth() / Dot1Q(vlan=3393) / ipv4() / udp()
    # ETH|IP|UDP (no payload)
    all_pkts['udp-small'] = eth("00:00:00:00:00:01") / ipv4() / udp()
    # ETH|IP|UDP|PAYLOAD
    all_pkts['udp-large'] = eth() / ipv4() / udp() / payload(1000)
    all_pkts['udp-mid'] = eth() / ipv4() / udp() / payload(500)
    # ETH|IP|UDP sweep over 8 destination addresses
    sweep_small = PacketList()
    for i in range(8):
        sweep_small.append(eth() / ipv4(src="10.0.0.12", dst="10.0.0.{}".format(i)) / udp())
    all_pkts['udp-sweep-small'] = sweep_small
    # Bursts of random-length payloads (1..100 bytes each).
    burst = PacketList()
    for _ in range(10):
        burst.append(eth() / ipv4() / udp() / payload(random.randint(1, 100)))
    all_pkts['udp-burst'] = burst
    vlan_burst = PacketList()
    for _ in range(10):
        vlan_burst.append(eth() / Dot1Q(vlan=3393) / ipv4() / udp() / payload(random.randint(1, 100)))
    all_pkts['vlan-burst'] = vlan_burst
    burst5 = PacketList()
    for _ in range(5):
        burst5.append(eth() / ipv4() / udp() / payload(random.randint(1, 100)))
    all_pkts['udp-burst-5'] = burst5
    burst10 = PacketList()
    for _ in range(10):
        burst10.append(eth("00:00:00:00:00:01") / ipv4() / udp() / payload(10))
    all_pkts['udp-burst-10'] = burst10
    # Fixed total frame sizes (payload = frame size minus ETH/IP/UDP headers).
    all_pkts['udp-128b'] = sized_pkt(87)
    all_pkts['udp-256b'] = sized_pkt(215)
    all_pkts['udp-512b'] = sized_pkt(471)
    all_pkts['udp-1024b'] = sized_pkt(983)
    all_pkts['udp-1516b'] = sized_pkt(1475)
    all_pkts['udp-64b'] = sized_pkt(22)
    all_pkts['udp-65'] = sized_pkt(23)
def main():
    """Generate all packets, write the packet.mk name list, then one pcap per entry."""
    gen_udp_pkts()
    with open("packet.mk", "w") as f:
        f.write("TEST_PACKET=")
        for packet in all_pkts.keys():
            f.write(" " + packet)
    # dict.items() works on Python 2 and 3; the original iteritems() is
    # Python-2-only and raises AttributeError under Python 3.
    for k, v in all_pkts.items():
        wrpcap('%s.pcap' % k, v)

if __name__ == '__main__':
    main()
|
import unittest
from katas.kyu_7.highest_and_lowest import high_and_low
class HighAndLowTestCase(unittest.TestCase):
    """high_and_low('1 2 3') must return '<max> <min>' of the space-separated ints.

    Covers negatives, duplicates, zero boundaries, and the single-number case
    (where max == min).
    """

    def test_equal_1(self):
        self.assertEqual(high_and_low('1 2 3 4 5'), '5 1')

    def test_equal_2(self):
        self.assertEqual(high_and_low('1 2 -3 4 5'), '5 -3')

    def test_equal_3(self):
        self.assertEqual(high_and_low('1 9 3 4 -5'), '9 -5')

    def test_equal_4(self):
        self.assertEqual(
            high_and_low('4 5 29 54 4 0 -214 542 -64 1 -3 6 -6'), '542 -214'
        )

    def test_equal_5(self):
        self.assertEqual(high_and_low('1 -1'), '1 -1')

    def test_equal_6(self):
        # Duplicates: max and min are the same value.
        self.assertEqual(high_and_low('1 1'), '1 1')

    def test_equal_7(self):
        self.assertEqual(high_and_low('-1 -1'), '-1 -1')

    def test_equal_8(self):
        self.assertEqual(high_and_low('1 -1 0'), '1 -1')

    def test_equal_9(self):
        self.assertEqual(high_and_low('1 1 0'), '1 0')

    def test_equal_10(self):
        self.assertEqual(high_and_low('-1 -1 0'), '0 -1')

    def test_equal_11(self):
        # Single number: it is both the high and the low.
        self.assertEqual(high_and_low('42'), '42 42')
|
def selectionsort(list1):
    """Sort list1 in place in DESCENDING order via selection sort; return it.

    The output matches the original implementation (descending). The fix:
    the original never updated its "max_possible" index and instead swapped
    inside the scan whenever it saw a larger element, doing up to O(n)
    swaps per pass. A real selection sort tracks the index of the largest
    remaining element and swaps exactly once per pass.
    """
    for i in range(len(list1) - 1):
        # Index of the largest element in list1[i:].
        largest = i
        for j in range(i + 1, len(list1)):
            if list1[j] > list1[largest]:
                largest = j
        if largest != i:
            list1[i], list1[largest] = list1[largest], list1[i]
    return list1
# Demo: sorts the sample list (descending) and prints the result.
myL = [1,5,36,25,14,9,52,41]
print (selectionsort(myL))
|
from flask import Flask
from extensions import *
from config import DevelopmentConfig, STATIC_FOLDER
from models import *
from commands import test
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
def create_app(config=DevelopmentConfig):
    """Application factory: build and fully initialize the Flask app.

    Order matters here: extensions are initialized before the blueprints
    that use them, and the database itself is created (if missing) before
    create_all() builds the tables.
    """
    app = Flask(__name__, static_folder = STATIC_FOLDER)
    app.config.from_object(config)
    register_extensions(app)
    register_blueprints(app)
    register_commands(app)
    # Create the database if it does not exist yet, then the tables.
    engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
    if not database_exists(engine.url):
        create_database(engine.url)
    with app.app_context():
        db.create_all()
    return app
def register_extensions(app):
    """Register Flask extensions."""
    # Same extensions in the same order as before, driven by a tuple.
    for extension in (db, socketio, redis_store, marshmallow):
        extension.init_app(app)
def register_blueprints(app):
    """Register Flask blueprints."""
    # Imports stay local to avoid circular imports at module load time.
    from .controllers.web.web import app as web_bp
    app.register_blueprint(web_bp)
    from .controllers.interface.authentication.authentication import app as auth_bp
    app.register_blueprint(auth_bp)
    from .controllers.interface.user.user import app as user_bp
    app.register_blueprint(user_bp)
    from .controllers.interface.question.question import app as question_bp
    app.register_blueprint(question_bp)
def register_commands(app):
    """Register Click commands."""
    # Only the `test` command for now.
    app.cli.add_command(test)
#_*_coding:utf-8 _*_
#用二分法求平方根
def sqrt_dichotomy(x, max, min=0):
    """Approximate sqrt(x) by bisection on [min, max] to within |mid^2 - x| <= 1e-4.

    Parameter names `max`/`min` shadow the builtins but are kept for
    backward compatibility with existing callers.

    Fixes vs. the original: the recursive calls discarded their result, so
    the function returned None whenever the first midpoint was not already
    accurate; the second `if` is now `elif`; the debug prints (Python-2
    `print` statements) are removed.
    """
    mid = (min + max) / 2.0
    diff = mid * mid - x
    if diff > 0.0001:
        # Guess too high: search the lower half.
        return sqrt_dichotomy(x, mid, min)
    elif diff < -0.0001:
        # Guess too low: search the upper half.
        return sqrt_dichotomy(x, max, mid)
    else:
        return mid
if __name__ == '__main__':
    # print(...) with a single argument works on both Python 2 and 3;
    # the original bare `print expr` statement is a SyntaxError on Python 3.
    x = 1024
    max = x
    print(sqrt_dichotomy(x, max))
|
from django import forms
from .models import Bank, Category, Transaction, Budget, BudgetCategory
import datetime
class BankForm(forms.ModelForm):
    """Create/edit a Bank: only its starting amount and display name."""
    class Meta:
        model = Bank
        fields = ('starting_amount', 'name')
class CategoryForm(forms.ModelForm):
    """Create/edit a Category: just its name field."""
    class Meta:
        model = Category
        fields = ('category',)
class TransactionForm(forms.ModelForm):
    """Transaction create/edit form.

    When a user is supplied, the category and bank choices are limited to
    that user's own objects; otherwise every category is offered and no
    banks are listed.
    """

    class Meta:
        model = Transaction
        fields = ('date', 'amount', 'category', 'location', 'notes', 'card_used')

    def __init__(self, user=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `is not None` (identity) rather than `!= None` (equality) -- PEP 8.
        if user is not None:
            categories = [(str(c.id), str(c))
                          for c in Category.objects.filter(user__exact=user)]
            banks = [(str(b.id), str(b.name))
                     for b in Bank.objects.filter(user=user)]
        else:
            categories = [(str(c.id), str(c)) for c in Category.objects.all()]
            banks = []
        self.fields['category'].widget.choices = categories
        self.fields['category'].widget.attrs = {'id': 'form_category'}
        # Date picker covering 1950 through next year.
        self.fields['date'].widget = forms.SelectDateWidget(
            empty_label=None,
            years=range(1950, datetime.date.today().year + 2))
        self.fields['date'].widget.attrs = {'id': 'form_date'}
        self.fields['location'].label = 'Location/To (for transfers)'
        self.fields['location'].widget.attrs = {'autocomplete': 'off', 'list': 'Locations', 'id': 'form_location'}
        self.fields['amount'].widget.attrs = {'id': 'form_amount', 'step': '0.01'}
        self.fields['notes'].widget.attrs = {'id': 'form_notes'}
        self.fields['card_used'].widget.choices = banks
        self.fields['card_used'].widget.attrs = {'id': 'form_card_used'}
class BudgetForm(forms.ModelForm):
    """Create/edit a Budget: its start and end dates."""
    class Meta:
        model = Budget
        fields = ('start', 'end')
class BudgetCategoryForm(forms.ModelForm):
    """Attach an amount for one category to a budget."""
    class Meta:
        model = BudgetCategory
        fields = ('budget', 'category', 'amount')
class UploadFileForm(forms.Form):
    """Plain (non-model) form with a single file-upload field."""
    file = forms.FileField()
|
from django.urls import path
from currency.views import currency

# Namespace so templates can reverse with {% url 'currency:currency' %}.
app_name = 'currency'

urlpatterns = [
    # App root serves the currency view.
    path('', currency, name='currency'),
]
import sys
import os
from os.path import isfile, join
import csv
import pandas as pd
import numpy as np
from scipy import stats
from collections import defaultdict
class MinMaxNormalise:
    """Min-max normalisation of 516-column WORLD feature CSVs.

    Column layout per row: label + 512 spectral (sp) + 1 aperiodicity (ap)
    + 1 f0 + 1 vuv. Local stats live in mmx_local_norms.data, global stats
    in mmx_global_norms.data; normalised output scales to [0.1, 0.9].
    """

    def __init__(self):
        # column-header -> global min/max, filled by get_global_norms()
        self.global_values = defaultdict()

    def set_local_norms(self, csvs):
        """Write per-file min/max of each of the 516 features to mmx_local_norms.data."""
        with open('mmx_local_norms.data', 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            # header (column names) for 512 (sp) + 1 (ap) + 1 (f0) + 1 (vuv)
            header = ['label']
            for i in range(1, 517):
                header.append('{}_max'.format(i))
                header.append('{}_min'.format(i))
            wr.writerow(header)
            for file in csvs:
                label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
                # Seed with +/- infinity: the original 0/1000 seeds silently
                # clipped features whose values were all negative or all
                # above 1000.
                local_max = defaultdict()
                local_min = defaultdict()
                for i in range(1, 517):
                    local_max[str(i)] = float('-inf')
                    local_min[str(i)] = float('inf')
                df = pd.read_csv(file, header=None)
                for feature in range(1, 517):  # 516 WORLD features
                    col_max = df.iloc[:, feature][:].max()
                    col_min = df.iloc[:, feature][:].min()
                    if col_max >= local_max[str(feature)]:
                        local_max[str(feature)] = col_max
                    if col_min <= local_min[str(feature)]:
                        local_min[str(feature)] = col_min
                row = [label]
                for i in range(1, 517):
                    row.append(local_max[str(i)])
                    row.append(local_min[str(i)])
                wr.writerow(row)

    def set_global_norms(self):
        """Aggregate local minima/maxima into mmx_global_norms.data."""
        df = pd.read_csv('mmx_local_norms.data')
        with open('mmx_global_norms.data', 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            header = []
            for i in range(1, 517):
                header.append('{}_max'.format(i))
                header.append('{}_min'.format(i))
            wr.writerow(header)
            # Global max = max of the per-file maxima; min likewise.
            row = []
            for i in range(1, 517):
                row.append(df['{}_max'.format(i)].max())
                row.append(df['{}_min'.format(i)].min())
            wr.writerow(row)

    def get_global_norms(self):
        """Load mmx_global_norms.data into self.global_values."""
        df = pd.read_csv('mmx_global_norms.data')
        for i in range(1, 517):
            for col in ('{}_max'.format(i), '{}_min'.format(i)):
                # Label-based column access: the original df.iloc[:, col]
                # raised, because iloc is strictly positional and col is a
                # string header.
                self.global_values[col] = np.float64(df[col].iloc[0])

    def normalise_by_local(self, csvs):
        """Scale each file by its own min/max to [0.1, 0.9]; write <name>.mmx."""
        local_minmax = pd.read_csv('mmx_local_norms.data')
        local_minmax.set_index('label', inplace=True)
        for file in csvs:
            # read in data file (to normalise)
            df = pd.read_csv(file, header=None)
            # get file label and minmax values
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            minmax = local_minmax.loc[label, :]
            vmax = defaultdict()
            vmin = defaultdict()
            rng = defaultdict()
            feature = 1
            for i in range(0, len(minmax), 2):  # even slots hold the maxima
                vmax[str(feature)] = minmax[i]
                feature += 1
            feature = 1
            for i in range(1, len(minmax), 2):  # odd slots hold the minima
                vmin[str(feature)] = minmax[i]
                feature += 1
            for i in range(1, 517):  # value range per feature
                rng[str(i)] = vmax[str(i)] - vmin[str(i)]
            normalised = '{}.mmx'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(normalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                for row in range(len(df)):
                    world_516 = [df.iloc[row, :][0]]  # list starts with the label
                    for i in range(1, 517):
                        value = df.iloc[row, :][i]
                        value_prime = (((value - vmin[str(i)]) * (0.9 - 0.1)) / rng[str(i)]) + 0.1
                        world_516.append(value_prime)
                    # Hardcoded NaN replacements when a range is 0
                    # (division by zero above): ap -> scaled 1, f0/vuv -> scaled 0.
                    if np.isnan(world_516[514]):
                        world_516[514] = (1 * (0.9 - 0.1)) + 0.1
                    if np.isnan(world_516[515]):
                        world_516[515] = (0 * (0.9 - 0.1)) + 0.1
                    if np.isnan(world_516[516]):
                        world_516[516] = (0 * (0.9 - 0.1)) + 0.1
                    wr.writerow(world_516)  # minmax normalisation scaled to [0.1, 0.9]
class MVN:
    """Mean/variance (z-score) normalisation of 516-column WORLD feature CSVs.

    Column layout per row: label + 512 sp + 1 ap + 1 f0 + 1 vuv. Local stats
    live in mvn_local_norms.data, global stats in mvn_global_norms.data.
    """

    def __init__(self):
        # column-header -> global mean/std, filled by get_global_norms()
        self.global_values = defaultdict()

    def set_local_norms(self, csvs):
        """Write per-file mean and std of each feature to mvn_local_norms.data."""
        with open('mvn_local_norms.data', 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            header = ['label']
            for i in range(1, 517):
                header.append('{}_mean'.format(i))
                header.append('{}_std'.format(i))
            wr.writerow(header)
            for file in csvs:
                label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
                local_mean = defaultdict()
                local_std = defaultdict()
                df = pd.read_csv(file, header=None)
                for feature in range(1, 517):  # 516 WORLD features
                    local_mean[str(feature)] = np.mean(df.iloc[:, feature][:])
                    local_std[str(feature)] = np.std(df.iloc[:, feature][:])
                row = [label]
                for i in range(1, 517):
                    row.append(local_mean[str(i)])
                    row.append(local_std[str(i)])
                wr.writerow(row)

    def set_global_norms(self):
        """Aggregate the local stats into mvn_global_norms.data.

        NOTE(review): the global std is the std OF the per-file stds (and
        mean the mean of means), mirroring the original code -- confirm
        this pooling is intended rather than pooled-sample statistics.
        """
        df = pd.read_csv('mvn_local_norms.data')
        with open('mvn_global_norms.data', 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            header = []
            for i in range(1, 517):
                header.append('{}_mean'.format(i))
                header.append('{}_std'.format(i))
            wr.writerow(header)
            row = []
            for i in range(1, 517):
                row.append(np.mean(df['{}_mean'.format(i)]))
                row.append(np.std(df['{}_std'.format(i)]))
            wr.writerow(row)

    def get_global_norms(self):
        """Load mvn_global_norms.data into self.global_values."""
        df = pd.read_csv('mvn_global_norms.data')
        for i in range(1, 517):
            for col in ('{}_mean'.format(i), '{}_std'.format(i)):
                # Label-based access: the original df.iloc[:, col] raised,
                # because iloc is strictly positional and col is a string.
                self.global_values[col] = np.float64(df[col].iloc[0])

    def normalise_by_local(self, csvs):
        """Z-score each file's features using its own stats; write <name>.mvn."""
        local_mvn = pd.read_csv('mvn_local_norms.data')
        local_mvn.set_index('label', inplace=True)
        for file in csvs:
            # read in data file (to normalise)
            df = pd.read_csv(file, header=None)
            # get file label and mean/variance values
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            mvn = local_mvn.loc[label, :]
            mean = defaultdict()
            std = defaultdict()
            feature = 1
            for i in range(0, len(mvn), 2):  # even slots hold the means
                mean[str(feature)] = mvn[i]
                feature += 1
            feature = 1
            for i in range(1, len(mvn), 2):  # odd slots hold the stds
                std[str(feature)] = mvn[i]
                feature += 1
            normalised = '{}.mvn'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(normalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                for row in range(len(df)):
                    world_516 = [df.iloc[row, :][0]]  # list starts with the label
                    for i in range(1, 517):
                        value = df.iloc[row, :][i]
                        divisor = std[str(i)]
                        # numpy float division by zero yields inf/nan instead
                        # of raising, so the original `except ZeroDivisionError`
                        # never fired; test explicitly (constant ap, or
                        # all-zero f0/vuv columns).
                        if divisor == 0:
                            value_prime = 0
                        else:
                            value_prime = (value - mean[str(i)]) / divisor
                        world_516.append(value_prime)
                    wr.writerow(world_516)  # mean/variance normalisation
class BoxStandardise:
    """Box-Cox standardisation of 516-column WORLD feature CSVs.

    Column layout per row: label + 512 sp + 1 ap + 1 f0 + 1 vuv. Local
    variances live in box_local_norms.data, global in box_global_norms.data.
    """

    def __init__(self):
        # column-header -> global variance, filled by get_global_norms()
        self.global_values = defaultdict()

    def set_local_norms(self, csvs):
        """Write per-file variance of each feature to box_local_norms.data."""
        with open('box_local_norms.data', 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            header = ['label']
            for i in range(1, 517):
                header.append('{}_var'.format(i))
            wr.writerow(header)
            for file in csvs:
                label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
                local_var = defaultdict()
                df = pd.read_csv(file, header=None)
                for feature in range(1, 517):  # 516 WORLD features
                    local_var[str(feature)] = np.var(df.iloc[:, feature][:])
                row = [label]
                for i in range(1, 517):
                    row.append(local_var[str(i)])
                wr.writerow(row)

    def set_global_norms(self):
        """Aggregate local variances into box_global_norms.data.

        NOTE(review): this stores the variance OF the per-file variances,
        mirroring the original code -- confirm that pooling is intended.
        """
        df = pd.read_csv('box_local_norms.data')
        with open('box_global_norms.data', 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            header = []
            for i in range(1, 517):
                header.append('{}_var'.format(i))
            wr.writerow(header)
            row = []
            for i in range(1, 517):
                row.append(np.var(df['{}_var'.format(i)]))
            wr.writerow(row)

    def get_global_norms(self):
        """Load box_global_norms.data into self.global_values."""
        df = pd.read_csv('box_global_norms.data')
        for i in range(1, 517):
            col = '{}_var'.format(i)
            # Label-based access: the original df.iloc[:, col] raised,
            # because iloc is strictly positional and col is a string.
            self.global_values[col] = np.float64(df[col].iloc[0])

    def normalise_by_local_old(self, csvs):
        """Fixed-lambda Box-Cox transform of every feature; writes <name>.box."""
        local_box = pd.read_csv('box_local_norms.data')
        local_box.set_index('label', inplace=True)
        for file in csvs:
            df = pd.read_csv(file, header=None)
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            box = local_box.loc[label, :]
            var = defaultdict()
            for i in range(0, len(box)):  # per-feature variances (not used below)
                var[str(i + 1)] = box[i]
            normalised = '{}.box'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(normalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                for row in range(len(df)):
                    world_516 = [df.iloc[row, :][0]]  # list starts with the label
                    for i in range(1, 517):
                        value = df.iloc[row, :][i]
                        # lambdas as per https://danielsdiscoveries.wordpress.com/2017/09/29/spectrogram-input-normalisation-for-neural-networks/
                        value_prime = (((value + 10e-7) ** 0.043) - 1) / 0.043
                        world_516.append(value_prime)
                    wr.writerow(world_516)  # boxcox standardisation

    def normalise_by_local_new(self, csvs):
        """Per-column fitted Box-Cox (scipy) transform; writes <name>.box."""
        local_box = pd.read_csv('box_local_norms.data')
        local_box.set_index('label', inplace=True)
        for file in csvs:
            df = pd.read_csv(file, header=None)
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            box = local_box.loc[label, :]
            var = defaultdict()
            for i in range(0, len(box)):  # per-feature variances (not used below)
                var[str(i + 1)] = box[i]
            normalised = '{}.box'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(normalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                # boxcox needs strictly positive input: smooth non-positive
                # f0 (col 515) and vuv (col 516) values with 10e-7 (= 1e-6).
                f0 = df.iloc[:, 515].values
                for index, value in enumerate(f0):
                    if value <= 0:
                        f0[index] = 10e-7
                df.iloc[:, 515] = f0
                vuv = df.iloc[:, 516].values
                for index, value in enumerate(vuv):
                    if value <= 0:
                        vuv[index] = 10e-7
                df.iloc[:, 516] = vuv
                for column in range(1, 517):
                    # NOTE(review): stats.boxcox raises on constant or
                    # non-positive columns -- confirm upstream guarantees
                    # for the sp/ap columns, which are not smoothed above.
                    df.iloc[:, column] = stats.boxcox(df.iloc[:, column])[0]
                for row in range(len(df)):
                    wr.writerow(df.iloc[row, :])  # boxcox standardisation
# Every .csv in the working directory is treated as a WORLD feature file.
data = [f for f in os.listdir('.') if os.path.splitext(f)[1] == '.csv']

# Min-max scaling to [0.1, 0.9] -> *.mmx
minmax = MinMaxNormalise()
minmax.set_local_norms(data)
minmax.set_global_norms()
minmax.normalise_by_local(data)

# Mean/variance (z-score) normalisation -> *.mvn
mvn = MVN()
mvn.set_local_norms(data)
mvn.set_global_norms()
mvn.normalise_by_local(data)

# Box-Cox standardisation (per-column fitted variant) -> *.box
boxcox = BoxStandardise()
boxcox.set_local_norms(data)
boxcox.set_global_norms()
#boxcox.normalise_by_local_old(data)
boxcox.normalise_by_local_new(data)
from django.forms import ModelForm
from django.core.exceptions import ValidationError
from school_list.models import Student,Teacher
class StudentForm(ModelForm):
    """ModelForm exposing the editable Student fields."""
    class Meta:
        model= Student
        fields= ['s_name', 's_class', 's_dob', 's_address', 's_phonenum', 's_email']
class TeacherForm(ModelForm):
    """ModelForm exposing the editable Teacher fields."""
    class Meta:
        model= Teacher
        fields= ['t_name', 't_subject', 't_dob', 't_address', 't_phonenum', 't_email']
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple utility values, methods, and classes."""
import logging
import re
import time
from vtdb import dbexceptions
from vtdb import vtdb_logger
from vtproto import vtrpc_pb2
INITIAL_DELAY_MS = 5    # first retry delay used by exponential_backoff_retry
NUM_RETRIES = 3         # default maximum number of retries
MAX_DELAY_MS = 100      # upper bound on the backoff delay
BACKOFF_MULTIPLIER = 2  # delay multiplier applied per attempt

# This pattern is used in transient error messages to differentiate
# between a transient error and a throttling error.
throttler_err_re = re.compile(
    r'exceeded (.*) quota, rate limiting', re.IGNORECASE)
def log_exception(exc, keyspace=None, tablet_type=None):
    """This method logs the exception.

    Integrity errors go through the logger's dedicated hook; everything
    else is reported as a generic vtclient exception.

    Args:
      exc: exception raised by calling code
      keyspace: keyspace for the exception
      tablet_type: tablet_type for the exception
    """
    logger_object = vtdb_logger.get_logger()
    # The shard is unknown at this layer, so it is always logged as None.
    shard_name = None
    if isinstance(exc, dbexceptions.IntegrityError):
        logger_object.integrity_error(exc)
    else:
        logger_object.vtclient_exception(keyspace, shard_name, tablet_type, exc)
def exponential_backoff_retry(
        retry_exceptions,
        initial_delay_ms=INITIAL_DELAY_MS,
        num_retries=NUM_RETRIES,
        backoff_multiplier=BACKOFF_MULTIPLIER,
        max_delay_ms=MAX_DELAY_MS):
    """Decorator for exponential backoff retry.

    Log and raise exception if unsuccessful.
    Do not retry while in a session.

    Args:
      retry_exceptions: tuple of exceptions to check.
      initial_delay_ms: initial delay between retries in ms.
      num_retries: max number of retries.
      backoff_multiplier: multiplier for each retry e.g. 2 will double the
        retry delay.
      max_delay_ms: upper bound on retry delay.

    Returns:
      A decorator method that returns wrapped method.
    """
    import functools

    def decorator(method):
        """Returns wrapper that calls method and retries on retry_exceptions."""

        # wraps() preserves the wrapped method's __name__/__doc__ so stack
        # traces and introspection still show the real method.
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            attempt = 0
            delay = initial_delay_ms
            while True:
                try:
                    return method(self, *args, **kwargs)
                except retry_exceptions as e:
                    attempt += 1
                    # Give up after num_retries, and never retry in a session.
                    if attempt > num_retries or self.session:
                        # In this case it is hard to discern keyspace
                        # and tablet_type from exception.
                        log_exception(e)
                        raise e
                    logging.error(
                        'retryable error: %s, retrying in %d ms, attempt %d of %d', e,
                        delay, attempt, num_retries)
                    time.sleep(delay / 1000.0)
                    delay = min(max_delay_ms, delay * backoff_multiplier)
        return wrapper
    return decorator
class VitessError(Exception):
    """VitessError is raised by an RPC with a server-side application error.

    VitessErrors have an error code and message.
    The individual protocols are responsible for getting the code and message
    from their protocol-specific encoding, and creating this error.
    Then this error can be converted to the right dbexception.
    """

    # Matches the '(errno N)' fragment MySQL embeds in error messages.
    _errno_pattern = re.compile(r'\(errno (\d+)\)')

    def __init__(self, method_name, code, message):
        """Initializes a VitessError from a code and message.

        Args:
          method_name: RPC method name, as a string, that was called.
          code: integer that represents the error code. From vtrpc_pb2.Code.
          message: string representation of the error.
        """
        self.method_name = method_name
        self.code = code
        self.message = message
        # Make self.args reflect the error components
        super(VitessError, self).__init__(message, method_name, code)

    def __str__(self):
        """Print the error nicely, converting the proto error enum to its name."""
        return '%s returned %s with message: %s' % (
            self.method_name, vtrpc_pb2.Code.Name(self.code), self.message)

    def convert_to_dbexception(self, args):
        """Converts from a VitessError to the appropriate dbexceptions class.

        Args:
          args: argument tuple to use to create the new exception.

        Returns:
          An exception from dbexceptions.
        """
        # FIXME(alainjobart): this is extremely confusing: self.message is only
        # used for integrity errors, and nothing else. The other cases
        # have to provide the message in the args.
        if self.code == vtrpc_pb2.UNAVAILABLE:
            # A throttling message inside UNAVAILABLE means rate limiting,
            # not a generic transient failure.
            if throttler_err_re.search(self.message):
                return dbexceptions.ThrottledError(args)
            return dbexceptions.TransientError(args)
        if self.code == vtrpc_pb2.FAILED_PRECONDITION:
            return dbexceptions.QueryNotServed(args)
        if self.code == vtrpc_pb2.ALREADY_EXISTS:
            # Prune the error message to truncate after the mysql errno, since
            # the error message may contain the query string with bind variables.
            # NOTE(review): parts[2] assumes the '(errno N)' pattern is present
            # in the message; it raises IndexError otherwise -- confirm that
            # ALREADY_EXISTS messages always carry it.
            msg = self.message.lower()
            parts = self._errno_pattern.split(msg)
            pruned_msg = msg[:msg.find(parts[2])]
            new_args = (pruned_msg,) + tuple(args[1:])
            return dbexceptions.IntegrityError(new_args)
        if self.code == vtrpc_pb2.INVALID_ARGUMENT:
            return dbexceptions.ProgrammingError(args)
        return dbexceptions.DatabaseError(args)
def unique_join(str_list, delim='|'):
    """Join the sorted, de-duplicated string forms of str_list with delim."""
    uniques = {str(item) for item in str_list}
    return delim.join(sorted(uniques))
def keyspace_id_prefix(packed_keyspace_id):
    """Return the first byte of packed_keyspace_id as two lowercase hex digits."""
    first_byte = packed_keyspace_id[0]
    return '%02x' % ord(first_byte)
def keyspace_id_prefixes(packed_keyspace_ids):
    """Return the first str byte of each packed_keyspace_id if it exists."""
    # De-duplicated, sorted, '|'-joined hex prefixes of every keyspace id.
    return unique_join(keyspace_id_prefix(pkid) for pkid in packed_keyspace_ids)
def convert_exception_kwarg(key, value):
    """Convert one exception kwarg into a loggable (key, value) pair.

    None values pass through untouched; known keys are passed through or
    summarized; unknown keys map to the value 'unknown'.
    """
    if value is None:
        return key, value
    passthrough_keys = (
        'entity_column_name',
        'keyspace',
        'num_queries',
        'sql',
        'tablet_type',
    )
    if key in passthrough_keys:
        return key, value
    if key == 'entity_keyspace_id_map':
        # Log only the keyspace-id prefixes, not the full map.
        return 'entity_keyspace_ids', keyspace_id_prefixes(value.values())
    if key in ('keyspace_ids', 'merged_keyspace_ids'):
        return key, keyspace_id_prefixes(value)
    if key in ('keyranges', 'keyspaces', 'sqls'):
        return key, unique_join(value)
    if key in ('not_in_transaction', 'as_transaction'):
        return key, str(value)
    return key, 'unknown'
def convert_exception_kwargs(kwargs):
    """Convert kwargs into a readable str.

    Args:
      kwargs: A (str: value) dict.

    Returns:
      A comma-delimited string of converted, truncated key=value pairs.
      All non-None kwargs are included in alphabetical order.
    """
    new_kwargs = {}
    # dict.items() works on both Python 2 and 3; the original iteritems()
    # is Python-2-only and raises AttributeError under Python 3.
    for key, value in kwargs.items():
        new_key, new_value = convert_exception_kwarg(key, value)
        new_kwargs[new_key] = new_value
    return ', '.join(
        ('%s=%s' % (k, v))[:256]  # truncate each pair to 256 chars
        for (k, v) in sorted(new_kwargs.items())
        if v is not None)
|
'''
author: Zitian(Daniel) Tong
date: 17:43 2019-05-25 2019
editor: PyCharm
email: danieltongubc@gmail.com
'''
from flask import Blueprint, render_template, request, url_for, redirect, session
from models.user import User, UserError
user_blueprint = Blueprint('users', __name__)
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register_user():
    """Register a new user; on success store the email in the session.

    NOTE(review): on success this returns the bare email string rather than
    a redirect -- confirm that is intentional and not a leftover debug value.
    """
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        try:
            if User.register_user(email, password):
                session['email'] = email
                return email
        except UserError as e:
            # assumes UserError defines a .message attribute -- TODO confirm
            return e.message
    # GET, or a POST where register_user returned falsy without raising.
    return render_template('users/register.html')  # send user an error if login was invalid
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login_user():
    """Log a user in; on success store the email and redirect to the alerts index."""
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        try:
            if User.is_login_valid(email,password):
                session['email'] = email
                return redirect(url_for('alerts.index'))
        except UserError as e:
            # assumes UserError defines a .message attribute -- TODO confirm
            return e.message
    # GET, or a POST with invalid credentials: re-render the login form.
    return render_template('users/login.html')
@user_blueprint.route('/logout')
def logout_user():
    """Log out: clear the session email and show the login page.

    NOTE(review): sets session['email'] to None rather than popping the key,
    so the key stays present -- downstream checks must treat None as
    logged-out.
    """
    session['email'] = None
    return render_template('users/login.html')
|
#! /usr/bin/env python
"""
Normalizes a video by dividing against its background.
See: BackgroundExtractor.py to get the background of a video.
USING:
As a command line utility:
$ Normalizer.py input_video input_image output_video
As a module:
from Normalizer import Normalizer
norm = Normalizer("input_video.avi", input_image, "output_video.avi")
norm.normalize()
Author: Martin Humphreys
"""
from argparse import ArgumentParser
import numpy as np
import os
import cv2
class Normalizer:
    """Normalize video frames by dividing each frame by a background image."""

    # String types differ between Python 2 (str + unicode) and Python 3
    # (str only); the original bare `unicode` reference raised NameError
    # under Python 3.
    try:
        _STR_TYPES = (str, unicode)
    except NameError:
        _STR_TYPES = (str,)

    def __init__(self):
        pass

    def imageFromArg(self, image):
        """Load image as grayscale via cv2 if it is a path, else return it unchanged."""
        if isinstance(image, self._STR_TYPES):
            return cv2.imread(image, 0)
        return image

    def videoReaderFromArg(self, video):
        """Open a cv2.VideoCapture if video is a path, else return it unchanged."""
        if isinstance(video, self._STR_TYPES):
            return cv2.VideoCapture(video)
        return video

    def normalize(self, background, in_video, out_video):
        """Normalize in_video against background, writing grayscale out_video."""
        vc = self.videoReaderFromArg(in_video)
        frames = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = float(vc.get(cv2.CAP_PROP_FPS))
        # Fall back to 300 fps when the container reports no rate.
        if fps == float('inf'):
            fps = 300
        width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = int(vc.get(cv2.CAP_PROP_FOURCC))
        # isColor=False: output frames are single-channel grayscale.
        vw = cv2.VideoWriter(out_video, fourcc, fps, (width, height), False)
        self.normalizeVideo(background, vc, vw)

    def normalizeVideo(self, background, video_reader, video_writer):
        """Read every frame, normalize it against background, write it out."""
        while True:
            ret, frame = video_reader.read()
            if not ret:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            video_writer.write(self.normalizeFrame(background, frame))

    def normalizeFrame(self, background, frame):
        """Divide frame by the background (callable, path, or image); return uint8."""
        if callable(background):
            # Per-frame background provider.
            bg = background(frame)
        else:
            bg = self.imageFromArg(background)
        # Shift both into [1, 255] so the background can never divide by zero.
        a = self.transformRange(frame.astype('float'), 0, 255, 1, 255)
        b = self.transformRange(bg.astype('float'), 0, 255, 1, 255)
        c = a / ((b + 1) / 256)
        # Clamp anything above 255 before casting back to uint8.
        d = c * (c < 255) + 255 * np.ones(np.shape(c)) * (c > 255)
        return d.astype('uint8')

    def transformRange(self, value, oldmin, oldmax, newmin, newmax):
        """Linearly map value from [oldmin, oldmax] into [newmin, newmax]."""
        return (((value - oldmin) * (newmax - newmin)) / (oldmax - oldmin)) + newmin
def build_parser():
    """Create the command-line argument parser for this script."""
    arg_parser = ArgumentParser()
    for arg_name, description in (
            ('input_video', 'video to process'),
            ('background', 'background image'),
            ('output_video', 'file to save normalized video to')):
        arg_parser.add_argument(arg_name, help=description)
    return arg_parser
def main():
    """Command-line entry point: validate input paths, then run normalization."""
    parser = build_parser()
    opts = parser.parse_args()
    # Fail early with a parser error if either input file is missing.
    if not os.path.isfile(opts.input_video):
        parser.error("Video file %s does not exist." % opts.input_video)
    if not os.path.isfile(opts.background):
        parser.error("Image file %s does not exist." % opts.background)
    norm = Normalizer()
    norm.normalize(opts.background, opts.input_video, opts.output_video)
if __name__ == '__main__':
    main()
|
# Generated by Django 3.2.6 on 2021-08-02 12:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the UserProfile model, a one-to-one
    # extension of auth.User holding optional profile fields (name, bio,
    # birth date, location, and a picture with a default placeholder).
    # Do not hand-edit applied migrations.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('social', '0003_comment'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='profile', serialize=False, to='auth.user', verbose_name='user')),
                ('name', models.CharField(blank=True, max_length=30, null=True)),
                ('bio', models.TextField(blank=True, max_length=500, null=True)),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('location', models.CharField(blank=True, max_length=100, null=True)),
                ('picture', models.ImageField(blank=True, default='uploads/profile_picture/default.png', upload_to='uploads/profile_picture')),
            ],
            options={
                'verbose_name': 'Profile',
                'verbose_name_plural': 'Profiles',
                'ordering': ['name'],
            },
        ),
    ]
|
import logging
import time
import os.path
from numpy.random import RandomState
import lasagne
from lasagne.updates import adam
from lasagne.objectives import categorical_crossentropy
from lasagne.nonlinearities import elu,softmax,identity
from hyperoptim.parse import cartesian_dict_of_lists_product,\
product_of_list_of_lists_of_dicts
from hyperoptim.util import save_npy_artifact, save_pkl_artifact
from braindecode.datasets.combined import CombinedCleanedSet
from braindecode.mywyrm.processing import resample_cnt, bandpass_cnt, exponential_standardize_cnt
from braindecode.datasets.cnt_signal_matrix import CntSignalMatrix
from braindecode.datasets.signal_processor import SignalProcessor
from braindecode.datasets.loaders import BCICompetition4Set2A
from braindecode.models.deep5 import Deep5Net
from braindecode.veganlasagne.layer_util import print_layers
from braindecode.datahandling.splitters import SeveralSetsSplitter
from braindecode.datahandling.batch_iteration import CntWindowTrialIterator
from braindecode.veganlasagne.layers import get_n_sample_preds
from braindecode.veganlasagne.monitors import CntTrialMisclassMonitor, LossMonitor, RuntimeMonitor
from braindecode.experiments.experiment import Experiment
from braindecode.veganlasagne.stopping import MaxEpochs, NoDecrease, Or
from braindecode.veganlasagne.update_modifiers import MaxNormConstraintWithDefaults
from braindecode.results.results import Result
from braindecode.configs.sacred.super_conf import * # PARENTCONFIG
log = logging.getLogger(__name__)
def get_templates():
    """Return the (empty) dict of config templates for this experiment.

    The original body called ``lalalala_fn()``, a name defined nowhere in
    the module, so every call raised NameError; the leftover debug call is
    removed.
    """
    return {}
def get_grid_param_list():
    """Build the list of hyperparameter dicts for the grid search."""
    # Fixed settings shared by every run.
    base_params = [{
        'save_folder': './data/models/sacred/paper/bcic-iv-2a/repl/',
        'only_return_exp': False,
        'n_chans': 22
    }]
    # One entry per subject (1-9), all pointing at the same data folder.
    per_subject_params = cartesian_dict_of_lists_product({
        'subject_id': range(1,10),
        'data_folder': ['/home/schirrmr/data/bci-competition-iv/2a/',]
    })
    return product_of_list_of_lists_of_dicts([
        base_params,
        per_subject_params,
    ])
def sample_config_params(rng, params):
    """No random sampling for this config; return the params unchanged."""
    return params
def run(ex, data_folder, subject_id, n_chans,
        only_return_exp,):
    """Train a deep ConvNet on one BCI Competition IV 2a subject.

    Args:
        ex: sacred experiment handle; progress and final results are stored
            in ``ex.info`` and artifacts saved via ``save_pkl_artifact``.
        data_folder: directory holding the A0xT.mat / A0xE.mat files.
        subject_id: subject number (1-9), used to build the file names.
        n_chans: number of EEG channels (22 for this dataset).
        only_return_exp: if True, construct and return the Experiment
            without loading data or training.
    """
    start_time = time.time()
    assert (only_return_exp is False) or (n_chans is not None)
    ex.info['finished'] = False
    load_sensor_names = None
    train_filename = 'A{:02d}T.mat'.format(subject_id)
    test_filename = 'A{:02d}E.mat'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    # trial interval in milliseconds
    # these are the samples that will be predicted, so for a
    # network with 2000ms receptive field
    # 1500 means the first receptive field goes from -500 to 1500
    segment_ival = [1500,4000]
    train_loader = BCICompetition4Set2A(train_filepath, load_sensor_names=load_sensor_names)
    test_loader = BCICompetition4Set2A(test_filepath, load_sensor_names=load_sensor_names)
    # Preprocessing pipeline in [(function, {args:values)] logic:
    # resample to 250 Hz, band-pass 0-38 Hz, exponential standardization.
    cnt_preprocessors = [
        (resample_cnt , {'newfs': 250.0}),
        (bandpass_cnt, {
            'low_cut_hz': 0,
            'high_cut_hz': 38,
        }),
        (exponential_standardize_cnt, {})
    ]
    marker_def = {'1- Right Hand': [1], '2 - Left Hand': [2], '3 - Rest': [3],
                  '4 - Feet': [4]}
    train_signal_proc = SignalProcessor(set_loader=train_loader,
                                        segment_ival=segment_ival,
                                        cnt_preprocessors=cnt_preprocessors,
                                        marker_def=marker_def)
    train_set = CntSignalMatrix(signal_processor=train_signal_proc, sensor_names='all')
    test_signal_proc = SignalProcessor(set_loader=test_loader,
                                       segment_ival=segment_ival,
                                       cnt_preprocessors=cnt_preprocessors,
                                       marker_def=marker_def)
    test_set = CntSignalMatrix(signal_processor=test_signal_proc, sensor_names='all')
    # Reject trials whose absolute amplitude exceeds the threshold anywhere
    # in [0, 4000] ms.
    from braindecode.mywyrm.clean import MaxAbsCleaner
    train_cleaner = MaxAbsCleaner(segment_ival=[0,4000], threshold=800, marker_def=marker_def)
    test_cleaner = MaxAbsCleaner(segment_ival=[0,4000], threshold=800, marker_def=marker_def)
    combined_set = CombinedCleanedSet(train_set, test_set,train_cleaner, test_cleaner)
    if not only_return_exp:
        combined_set.load()
        in_chans = train_set.get_topological_view().shape[1]
    # NOTE(review): when only_return_exp is True, `in_chans` is never
    # assigned even though the assert above requires n_chans for that case --
    # an `else: in_chans = n_chans` branch looks intended. TODO confirm.
    input_time_length = 1000 # implies how many crops are processed in parallel, does _not_ determine receptive field size
    # receptive field size is determined by model architecture
    num_filters_time = 25
    filter_time_length = 10
    num_filters_spat = 25
    pool_time_length = 3
    pool_time_stride = 3
    num_filters_2 = 50
    filter_length_2 = 10
    num_filters_3 = 100
    filter_length_3 = 10
    num_filters_4 = 200
    filter_length_4 = 10
    final_dense_length = 2
    n_classes = 4
    final_nonlin=softmax
    first_nonlin=elu
    first_pool_mode='max'
    first_pool_nonlin=identity
    later_nonlin=elu
    later_pool_mode='max'
    later_pool_nonlin=identity
    drop_in_prob=0.0
    drop_prob=0.5
    batch_norm_alpha=0.1
    double_time_convs=False
    split_first_layer=True
    batch_norm=True
    # ensure reproducibility by resetting lasagne/theano random generator
    lasagne.random.set_rng(RandomState(34734))
    d5net = Deep5Net(in_chans=in_chans, input_time_length=input_time_length, num_filters_time=num_filters_time,
                     filter_time_length=filter_time_length,
                     num_filters_spat=num_filters_spat, pool_time_length=pool_time_length, pool_time_stride=pool_time_stride,
                     num_filters_2=num_filters_2, filter_length_2=filter_length_2,
                     num_filters_3=num_filters_3, filter_length_3=filter_length_3,
                     num_filters_4=num_filters_4, filter_length_4=filter_length_4,
                     final_dense_length=final_dense_length, n_classes=n_classes,
                     final_nonlin=final_nonlin, first_nonlin=first_nonlin,
                     first_pool_mode=first_pool_mode, first_pool_nonlin=first_pool_nonlin,
                     later_nonlin=later_nonlin, later_pool_mode=later_pool_mode, later_pool_nonlin=later_pool_nonlin,
                     drop_in_prob=drop_in_prob, drop_prob=drop_prob, batch_norm_alpha=batch_norm_alpha,
                     double_time_convs=double_time_convs, split_first_layer=split_first_layer, batch_norm=batch_norm)
    final_layer = d5net.get_layers()[-1]
    print_layers(final_layer)
    dataset_splitter = SeveralSetsSplitter(valid_set_fraction=0.2, use_test_as_valid=False)
    iterator = CntWindowTrialIterator(batch_size=45,input_time_length=input_time_length,
                                      n_sample_preds=get_n_sample_preds(final_layer))
    monitors = [LossMonitor(), CntTrialMisclassMonitor(input_time_length=input_time_length), RuntimeMonitor()]
    #debug: n_no_decrease_max_epochs = 2
    #debug: n_max_epochs = 4
    n_no_decrease_max_epochs = 80
    n_max_epochs = 800#100
    # real values for paper were 80 and 800
    # Stop when valid_misclass has not improved for 80 epochs or after 800
    # epochs total, whichever comes first.
    stop_criterion = Or([NoDecrease('valid_misclass', num_epochs=n_no_decrease_max_epochs),
                         MaxEpochs(num_epochs=n_max_epochs)])
    dataset = combined_set
    splitter = dataset_splitter
    loss_expression = categorical_crossentropy
    updates_expression = adam
    updates_modifier = MaxNormConstraintWithDefaults({})
    remember_best_chan = 'valid_misclass'
    run_after_early_stop=True
    exp = Experiment(final_layer, dataset,splitter,None,iterator, loss_expression,updates_expression, updates_modifier, monitors,
                     stop_criterion, remember_best_chan, run_after_early_stop, batch_modifier=None)
    if only_return_exp:
        return exp
    exp.setup()
    exp.run()
    end_time = time.time()
    run_time = end_time - start_time
    # Record final results: total runtime plus the last value of every
    # monitored channel.
    ex.info['finished'] = True
    ex.info['runtime'] = run_time
    for key in exp.monitor_chans:
        ex.info[key] = exp.monitor_chans[key][-1]
    save_pkl_artifact(ex, exp.monitor_chans, 'monitor_chans.pkl')
|
import warnings
from typing import Dict, Tuple, Union
import numpy as np
from phiml.math import DimFilter
from phi import math
from ._geom import Geometry, _keep_vector
from phiml.math import wrap, INF, Shape, channel, spatial, copy_with, Tensor
from phiml.math._shape import parse_dim_order
from phiml.math.magic import slicing_dict
class BaseBox(Geometry): # not a Subwoofer
    """
    Abstract base type for box-like geometries.
    Concrete subclasses (`Box`, `Cuboid`, `GridCell`) must provide
    `shape`, `center`, `size`, `half_size`, `lower` and `upper`.
    """
    def __eq__(self, other):
        raise NotImplementedError()
    def __hash__(self):
        raise NotImplementedError()
    def __ne__(self, other):
        return not self == other
    @property
    def shape(self):
        raise NotImplementedError()
    @property
    def center(self) -> Tensor:
        """Center position of the box. Abstract."""
        raise NotImplementedError()
    def at(self, center: Tensor) -> 'BaseBox':
        """Return a copy of this box moved so its center is `center`."""
        return Cuboid(center, self.half_size)
    @property
    def size(self) -> Tensor:
        raise NotImplementedError(self)
    @property
    def half_size(self) -> Tensor:
        raise NotImplementedError(self)
    @property
    def lower(self) -> Tensor:
        raise NotImplementedError(self)
    @property
    def upper(self) -> Tensor:
        raise NotImplementedError(self)
    @property
    def volume(self) -> Tensor:
        # Product of the edge lengths over the 'vector' dimension.
        return math.prod(self.size, 'vector')
    @property
    def shape_type(self) -> Tensor:
        # 'B' tags this geometry as a box.
        return math.tensor('B')
    def bounding_radius(self):
        """Radius of the smallest sphere containing the box (half-diagonal)."""
        return math.vec_length(self.half_size)
    def bounding_half_extent(self):
        return self.size * 0.5
    def global_to_local(self, global_position: Tensor) -> Tensor:
        """Map world coordinates to box-local coordinates in [0, 1]."""
        # Skip the subtraction when the lower corner is at the origin.
        if math.close(self.lower, 0):
            return global_position / self.size
        else:
            return (global_position - self.lower) / self.size
    def local_to_global(self, local_position):
        """Map box-local coordinates in [0, 1] back to world coordinates."""
        return local_position * self.size + self.lower
    def lies_inside(self, location):
        """Return a boolean tensor: does `location` lie inside the box?"""
        bool_inside = (location >= self.lower) & (location <= self.upper)
        bool_inside = math.all(bool_inside, 'vector')
        bool_inside = math.any(bool_inside, self.shape.instance) # union for instance dimensions
        return bool_inside
    def approximate_signed_distance(self, location: Union[Tensor, tuple]):
        """
        Computes the signed L-infinity norm (Chebyshev distance) from the location to the nearest side of the box.
        For an outside location `l` with the closest surface point `s`, the distance is `max(abs(l - s))`.
        For inside locations it is `-max(abs(l - s))`.
        Args:
            location: float tensor of shape (batch_size, ..., rank)
        Returns:
            float tensor of shape (*location.shape[:-1], 1).
        """
        center = 0.5 * (self.lower + self.upper)
        extent = self.upper - self.lower
        distance = math.abs(location - center) - extent * 0.5
        distance = math.max(distance, 'vector')
        distance = math.min(distance, self.shape.instance) # union for instance dimensions
        return distance
    def push(self, positions: Tensor, outward: bool = True, shift_amount: float = 0) -> Tensor:
        """Move `positions` out of (or into) the box along the nearest axis.

        With `outward=True`, points inside the box are pushed to the nearest
        face plus `shift_amount`; otherwise outside points are pulled in.
        """
        loc_to_center = positions - self.center
        sgn_dist_from_surface = math.abs(loc_to_center) - self.half_size
        if outward:
            # --- get negative distances (particles are inside) towards the nearest boundary and add shift_amount ---
            distances_of_interest = (sgn_dist_from_surface == math.max(sgn_dist_from_surface, 'vector')) & (sgn_dist_from_surface < 0)
            shift = distances_of_interest * (sgn_dist_from_surface - shift_amount)
        else:
            shift = (sgn_dist_from_surface + shift_amount) * (sgn_dist_from_surface > 0)  # get positive distances (particles are outside) and add shift_amount
            shift = math.where(math.abs(shift) > math.abs(loc_to_center), math.abs(loc_to_center), shift)  # ensure inward shift ends at center
        return positions + math.where(loc_to_center < 0, 1, -1) * shift
    def project(self, *dimensions: str):
        """ Project this box into a lower-dimensional space. """
        warnings.warn("Box.project(dims) is deprecated. Use Box.vector[dims] instead", DeprecationWarning, stacklevel=2)
        return self.vector[dimensions]
    def sample_uniform(self, *shape: math.Shape) -> Tensor:
        """Draw uniformly distributed random points inside the box."""
        uniform = math.random_uniform(self.shape.non_singleton, *shape, math.channel(vector=self.spatial_rank))
        return self.lower + uniform * self.size
    def corner_representation(self) -> 'Box':
        """Convert to a `Box` (lower/upper corner representation)."""
        return Box(self.lower, self.upper)
    box = corner_representation  # alias
    def center_representation(self) -> 'Cuboid':
        """Convert to a `Cuboid` (center/half-size representation)."""
        return Cuboid(self.center, self.half_size)
    def contains(self, other: 'BaseBox'):
        """ Tests if the other box lies fully inside this box. """
        return np.all(other.lower >= self.lower) and np.all(other.upper <= self.upper)
    def rotated(self, angle) -> Geometry:
        from ._transform import rotate
        return rotate(self, angle)
    def scaled(self, factor: Union[float, Tensor]) -> 'Geometry':
        """Scale the box about its center by `factor`."""
        return Cuboid(self.center, self.half_size * factor)
class BoxType(type):
    """ Deprecated. Does not support item names. """
    def __getitem__(self, item):
        # Metaclass hook enabling the legacy slicing constructor
        # Box['x,y', 0:1, 1:2]: the first element names the dimensions,
        # each following slice supplies (lower, upper) for one dimension,
        # with open slice ends mapped to -inf / inf.
        assert isinstance(item, tuple) and isinstance(item[0], str), "The Box constructor was updated in Φ-Flow version 2.2. Please add the dimension order as a comma-separated string as the first argument, e.g. Box['x,y', 0:1, 1:2] or use the kwargs constructor Box(x=1, y=(1, 2))"
        assert len(item) <= 4, f"Box[...] can only be used for x, y, z but got {len(item)} elements"
        dim_order = parse_dim_order(item[0])
        assert len(dim_order) == len(item) - 1, f"Dimension order '{item[0]}' does not match number of slices, {len(item) - 1}"
        lower = []
        upper = []
        for dim_name, dim in zip(dim_order, item[1:]):
            assert isinstance(dim, slice)
            assert dim.step is None or dim.step == 1, "Box: step must be 1 but is %s" % dim.step
            lower.append(dim.start if dim.start is not None else -np.inf)
            upper.append(dim.stop if dim.stop is not None else np.inf)
        vec = math.channel(vector=dim_order)
        lower = math.stack(lower, vec)
        upper = math.stack(upper, vec)
        return Box(lower, upper)
class Box(BaseBox, metaclass=BoxType):
    """
    Simple cuboid defined by location of lower and upper corner in physical space.
    Boxes can be constructed either from two positional vector arguments `(lower, upper)` or by specifying the limits by dimension name as `kwargs`.
    Examples:
    >>> Box(x=1, y=1) # creates a two-dimensional unit box with `lower=(0, 0)` and `upper=(1, 1)`.
    >>> Box(x=(None, 1), y=(0, None)) # creates a Box with `lower=(-inf, 0)` and `upper=(1, inf)`.
    The slicing constructor was updated in version 2.2 and now requires the dimension order as the first argument.
    >>> Box['x,y', 0:1, 0:1] # creates a two-dimensional unit box with `lower=(0, 0)` and `upper=(1, 1)`.
    >>> Box['x,y', :1, 0:] # creates a Box with `lower=(-inf, 0)` and `upper=(1, inf)`.
    """
    def __init__(self, lower: Tensor = None, upper: Tensor = None, **size: Union[int, Tensor, tuple, list]):
        """
        Args:
            lower: physical location of lower corner
            upper: physical location of upper corner
            **size: Specify size by dimension, either as `int` or `tuple` containing (lower, upper).
        """
        # NOTE(review): if exactly one of lower/upper is given, the other
        # corner is never assigned before vector_shape is computed below --
        # presumably both-or-neither is the expected usage. TODO confirm.
        if lower is not None:
            assert isinstance(lower, Tensor), f"lower must be a Tensor but got {type(lower)}"
            assert 'vector' in lower.shape, "lower must have a vector dimension"
            assert lower.vector.item_names is not None, "vector dimension of lower must list spatial dimension order"
            self._lower = lower
        if upper is not None:
            assert isinstance(upper, Tensor), f"upper must be a Tensor but got {type(upper)}"
            assert 'vector' in upper.shape, "lower must have a vector dimension"
            assert upper.vector.item_names is not None, "vector dimension of lower must list spatial dimension order"
            self._upper = upper
        else:
            # Build corners from kwargs: dim=v means (0, v); dim=(lo, up) is
            # explicit; None endpoints mean unbounded (-inf / inf).
            lower = []
            upper = []
            for item in size.values():
                if isinstance(item, (tuple, list)):
                    assert len(item) == 2, f"Box kwargs must be either dim=upper or dim=(lower,upper) but got {item}"
                    lo, up = item
                    lower.append(lo)
                    upper.append(up)
                elif item is None:
                    lower.append(-INF)
                    upper.append(INF)
                else:
                    lower.append(0)
                    upper.append(item)
            lower = [-INF if l is None else l for l in lower]
            upper = [INF if u is None else u for u in upper]
            self._upper = math.wrap(upper, math.channel(vector=tuple(size.keys())))
            self._lower = math.wrap(lower, math.channel(vector=tuple(size.keys())))
        # Broadcast both corners to the combined shape.
        vector_shape = self._lower.shape & self._upper.shape
        self._lower = math.expand(self._lower, vector_shape)
        self._upper = math.expand(self._upper, vector_shape)
        if self.size.vector.item_names is None:
            warnings.warn("Creating a Box without item names prevents certain operations like project()", DeprecationWarning, stacklevel=2)
    def __getitem__(self, item):
        # Slice both corners; _keep_vector keeps the 'vector' dim intact.
        item = _keep_vector(slicing_dict(self, item))
        return Box(self._lower[item], self._upper[item])
    @staticmethod
    def __stack__(values: tuple, dim: Shape, **kwargs) -> 'Geometry':
        if all(isinstance(v, Box) for v in values):
            return NotImplemented  # stack attributes
        else:
            return Geometry.__stack__(values, dim, **kwargs)
    def __eq__(self, other):
        # Boxes with unset corners compare equal to any Box (placeholder case).
        if self._lower is None and self._upper is None:
            return isinstance(other, Box)
        return isinstance(other, BaseBox)\
               and set(self.shape) == set(other.shape)\
               and self.size.shape.get_size('vector') == other.size.shape.get_size('vector')\
               and math.close(self._lower, other.lower)\
               and math.close(self._upper, other.upper)
    def without(self, dims: Tuple[str, ...]):
        """Return this box with the given vector dimensions removed."""
        remaining = list(self.shape.get_item_names('vector'))
        for dim in dims:
            if dim in remaining:
                remaining.remove(dim)
        return self.vector[remaining]
    def largest(self, dim: DimFilter) -> 'Box':
        """Return the bounding box over all boxes along `dim`."""
        dim = self.shape.without('vector').only(dim)
        if not dim:
            return self
        return Box(math.min(self._lower, dim), math.max(self._upper, dim))
    def __hash__(self):
        # Hash only the upper corner; consistent with __eq__ (equal boxes
        # have equal upper corners).
        return hash(self._upper)
    def __variable_attrs__(self):
        return '_lower', '_upper'
    @property
    def shape(self):
        if self._lower is None or self._upper is None:
            return None
        return self._lower.shape & self._upper.shape
    @property
    def lower(self):
        return self._lower
    @property
    def upper(self):
        return self._upper
    @property
    def size(self):
        return self.upper - self.lower
    @property
    def center(self):
        return 0.5 * (self.lower + self.upper)
    @property
    def half_size(self):
        return self.size * 0.5
    def shifted(self, delta, **delta_by_dim):
        # NOTE(review): `delta_by_dim` kwargs are accepted but ignored here --
        # only the positional `delta` is applied. TODO confirm intended.
        return Box(self.lower + delta, self.upper + delta)
    def __mul__(self, other):
        # Cartesian product of two boxes over disjoint dimension sets,
        # e.g. Box(x=...) * Box(y=...) -> 2D box.
        if not isinstance(other, Box):
            return NotImplemented
        lower = self._lower.vector.unstack(self.spatial_rank) + other._lower.vector.unstack(other.spatial_rank)
        upper = self._upper.vector.unstack(self.spatial_rank) + other._upper.vector.unstack(other.spatial_rank)
        names = self._upper.vector.item_names + other._upper.vector.item_names
        lower = math.stack(lower, math.channel(vector=names))
        upper = math.stack(upper, math.channel(vector=names))
        return Box(lower, upper)
    def __repr__(self):
        if self.shape.non_channel.volume == 1:
            item_names = self.size.vector.item_names
            if item_names:
                return f"Box({', '.join([f'{dim}=({lo}, {up})' for dim, lo, up in zip(item_names, self._lower, self._upper)])})"
            else:  # deprecated
                return 'Box[%s at %s]' % ('x'.join([str(x) for x in self.size.numpy().flatten()]), ','.join([str(x) for x in self.lower.numpy().flatten()]))
        else:
            return f'Box[shape={self.shape}]'
class Cuboid(BaseBox):
    """
    Box specified by center position and half size.
    """
    def __init__(self,
                 center: Tensor = 0,
                 half_size: Union[float, Tensor] = None,
                 **size: Union[float, Tensor]):
        # Either pass `half_size` explicitly (with a named 'vector' dim) or
        # give full edge lengths as kwargs, e.g. Cuboid(x=2, y=1).
        if half_size is not None:
            assert isinstance(half_size, Tensor), "half_size must be a Tensor"
            assert 'vector' in half_size.shape, f"Cuboid size must have a 'vector' dimension."
            assert half_size.shape.get_item_names('vector') is not None, f"Vector dimension must list spatial dimensions as item names. Use the syntax Cuboid(x=x, y=y) to assign names."
            self._half_size = half_size
        else:
            # kwargs give full sizes; halve them.
            self._half_size = math.wrap(tuple(size.values()), math.channel(vector=tuple(size.keys()))) * 0.5
        center = wrap(center)
        # Broadcast a scalar / unnamed center to the half_size vector dim.
        if 'vector' not in center.shape or center.shape.get_item_names('vector') is None:
            center = math.expand(center, channel(self._half_size))
        self._center = center
    def __eq__(self, other):
        # Cuboids with unset attributes compare equal to any Cuboid.
        if self._center is None and self._half_size is None:
            return isinstance(other, Cuboid)
        return isinstance(other, BaseBox)\
               and set(self.shape) == set(other.shape)\
               and math.close(self._center, other.center)\
               and math.close(self._half_size, other.half_size)
    def __hash__(self):
        return hash(self._center)
    def __repr__(self):
        return f"Cuboid(center={self._center}, half_size={self._half_size})"
    def __getitem__(self, item):
        item = _keep_vector(slicing_dict(self, item))
        return Cuboid(self._center[item], self._half_size[item])
    @staticmethod
    def __stack__(values: tuple, dim: Shape, **kwargs) -> 'Geometry':
        if all(isinstance(v, Cuboid) for v in values):
            # Stack attribute-wise when all operands are Cuboids.
            return Cuboid(math.stack([v.center for v in values], dim, **kwargs), math.stack([v.half_size for v in values], dim, **kwargs))
        else:
            return Geometry.__stack__(values, dim, **kwargs)
    def __variable_attrs__(self):
        return '_center', '_half_size'
    @property
    def center(self):
        return self._center
    @property
    def half_size(self):
        return self._half_size
    @property
    def shape(self):
        if self._center is None or self._half_size is None:
            return None
        return self._center.shape & self._half_size.shape
    @property
    def size(self):
        return 2 * self.half_size
    @property
    def lower(self):
        return self.center - self.half_size
    @property
    def upper(self):
        return self.center + self.half_size
    def shifted(self, delta, **delta_by_dim) -> 'Cuboid':
        # NOTE(review): `delta_by_dim` kwargs are accepted but ignored --
        # only the positional `delta` is applied. TODO confirm intended.
        return Cuboid(self._center + delta, self._half_size)
def bounding_box(geometry):
    """Return the smallest axis-aligned `Box` enclosing `geometry`."""
    half_extent = geometry.bounding_half_extent()
    mid = geometry.center
    return Box(lower=mid - half_extent, upper=mid + half_extent)
class GridCell(BaseBox):
    """
    An instance of GridCell represents all cells of a regular grid as a batch of boxes.
    """
    def __init__(self, resolution: math.Shape, bounds: BaseBox):
        """
        Args:
            resolution: purely spatial Shape giving the cell count per dimension.
            bounds: physical extent covered by the whole grid.
        """
        assert resolution.spatial_rank == resolution.rank, f"resolution must be purely spatial but got {resolution}"
        assert resolution.spatial_rank == bounds.spatial_rank, f"bounds must match dimensions of resolution but got {bounds} for resolution {resolution}"
        assert set(bounds.vector.item_names) == set(resolution.names)
        # Reorder resolution dims to match the bounds' vector item order.
        self._resolution = resolution.only(bounds.vector.item_names, reorder=True)
        self._bounds = bounds
        self._shape = self._resolution & bounds.shape.non_spatial
    @property
    def resolution(self):
        return self._resolution
    @property
    def bounds(self):
        return self._bounds
    @property
    def spatial_rank(self) -> int:
        return self._resolution.spatial_rank
    @property
    def center(self):
        """Cell centers: evenly spaced local coords mapped into the bounds."""
        local_coords = math.meshgrid(**{dim.name: math.linspace(0.5 / dim.size, 1 - 0.5 / dim.size, dim) for dim in self.resolution})
        points = self.bounds.local_to_global(local_coords)
        return points
    @property
    def grid_size(self):
        """Physical size of the whole grid (not of a single cell)."""
        return self._bounds.size
    @property
    def size(self):
        """Physical size of a single cell."""
        return self.bounds.size / math.wrap(self.resolution.sizes)
    @property
    def dx(self):
        """Cell spacing per dimension."""
        return self.bounds.size / self.resolution
    @property
    def lower(self):
        return self.center - self.half_size
    @property
    def upper(self):
        return self.center + self.half_size
    @property
    def half_size(self):
        return self.bounds.size / self.resolution.sizes / 2
    def __getitem__(self, item):
        # Slicing a spatial dim selects a sub-grid: shrink both the
        # resolution and the physical bounds accordingly.
        item = slicing_dict(self, item)
        bounds = self._bounds
        dx = self.size
        gather_dict = {}
        for dim, selection in item.items():
            if dim in self._resolution:
                if isinstance(selection, int):
                    start = selection
                    stop = selection + 1
                elif isinstance(selection, slice):
                    start = selection.start or 0
                    if start < 0:
                        start += self.resolution.get_size(dim)
                    stop = selection.stop or self.resolution.get_size(dim)
                    if stop < 0:
                        stop += self.resolution.get_size(dim)
                    assert selection.step is None or selection.step == 1
                else:
                    raise ValueError(f"Illegal selection: {item}")
                dim_mask = math.wrap(self.resolution.mask(dim))
                # Move the lower/upper bound by whole cells along `dim` only.
                lower = bounds.lower + start * dim_mask * dx
                upper = bounds.upper + (stop - self.resolution.get_size(dim)) * dim_mask * dx
                bounds = Box(lower, upper)
                gather_dict[dim] = slice(start, stop)
        resolution = self._resolution.after_gather(gather_dict)
        return GridCell(resolution, bounds[{d: s for d, s in item.items() if d != 'vector'}])
    def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Cuboid':
        # Packing loses the regular-grid structure; fall back to Cuboids.
        return math.pack_dims(self.center_representation(), dims, packed_dim, pos, **kwargs)
    @staticmethod
    def __stack__(values: tuple, dim: Shape, **kwargs) -> 'Geometry':
        from ._stack import GeometryStack
        return GeometryStack(math.layout(values, dim))
    def list_cells(self, dim_name):
        """Flatten all cells into a single dimension of Cuboids."""
        center = math.pack_dims(self.center, self._shape.spatial.names, dim_name)
        return Cuboid(center, self.half_size)
    def stagger(self, dim: str, lower: bool, upper: bool):
        """Shift cells by half a cell along `dim` to get face-centered cells."""
        dim_mask = np.array(self.resolution.mask(dim))
        unit = self.bounds.size / self.resolution * dim_mask
        bounds = Box(self.bounds.lower + unit * (-0.5 if lower else 0.5), self.bounds.upper + unit * (0.5 if upper else -0.5))
        ext_res = self.resolution.sizes + dim_mask * (int(lower) + int(upper) - 1)
        return GridCell(self.resolution.with_sizes(ext_res), bounds)
    def padded(self, widths: dict):
        """Grow the grid by `widths[dim] = (lower, upper)` cells per side."""
        resolution, bounds = self.resolution, self.bounds
        for dim, (lower, upper) in widths.items():
            masked_dx = self.dx * math.dim_mask(self.resolution, dim)
            resolution = resolution.with_dim_size(dim, self.resolution.get_size(dim) + lower + upper)
            bounds = Box(bounds.lower - masked_dx * lower, bounds.upper + masked_dx * upper)
        return GridCell(resolution, bounds)
    # def face_centers(self, staggered_name='staggered'):
    #     face_centers = [self.extend_symmetric(dim).center for dim in self.shape.spatial.names]
    #     return math.channel_stack(face_centers, staggered_name)
    @property
    def shape(self):
        return self._shape
    def shifted(self, delta: Tensor, **delta_by_dim) -> BaseBox:
        # delta += math.padded_stack()
        # A spatially-constant shift keeps the grid structure; a per-cell
        # shift degrades to a batch of Cuboids.
        if delta.shape.spatial_rank == 0:
            return GridCell(self.resolution, self.bounds.shifted(delta))
        else:
            center = self.center + delta
            return Cuboid(center, self.half_size)
    def rotated(self, angle) -> Geometry:
        raise NotImplementedError("Grids cannot be rotated. Use center_representation() to convert it to Cuboids first.")
    def __eq__(self, other):
        return isinstance(other, GridCell) and self._bounds == other._bounds and self._resolution == other._resolution
    def shallow_equals(self, other):
        return self == other
    def __hash__(self):
        return hash(self._resolution) + hash(self._bounds)
    def __repr__(self):
        return f"{self._resolution}, bounds={self._bounds}"
    def __variable_attrs__(self):
        # NOTE(review): '_center' and '_half_size' are computed properties on
        # GridCell (see below), not stored attributes -- this looks copied
        # from Cuboid; __with_attrs__ works on the Cuboid representation,
        # which is presumably why it still functions. TODO confirm.
        return '_center', '_half_size'
    def __with_attrs__(self, **attrs):
        return copy_with(self.center_representation(), **attrs)
    @property
    def _center(self):
        return self.center
    @property
    def _half_size(self):
        return self.half_size
|
import numpy as np
class Neural_Network(object):
    """Simple feed-forward network assembled from an architecture spec.

    `architecture` maps layer index -> [activation-class-name, node-count].
    Layer 0 is forced to an Identity input layer sized to the data, and an
    output layer is appended using the activation name from `error[1]`.
    """

    def __init__(self, architecture, error, indepdata, depdata):
        self.index = indepdata.index.values
        # NOTE(review): np.mat/np.matrix is discouraged by NumPy; kept here
        # because callers may rely on matrix semantics of these attributes.
        self.indepData = np.mat(np.atleast_2d(indepdata))
        self.depData = np.mat(np.atleast_2d(depdata))
        self.error = error[0]
        self.check = []
        self.architecture = architecture
        # Input layer width follows the independent data; the output layer
        # activation comes from the error spec, width from the targets.
        self.architecture[0] = ["Identity", int(self.indepData.shape[1])]
        self.architecture[len(self.architecture)] = [error[1], int(self.depData.shape[1])]
        # print() call syntax works identically on Python 2 and 3.
        print("\n")
        print("Setting ANN with the following structure: {Layer : [Activation, Nodes]}")
        print(self.architecture)
        print("\n")

    def initlayer(self):
        """Instantiate every layer from its activation-class name.

        Returns the dict {layer index: layer instance}.
        """
        self.layers = {}
        # Look the class up by name in module globals instead of exec()-ing
        # a formatted code string: same resolution, no arbitrary code
        # execution from data.
        # Hidden layers: indices 1 .. n-2, flagged as non-output (False).
        for x in range(1, len(self.architecture) - 1):
            layer_cls = globals()[self.architecture[x][0]]
            self.layers[x] = layer_cls(self.architecture[x][1], False)
        # Output layer: last index, flagged as output (True).
        x = len(self.architecture) - 1
        layer_cls = globals()[self.architecture[x][0]]
        self.layers[x] = layer_cls(self.architecture[x][1], True)
        return self.layers
|
def reverse_string(string):
    """Return the reversed string, "true" for palindromes, 'None' for empty.

    The original body began with ``string = str()``, which replaced the
    argument with the empty string, so every call returned "true".
    """
    # Check emptiness first: "" is trivially a palindrome and would
    # otherwise be reported as "true" instead of 'None'.
    if string == "":
        return 'None'
    if string == string[::-1]:
        return "true"
    return string[::-1]
|
from django.urls import path, include
from .views import (
ProductSearchListView,
ProductDetailView,
ProductListView,
product_by_timestamp,
)
app_name = 'products'
# Nested URL group for browsing by product class, optionally narrowed by
# sex and category; namespaced as 'p_class'.
prod_class_patterns = ([
    path('', ProductListView.as_view(), name='all'),
    path('<sex>/', ProductListView.as_view(), name='sex'),
    path('<sex>/<category>', ProductListView.as_view(), name='category'),
], 'p_class')
urlpatterns = [
    path('search/', ProductSearchListView.as_view(), name='search_list'),
    path('new/', product_by_timestamp, name='new'),
    # Catch-all: any other prefix is treated as a product class.
    path('<prod_class>/', include(prod_class_patterns)),
]
|
# use dictionary to define graph structure
# Adjacency-list representation: node -> list of neighbor nodes (directed).
graph = {
    'S' : ['A', 'D'],
    'A' : ['D', 'B'],
    'B' : ['C', 'E'],
    'C' : [],
    'D' : ['E'],
    'E': ['B', 'F'],
    'F': ['G'],
    'G': []
}
# Module-level traversal state used by main(); results accumulate here.
visited = []
queue = []
def bfs(visited, graph, node):
    """Breadth-first traversal of `graph` from `node`, printing visit order.

    Visited nodes are appended to the caller-supplied `visited` list.
    """
    from collections import deque  # local import keeps the function self-contained
    visited.append(node)
    # Use a local deque instead of the module-level `queue` list:
    # popleft() is O(1) where list.pop(0) is O(n), and repeated calls no
    # longer share leftover global state.
    pending = deque([node])
    while pending:
        current = pending.popleft()
        print(current)
        for neighbor in graph[current]:
            if neighbor not in visited:
                visited.append(neighbor)
                pending.append(neighbor)
def main():
    # Run the demo traversal from the start node 'S' using the module-level
    # graph and shared `visited`/`queue` state.
    bfs(visited, graph, 'S')
main() |
# Read the circle's diameter and print its area: pi * r^2 with r = d / 2.
d = float(input("enter Đường Kính "))
# The original computed d * 3.14, which is the circumference, not the area.
s = 3.14 * (d / 2) ** 2
print("Area of Circle is",s,"m2")
|
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db.models import Q, Avg
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader, RequestContext
import random, string, re
from web.forms.admin import *
from web.models import Category, User, VoteCategory, Submission
@login_required()
def batch_add(request):
    """Staff-only view: bulk-create accounts from a newline-separated list
    of umich.edu addresses, emailing each new user a random password.

    Non-staff users are redirected home. On GET, renders an empty form.
    """
    if not request.user.is_staff:
        return HttpResponseRedirect(reverse('home'))
    if request.method == 'POST':
        form = BatchAddUsersForm(request.POST, error_class=PlainErrorList)
        if form.is_valid():
            users_added = 0
            for u in form.cleaned_data['users'].split('\n'):
                u = u.strip()
                if u == '': continue
                user_search = User.objects.filter( Q(email=u) | Q(username=u) )
                # fullmatch with a raw pattern: re.match only anchored the
                # start, so addresses like 'abc@umich.edu.evil.com' passed.
                if not re.fullmatch(r'[a-z]*@umich\.edu', u) or len(user_search) > 0:
                    messages.warning(request, 'Could not add ' + u + '. Username or email are already in the database, or email is not uniqname@umich.edu.')
                    continue
                # 10-char random password from [A-Za-z0-9].
                # NOTE(review): `random` is not a CSPRNG; consider the
                # `secrets` module for password generation.
                password = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for x in range(10))
                user = User.objects.create_user(email=u, username=u)
                user.set_password(password)
                user.save()
                users_added += 1
                send_mail('KnoAtom New Account', 'You have been registered at knoatom.eecs.umich.edu. Your information is as follows:\n\nUsername: ' + u + '\nPassword: ' + password + '\n\nPlease login and change your password as soon as you can (click on your username at the bottom of the left sidebar).\n\nThank you\n\n-- The Management', 'knoatom-webmaster@umich.edu', [u, 'knoatom-webmaster@umich.edu'])
            messages.success(request, str(users_added) + ' users have been added.')
        else:
            messages.warning(request, 'Could not add users. Did you have the format correct?')
    else:
        form = BatchAddUsersForm(error_class=PlainErrorList)
    t = loader.get_template('admin/batch_add.html')
    c = RequestContext(request, {
        'breadcrumbs': [{'url': reverse('home'), 'title': 'Home'}, {'url':reverse('batch_add'), 'title': 'Batch Add'}],
        'form': form,
        'parent_categories': Category.objects.filter(parent=None),
    })
    return HttpResponse(t.render(c))
@login_required()
def list_videos(request):
    """Staff-only view listing all submissions per vote category, ranked by
    average rating (highest first)."""
    if not request.user.is_staff:
        return HttpResponseRedirect(reverse('home'))
    top_ranked_videos = []
    for category in VoteCategory.objects.all():
        # for now, calculate an average for each video
        top_ranked_videos.append({
            'vote_category': category,
            'submissions': Submission.objects.filter(votes__v_category=category).annotate(average_rating=Avg('votes__rating')).order_by('-average_rating'),
        })
    t = loader.get_template('admin/videos.html')
    c = RequestContext(request, {
        'breadcrumbs': [{'url': reverse('home'), 'title': 'Home'}, {'url':reverse('list_videos'), 'title': 'All Videos'}],
        'top_ranked_videos': top_ranked_videos,
        'parent_categories': Category.objects.filter(parent=None),
    })
    return HttpResponse(t.render(c))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from torch.autograd import Variable as Var
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from models.attention import RawEmbeddingLayer
from utils import *
import numpy as np
# Run on gpu is present
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Embedding layer that has a lookup table of symbols that is [full_dict_size x input_dim]. Includes dropout.
# Works for both non-batched and batched inputs
class EmbeddingLayer(nn.Module):
    # Parameters: word_vectors exposes .vectors (numpy [vocab, dim]) and .word_indexer;
    # embedding_dropout_rate is the dropout probability (0.2 is often reasonable).
    def __init__(self, word_vectors, embedding_dropout_rate):
        super(EmbeddingLayer, self).__init__()
        self.dropout = nn.Dropout(embedding_dropout_rate)
        # Second positional arg is `freeze`; False keeps the embeddings trainable.
        self.word_embedding = nn.Embedding.from_pretrained(torch.from_numpy(word_vectors.vectors).float(), False)
        self.word_vectors = word_vectors

    def forward(self, input):
        """Look up embeddings for `input` (LongTensor of word indices) and
        apply dropout. Returns a tensor of shape input.shape + (dim,)."""
        try:
            embedded_words = self.word_embedding(input)
        except Exception:
            # Diagnostics for out-of-range indices: vocabulary size, then every
            # offending index. Bug fix: the original bare `except:` swallowed
            # the error and fell through to use the (undefined) embedded_words,
            # turning the real failure into a confusing NameError. Re-raise.
            print(len(self.word_vectors.word_indexer))
            for i in input:
                for j in i:
                    print(j)
            raise
        final_embeddings = self.dropout(embedded_words)
        return final_embeddings
# One-layer RNN encoder for batched inputs -- handles multiple sentences at once. You're free to call it with a
# leading dimension of 1 (batch size 1) but it does expect this dimension.
class RNNEncoder(nn.Module):
    # Parameters: input size (should match embedding layer), hidden size for the LSTM, dropout rate for the RNN,
    # and a boolean flag for whether or not we're using a bidirectional encoder
    def __init__(self, input_size, hidden_size, output_size, dropout, bidirect=True):
        super(RNNEncoder, self).__init__()
        self.bidirect = bidirect
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Linear maps that squeeze the concatenated fwd/bwd final states back down to hidden_size
        self.reduce_h_W = nn.Linear(hidden_size * 2, hidden_size, bias=True)
        self.reduce_c_W = nn.Linear(hidden_size * 2, hidden_size, bias=True)
        # NOTE(review): nn.LSTM's dropout only applies BETWEEN stacked layers;
        # with num_layers=1 it has no effect (PyTorch emits a warning).
        self.rnn = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True,
                           dropout=dropout, bidirectional=self.bidirect)
        # Classification head: reduced final hidden state -> per-author label scores
        self.hiddenToLabel = nn.Linear(hidden_size, self.output_size)
        self.init_weight()
    # Initializes weight matrices using Xavier initialization
    def init_weight(self):
        nn.init.xavier_uniform_(self.rnn.weight_hh_l0, gain=1)
        nn.init.xavier_uniform_(self.rnn.weight_ih_l0, gain=1)
        if self.bidirect:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0_reverse, gain=1)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0_reverse, gain=1)
        nn.init.constant_(self.rnn.bias_hh_l0, 0)
        nn.init.constant_(self.rnn.bias_ih_l0, 0)
        if self.bidirect:
            nn.init.constant_(self.rnn.bias_hh_l0_reverse, 0)
            nn.init.constant_(self.rnn.bias_ih_l0_reverse, 0)
        nn.init.xavier_uniform_(self.hiddenToLabel.weight)
    def get_output_size(self):
        # Per-timestep output width doubles when the encoder is bidirectional.
        return self.hidden_size * 2 if self.bidirect else self.hidden_size
    def sent_lens_to_mask(self, lens, max_length):
        # Binary mask: 1 for real token positions, 0 for padding.
        return torch.from_numpy(np.asarray(
            [[1 if j < lens.data[i].item() else 0 for j in range(0, max_length)] for i in range(0, lens.shape[0])]))
    # embedded_words should be a [batch size x sent len x input dim] tensor
    # input_lens is a tensor containing the length of each input sentence
    # Returns output (each word's representation), context_mask (a mask of 0s and 1s
    # reflecting where the model's output should be considered), and h_t, a *tuple* containing
    # the final states h and c from the encoder for each sentence.
    # NOTE(review): the comment above describes an older contract; the code
    # below actually returns (probs, h_t): log-probabilities over the labels
    # for the FIRST sentence in the batch, plus the final (h, c) states.
    def forward(self, embedded_words, input_lens):
        # Takes the embedded sentences, "packs" them into an efficient Pytorch-internal representation
        packed_embedding = nn.utils.rnn.pack_padded_sequence(embedded_words, input_lens, batch_first=True)
        # Runs the RNN over each sequence. Returns output at each position as well as the last vectors of the RNN
        # state for each sentence (first/last vectors for bidirectional)
        output, hn = self.rnn(packed_embedding)
        # Unpacks the Pytorch representation into normal tensors
        # NOTE(review): pad_packed_sequence defaults to seq-len-first although the
        # LSTM is batch_first; harmless here because `output` is unused below.
        output, sent_lens = nn.utils.rnn.pad_packed_sequence(output)
        # Grabs the encoded representations out of hn, which is a weird tuple thing.
        # Note: if you want multiple LSTM layers, you'll need to change this to consult the penultimate layer
        # or gather representations from all layers.
        if self.bidirect:
            h, c = hn[0], hn[1]
            # Grab the representations from forward and backward LSTMs
            h_, c_ = torch.cat((h[0], h[1]), dim=1), torch.cat((c[0], c[1]), dim=1)
            # Reduce them by multiplying by a weight matrix so that the hidden size sent to the decoder is the same
            # as the hidden size in the encoder
            new_h = self.reduce_h_W(h_)
            new_c = self.reduce_c_W(c_)
            h_t = (new_h, new_c)
        else:
            h, c = hn[0][0], hn[1][0]
            h_t = (h, c)
        labels = self.hiddenToLabel(h_t[0])
        # NOTE(review): labels is [batch, output_size]; labels[0] keeps only the
        # first example, so this path effectively assumes batch size 1.
        probs = F.log_softmax(labels[0], dim=0)
        return (probs, h_t)
# American authors
# Average accuracy: .416 with 10 passages/book/author, 4 authors (old test)
# Average accuracy: .583 with 30 passages/book/author, 4 authors (old test)
# Average accuracy: .675 with 50 passages/book/author, 4 authors (old test)
# British authors
# Average accuracy: .333 with 30 passages/book/author, 5 authors (old test)
# Average accuracy: .425 with 30 passages/book/author, 5 authors (old test)
# Combined authors
# Average accuracy: .264 with 10 passages/book/author, 9 authors (old test)
# Average accuracy: .2745 with 30 passages/book/author, 9 authors (old test)
# ----------------------------------------------------------
# British Authors
# Average accuracy: .196 with 30 passages/book/author, 5 authors (new test)
# Average accuracy: 687/2400 = .286 with 200 sentences/book/author, 5 authors (new test)
# Average accuracy: 1432/4000 = .358 with 400 sentences/book/author, 5 authors (new test)
# Average accuracy: 642/2000 = .286 with 200 sentences/book/author, 5 authors (new test) WITH POS EMBEDDINGS (1 gram)
# Average accuracy: 1327/4000 = .332 with 400 sentences/book/author, 5 authors (new test) WITH POS EMBEDDINGS (1 gram)
#-------------------------------------------------------------
# SPOOKY Dataset (70-30 split)
# One run, Glove word embeddings. 4411 / 5827 = 0.75699 with 10 epochs
# REUTERS:
# Correctness: 140/150 -> 0.9333333333333333
class LSTMTrainedModel(AuthorshipModel):
    """Trained authorship classifier: an RNNEncoder plus its embedding layer.

    model: trained RNNEncoder; model_emb: embedding layer used at train time;
    indexer: word indexer matching the embeddings; authors: indexer of author
    labels; history: per-epoch training losses (optional).
    """
    def __init__(self, model, model_emb, indexer, authors, history=None):
        # Add any args you need here
        self.model = model
        self.model_emb = model_emb
        self.word_indexer = indexer
        self.authors = authors
        self.history = history
    def _predictions(self, test_data, args):
        """Return the predicted author object for each example in test_data.

        Sorts test_data in place by descending token length (needed by the
        encoder's pack_padded_sequence), so predictions follow the SORTED
        order, not the caller's original order.
        """
        predictions = []
        test_data.sort(key=lambda ex: len(word_tokenize(ex.passage)), reverse=True)
        with torch.no_grad():
            self.model.eval()
            self.model_emb.eval()
            input_lens = torch.LongTensor(np.asarray([len(word_tokenize(ex.passage)) for ex in test_data]))
            input_max_len = torch.max(input_lens, dim=0)[0].item()
            all_test_input_data = torch.LongTensor(make_padded_input_tensor(test_data, self.word_indexer, input_max_len))
            all_test_output_data = torch.LongTensor(np.asarray([self.authors.index_of(ex.author) for ex in test_data]))
            # Examples are fed one at a time (effective batch size 1).
            for idx, X_batch in enumerate(all_test_input_data):
                print(X_batch)  # NOTE(review): debug print left in
                y_batch = all_test_output_data[idx].unsqueeze(0)
                print(y_batch)  # NOTE(review): debug print left in
                input_lens_batch = input_lens[idx].unsqueeze(0).to(device)
                # Get word embeddings
                embedded_words = self.model_emb.forward(X_batch.unsqueeze(0).to(device)).to(device)
                # Get probability and hidden state
                probs, hidden = self.model.forward(embedded_words, input_lens_batch)
                predictions.append(self.authors.get_object(torch.argmax(probs).item()))
        return predictions
    def myevaluate(self, test_data, args):
        """Score the model on test_data; prints a summary line and returns
        (correct, total). Sorts test_data in place like _predictions."""
        test_data.sort(key=lambda ex: len(word_tokenize(ex.passage)), reverse=True)
        with torch.no_grad():
            self.model.eval()
            self.model_emb.eval()
            input_lens = torch.LongTensor(np.asarray([len(word_tokenize(ex.passage)) for ex in test_data]))
            input_max_len = torch.max(input_lens, dim=0)[0].item()
            all_test_input_data = torch.LongTensor(make_padded_input_tensor(test_data, self.word_indexer, input_max_len))
            all_test_output_data = torch.LongTensor(np.asarray([self.authors.index_of(ex.author) for ex in test_data]))
            correct = 0
            total = len(all_test_input_data)
            for idx, X_batch in enumerate(all_test_input_data):
                print(X_batch)  # NOTE(review): debug print left in
                y_batch = all_test_output_data[idx].unsqueeze(0)
                print(y_batch)  # NOTE(review): debug print left in
                input_lens_batch = input_lens[idx].unsqueeze(0).to(device)
                # Get word embeddings
                embedded_words = self.model_emb.forward(X_batch.unsqueeze(0).to(device)).to(device)
                # Get probability and hidden state
                probs, hidden = self.model.forward(embedded_words, input_lens_batch)
                print(probs, max(probs))  # NOTE(review): debug print left in
                if torch.argmax(probs).item() == y_batch[0].item():
                    correct += 1
        print("Correctness", str(correct) + "/" + str(total) + ": " + str(round(correct/total, 5)))
        return correct, total
def train_lstm_model(train_data, test_data, authors, word_vectors, args, pretrained=True):
    """Train the LSTM authorship classifier one example at a time.

    train_data: examples exposing .passage/.author; authors: label indexer;
    word_vectors: pretrained embeddings (used when `pretrained` is True,
    otherwise a trainable RawEmbeddingLayer is built); args supplies
    embedding_size, emb_dropout, hidden_size, rnn_dropout, lr and epochs.
    Returns an LSTMTrainedModel wrapping the trained encoder/embeddings.
    NOTE(review): test_data is accepted but never used in this function.
    """
    # Sort by descending length: required by pack_padded_sequence in the encoder.
    train_data.sort(key=lambda ex: len(word_tokenize(ex.passage)), reverse=True)
    word_indexer = word_vectors.word_indexer
    # Create indexed input
    print("creating indexed input")
    input_lens = torch.LongTensor(np.asarray([len(word_tokenize(ex.passage)) for ex in train_data]))
    input_max_len = torch.max(input_lens, dim=0)[0].item()
    # input_max_len = np.max(np.asarray([len(word_tokenize(ex.passage)) for ex in train_data]))
    print("train input")
    all_train_input_data = torch.LongTensor(make_padded_input_tensor(train_data, word_indexer, input_max_len))
    print("train output")
    all_train_output_data = torch.LongTensor(np.asarray([authors.index_of(ex.author) for ex in train_data]))
    input_size = args.embedding_size
    output_size = len(authors)
    # Pretrained (frozen-vocab) vs. randomly-initialised trainable embeddings.
    if pretrained:
        model_emb = EmbeddingLayer(word_vectors, args.emb_dropout).to(device)
    else:
        model_emb = RawEmbeddingLayer(args.embedding_size, len(word_indexer), args.emb_dropout).to(device)
    encoder = RNNEncoder(input_size, args.hidden_size, output_size, args.rnn_dropout).to(device)
    # Construct optimizer. Using Adam optimizer
    params = list(encoder.parameters()) + list(model_emb.parameters())
    lr = args.lr
    optimizer = Adam(params, lr=lr)
    # NLLLoss pairs with the log_softmax output of RNNEncoder.forward.
    loss_function = nn.NLLLoss()
    num_epochs = args.epochs
    encoder.train()
    model_emb.train()
    loss_history = []
    for epoch in range(num_epochs):
        epoch_loss = 0
        #for X_batch, y_batch, input_lens_batch in train_batch_loader:
        # One optimizer step per example (effective batch size 1).
        for idx, X_batch in enumerate(all_train_input_data):
            if idx % 100 == 0:
                print("Example", idx, "out of", len(all_train_input_data))
            y_batch = all_train_output_data[idx].unsqueeze(0).to(device)
            input_lens_batch = input_lens[idx].unsqueeze(0).to(device)
            # Initialize optimizer
            optimizer.zero_grad()
            # Get word embeddings
            embedded_words = model_emb.forward(X_batch.unsqueeze(0).to(device)).to(device)
            # Get probability and hidden state
            probs, hidden = encoder.forward(embedded_words, input_lens_batch)
            #print(probs)
            #print("Predicted", torch.argmax(probs,0), "|| Actual" ,y_batch)
            loss = loss_function(probs.unsqueeze(0).to(device), y_batch)
            epoch_loss += loss
            # Run backward
            loss.backward()
            optimizer.step()
        print("Epoch " + str(epoch) + " Loss:", epoch_loss)
        loss_history.append(epoch_loss.item())
    return LSTMTrainedModel(encoder, model_emb, word_indexer, authors, loss_history)
|
from selenium import webdriver #connect python with webbrowser-chrome
from selenium.webdriver.common.keys import Keys
import pyautogui as pag
def main():
    """Open LinkedIn in Chrome and define helpers for logging in, opening
    the network tab, and sending connection requests.

    NOTE(review): login/goto_network/send_requests are defined but never
    invoked here (original behavior preserved) -- callers must wire them up.
    """
    url = "http://linkedin.com/"  # url of LinkedIn
    network_url = "http://linkedin.com/mynetwork/"  # url of LinkedIn network page (currently unused)
    # Raw string so the backslashes in the Windows path are never treated as escapes.
    driver = webdriver.Chrome(r'F:\Argha\WebDriver\chromedriver.exe')  # path to browser web driver
    driver.get(url)

    def login():
        """Fill in the credential fields and submit the login form."""
        username = driver.find_element_by_id("login-email")  # Getting the login element
        username.send_keys("username")  # Sending the keys for username
        password = driver.find_element_by_id("login-password")  # Getting the password element
        password.send_keys("password")  # Sending the keys for password
        driver.find_element_by_id("login-submit").click()  # Getting the tag for submit button

    def goto_network():
        """Navigate to the 'My Network' tab."""
        driver.find_element_by_id("mynetwork-tab-icon").click()

    def send_requests():
        """Click the on-screen connect button a user-chosen number of times."""
        # Bug fix: input() returns a string and range() needs an int;
        # the original `range(0, n)` raised TypeError.
        n = int(input("Number of requsts: "))  # Number of requests you want to send
        for i in range(n):
            pag.click(880, 770)  # position (in px) of connection button
        print("Done!")
|
import os
from spotibot.core.objects import \
Music
from spotibot.mongo.utils.Handlers import \
is_jsonable
def test_instantiation_serialization(result: dict):
    """Instantiate each SpotiBot object from its raw API response and check
    both equality with the last stable build and JSON-serializability.

    Args:
        result: PyTest fixture mapping object names to tuples of
            (object Class,
             raw API representation pre-instantiation,
             representation post-instantiation as of last stable build)

    Objects currently covered are:
        : music.Track
        : music.Album
        : music.Artist
        : device.Device
        : context.Context
    """
    for obj_name, (spot_cls, raw_response, expected) in result.items():
        built = spot_cls(raw_response)
        assert built == expected
        assert is_jsonable(built.json)
# -----------------------------------------------------------------------------
# def test_album(result):
# to_instantiate = result.get('item').get('album')
# instantiated = Music.Album(to_instantiate)
# assert isinstance(instantiated, Music.Album)
#
#
# def test_album_serialization(result):
# to_instantiate = result.get('item').get('album')
# instantiated = Music.Album(to_instantiate)
# assert is_jsonable(instantiated.json)
|
import socket, time, struct, binascii, mutex
import threading
import zmq
import numpy, scipy
CHANNEL_DEPTH = 128  # samples per channel expected in each datagram
UDP_PAYLOAD_SIZE = 818 #Derived from wireshark.
UDP_IP=""  # empty host binds INADDR_ANY, i.e. all local interfaces
UDP_PORT=8899
#sock.setblocking(0)
class UDPThread(threading.Thread):
    """Background thread that keeps the most recent UDP datagram available.

    The newest payload and sender address are exposed via self.data and
    self.addr, guarded by self.lock; packet_counter counts datagrams seen.
    """

    def __init__(self):
        super(UDPThread, self).__init__()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((UDP_IP, UDP_PORT))
        self.lock = threading.Lock()
        self.data = ''
        self.addr = ''
        self.packet_counter = 0

    def run(self):
        """Receive datagrams forever, publishing each one under the lock."""
        while True:
            with self.lock:
                self.data, self.addr = self.sock.recvfrom(UDP_PAYLOAD_SIZE)
                self.packet_counter += 1
            # Tiny sleep so readers get a chance to grab the lock.
            time.sleep(.00001)
# Start the background UDP receiver before publishing.
udpthread = UDPThread()
udpthread.start()
# Re-publish each decoded frame as JSON over a ZeroMQ PUB socket.
context = zmq.Context()
# NOTE(review): this rebinds the name `socket`, shadowing the stdlib socket
# module imported above (harmless here because UDPThread is already created).
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:34675")
start_time = time.time()
while(1):
    # Snapshot the latest datagram under the receiver's lock.
    udpthread.lock.acquire()
    data = ''
    addr = ''
    try:
        data = udpthread.data
        addr = udpthread.addr
    finally:
        udpthread.lock.release()
    #print 'Connected by:', addr, 'bufsize', len(data), 'Recieved', udpthread.packet_counter, ' packets. ', float(udpthread.packet_counter)/(time.time() - start_time), ' per second'
    # 3 uint16 values per channel ("H"), the rest of the payload is padding ("x").
    decode_string = str(CHANNEL_DEPTH*3) + 'H' + str(UDP_PAYLOAD_SIZE - CHANNEL_DEPTH*2*3) + 'x'
    nd = numpy.asarray(struct.unpack(decode_string,data)) # 384 16-bit unsigneds followed by 50 pad bytes
    socket.send_json(nd.tolist())
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
* https://matplotlib.org/2.0.0/examples/pylab_examples/contour_demo.html
"""
import numpy as np
import matplotlib.pyplot as plt

# Interactive mode so show() does not block; close any leftover figure.
plt.ion()
plt.close()

# Sample z = 2 - r (a cone) on a [-4, 4] x [-4, 4] grid with 0.1 spacing.
delta = 0.1
x = np.arange(-4.0, 4.0, delta)
y = np.arange(-4.0, 4.0, delta)
X, Y = np.meshgrid(x, y)
Z = 2 - np.sqrt(X*X + Y*Y)

# Draw labelled contour lines of the cone.
plt.figure()
qcs = plt.contour(X, Y, Z)
plt.clabel(qcs, inline=1, fontsize=10)
plt.show()
|
class Config(object):
    """Typed view over a raw configuration dictionary.

    Every value is copied through verbatim except ``num_folds``, which is
    coerced to int. Missing keys raise KeyError, as before.
    """

    # Configuration keys copied through without conversion.
    _PASSTHROUGH = (
        "fnc_root", "fnc_out_csv", "fnc_sts_csv",
        "re17_root", "re17_out_csv", "re17_sts_csv",
        "fnn_root", "fnn_out_csv",
        "fnn_fnc_pred_root", "fnn_re17_pred_root",
        "csi_root", "re19_root",
        "tweet_paraphrase_root", "mrpc_root",
    )

    def __init__(self, config_dict):
        # num_folds is the only value that needs coercion.
        self.num_folds = int(config_dict["num_folds"])
        for key in self._PASSTHROUGH:
            setattr(self, key, config_dict[key])
|
# Counters for even, negative and positive values among the five inputs.
cpar = 0
cneg = 0
cpos = 0
# Read five numbers; classify each one by sign and by parity.
for x in range(0, 5):
    v = float(input())
    if v > 0:
        cpos += 1
    elif v < 0:
        cneg +=1
    # Parity is checked independently of sign; zero counts as even.
    if v % 2 == 0:
        cpar += 1
# The odd count is derived as 5 minus the even count.
print('{} valor(es) par(es)\n{} valor(es) impar(es)\n{} valor(es) positivo(s)\n{} valor(es) negativo(s)'.format(cpar, (5 - cpar), cpos, cneg))
from PIL import Image
import numpy as np
def save_image(X_final, name, nb_colors):
    """Save a float image array (values in [0, 1]) as a JPEG.

    The file is written as "<name without extension>_<nb_colors>.jpeg" and
    the resulting file name is returned.
    """
    from os.path import splitext  # local import keeps the block self-contained

    im = Image.fromarray((X_final * 255).astype(np.uint8))
    # Bug fix: the original name.split(".")[0] truncated at the FIRST dot,
    # mangling names such as "./img.png" (-> "") or "my.photo.png" (-> "my");
    # splitext strips only the trailing extension.
    base = splitext(name)[0]
    file_name = base + "_" + str(nb_colors) + ".jpeg"
    im.save(file_name)
    return file_name
def usage():
    """Print the command-line usage string for main.py."""
    msg = (
        "Usage :\n"
        "python main.py [image_file] [number_of_colors](optional, 16 by default)"
    )
    print(msg)
import pytest
from django.contrib.auth.models import User
from django.urls import *
from Firma import settings
@pytest.mark.django_db
def test_user_create():
    """A freshly created user is persisted and countable."""
    User.objects.create_user(
        username='kulpinskid',
        email='kulpinskid@gmail.com',
        password='dawid',
    )
    assert User.objects.count() == 1
@pytest.mark.django_db
def test_view(client):
    """The homepage URL resolves and responds with HTTP 200."""
    homepage = reverse('homepage-url')
    assert client.get(homepage).status_code == 200
# Read the two players' guesses for a six-sided die.
a, b = map(int, input().split())
# result[0]: faces closer to a, result[1]: ties, result[2]: faces closer to b.
result = [0, 0, 0]
for i in range(1, 7):
    if abs(i - a) < abs(i - b):
        result[0] += 1
    elif abs(i - a) > abs(i - b):
        result[-1] += 1
    else:
        result[1] += 1
# Print the three counts space-separated.
print(*result)
|
# set provides: difference, intersection, union
print("SET EXAMPLES DIFFERENCE")
# set() on a string yields the set of its unique characters.
setExample = set("some set values")
print(setExample)
A = {10, 20, 30, 40, 80}
B = {100, 30, 80, 40, 60}
print("Set difference method")
# A.difference(B): elements of A that are not in B (and vice versa below).
print(A.difference(B))
print(B.difference(A))
print()
print("Minus operator")
# The - operator is shorthand for difference().
print(A - B)
print(B - A)
print()
A2 = {10, 20, 30, 40, 80}
B2 = {10, 20, 30, 40, 80}
B2.add(100)
A2.add(120)
print(A2 - B2)
print(B2 - A2)
print()
print("Symmetric Difference")
# Elements that appear in exactly one of the two sets.
print(A2.symmetric_difference(B2))
print(B2.symmetric_difference(A2))
print()
print("SET EXAMPLES INTERSECTION")
A2 = {10, 20, 30, 40, 80}
B2 = {10, 20, 30, 40, 80, 100}
print()
# INTERSECTION: compares and returns whatever matches in both sets.
intersection = B2.intersection(A2)
print(intersection)
print()
print("UNION EXAMPLES")
# UNION: every element appearing in either set.
print(A2.union(B2))
|
from django.shortcuts import render
from .models import Game
# Create your views here.
def main_page(request):
    """Render the landing page with only the games flagged as popular."""
    popular = Game.objects.filter(Popular=True)
    return render(request, 'main.html', {'Games': popular})
def games(request):
    """Render the full games catalogue."""
    all_games = Game.objects.all()
    return render(request, 'games.html', {'Games': all_games})
def test(request):
    """Render the test page with every game (mirrors games())."""
    all_games = Game.objects.all()
    return render(request, 'test.html', {'Games': all_games})
def contact(request):
    """Render the static contact page."""
    template = "contact.html"
    return render(request, template)
def error(request):
    """Render the static error page."""
    template = "Error.html"
    return render(request, template)
def about(request):
    """Render the static about page."""
    template = 'about.html'
    return render(request, template)
def nobuy(request):
    """Render the static 'no purchase' page."""
    template = 'nobuy.html'
    return render(request, template)
def search(request):
    """Render search results for a case-insensitive game-name match.

    Bug fix: request.GET['search'] raised an exception whenever the query
    parameter was absent (e.g. a bare /search request); .get() with an
    empty-string default degrades gracefully — an empty query simply
    matches all games via icontains.
    """
    query = request.GET.get('search', '')
    matches = Game.objects.filter(Name__icontains=query)
    parameter = {'game': matches}
    return render(request, 'search.html', parameter)
|
from copy import deepcopy
class ConfigBuilder:
    """Fluent builder for ICP configuration dictionaries.

    Every ``with_*``/``add_*`` method returns self so calls can be chained;
    build() assembles the final configuration dict. Defaults: KD-tree
    matcher (knn=1), null inspector, point-to-point minimizer, standard
    transformation checkers.
    """

    def __init__(self):
        self._base_config = {}
        self._matcher = {'KDTreeMatcher': {'knn': 1}}
        self._inspector = 'NullInspector'
        self._reading_dp_filter = []
        self._reference_dp_filter = []
        self._outlier_filters = []
        # Install the default minimizer and transformation checkers.
        self.with_point_to_point().with_tf_checker()

    def copy(self):
        """Return an independent deep copy of this builder."""
        return deepcopy(self)

    def with_tf_checker(self, knn=40):
        """Set the iteration-count and differential transformation checkers."""
        self._tf_checker = [
            {'CounterTransformationChecker': {'maxIterationCount': knn}},
            'DifferentialTransformationChecker'
        ]
        return self

    # --- Inspector ---------------------------------------------------------
    def with_vtk_inspector(self):
        """Dump intermediate steps to VTK files for visualisation."""
        self._inspector = {
            "VTKFileInspector": {
                "baseFileName" : "vissteps",
                "dumpDataLinks" : 1,
                "dumpReading" : 1,
                "dumpReference" : 1
            }
        }
        return self

    # --- Error minimizer ---------------------------------------------------
    def with_point_to_point(self, confidence_in_penalties=0.0):
        """Use the point-to-point (with penalties) error minimizer."""
        minimizer_cfg = {"confidenceInPenalties": confidence_in_penalties}
        self._minimizer = {"PointToPointWithPenaltiesErrorMinimizer": minimizer_cfg}
        return self

    def with_point_to_plane(self):
        """Use the point-to-plane (with penalties) error minimizer."""
        self._minimizer = {"PointToPlaneWithPenaltiesErrorMinimizer": {}}
        return self

    def with_point_to_gaussian(self):
        """Use the point-to-Gaussian error minimizer."""
        self._minimizer = {"PointToGaussianErrorMinimizer": {}}
        return self

    # --- Data point filters ------------------------------------------------
    def add_normal_to_read(self, knn=5):
        """Append a surface-normal filter to the reading cloud pipeline."""
        self._reading_dp_filter.append(
            {"SurfaceNormalDataPointsFilter": {"knn": knn}})
        return self

    def add_normal_to_ref(self, knn=5):
        """Append a surface-normal filter to the reference cloud pipeline."""
        self._reference_dp_filter.append(
            {"SurfaceNormalDataPointsFilter": {"knn": knn}})
        return self

    def add_cov_to_ref(self, knn=5):
        """Append a surface-covariance filter to the reference pipeline."""
        self._reference_dp_filter.append(
            {"SurfaceCovarianceDataPointsFilter": {"knn": knn}})
        return self

    def add_decompose_cov_to_ref(self, keep_normals=False):
        """Append a covariance-decomposition filter to the reference pipeline."""
        keep_flag = 1 if keep_normals else 0
        self._reference_dp_filter.append(
            {"DecomposeCovarianceDataPointsFilter": {"keepNormals": keep_flag}})
        return self

    def add_sensor_noise_to_read(self, sensor_type=0, generate_cov=False):
        """Append a sensor-noise model filter to the reading pipeline."""
        cov_flag = 1 if generate_cov else 0
        self._reading_dp_filter.append(
            {"SimpleSensorNoiseDataPointsFilter": {"sensorType": sensor_type,
                                                   "covariance": cov_flag}})
        return self

    def add_sensor_noise_to_ref(self, sensor_type=0, generate_cov=False):
        """Append a sensor-noise model filter to the reference pipeline."""
        cov_flag = 1 if generate_cov else 0
        self._reference_dp_filter.append(
            {"SimpleSensorNoiseDataPointsFilter": {"sensorType": sensor_type,
                                                   "covariance": cov_flag}})
        return self

    # --- Outlier filters ---------------------------------------------------
    def add_outlier_filter_trim(self, overlap=0.75):
        """Append a trimmed-distance outlier filter with the given ratio."""
        self._outlier_filters.append(
            {"TrimmedDistOutlierFilter": {"ratio": overlap}})
        return self

    def add_outlier_filter_sensor_noise(self):
        """Append a sensor-noise outlier filter."""
        self._outlier_filters.append({"SensorNoiseOutlierFilter": {}})
        return self

    def build(self):
        """Assemble and return the configuration dictionary."""
        return {
            'outlierFilters': self._outlier_filters,
            'errorMinimizer': self._minimizer,
            'matcher': self._matcher,
            'transformationCheckers': self._tf_checker,
            'readingDataPointsFilters': self._reading_dp_filter,
            'referenceDataPointsFilters': self._reference_dp_filter,
            'inspector': self._inspector
        }
# Read the three candidate side lengths.
a = float(input('Please type the first line'))
b = float(input('Please type the second line'))
c = float(input('Please type the third line'))
# Triangle inequality: every side must be strictly shorter than the sum of
# the other two (degenerate, "flat" triangles are rejected).
if a < b + c and b < a + c and c < b + a:
    print('\033[1;31mWith these lines we have a triangle')  # red ANSI text
else:
    print('\033[1;35m not a triangle')  # magenta ANSI text
|
import cgi
import socket
import subprocess
import smtplib
import time
import urllib
import logging
import entity
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class UpdateTest(webapp.RequestHandler):
    """Walks a hard-coded device inventory and records a status row for each,
    with helpers to POST a "Down" status and to email failure alerts.

    NOTE(review): this handler has several latent defects, flagged inline;
    it cannot run as written.
    """
    def post(self):
        end = 0
        url = "localhost:8081/updatestore"
        #def testconnect(address, port, x):
        def update(x):
            # Store an "Up" status row for device x in the datastore.
            # NOTE(review): Update, updatedb_key and updatedb_name are not
            # defined anywhere in this module.
            #updatedb_name = self.request.get('updatedb_name')
            update = Update(parent=updatedb_key(updatedb_name))
            print('Success')
            # NOTE(review): the next line is a SyntaxError (a string literal
            # immediately followed by an identifier); presumably it was meant
            # to be Location[x]['Device'].
            update.DeviceName = 'Location[x]['Device']'
            update.Status = "Up"
            #update.Note = db.StringProperty(multiline=True)
            update.put()
            """rssStore = entity.Rss(key_name='almightyolive')
            # Elements of our RSS
            rssStore.feed = "almightyolive"
            rssStore.content = content
            # Stores our RSS Feed into the datastore
            rssStore.put()"""
        def updatetest(x):
            # POST a "Down" status for device x to the local update endpoint.
            # NOTE(review): Location entries use the key 'Device', not
            # 'DeviceName' -- this lookup would raise KeyError.
            form_fields = {
                "DeviceName": Location[x]['DeviceName'],
                "Status": "Down",
            }
            form_data = urllib.urlencode(form_fields)
            result = urlfetch.fetch(url=url,
                payload=form_data,
                method=urlfetch.POST,
                headers={'Content-Type': 'application/x-www-form-urlencoded'})
        def sendmessage(e, x):
            # Email an alert that the check of device x failed, appending the
            # triggering exception text to the message body.
            fromaddr = ("THQWANAdmin@usc.org")
            toaddrs = ('Robert_Augenstein@usc.salvationarmy.org, robaugie@gmail.com')
            msg = "The test of " + Location[x]['Device'] + " has failed " + str(time.ctime()) + " \n "
            msg = msg + str(e)
            server = smtplib.SMTP('10.231.1.10')
            server.set_debuglevel(1)
            server.sendmail(fromaddr, toaddrs, msg)
            server.quit()
        # Static inventory of firewalls / Watchguard servers to check.
        Location= [{"Command":"THQ","Device":"THQ Firewall","IPAddress":"thqwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"THQ","Device":"THQ Watchguard Server","IPAddress":"thqwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"CFO","Device":"CFO Firewall","IPAddress":"67.91.168.98","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"CFO","Device":"CFO Watchguard Server","IPAddress":"67.91.168.98","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"MET","Device":"MET Firewall","IPAddress":"metwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"MET","Device":"MET Watchguard Server","IPAddress":"metwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"HRT","Device":"HRT Firewall","IPAddress":"hrtwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"HRT","Device":"HRT Watchguard Server","IPAddress":"hrtwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"WUM","Device":"WUM Firewall","IPAddress":"wumwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"WUM","Device":"WUM Watchguard Server","IPAddress":"wumwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"NOR","Device":"NOR Firewall","IPAddress":"norwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"NOR","Device":"NOR Watchguard Server","IPAddress":"norwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"WST","Device":"WST Firewall","IPAddress":"wstwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"WST","Device":"WST Watchguard Server","IPAddress":"wstwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"KAN","Device":"KAN Firewall","IPAddress":"kanwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"KAN","Device":"KAN Watchguard Server","IPAddress":"kanwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"MID","Device":"MID Firewall","IPAddress":"midwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"MID","Device":"MID Watchguard Server","IPAddress":"midwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"EMI","Device":"EMI Firewall","IPAddress":"emiwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"EMI","Device":"EMI Watchguard Server","IPAddress":"emiwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"IND","Device":"IND Firewall","IPAddress":"indwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"IND","Device":"IND Watchguard Server","IPAddress":"indwgs","Port":4110,"FailCount":0,"SuccessCount":0},
                   {"Command":"WMI","Device":"WMI Firewall","IPAddress":"wmiwgd","Port":8080,"FailCount":0,"SuccessCount":0},
                   {"Command":"WMI","Device":"WMI Watchguard Server","IPAddress":"wmiwgs","Port":4110,"FailCount":0,"SuccessCount":0}]
        #while end == 0:
        for x, y in enumerate(Location):
            # NOTE(review): 'DeviceName' does not exist in Location entries
            # (key is 'Device'), so this logging call would raise KeyError.
            logging.info("The record to be checked is %s ", Location[x]['DeviceName'])
            update( x )
            #testconnect( x )
            time.sleep( 1 )
            #time.sleep( 60 )
# WSGI application mapping /scripts to the monitoring handler.
application = webapp.WSGIApplication([('/scripts', UpdateTest)],
                                     debug=True)


def main():
    """CGI entry point for the application.

    Bug fix: `wsgiref` was referenced without ever being imported, so
    calling main() raised NameError; import it locally before use.
    """
    import wsgiref.handlers
    wsgiref.handlers.CGIHandler().run(application)


if __name__ == '__main__':
    #run_wsgi_app(application)
    main()
|
import os
import time
from SaveLoad import Patient,Doctor,Booking
def child(t,d_id):
    # Child process: sleep t seconds, purge the doctor's bookings, then exit
    # immediately (os._exit skips Python cleanup such as atexit/flush).
    time.sleep(t)
    Booking.del_from_db_by_userid(d_id)
    print(d_id+" doctor appointment info deleted from Booking table")
    os._exit(0)
def parent(d_id):#Parameter Time Period and Discount Price
    # Repeatedly fork a child that deletes the doctor's bookings after a delay.
    # NOTE(review): the loop forks on EVERY iteration without waiting on the
    # children (no os.wait), and only the child path ever breaks -- the parent
    # loops forever, spawning processes.
    while True:
        print(d_id)
        newpid = os.fork()
        # Compute how long until the 20:00 purge time from the current clock.
        last_time='20:00'
        start_time_hour=time.localtime().tm_hour
        start_time_min=time.localtime().tm_min
        last_time=last_time.split(':')
        last_time_hour=int(last_time[0])
        last_time_min=int(last_time[1])
        if((last_time_hour-start_time_hour)>0):
            wait_hour=last_time_hour-start_time_hour
        else:
            # Past 20:00 today: wrap around to tomorrow's 20:00.
            wait_hour=24-abs(last_time_hour-start_time_hour)
        if((last_time_min-start_time_min)>=0):
            wait_min=last_time_min-start_time_min
        else:
            # Borrow an hour when the minute difference is negative.
            wait_min=(last_time_min-start_time_min)+60
            wait_hour=wait_hour-1
        print(wait_hour)
        print(wait_min)
        #t=wait_min*60+wait_hour*60*60
        # NOTE(review): the computed wait is discarded; delay is fixed at 30s.
        t=30;
        if(newpid==0):
            child(t,d_id)
            break
parent('3')  # start the purge loop for doctor id '3' (never returns)
|
#!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# # Authors informations
#
# @author: HUC Stéphane
# @email: <devs@stephane-huc.net>
# @url: http://stephane-huc.net
#
# @license : BSD "Simplified" 2 clauses
#
''' Listener '''
import gobject
class Listener(gobject.GObject):
    # (Python 2 / PyGTK) GObject that relays tuples read from a queue as
    # 'updated'/'finished' GObject signals.
    __gsignals__ = {
        # 'updated' carries (progress: float, message: string).
        'updated' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
            (gobject.TYPE_FLOAT, gobject.TYPE_STRING)),
        # 'finished' carries no arguments.
        'finished': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
            ())
    }
    def __init__(self, queue):
        # queue: source of (value, tag) tuples produced elsewhere.
        print "Listener start!"
        gobject.GObject.__init__(self)
        self.queue = queue
    def go(self):
        '''Launch Listener: block on the queue, re-emitting each item as a
        signal until a 'finished' tag arrives.'''
        print "Listener launch!"
        while True:
            # Listen for results on the queue and process them accordingly
            # (queue.get() blocks until an item is available).
            data = self.queue.get()
            print 'Listener data: %s' % str(data)
            # Check if finished
            if data[1] == 'finished':
                print 'Listener is finishing.'
                self.emit('finished')
                return
            else:
                self.emit('updated', data[0], data[1])
|
"""add table articles tags
Revision ID: c551860ba533
Revises: 05e65bf85f23
Create Date: 2019-12-13 15:00:48.298143
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c551860ba533'
down_revision = '05e65bf85f23'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # 'tags' must be created before 'articles' because of the tag_id FK.
    op.create_table('tags',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('deleted_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Articles carry soft-delete timestamps and a single-tag FK to 'tags'.
    op.create_table('articles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('deleted_at', sa.DateTime(), nullable=True),
    sa.Column('title', sa.String(length=255), nullable=True),
    sa.Column('views', sa.Integer(), nullable=True),
    sa.Column('content', sa.String(length=255), nullable=True),
    sa.Column('tag_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in reverse dependency order: 'articles' references 'tags'.
    op.drop_table('articles')
    op.drop_table('tags')
    # ### end Alembic commands ###
|
def planet_mass(gravity, radius):
    """Return a planet's mass (kg) from its surface gravity (m/s^2) and
    radius (m), using g = G*M / r**2  =>  M = g*r**2 / G."""
    G = 6.67 * 10 ** -11  # gravitational constant, m^3 kg^-1 s^-2
    return (gravity * radius ** 2) / G
def planet_vol(radius):
    """Return the volume of a sphere of the given radius.

    Bug fix: the original computed (4*3.142*radius**2)/3 -- radius SQUARED
    instead of cubed -- which is neither the sphere volume nor its surface
    area. The correct formula is (4/3)*pi*r**3; math.pi replaces the 3.142
    approximation.
    """
    from math import pi  # local import keeps the block self-contained

    return (4 * pi * radius ** 3) / 3
import re
import os
from os import popen, path
from sys import stderr
import psutil
from sv2.helpers import run_checkers
summary = "Check ssh configuration"
report = None
algorithm_blacklist = """
ecdh-sha2-nistp256 weak eliptic curves
ecdh-sha2-nistp384 weak eliptic curves
ecdh-sha2-nistp521 weak eliptic curves
diffie-hellman-group14-sha1 weak hash algorithm
ecdsa-sha2-nistp256 weak eliptic curves
hmac-sha1 weak hash algorithm
hmac-sha1-etm@openssh.com weak hash algorithm
"""
class _SSHConf(object):
def __init__(self, conf):
self.conf = [x.split(' ', 1) for x in conf.rsplit('\n')]
self.confdict = dict()
for x in self.conf:
if len(x) > 1:
if x[0] in self.confdict.keys():
self.confdict[x[0]] = (self.confdict[x[0]], x[1])
else:
self.confdict.update({x[0]: x[1]})
def __getitem__(self, x):
return self.confdict[x]
def getOptions(self):
return self.confdict.keys()
class SSHCheck(object):
    """Individual sshd-hardening checks; each method files issues on the
    module-level `report` object bound by run()."""
    def __init__(self):
        # `sshd -T` dumps the effective configuration (after Match-block
        # processing) for a hypothetical root@localhost connection.
        with popen("/usr/sbin/sshd -T -C \"user=root,addr=127.0.0.1,host=localhost,laddr=127.0.0.1,lport=22,rdomain=localhost\" 2>/dev/null") as p:
            sshd_config = p.read()
        self._sshd = _SSHConf(sshd_config)
    def root(self):
        # Direct root logins should be disabled outright.
        if self._sshd["permitrootlogin"] != "no":
            report.new_issue("Disable root login.")
    def port(self):
        # Moving off port 22 reduces automated scanning noise.
        if self._sshd['port'] == '22':
            report.new_issue("Port number should not be the default (22).")
    def logingracetime(self):
        # A long grace period keeps unauthenticated connections open longer.
        if int(self._sshd["logingracetime"]) > 25:
            report.new_issue(
                "LoginGraceTime is very high.")
    def passauthentication(self):
        # Password logins should be replaced (or combined) with key-based auth.
        if self._sshd["passwordauthentication"] == 'yes':
            report.new_issue("Disable keyboard-interactive and use ssh keys instead (or combine it, for example ssh keys + OTP codes). Make sure than PasswordAuthentication.")
    def TFA(self):
        # Look for the Google Authenticator PAM module in sshd's PAM stack.
        # NOTE(review): re.match only matches at the START of the file's
        # contents, so a matching line later in the file is missed --
        # re.search (or a MULTILINE anchor) was probably intended.
        with open("/etc/pam.d/sshd", 'r') as f:
            sshd_pam = f.read()
        if not re.match("\s*auth\s*required\s*pam_google_authenticator.so*", sshd_pam):
            report.new_issue("It is recommended use 2FA.")
            return 0
        # 'optional'/'sufficient' weaken the 2FA requirement; flag them.
        opt_or_suff = re.match(
            "\s*auth\s*(optional|sufficient)\s*pam_google_authenticator.so*", sshd_pam)
        if opt_or_suff is not None:
            report.new_issue(
                "Not use {} option in /etc/pam.d/sshd.".format(opt_or_suff.group()))
    def login_filter(self):
        # Flag unless BOTH AllowUsers and AllowGroups are configured.
        if not ("allowusers" in self._sshd.getOptions()) or not ("allowgroups" in self._sshd.getOptions()):
            report.new_issue(
                "Filter users/groups with AllowUSers, and/or, AllowGroups.")
    def subsystem(self):
        # Any configured subsystem (e.g. sftp) widens the attack surface.
        if "subsystem" in self._sshd.getOptions():
            report.new_issue("If you do not really need {} disable it.".format(
                self._sshd["subsystem"]))
    def algorithm(self):
        # Report each blacklisted algorithm that appears in any config value.
        for item in algorithm_blacklist.strip().splitlines():
            item_cleared = item.split(' ', 1)
            for x in self._sshd.conf:
                if len(x) > 1:
                    if item_cleared[0] in x[1]:
                        report.new_issue(
                            "{} - {}".format(item_cleared[0], item_cleared[1][:-1]))
                        break
    def fail2ban(self):
        # Fail2ban throttles brute-force attempts; recommend installing it.
        if not path.exists("/usr/bin/fail2ban-server"):
            report.new_issue("Fail2ban not installed.")
def run(r, opts):
    """Entry point: bind the shared report object, then run all checker methods via run_checkers."""
    global report
    report = r
    c = SSHCheck()  # parses the live sshd config on construction
    run_checkers(c, opts)
def makes_sense(r) -> bool:
    """Return True when this check can run: we are root and an sshd process exists."""
    if os.geteuid() != 0:
        r.wont_run("Needs root")
        return False
    if any(proc.name() == "sshd" for proc in psutil.process_iter()):
        return True
    r.wont_run("SSH daemon is not running")
    return False
|
# MAIN GOAL
#
# Create a program that allows the user to input the sides of any triangle, and then return whether the triangle is a Pythagorean Triple or not.
#
# SUBGOALS
#
# If your program requires users to input the sides in a specific order, change the coding so the user can type in the sides in any order. Remember, the hypotenuse (c) is always the longest side.
#
# Loop the program so the user can use it more than once without having to restart the program.
# Repeatedly read three sides and report whether they form a Pythagorean triple.
while True:
    # sorted() consumes the generator in order, so the prompts appear in sequence;
    # the largest side is treated as the hypotenuse.
    leg_a, leg_b, hypotenuse = sorted(
        int(input(prompt))
        for prompt in ("side one: ", "side two: ", "side three: ")
    )
    if leg_a ** 2 + leg_b ** 2 == hypotenuse ** 2:
        print("This triangle is a Pythagorean Triple")
    else:
        print("This triangle is not a Pythagorean Triple")
    print('')
    quit = input("Quit? (y/n) ")
    if quit.lower() == 'y':
        break
    print("")
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 11:34:36 2019
@author: mit
"""
# 학습
from keras.models import Sequential
from keras.layers import MaxPooling2D
from keras.layers import Conv2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
import numpy as np
import os
from PIL import Image
import os, glob
import matplotlib.pyplot as plt
import cv2 as cv
# 카테고리 지정하기
"""
#학습시
categories = ["ori", "sigma_0.5", "sigma_1.0", "sigma_1.5", "sigma_2.0"]
nb_classes = len(categories)
# 이미지 크기 지정하기
image_w = 100
image_h = 100
# 데이터 열기
X_train, X_test, y_train, y_test = np.load("D:\\H&E_dataset\\dataset\\defocusing_6sigma_classification_200205.npy")
"""
#분석시
# -- inference configuration --
# Class labels: the original image plus four Gaussian-blur levels (sigma).
categories = ["ori", "sigma_0.5", "sigma_1.0", "sigma_1.5", "sigma_2.0"]
nb_classes = len(categories)
# Image size expected by the network.
image_w = 100
image_h = 100
# Load the pre-split dataset (train/test images with one-hot labels).
X_train, X_test, y_train, y_test = np.load("D:\\H&E_dataset\\dataset\\defocusing_classification_200115.npy")
imgdir = "C:\\Users\\MG\\Desktop\\DGMIF\\BTM\\test_sample\\test_img"
# if you want file of a specific extension (.png):
filelist = [f for f in glob.glob(imgdir + "**/*.png", recursive=True)]
test = []
for file in filelist:
    img = Image.open(file)
    img = np.array(img)
    test.append(img)
# NOTE(review): this overwrites the X_train loaded above with the ad-hoc test
# images; below only its shape feeds the model input — confirm intentional.
X_train = np.array(test)
#%%
# Model architecture: two Conv-Conv-Pool-Dropout blocks + dense classifier.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=X_train.shape[1:], padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Fully connected head
model.add(Flatten())  # reshape feature maps into a vector
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Compile the model (categorical cross-entropy, Adam optimizer)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Inspect the architecture
print(model.summary())
#%%
# 모델 훈련하기
#model.fit(X_train, y_train, batch_size=32, nb_epoch=5)
# 학습 완료된 모델 저장
hdf5_file = "C:\\Users\\MG\\Desktop\\H&E New dataset\\CancerClassificationData\\20200206_classification_Adam\\weight_defocusing_adam100_Classification_200206.hdf5"
if os.path.exists(hdf5_file):
# 기존에 학습된 모델 불러들이기
model.load_weights(hdf5_file)
else:
# 학습한 모델이 없으면 파일로 저장
#early_stopping = EarlyStopping(monitor = 'val_loss', patience = 10)
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=32, callbacks=[
ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=10, verbose=1, mode='auto', min_lr=1e-04)])
#history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=500, batch_size=32, callbacks=[early_stopping])
model.save_weights(hdf5_file)
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax[0, 0].set_title('loss')
ax[0, 0].plot(history.history['loss'], 'r')
ax[0, 1].set_title('acc')
ax[0, 1].plot(history.history['acc'], 'b')
ax[1, 0].set_title('val_loss')
ax[1, 0].plot(history.history['val_loss'], 'r--')
ax[1, 1].set_title('val_acc')
ax[1, 1].plot(history.history['val_acc'], 'b--')
#%%
# 모델 평가하기
score = model.evaluate(X_test, y_test)
print('loss=', score[0]) # loss
print('accuracy=', score[1]) # acc
############################
#single image 적용
############################
# 적용해볼 이미지
#C:\Users\MDDC\Desktop\2019-02-12 Histopathology dataset\testdata
imgdir = "C:\\Users\\MG\\Desktop\\DGMIF\\BTM\\test_sample\\sigma_0.5_sigma_1.9365"
# if you want file of a specific extension (.png):
filelist = [f for f in glob.glob(imgdir + "**/*.png", recursive=True)]
test = []
for file in filelist:
img = Image.open(file)
img = np.array(img)
test.append(img)
X_train = np.array(test)
# 예측
#X = X_train.astype("float") / 256
norm_x = cv.normalize(X_train.astype(np.float64), None, 0, 1, cv.NORM_MINMAX)
pred = model.predict(norm_x)
result = [np.argmax(value) for value in pred] # 예측 값중 가장 높은 클래스 반환
print('Prediction of Sigma Value in Gaussian Blur : ', categories[result[0]])
#########################################################
# Final prediction over the labelled sample folders
#########################################################
from PIL import Image
import os, glob
import numpy as np
from sklearn.model_selection import train_test_split
import cv2 as cv
# Folders (one per category) to evaluate.
base_dir = "C:\\Users\\MG\\Desktop\\DGMIF\\BTM\\test_sample"
categories = ["ori","sigma_0.5","sigma_1.0", "sigma_1.5", "sigma_0.5_sigma_1.9365"]
nb_classes = len(categories)
# Image size
image_w = 100
image_h = 100
pixels = image_w * image_h * 3
# Read image data and build one-hot labels.
X = []
Y = []
for idx, cat in enumerate(categories):
    # one-hot label for this category
    label = [0 for i in range(nb_classes)]
    label[idx] = 1
    # images
    image_dir = base_dir + "\\" + cat
    files = glob.glob(image_dir + "\\*.png")
    for i, f in enumerate(files):
        img = Image.open(f)
        img = img.convert("RGB")
        img = img.resize((image_w, image_h))
        data = np.asarray(img)  # convert to numpy array
        data = cv.normalize(data.astype(np.float64), None, 0, 1, cv.NORM_MINMAX)
        X.append(data)
        Y.append(label)
        if i % 10 == 0:
            print(i, "\n", data)
X = np.array(X)
Y = np.array(Y)
# NOTE(review): X was already normalised per image above; this re-normalises
# the whole batch but is only used for display below — confirm intended.
norm_x = cv.normalize(X.astype(np.float64), None, 0, 1, cv.NORM_MINMAX)
class_names =["ori","sigma_0.5","sigma_1.0", "sigma_1.5", "sigma_0.5_sigma_1.9365"]
#class_names = ["ori", "sigma_0.5", "sigma_1.0", "sigma_1.5", "sigma_2.0"]
predictions = model.predict(X)
pred = predictions.astype("float16")
def plot_image(i, predictions_array, true_label, img):
    """Show sample i with its predicted label (blue when correct, red when wrong)."""
    scores, label, picture = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(picture, cmap=plt.cm.binary)
    guess = np.argmax(scores)
    color = 'blue' if guess == np.argmax(label) else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[guess], 100 * np.max(scores), class_names[np.argmax(label)]), color = color)
def plot_value_array(i, predictions_array, true_label):
    """Bar chart of class probabilities for sample i; predicted bar red, true bar blue."""
    scores, label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(len(class_names)), scores, color = "#777777")
    plt.ylim([0, 1])
    guess = np.argmax(scores)
    bars[guess].set_color('red')
    bars[np.argmax(label)].set_color('blue')
# Render a num_rows x num_cols grid: each sample gets its image and its
# probability-bar chart side by side.
num_rows = 10
num_cols = 10
num_images = num_rows * num_cols
plt.figure(figsize = (2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions, Y, norm_x)
    plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions, Y)
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 26 17:32:32 2014
@author: swalters
"""
import re
import string
### FILE HANDLING METHODS
def openFile(filename):
    ''' returns text in a file as a string
        input: filename
        output: text string
    '''
    # BUG FIX: the file handle leaked if read() raised; `with` guarantees close.
    with open(filename, 'r') as f:
        return f.read()
def readFile(filename):
    ''' returns dictionary representing sentences/categories in filename
        input: filename
        output: dictionary, key=statement & value=1/0 (as strings from the file)
    '''
    res = {}
    for line in re.split('\n', openFile(filename)):
        parts = re.split(',', line)
        if len(parts) > 1:
            # first field is the sentence, second its categorization
            res[parts[0]] = parts[1]
    return res
def writeFile(dictionary, filename):
    ''' writes contents of dictionary into training data structure, filename.txt
        input: dictionary to be written, filename
        output: file is written
    '''
    res = ''
    for key in dictionary:
        res += key + ',' + str(dictionary[key]) + '\n'  # "sentence,category" per line
    # BUG FIX: the handle leaked if write() raised; `with` guarantees close.
    with open(filename, 'w+') as f:
        f.write(res)
def clean(gutenbergText):
    ''' strips Project Gutenberg boilerplate from the text of a book
        input: gutenbergText (string), from Project Gutenberg book
        output: substring of gutenbergText which is the body of the book
    '''
    BODY_START = ' ***'  # ends the introductory boilerplate
    BODY_END = 'End of the Project Gutenberg'  # begins the closing boilerplate
    start = gutenbergText.find(BODY_START) + len(BODY_START)
    end = gutenbergText.find(BODY_END)
    return gutenbergText[start:end]
### PARSING METHODS
def parseQuestions():
    ''' gets questions/categorizations from (uniquely, not like trainingData.txt) structured .txt file
        input: none (reads questions.txt)
        output: dict mapping question text -> 1
    '''
    qs = openFile('questions.txt')
    ql = re.split('\n', qs)
    res = {}
    for line in ql:
        sp = ['\r', '\n']          # characters replaced by spaces
        rem = [',', '.', "'", '"']  # characters removed outright
        for char in sp:
            line = line.replace(char, ' ')
        for char in rem:
            line = line.replace(char, '')
        line = line.strip()
        # BUG FIX: filter() returns an iterator and its result was discarded
        # (a no-op); join it back so non-printable characters are removed.
        line = ''.join(filter(lambda x: x in string.printable, line))
        space = line.find(' ')
        qmark = line.find('?')
        if qmark != -1:
            # keep the text between the first space and the question mark
            qtext = line[space:qmark].strip()
            res[qtext] = 1
    return res
def parseBooks(books):  # books of format ['book1.txt', 'book2.txt']
    '''Split each book into sentences and label them: 1 for questions, 0 otherwise.'''
    res = {}
    for book in books:
        text = clean(openFile(book))
        sentenceList = re.split("(?<=[\.?!])\W", text)  # split after ., ?, !
        for sentence in sentenceList:
            sp = ['\r', '\n']
            rem = [',', '.', "'", '"']
            for char in sp:
                sentence = sentence.replace(char, ' ')
            for char in rem:
                sentence = sentence.replace(char, '')
            sentence = sentence.strip()
            # BUG FIX: filter() returns an iterator and its result was discarded
            # (a no-op); join it back so non-printable characters are removed.
            sentence = ''.join(filter(lambda x: x in string.printable, sentence))
            if len(sentence) > 0:
                # a trailing '?' marks the sentence as a question
                if sentence[-1] == '?':
                    res[sentence] = 1
                else:
                    res[sentence] = 0
    return res
### DATA STORAGE METHOD
def build(filename):
    '''Merge parsed questions and book sentences into one labelled dict and write it out.'''
    #d = readFile('trainingData.txt')
    d = {}
    # Questions first, then book sentences; earlier entries win on duplicates.
    for source in (parseQuestions(), parseBooks(['book1.txt'])):
        for sentence in source:
            d.setdefault(sentence, source[sentence])
    writeFile(d, filename)
    return d
### MAIN METHOD
if __name__ == '__main__':
    # Rebuild the training data file, reload it, and bucket sentences by label.
    build('trainingData.txt')
    f = readFile('trainingData.txt')
    questions = []
    statements = []
    for key in f:
        if f[key] == '1':  # labels come back from the file as strings
            questions.append(key)
        else:
            statements.append(key)
def reverse(input=''):
    """Return the characters/items of *input* concatenated in reverse order.

    Idiom fix: the manual prepend loop is quadratic; ''.join(reversed(...))
    is linear and produces identical results for strings and for sequences
    of strings. (Parameter name kept for keyword-call compatibility, though
    it shadows the builtin.)
    """
    return ''.join(reversed(input))
|
import gym
from gym import spaces
from gym.utils import seeding
# 定义牌的分数。其中,A = 1, 2-10 = 牌的点数, J/Q/K= 0.5。随机发牌就是随机的从deck中选择一张牌
# Card scores: A = 1, 2-10 = face value, J/Q/K = 0.5. Dealing draws uniformly
# from `deck`.
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0.5, 0.5, 0.5]
# Value of a face ("person") card.
p_val = 0.5
# Target score: ten and a half.
dest = 10.5


def draw_card(np_random):
    """Draw one card uniformly at random from the deck."""
    return np_random.choice(deck)


def draw_hand(np_random):
    """Deal a fresh hand holding a single random card."""
    return [draw_card(np_random)]


def sum_hand(hand):
    """Total score of the hand."""
    return sum(hand)


def get_card_num(hand):
    """Number of cards held."""
    return len(hand)


def get_p_num(hand):
    """Count the face ("person") cards in the hand."""
    return sum(1 for card in hand if card == p_val)


def gt_bust(hand):
    """True when the hand's total exceeds the target (bust)."""
    return sum_hand(hand) > dest


def is_dest(hand):
    """True when the hand scores exactly ten and a half."""
    return sum_hand(hand) == dest


def lt_dest(hand):
    """True when the hand scores below ten and a half."""
    return sum_hand(hand) < dest


def is_rwx(hand):
    """"Person five small": the hand contains five face cards."""
    return get_p_num(hand) == 5


def is_tw(hand):
    """"Tian wang": five cards totalling exactly ten and a half."""
    return get_card_num(hand) == 5 and is_dest(hand)


def is_wx(hand):
    """"Five small": five cards totalling under ten and a half."""
    return get_card_num(hand) == 5 and lt_dest(hand)


def hand_types(hand):
    """Classify a hand: return (type, reward, done), strongest type first."""
    if gt_bust(hand):
        return 0, -1, True   # bust
    if is_rwx(hand):
        return 5, 5, True    # person five small
    if is_tw(hand):
        return 4, 4, True    # tian wang
    if is_wx(hand):
        return 3, 3, True    # five small
    if is_dest(hand):
        return 2, 2, True    # exactly ten and a half
    return 1, 0, False       # plain hand; the round continues


def cmp(dealer, player):
    """True when the dealer beats the player.

    Higher score wins; on equal scores the dealer wins when it holds at
    least as many cards as the player.
    """
    dealer_score, player_score = sum_hand(dealer), sum_hand(player)
    if dealer_score != player_score:
        return dealer_score > player_score
    return get_card_num(dealer) >= get_card_num(player)
# Gym environment for the card game "ten and a half".
class HalftenEnv(gym.Env):
    """
    Simple ten-and-a-half (Shi Dian Ban).

    A poker-style card game: collect cards totalling "ten and a half"
    points without going over. Cards A,2-10 score face value (A = 1);
    J/Q/K are "person" cards worth half a point. One dealer plays one
    player.

    Hand types (strongest first):
      person-five-small: 5 cards, all person cards          -> reward x5
      tian-wang:         5 cards totalling exactly 10.5     -> reward x4
      five-small:        5 cards totalling under 10.5       -> reward x3
      ten-and-a-half:    <5 cards, total exactly 10.5       -> reward x2
      plain hand:        <5 cards, total under 10.5         -> reward x1
      bust:              total over 10.5

    Comparison rules:
      person-five-small > tian-wang > five-small > ten-and-a-half > plain > bust.
      A player reaching ten-and-a-half or better wins immediately; a player
      who busts loses immediately. Otherwise the player stands and the
      dealer draws until it wins or hits a terminal hand type; on equal
      scores the dealer wins when it holds at least as many cards,
      otherwise it keeps drawing. Dealer hands obey the same type rules.

    Rewards: win = 1, lose = -1, scaled by the hand-type multiplier above.
    """
    def __init__(self):
        # Action space: 0 = stand, 1 = hit.
        self.action_space = spaces.Discrete(2)
        # Observation space: (player hand score, cards held, person cards held).
        self.observation_space = spaces.Tuple((
            spaces.Discrete(21),  # player's current hand score
            spaces.Discrete(5),   # number of cards in hand
            spaces.Discrete(6)))  # number of person cards in hand
        self._seed()
        # Start the first round.
        self._reset()
        # Number of actions.
        self.nA = 2

    def _seed(self, seed=None):
        # Seed the RNG used for dealing.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _step(self, action):
        # Hit: deal the player one card and classify the resulting hand.
        # Stand: the dealer draws until it beats the player or reaches a
        # terminal hand type (ties favour the dealer at >= card count).
        assert self.action_space.contains(action)
        reward = 0
        if action:
            self.player.append(draw_card(self.np_random))
            # Classify the player's new hand.
            type,reward,done = hand_types(self.player)
        else:
            done = True
            # Once the player stands, the dealer starts drawing.
            self.dealer = draw_hand(self.np_random)
            # A single card cannot form a special hand, so just compare scores.
            result = cmp(self.dealer, self.player)
            if result:
                reward = -1
            else:
                while not result:
                    # Deal the dealer another card.
                    self.dealer.append(draw_card(self.np_random))
                    # Classify the dealer's hand.
                    dealer_type, dealer_reward, dealer_done = hand_types(self.dealer)
                    # Terminal dealer hand ends the round; negate the
                    # dealer's reward to express it from the player's side.
                    if dealer_done:
                        reward = -dealer_reward
                        break
                    # Not terminal: compare scores again.
                    result = cmp(self.dealer, self.player)
                    if result:
                        reward = -1
                        break
        return self._get_obs(), reward, done, {}

    def _get_obs(self):
        # Observation triple: (hand score, card count, person-card count).
        return (sum_hand(self.player), get_card_num(self.player), get_p_num(self.player))

    def _reset(self):
        # New round: the player starts with one dealt card.
        self.player = draw_hand(self.np_random)
        return self._get_obs()
# -*- coding:utf-8 -*-
'''
先采用1秒模拟1ms,所有文件sleep 1s 避免线程之间错乱
'''
from mininet.node import Controller
from mininet.log import setLogLevel, info
from mn_wifi.link import wmediumd, adhoc
from mn_wifi.cli import CLI_wifi
from mn_wifi.net import Mininet_wifi
from mn_wifi.wmediumdConnector import interference
from EH.energy import energy
from Params.params import getDistance
import threading
import json
import random
from NewGame import game
"线程函数"
def command(host, arg):
result = host.cmd(arg)
return result
def topology():
    "Create a network."
    net = Mininet_wifi(controller=Controller, link=wmediumd,
                       wmediumd_mode=interference)
    info("*** Creating nodes\n")
    # One access point plus an AP station and three DU stations at fixed positions.
    ap1 = net.addAccessPoint('ap1', ssid="ap1-ssid", mode="g",
                             channel="1", position='5,6,0',range=40)
    AP = net.addStation('AP', position='5,10,0', ip='10.0.0.1', mac='00:00:00:00:00:01')
    DU = net.addStation('DU', position='30,5,0', ip='10.0.0.2', mac='00:00:00:00:00:02')
    DU2 = net.addStation('DU2', position='15,15,0', ip='10.0.0.3', mac='00:00:00:00:00:03')
    DU3 = net.addStation('DU3', position='15,15,0', ip='10.0.0.4', mac='00:00:00:00:00:04')
    c0 = net.addController('c0')
    # NOTE(review): the propagation model is set twice with different
    # exponents; the second call (exp=4.5) is the effective one.
    net.setPropagationModel(model="logDistance", exp=5)
    info("*** Configuring Propagation Model\n")
    net.setPropagationModel(model="logDistance", exp=4.5)
    info("*** Configuring wifi nodes\n")
    net.configureWifiNodes()
    info("*** Adding Link\n")
    net.addLink(AP, cls=adhoc, ssid='adhocNet', mode='g', channel=5, ht_cap='HT40+')
    net.addLink(DU, cls=adhoc, ssid='adhocNet', mode='g', channel=5, ht_cap='HT40+')
    net.addLink(DU2, cls=adhoc, ssid='adhocNet', mode='g', channel=5, ht_cap='HT40+')
    net.addLink(DU3, cls=adhoc, ssid='adhocNet', mode='g', channel=5, ht_cap='HT40+')
    info("*** Starting network ***\n")
    net.build()
    c0.start()
    ap1.start([c0])
    "划分时间线来确定每一时隙需要完成的任务"
    # Time line: each round is one slot of info collection + broadcasting.
    timeline = 0  # NOTE(review): never used below
    round = 0     # NOTE(review): never incremented, so this loop never ends
    while(round<20):
        info("***first round AP collect the info\n***")
        try:
            # NOTE(review): `thread` is undefined (the file imports `threading`);
            # these calls raise NameError — confirm `_thread.start_new_thread`
            # or threading.Thread was intended.
            thread.start_new_thread(command,(AP,"python RInfo.py 10.0.0.1 AP-wlan0"))
            thread.start_new_thread(command,(DU,"python SInfo.py 10.0.0.2 DU-wlan0 10.0.0.1"))
            thread.start_new_thread(command,(DU2,"python SInfo.py 10.0.0.3 DU2-wlan0 10.0.0.1"))
            thread.start_new_thread(command,(DU3,"python SInfo.py 10.0.0.4 DU3-wlan0 10.0.0.1"))
        except:
            print("info collect error")
        # NOTE(review): `time` is not imported at the top of this file.
        time.sleep(22)
        info("***AP start broadcasting ***\n")
        "DU,RU 随机接收信息还是接收能量"
        '''
        约束条件
        1.DU接收到的有效信息量比RU多博弈决策的N1
        2.DU的能量要能够保障在后一阶段能够发送足够的数据包
        计算出时隙比值来设置概率
        e1=0.4 e2=0.1
        '''
        # Probability each DU receives information (otherwise it harvests energy).
        p1 = 0.2
        p2 = 0.3
        TotalTime = 10  # number of minimal slots per round
        FileIndex = 0   # index of the next file chunk to broadcast
        dst = ['10.0.0.2','10.0.0.3','10.0.0.4']
        for i in range(0,TotalTime):
            "此处AP应该改成广播,APsend 的 dst应该不止一个"
            t1 = threading.Thread(target=command, args = (AP,"python APBroadCast.py 10.0.0.1 AP-wlan0 '%s' %s" %(dst,FileIndex)))
            # NOTE(review): `RU` and `DU1` are never defined (stations are
            # DU/DU2/DU3) — these lines raise NameError; confirm intended nodes.
            t2 = threading.Thread(target=command, args = (RU,"python Receive.py 10.0.0.2 RU-wlan0 0.5"))
            top1 = int(100-100*p1)
            key1 = random.randint(1,100)
            "中继设备随机接收信息或者能量"
            #
            if key1 in range(1,top1):
                info("DU1 infomation\n")
                t3 = threading.Thread(target = command,args = (DU1,"python Receive.py 10.0.0.3 DU1-wlan0 0.1"))
            else:
                info("DU1 energy\n")
                t3 = threading.Thread(target = energy,args = (DU1,AP,1))
            t3.start()
            top2 = int(100-100*p2)
            key2 = random.randint(1,100)
            if key2 in range(1,top2):
                info("DU2 infomation\n")
                t4 = threading.Thread(target=command,args=(DU2,"python Receive.py 10.0.0.4 DU2-wlan0 0.2"))
            else:
                info("DU2 energy\n")
                t4 = threading.Thread(target = energy,args = (DU2,AP,1) )
            t4.start()
            # Start the listener thread before the sender.
            t2.start()
            t1.start()
            t1.join()
            t2.join()
            t3.join()
            t4.join()
            FileIndex += 1
    info("*** Running CLI\n")
    CLI_wifi(net)
    info("*** Stopping network\n")
    net.stop()
if __name__ == '__main__':
    # Enable verbose mininet logging, then build and run the topology.
    setLogLevel('info')
    topology()
|
"""Tests for treadmill.ad.*"""
|
from django.shortcuts import render
# Create your views here.
from django.http import JsonResponse
import json
from src.expression.Item import Item
from src.singletons import sku_match
from urllib.parse import unquote
from src.wrappers import autocomplete
from src.Utils.logger import logger
sku_matcher_singleton = sku_match.SkuSingleton()
def modify_format(index, title, id='noskuidavailable', source_string='good'):
    """Shape one autocomplete suggestion into the API response row format.

    NOTE: `id` shadows the builtin, but the name is part of the keyword API.
    """
    return {
        'type': 'fuzzy',
        'idx': index,
        'title': title,
        'skuId': id,
        'sourceString': source_string,
    }
def complete_query(request):
    """Autocomplete endpoint: return fuzzy SKU suggestions for GET param `text`."""
    try:
        logger.info("REQUEST: %s", str(request))
        # NOTE(review): assert is stripped under -O; an explicit check would be safer.
        assert request.GET
        user_search_query = request.GET['text']
        user_search_query = unquote(user_search_query)  # undo URL-encoding
        sku_matcher = sku_matcher_singleton.get_obj()
        autocompleter = autocomplete.AutoComplete(sku_matcher)
        sku_suggestions = autocompleter.hit_query(user_search_query.lower())
        response = {'code': 200, 'data': {'results': []}}
        for i in range(len(sku_suggestions)):
            sku = sku_suggestions[i]
            response['data']['results'].append(modify_format(index=i, title=sku[0].get_name(), id=sku[0]._id, source_string=user_search_query))
        # print(a.add_and_match_item(Item(name='maggi noodles', subtag='Alcohol')))
        return JsonResponse(response)
    except Exception as e:
        # Any failure returns an empty result set with the error message.
        logger.error(e)
        return JsonResponse({'code': 400, 'data': {'results': []}, 'error': str(e)})
def get_sku_data(request):
    """Dump the SKU catalogue as rows.

    GET param `key` selects the output: 'data' for the full dump (including
    dzer ids and users) or 'dataless' for the trimmed dump.
    """
    try:
        logger.info(request)
        assert request.GET
        key = request.GET['key']
        if not (key == 'data' or key == 'dataless'):
            return JsonResponse({'code': 400, 'error': 'Unwarranted Request'})
        sku_matcher = sku_matcher_singleton.get_obj()
        if key == 'dataless':
            # BUG FIX: the header listed 3 columns while rows carry 4 values;
            # add the missing 'Sku Subtag' column.
            data = [('Sku Id', 'Sku Name', 'Sku Subtag', 'Sku Popularity')]
            for sku in sku_matcher.sku_set:
                data.append((sku._id, sku.get_name(), sku._subtag, sku._popularity))
            return JsonResponse({'code': 200, 'data': data})
        data = [('Sku Id', 'Sku Name', 'Sku Subtag', 'Sku Popularity', 'Sku Dzerids', 'Sku Users')]
        for sku in sku_matcher.sku_set:
            data.append((sku._id, sku.get_name(), sku._subtag, int(sku._popularity), json.dumps(sku._dzer_ids), str(sku._user_ids)))
        return JsonResponse({'code': 200, 'data': data})
    except Exception as e:
        # BUG FIX: errors previously reported code 200; use 400 for
        # consistency with complete_query's error path.
        return JsonResponse({'code': 400, 'error': str(e)})
|
from enum import Enum


class Direction(Enum):
    """Sort direction shared by every sort function in this module."""
    ASC = 0   # ascending
    DESC = 1  # descending
# --------------------insertion sort start --------------------------
def insertion_sort(items, direction=Direction.ASC):
    """
    Insertion sort (in-place). Starting from the second element, each element
    is compared with its predecessor and shifted left until every element
    before it already belongs there.
    :param items: the items to sort; must be a list
    :param direction: Direction.ASC (default) or Direction.DESC
    :return: the sorted list
    """
    __assert_is_valid_direction(direction)
    __assert_items_must_be_list(items)
    should_swap = (__greater_than_previous
                   if direction == Direction.DESC
                   else __less_than_previous)
    for position in range(1, len(items)):
        __keep_compare_and_swap_if_should(items, position, should_swap)
    return items


def __less_than_previous(items, index):
    """True when items[index] must move left in an ascending sort."""
    return items[index] < items[index - 1]


def __greater_than_previous(items, index):
    """True when items[index] must move left in a descending sort."""
    return items[index] > items[index - 1]


def __keep_compare_and_swap_if_should(items, index, should_swap_with_previous):
    """Shift items[index] leftwards while the swap predicate holds."""
    while index > 0 and should_swap_with_previous(items, index):
        items[index - 1], items[index] = items[index], items[index - 1]
        index -= 1
# --------------------insertion sort end --------------------------
# --------------------selection sort start --------------------------
def selection_sort(items, direction=Direction.ASC):
    """
    Selection sort (in-place). Repeatedly select the minimum (maximum for
    DESC) of the unsorted suffix and swap it into the next position.
    :param items: the items to sort; must be a list
    :param direction: Direction.ASC (default) or Direction.DESC
    :return: the sorted list
    """
    __assert_is_valid_direction(direction)
    __assert_items_must_be_list(items)
    i = 0
    select_func = __select_the_greater if direction == Direction.DESC \
        else __select_the_less
    while i < len(items) - 1:
        target = __select_target(items, i, select_func)
        if target != i:
            items[target], items[i] = items[i], items[target]
        i += 1
    return items


def __select_target(items, index, select):
    """Index of the element `select` prefers within items[index:]."""
    selected_index = index
    index += 1
    while index < len(items):
        selected_index = select(items, selected_index, index)
        # BUG FIX: the scan index was never advanced, so this loop never
        # terminated for any list with more than one unsorted element.
        index += 1
    return selected_index


def __select_the_greater(items, index1, index2):
    """Index holding the greater value (ties keep index1)."""
    return index2 if items[index2] > items[index1] else index1


def __select_the_less(items, index1, index2):
    """Index holding the lesser value (ties keep index1)."""
    return index2 if items[index2] < items[index1] else index1
# --------------------selection sort end --------------------------
# --------------------bubble sort start --------------------------
def bubble_sort(items, direction=Direction.ASC):
    """
    Bubble sort (in-place). Each pass compares adjacent elements and swaps
    out-of-order pairs, floating the extreme element to the end of the
    shrinking unsorted prefix.
    :param items: the items to sort; must be a list
    :param direction: Direction.ASC (default) or Direction.DESC
    :return: the sorted list
    """
    __assert_items_must_be_list(items)
    __assert_is_valid_direction(direction)
    should_swap = __less_than_next if direction == Direction.DESC else __greater_than_next
    n = len(items)
    while n > 1:
        __bubble_the_nth(items, n, should_swap)
        n -= 1
    return items


def __bubble_the_nth(items, n, should_swap_with_next):
    """One bubbling pass over items[:n]."""
    i = 0
    while i < n - 1:
        if should_swap_with_next(items, i):
            items[i], items[i + 1] = items[i + 1], items[i]
        i += 1


def __greater_than_next(items, i):
    """Ascending comparator: swap when the left element is greater."""
    return items[i] > items[i + 1]


def __less_than_next(items, i):
    """Descending comparator: swap when the left element is lesser.

    BUG FIX: was `<=`, which needlessly swapped EQUAL elements on every
    pass and broke stability for descending sorts; strict `<` mirrors the
    ascending comparator.
    """
    return items[i] < items[i + 1]
# --------------------bubble sort end --------------------------
# --------------------merge sort start --------------------------
def merge_sort(items, direction=Direction.ASC):
    """
    Merge sort (result in-place). Recursively sort both halves, then merge
    them through a single auxiliary buffer.
    Time: Theta(n log n); auxiliary space: O(n).
    :param items: the items to sort; must be a list
    :param direction: Direction.ASC (default) or Direction.DESC
    :return: the sorted list
    """
    __assert_is_valid_direction(direction)
    __assert_items_must_be_list(items)
    aux_list = [0] * len(items)  # the aux_list is for saving space purpose
    # BUG FIX: the condition was `if Direction.DESC` — an enum member is
    # always truthy, so ascending sorts also used the "greater" comparator.
    select = __select_the_greater if direction == Direction.DESC else __select_the_less
    __internal_merge_sort(items, 0, len(items) - 1, aux_list, select)
    return items


def __internal_merge_sort(items, start, stop, aux_list, select):
    """Recursively sort items[start..stop] (inclusive bounds)."""
    if start >= stop:
        return
    # BUG FIX: `/` produces a float index on Python 3; use floor division.
    mid = (start + stop) // 2
    __internal_merge_sort(items, start, mid, aux_list, select)
    __internal_merge_sort(items, mid + 1, stop, aux_list, select)
    __merge_sublist(items, aux_list, start, mid, stop, select)
    return items


def __merge_sublist(items, aux_list, start, mid, stop, select):
    """Merge the sorted runs items[start..mid] and items[mid+1..stop]."""
    before_current, after_current, target_index = start, mid + 1, start
    while before_current <= mid and after_current <= stop:
        selected_index = select(items, before_current, after_current)
        aux_list[target_index] = items[selected_index]
        if selected_index == before_current:
            before_current += 1
        else:
            after_current += 1
        target_index += 1
    # BUG FIX: leftovers were copied INTO `items` (clobbering unmerged data)
    # and every copy loop stopped one element short; drain the remaining run
    # into the aux buffer instead, then write the merged range back.
    while before_current <= mid:
        aux_list[target_index] = items[before_current]
        before_current += 1
        target_index += 1
    while after_current <= stop:
        aux_list[target_index] = items[after_current]
        after_current += 1
        target_index += 1
    for i in range(start, stop + 1):
        items[i] = aux_list[i]
# --------------------heap sort start --------------------------
def heap_sort(items, direction=Direction.ASC):
    """
    Heap sort (in-place). NOT IMPLEMENTED YET: only argument validation runs.
    :param items: the items to sort; must be a list
    :param direction: Direction.ASC (default) or Direction.DESC
    :return: the sorted list (once implemented)
    """
    __assert_items_must_be_list(items)
    __assert_is_valid_direction(direction)


def __heapify(items, n, type):  # type= 1 - maxheap, 2 - minheap
    # TODO: sift items[n] into heap order; currently a stub.
    pass


def __heap_parent(index):
    # Parent of a 1-based heap index.
    # NOTE(review): `/` is float division on Python 3 — likely should be `// 2`.
    return index / 2


def __heap_left_child(index):
    # Left child of a 1-based heap index (2 * index).
    return index << 1


def __heap_right_child(index):
    # Right child of a 1-based heap index (2 * index + 1).
    return (index << 1) + 1
# --------------------heap sort end --------------------------
def quick_sort(items, direction=Direction.ASC):
    # TODO: not implemented yet.
    pass


def counting_sort(items, direction=Direction.ASC):
    # TODO: not implemented yet.
    pass


def radix_sort(items, direction=Direction.ASC):
    # TODO: not implemented yet.
    pass


def bucket_sort(items, direction=Direction.ASC):
    # TODO: not implemented yet.
    pass
def __assert_is_valid_direction(direction):
    """Raise ValueError unless direction is a Direction member."""
    if direction != Direction.DESC and direction != Direction.ASC:
        raise ValueError('The value of direction must be Direction.ASC or Direction.DESC')


def __assert_items_must_be_list(items):
    """Raise ValueError unless items is a non-empty list."""
    if not items:
        raise ValueError('items cannot be None!')
    # BUG FIX: `isinstance(items, type(list))` compared against `type` (the
    # metaclass), so every actual list FAILED this check and raised; compare
    # against the `list` class itself.
    if not isinstance(items, list):
        raise ValueError('items must be a list instance!')
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Prompt loop: empty input quits; numeric input is classified; anything else errors.
while True:
    content = input('enter text:')
    if content == '':
        print('bye')
        break
    if not content.isdigit():
        print('Error!')
        continue
    num = int(content)
    # print the 10th power for large numbers, a notice for small ones
    print('a lower num!' if num < 20 else num ** 10)
|
# Collect numbers until the user answers 'n', then split them into evens/odds.
lista = []
while True:
    n = int(input('Digite um número: '))
    lista.append(n)
    # BUG FIX: `[0]` raised IndexError when the user pressed Enter on an
    # empty answer; `[:1]` yields '' instead, which simply re-prompts below.
    resp = str(input('Quer continuar? [S/N] ')).lower().strip()[:1]
    while 's' not in resp and 'n' not in resp:
        resp = str(input('Quer continuar? [S/N] ')).lower().strip()[:1]
    if 'n' in resp:
        break
print('-=' * 30)
lista_pares = []
lista_impares = []
for valor in lista:
    if valor % 2 == 0:
        lista_pares.append(valor)
    else:
        lista_impares.append(valor)
print(f'A lista completa é {lista}')
print(f'A lista de pares é {lista_pares}')
print(f'A lista de ímpares é {lista_impares}')
|
# Each grid row is repeated 100x so the slope walks never need modulo wrap.
# NOTE: `input` shadows the builtin from here on.
input = [list(line.strip() * 100) for line in open('data/03.txt')]


def tree_counting(right, down):
    """Count trees ('#') hit while descending the grid at the given slope.

    NOTE(review): mutates the shared `input` grid (marks visited cells 'X'/'O'),
    so repeated calls would under-count if two slopes ever revisited the same
    cell — verify the chosen slopes never overlap.
    """
    row = col = tree_counter = 0
    while row < len(input) - 1:
        col += right
        row += down
        if input[row][col] == '#':
            tree_counter += 1
            input[row][col] = 'X'
        else:
            input[row][col] = 'O'
    return tree_counter


# Product of tree counts over the five required slopes.
print(tree_counting(1, 1) * tree_counting(3, 1) * tree_counting(5, 1) * tree_counting(7, 1) * tree_counting(1, 2))
|
class Solution(object):
    def isValid(self, s):
        """
        :type s: str
        :rtype: bool

        A string is valid when every bracket closes the most recently
        opened bracket of the same kind; non-bracket characters are ignored.
        """
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in '([{':
                stack.append(ch)
            elif ch in pairs:
                # closing bracket must match the top of the stack
                if not stack or stack.pop() != pairs[ch]:
                    return False
        # any unclosed opener makes the string invalid
        return not stack
s = Solution()
import sys
# BUG FIX: `print s.isValid(...)` is Python 2 statement syntax and a
# SyntaxError on Python 3; calling print as a function works on both.
print(s.isValid(sys.argv[1]))
|
from logging import getLogger
import sys
import io
import numpy as np
import PIL.ImageDraw as ImageDraw
import PIL.Image as Image
def cylinder(draw, v1_, v2_, r, **options):
    """
    Draw a 3D cylinder as a thick 2D line between the projected endpoints.
    Caller-supplied `fill` overrides the white default.
    """
    opts = dict(fill="#fff")
    opts.update(options)
    endpoints = [int(coord) for coord in (v1_[0], v1_[1], v2_[0], v2_[1])]
    draw.line(endpoints, width=int(r * 2), fill=opts["fill"])
def Render(prims, Rsphere, shadow=None, topleft=np.array(
        [-1., -1.]), size=(50, 50), zoom=200, vertices=None, vecs=None, bgcolor='#fff', encode=True):
    """
    Render the image in PNG format and output to the stdout.
    Returns nothing.
    When vertex list is given, the coords in prims are not indicated in vectors but in indices
    Vecs are vectors not needed to be sorted (used in "L" command)

    NOTE(review): the default topleft=np.array([-1., -1.]) is a mutable
    default argument shared across calls -- safe only while no caller
    mutates it in place.
    """
    logger = getLogger()
    # world units -> pixels (rounded)
    size = tuple((int(x * zoom + 0.5) for x in size))
    image = Image.new("RGB", size, bgcolor)
    draw = ImageDraw.Draw(image, "RGBA")
    # special treatment for post-K project
    # draw.rectangle([0,0,size[0]/2,size[1]], fill="#EF5FA7")
    # draw.rectangle([size[0]/2,0,size[0],size[1]], fill="#00A2FF")
    # 3D version of the top-left offset (z = 0)
    TL0 = np.zeros(3)
    TL0[:2] = topleft
    linedefaults = {"stroke_width": 2,
                    "stroke": "#000",
                    # "stroke_linejoin": "round",
                    # "stroke_linecap" : "round",
                    }
    filldefaults = {"stroke_width": 1,
                    "stroke": "#000",
                    "fill": "#0ff",
                    # "stroke_linejoin": "round",
                    # "stroke_linecap" : "round",
                    # "fill_opacity": 1.0,
                    }
    shadowdefaults = {"stroke_width": 0,
                      # "fill": "#8881",
                      "fill": shadow,
                      # "fill_opacity": 0.08,
                      }
    if shadow is not None:
        # add growing shadow discs ('CS' prims) stacked behind every sphere
        r = Rsphere
        Z = np.array([0, 0, 1.0])
        prims += [[prim[0] - Z * r * 1.4**j, prim[1] + 'S', r * 1.4**j] + prim[3:]
                  for j in range(1, 5) for prim in prims if prim[1] == 'C']
    # painter's algorithm: sort by depth, then draw back-to-front by popping
    prims.sort(key=lambda x: -x[0][2])
    while len(prims) > 0:
        prim = prims.pop()
        # crude view culling.
        # NOTE(review): compares world-space coords against the pixel `size`
        # computed above -- the units look inconsistent; confirm intent.
        if not ((-0.5 < prim[0][0] - topleft[0] < size[0] + 0.5) and
                (-0.5 < prim[0][1] - topleft[1] < size[1] + 0.5)):
            continue
        if prim[1] == "L":
            # line segment given by two absolute endpoints
            if prim[4] == 0:
                options = {**linedefaults, **prim[5]}
                s = (prim[2][:2] - topleft) * zoom
                e = (prim[3][:2] - topleft) * zoom
                draw.line([int(s[0]), int(s[1]), int(e[0]), int(e[1])],
                          fill=options["stroke"], width=options["stroke_width"])
            else:
                options = {**filldefaults, **prim[5]}
                cylinder(
                    draw,
                    (prim[2] - TL0) * zoom,
                    (prim[3] - TL0) * zoom,
                    prim[4] * zoom,
                    **options)
        elif prim[1] == "L2":
            # new, simpler expression.
            # half relative vector is given
            if prim[3] == 0:
                options = {**linedefaults, **prim[4]}
                s = ((prim[0] + prim[2])[:2] - topleft) * zoom
                e = ((prim[0] - prim[2])[:2] - topleft) * zoom
                draw.line([int(s[0]), int(s[1]), int(e[0]), int(e[1])],
                          fill=options["stroke"], width=options["stroke_width"])
            else:
                options = {**filldefaults, **prim[4]}
                cylinder(
                    draw,
                    (prim[0] + prim[2] - TL0) * zoom,
                    (prim[0] - prim[2] - TL0) * zoom,
                    prim[3] * zoom,
                    **options)
        elif prim[1] == "C":
            # filled sphere, drawn as a flat circle
            options = {**filldefaults, **prim[3]}
            Rsphere = prim[2]
            center = (prim[0][:2] - topleft) * zoom
            r = Rsphere * zoom
            tl = center - r
            br = center + r
            draw.ellipse(
                [int(x) for x in [tl[0], tl[1], br[0], br[1]]], fill=options["fill"])
        elif prim[1] == "CS":
            # sphere shadow disc (added above when shadow is not None)
            Rsphere = prim[2]
            options = {**shadowdefaults, }  # **prim[3] }
            # logger.info("{0}".format(options))
            center = (prim[0][:2] - topleft) * zoom
            r = Rsphere * zoom
            tl = center - r
            br = center + r
            draw.ellipse(
                [int(x) for x in [tl[0], tl[1], br[0], br[1]]], fill=options["fill"])
    if encode:
        # serialize to PNG bytes instead of returning the PIL image
        imgByteArr = io.BytesIO()
        image.save(imgByteArr, format='PNG')
        imgByteArr = imgByteArr.getvalue()
        return imgByteArr
    else:
        return image
|
from common.run_method import RunMethod
import allure
@allure.step("极权限/添加权限")
def permission_addPermission_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Add a permission (POST).

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/添加权限"
    url = "/api-admin/permission/addPermission"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/修改权限")
def permission_updatePermission_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Update a permission (POST).

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/修改权限"
    url = "/api-admin/permission/updatePermission"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/删除权限")
def permission_deletePermission_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    """Delete a single permission (DELETE).

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/删除权限"
    url = "/api-admin/permission/deletePermission"
    return RunMethod.run_request("DELETE", url, params=params, body=body, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/删除多个权限")
def permission_deletePermissions_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    """Delete multiple permissions at once (DELETE).

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/删除多个权限"
    url = "/api-admin/permission/deletePermissions"
    return RunMethod.run_request("DELETE", url, params=params, body=body, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询所有平台")
def permission_queryAllPlatform_get(params=None, header=None, return_json=True, **kwargs):
    """Query all platforms (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询所有平台"
    url = "/api-admin/permission/queryAllPlatform"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询员工授权的所有平台")
def permission_queryAllPlatformByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """Query all platforms an employee is authorized for (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询员工授权的所有平台"
    url = "/api-admin/permission/queryAllPlatformByEmployeeId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询员工授权的所有PC平台")
def permission_queryAllPCPlatformByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """Query all PC platforms an employee is authorized for (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询员工授权的所有PC平台"
    url = "/api-admin/permission/queryAllPCPlatformByEmployeeId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询整颗权限树,从根开始")
def permission_queryPermissionTree_get(params=None, header=None, return_json=True, **kwargs):
    """Query the whole permission tree, starting from the root (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询整颗权限树,从根开始"
    url = "/api-admin/permission/queryPermissionTree"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询某个平台下的权限树")
def permission_queryPermissionTreeByPlatform_get(params=None, header=None, return_json=True, **kwargs):
    """Query the permission tree under one platform (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询某个平台下的权限树"
    url = "/api-admin/permission/queryPermissionTreeByPlatform"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询员工对应的URL权限")
def permission_queryPermissionUrlByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """Query the URL permissions of an employee (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询员工对应的URL权限"
    url = "/api-admin/permission/queryPermissionUrlByEmployeeId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/根据员工ID查询员工的权限")
def permission_queryPermissionsByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """Query an employee's permissions by employee ID (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/根据员工ID查询员工的权限"
    url = "/api-admin/permission/queryPermissionsByEmployeeId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询员工对应的权限树")
def permission_queryPermissionTreeByEmployeeId_get(params=None, header=None, return_json=True, **kwargs):
    """Query the permission tree of an employee (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询员工对应的权限树"
    url = "/api-admin/permission/queryPermissionTreeByEmployeeId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/修改角色与权限绑定关系")
def permission_updateRolePermission_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Update the role-to-permission binding (POST).

    :param params: query-string parameters
    :param body: request body
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/修改角色与权限绑定关系"
    url = "/api-admin/permission/updateRolePermission"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/根据角色ID查询角色的权限")
def permission_queryPermissionsByRoleId_get(params=None, header=None, return_json=True, **kwargs):
    """Query a role's permissions by role ID (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/根据角色ID查询角色的权限"
    url = "/api-admin/permission/queryPermissionsByRoleId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/查询角色授权的所有平台")
def permission_queryAllPlatformByRoleId_get(params=None, header=None, return_json=True, **kwargs):
    """Query all platforms a role is authorized for (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/查询角色授权的所有平台"
    url = "/api-admin/permission/queryAllPlatformByRoleId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
@allure.step("极权限/根据角色ID查询角色的权限--已选择的标出")
def permission_querySelectPermissionTreeByRoleId_get(params=None, header=None, return_json=True, **kwargs):
    """Query a role's permission tree with already-granted nodes marked (GET).

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON response when True (default), raw response otherwise
    """
    name = "极权限/根据角色ID查询角色的权限--已选择的标出"
    url = "/api-admin/permission/querySelectPermissionTreeByRoleId"
    return RunMethod.run_request("GET", url, params=params, header=header,
                                 return_json=return_json, name=name, **kwargs)
|
from django.shortcuts import render
from django.http import HttpResponseRedirect,HttpResponse
# Create your views here.
def View(request):
    """Render the text-utility landing page."""
    return render(request,'textutil.html')
def remove(text):
    """Return *text* with every punctuation character stripped out."""
    punc = """~`!@#$%^&*()_-+={}[]"":;\|/?.,<>"""
    return "".join(ch for ch in text if ch not in punc)
def remove_space(text):
    """Collapse runs of multiple spaces in *text* into a single space.

    Leading/trailing whitespace is stripped first. BUG FIX: the original
    only copied indices in range(len(text) - 1) and so silently dropped
    the last character of every input.
    """
    new = ""
    for char in text.strip():
        if char == " " and new.endswith(" "):
            continue  # skip the extra spaces of a run
        new = new + char
    return new
def remove_line(text):
    """Return *text* with all newline and carriage-return characters removed."""
    return "".join(ch for ch in text if ch not in "\n\r")
def on_caps_lock(text):
    """Return *text* fully upper-cased."""
    return text.upper()
def off_caps_lock(text):
    """Return *text* fully lower-cased."""
    lowered = text.lower()
    return lowered
def titlepara(text):
    """Return *text* in title case (first letter of each word capitalised)."""
    titled = text.title()
    return titled
def count_char(text):
    """Classify the characters of *text*.

    Returns a tuple (other_chars, spaces, digits, punctuation) where
    other_chars covers everything not in the first three classes.
    """
    punc_chars = "~`!@#$%^&*()_-+={}[]:;\|/?.,<>"
    char_num = white_space = num = punc = 0
    for char in text:
        if char in "0123456789":
            num += 1
        elif char == " ":
            white_space += 1
        elif char in punc_chars:
            punc += 1
        else:
            char_num += 1
    return char_num, white_space, num, punc
def analyze(request):
    """Apply the first enabled text transformation and render the result page.

    Reads the posted text and a set of on/off switches; exactly one
    transformation is applied, chosen by a fixed priority order.
    Fixes two wrong result labels (the removeline branch said
    'Remove Extra Spaces', the lowercase branch said 'Capitalize Text')
    and the 'Puncutations' typo; removes dead commented-out code.
    """
    text = request.POST['text']
    removepunc = request.POST.get('removepunc', 'off')
    removeextraspace = request.POST.get('removeextraspace', 'off')
    removeline = request.POST.get('removeline', 'off')
    capital = request.POST.get('upper', 'off')
    small = request.POST.get('lower', 'off')
    titled = request.POST.get('title', 'off')
    counting = request.POST.get('counting', 'off')
    if removepunc == 'on':
        params = {'purpose': 'Remove Punctuations', 'analyze_text': remove(text)}
    elif removeextraspace == 'on':
        params = {'purpose': 'Remove Extra Spaces', 'analyze_text': remove_space(text)}
    elif removeline == 'on':
        # BUG FIX: label previously read 'Remove Extra Spaces'
        params = {'purpose': 'Remove New Lines', 'analyze_text': remove_line(text)}
    elif capital == 'on':
        params = {'purpose': 'Capitalize Text', 'analyze_text': on_caps_lock(text)}
    elif small == 'on':
        # BUG FIX: label previously read 'Capitalize Text'
        params = {'purpose': 'Lowercase Text', 'analyze_text': off_caps_lock(text)}
    elif titled == 'on':
        params = {'purpose': 'Titled Text', 'analyze_text': titlepara(text)}
    elif counting == 'on':
        r = list(count_char(text))
        result = f"""'Number of character :' {r[0]}
        'Number of spaces :' {r[1]}
        'Number of Integer :' {r[2]}
        'Number of punctuation :'{r[3]}"""
        params = {'purpose': 'counting', 'analyze_text': result}
    else:
        params = {'purpose': 'No text to transform or you forget to switch on for transform', 'analyze_text': ''}
    return render(request, 'utilresult.html', params)
|
# -*- coding: utf-8 -*-
__license__ = """
This file is part of **janitoo** project https://github.com/bibi21000/janitoo.
License : GPL(v3)
**janitoo** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**janitoo** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with janitoo. If not, see http://www.gnu.org/licenses.
"""
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = 'bibi21000@gmail.com'
try:
    # declare this package as a setuptools-style namespace package
    __import__('pkg_resources').declare_namespace(__name__)
except Exception: # pragma: no cover
    # bootstrapping: pkg_resources may not be installed yet at build time
    pass # pragma: no cover
import sys, os, errno
import time
import unittest
import threading
import json as mjson
import shutil
import mock
import traceback
from pkg_resources import iter_entry_points
from janitoo_nosetests import JNTTBase
from janitoo.mqtt import MQTTClient
from janitoo.dhcp import JNTNetwork, HeartbeatMessage
from janitoo.utils import json_dumps, json_loads
from janitoo.utils import HADD_SEP, HADD
from janitoo.utils import TOPIC_HEARTBEAT
from janitoo.utils import TOPIC_NODES, TOPIC_NODES_REPLY, TOPIC_NODES_REQUEST
from janitoo.utils import TOPIC_BROADCAST_REPLY, TOPIC_BROADCAST_REQUEST
from janitoo.utils import TOPIC_VALUES_USER, TOPIC_VALUES_CONFIG, TOPIC_VALUES_SYSTEM, TOPIC_VALUES_BASIC
from janitoo.runner import jnt_parse_args
class JNTTComponent(JNTTBase):
    """Component base test
    """
    # name of the component under test; concrete subclasses override it
    component_name = None

    def setUp(self):
        JNTTBase.setUp(self)
        # collect every loadable 'janitoo.components' entry point
        self.factory = {}
        for entry_point in iter_entry_points(group='janitoo.components'):
            try:
                self.factory[entry_point.name] = entry_point.load()
            except Exception:
                # unloadable components are simply skipped
                pass
        print("Component %s" % self.component_name)

    def tearDown(self):
        self.factory = None
        JNTTBase.tearDown(self)

    def assertComponentEntryPoint(self, entry=None):
        """Assert that an entry point named *entry* (default: component_name) loads."""
        if entry is None:
            entry = self.component_name
        mkth = None
        for candidate in iter_entry_points(group='janitoo.components', name=entry):
            mkth = candidate.load()
        self.assertNotEqual(mkth, None)
class JNTTComponentCommon(object):
    """Common tests for components
    """

    def test_001_component_entry_point(self):
        """The component must declare a name and register an entry point."""
        self.assertFalse(self.component_name is None)
        self.assertComponentEntryPoint()

    def test_002_component_oid_and_properties(self):
        """Instantiating the component must honour oid and keyword overrides."""
        self.assertFalse(self.component_name is None)
        entry = next(iter_entry_points(group='janitoo.components', name=self.component_name))
        mkth = entry.load()
        self.assertFalse(mkth is None)
        compo = mkth()
        self.assertFalse(compo is None)
        self.assertEqual(compo.oid, self.component_name)
        self.assertFalse(compo.name is None)
        self.assertFalse(compo.product_name is None)
        overrides = dict(
            name='myunbelievablename',
            product_name='myunbelievableproduct_name',
            product_type='myunbelievableproduct_type',
            product_manufacturer='myunbelievableproduct_manufacturer',
        )
        compo = mkth(**overrides)
        self.assertNotEqual(compo, None)
        for attr, expected in overrides.items():
            self.assertEqual(getattr(compo, attr), expected)
|
import unittest
from models.activity import Activity
class TestActivity(unittest.TestCase):
    """Unit tests for the Activity model."""

    def setUp(self):
        # BUG FIX: the tests referenced self.activity but nothing ever
        # created it, so every test failed with AttributeError before
        # reaching its assertion.
        # Assumes Activity() takes no constructor arguments -- TODO confirm.
        self.activity = Activity()

    def test_add_activity(self):
        self.assertEqual(self.activity.add_activity({"name": "The gods must be crazy"}), "Activity added")

    def test_edit_activity(self):
        self.assertEqual(self.activity.edit_activity({"name": "The gods must be crazy"}), "updated")

    def test_delete_activity(self):
        self.assertEqual(self.activity.delete_activity({"name"}), "Activity deleted")
|
from django.test import TestCase
from recorder.models.site_application import InstantContent
from recorder.models.core import *
class SingleModelTestCase(TestCase):
    """Smoke tests around a single InstantContent row."""

    def setUp(self):
        InstantContent.objects.create(contentType=InstantContent.ContentType.TYPE_LOCATION)

    def test_user_instant_content(self):
        content: InstantContent = InstantContent.objects.get(
            contentType=InstantContent.ContentType.TYPE_LOCATION)
        user = content.user
        self.assertEqual(RecordedUser.objects.all().count(), 0)
        self.assertEqual(1, 1)
|
# hex() returns a '0x'-prefixed *string*, so "adding" two results just
# concatenates text; to add the numeric values, parse back with int(s, 16).
h1 = hex(97)  # the string '0x61'
h2 = hex(98)  # the string '0x62'
ret1 = h1 + h2
print(ret1)  # prints '0x610x62'
a = int(h1, 16)
b = int(h2, 16)
ret2 = a + b  # 97 + 98 == 195
print(hex(ret2))  # prints '0xc3'
|
from flask import Flask
from flask_restful import Api, Resource
import os
from dotenv import load_dotenv
import mercantile
import requests
import shutil
load_dotenv()
app = Flask(__name__)
api = Api(app)
# Retrieves the API key as an environment variable. Make sure there is a .env file
# in the Platform folder with the API_KEY variable set.
# NOTE(review): API_KEY is None when the variable is missing, which makes
# every request below fail at URL construction.
API_KEY = os.getenv('API_KEY')
BASE_URL = 'https://api.mapbox.com/v4/mapbox.satellite/'
'''
This retrieves and downloads a satellite image for a specified set of coordinates
and for a specified zoom level. The zoom level is an integer between 1 and 15,
where 1 is as far as possible and 15 is as close as possible. Right now, the image
is automatically downloaded into ./images as "test.png". There is also no error
handling logic, so any faulty request is an internal server error.
'''
class FindImage(Resource):
    """Download one Mapbox satellite tile for a lat/long/zoom and save it as PNG."""

    def get(self, lat, long, zoom, folder_name):
        # map the coordinates to the covering slippy-map tile
        tile = mercantile.tile(float(long), float(lat), int(zoom))
        url = (BASE_URL + zoom + '/' + str(tile.x) + '/' + str(tile.y)
               + '@2x.pngraw?access_token=' + API_KEY)
        req = requests.get(url, stream=True)
        # create the target folder on first use
        path_name = './images/' + folder_name
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        with open(path_name + '/' + lat + ":" + long + '.png', 'wb') as f:
            req.raw.decode_content = True
            shutil.copyfileobj(req.raw, f)
        return [{"result": "Successfully downloaded image"}]
# Example request (Georgia Tech): http://127.0.0.1:5000/image/33.7756/-84.3963/15/test
# URL segments map positionally onto FindImage.get's parameters.
api.add_resource(FindImage, "/image/<lat>/<long>/<zoom>/<folder_name>")
def main():
    """Run the Flask development server (debug mode; not for production)."""
    app.run(debug=True)
# start the server only when executed directly, not on import
if __name__ == "__main__":
    main()
import re
import sys
import time
start = time.time()

# first argument: input file; optional second argument: run mode flag
if len(sys.argv) < 2:
    print("We require more vespian command line inputs! (tell me what file to process)")
    sys.exit(0)

mode = sys.argv[2] if len(sys.argv) > 2 else ""
dev = mode == "dev"
# "timing" implies "silent"
silent = mode in ("silent", "timing")
timing = mode == "timing"
class Line:
    """A line y = m*x + b with a visibility flag."""

    def __init__(self, m, b):
        self.m = m
        self.b = b
        self.vis = True

    def print(self):
        # NOTE: 'print' as a method name shadows the builtin only inside
        # the class namespace; kept for interface compatibility.
        status = "visible" if self.vis else "not visible"
        print("y=(" + str(self.m) + ")*x+(" + str(self.b) + ") is " + status)

    def intersect(self, line):
        """Return [x, y] of the intersection, or -1 for parallel lines."""
        if line.m == self.m:
            return -1
        x = (self.b - line.b) / (line.m - self.m)
        return [x, self.m * x + self.b]

    def at(self, x):
        """Evaluate the line at *x*."""
        return self.m * x + self.b

    def comp(self, l):
        """True when *l* has the same slope and intercept."""
        return self.m == l.m and self.b == l.b
# NOTE(review): the file handle is never closed; consider a with-block.
f = open(sys.argv[1], 'r')
#f = open('solve_these.txt', 'r')
file = f.read()#change this to (read) to read all the lines
# one problem instance per input line
arr = re.split('\n', file)
def Merge(a, b):
    """Merge two line lists, keeping the visible (upper-envelope) pieces.

    Scans *a* backwards looking for the first intersection point between
    consecutive lines of *a* that the current line of *b* lies below; the
    surviving prefix of *a* and suffix of *b* are then concatenated.
    NOTE(review): appears to assume both inputs are ordered as produced by
    MergeVisible -- confirm with the caller.
    """
    for ib, lb in enumerate(b):
        for ia, la in reversed(list(enumerate(a))):
            if ia > 0:  # first one is always visible
                prevLine = a[ia - 1]
                # intersection of two consecutive lines of a
                pos = la.intersect(prevLine)
                y = lb.at(pos[0])
                if y < pos[1]:
                    return a[:ia + 1] + b[ib:]
    # fallback: only the first line of a and the last line of b survive
    return a[:1] + b[-1:]
def MergeVisible(A):
    """Recursively reduce line list *A* to its visible subset via Merge.

    BUG FIX: the original recursed on A[:len(A)//2] and A[len(A)//2 + 1:],
    silently dropping the middle element at every level of the recursion;
    the second half now starts at len(A)//2 so the split is a true
    partition of A.
    """
    if len(A) <= 2:
        return A
    mid = len(A) // 2
    return Merge(MergeVisible(A[:mid]), MergeVisible(A[mid:]))
# Parse each input line into a list of Line objects.
linesArr = []
for idx, a in enumerate(arr):
    # extract every bracketed [...] group from the text line
    arrs = re.findall('\[[^\[^\]]*\]', a)
    numArrs = []
    for currArrStr in arrs:
        # SECURITY NOTE(review): eval() on file contents executes arbitrary
        # code; acceptable only because the input file is trusted/local.
        cArr = eval(currArrStr)
        numArrs.append(cArr)
    currLinesArr = []
    if len(numArrs) > 0:
        # numArrs[0] holds the slopes, numArrs[1] the intercepts
        for lm, lb in zip(numArrs[0], numArrs[1]):
            line = Line(lm, lb)
            currLinesArr.append(line)
    linesArr.append(currLinesArr)

# For each instance, print a True/False flag per line: is it visible?
for lines in linesArr:
    tfArr = []
    visLines = MergeVisible(lines)
    while len(lines) > 0:
        l = lines.pop(0)
        # NOTE(review): visLines[0] raises IndexError if visLines empties
        # before lines does -- confirm MergeVisible's output is a sublist.
        if l.comp(visLines[0]):
            tfArr.append(True)
            visLines.pop(0)
        else:
            tfArr.append(False)
    print(tfArr)
|
cipher = input()
match = ['.', '-.', '--']
# Replace the longest token first so '--' is never mistaken for two '-.' pieces.
for value in range(len(match) - 1, -1, -1):
    cipher = cipher.replace(match[value], str(value))
print(cipher)
|
class IntCode():
    """Minimal Intcode virtual machine (add/mul/io/jump/compare opcodes)."""

    def __init__(self, input_list):
        self.input_list = input_list    # program as loaded
        # NOTE(review): current_list aliases input_list (no copy), so running
        # the machine also mutates the original program list.
        self.current_list = input_list
        self.current_op = 0             # instruction pointer
        self.output = 0                 # last value written by opcode 4
        self.input = 0                  # value consumed by opcode 3

    '''return the immediate or position value depending on mode in [0,1]'''
    def im_pos(self, il, pos, mode):
        return il[il[pos]] if mode == 0 else il[pos]

    '''transform the instruction to remove the immedate/position mode logic'''
    def make_instruction(self, il, op):
        # zero-pad to 5 digits: [mode3][mode2][mode1][2-digit opcode]
        input_string = '{0:05d}'.format(il[op])
        op_code = int(input_string[-2:])
        # last param will always be position mode
        if op_code in [1, 2, 7, 8]:
            new_params = [self.im_pos(il, op+1, int(input_string[2])),
                          self.im_pos(il, op+2, int(input_string[1])),
                          il[op+3]]
        # write-to param always in position mode
        elif op_code == 3:
            new_params = [il[op+1]]
        elif op_code == 4:
            new_params = [self.im_pos(il, op+1, int(input_string[2]))]
        elif op_code in [5, 6]:
            new_params = [self.im_pos(il, op+1, int(input_string[2])),
                          self.im_pos(il, op+2, int(input_string[1]))]
        # NOTE(review): any other opcode (e.g. 99/halt) leaves new_params
        # unbound and raises UnboundLocalError on the next line.
        return op_code, new_params

    '''perform one operation'''
    def one_step(self):
        il = self.current_list
        op = self.current_op
        op_code, params = self.make_instruction(il, op)
        if op_code == 1:        # add
            il[params[2]] = params[0] + params[1]
            op += 4
        elif op_code == 2:      # multiply
            il[params[2]] = params[0] * params[1]
            op += 4
        elif op_code == 3:      # store input value
            inst_input = self.input
            il[params[0]] = inst_input
            op += 2
        elif op_code == 4:      # emit output value
            self.output = params[0]
            op += 2
        elif op_code == 5:      # jump-if-true
            op = params[1] if params[0] != 0 else op + 3
        elif op_code == 6:      # jump-if-false
            op = params[1] if params[0] == 0 else op + 3
        elif op_code == 7:      # less-than
            il[params[2]] = 1 if params[0] < params[1] else 0
            op += 4
        elif op_code == 8:      # equals
            il[params[2]] = 1 if params[0] == params[1] else 0
            op += 4
        self.current_list = il
        self.current_op = op
        return il, op
|
import numpy as np
import random
# Update centroids
def compute_centroids(X, idx, k):
    """Return the k centroids as the mean of the points assigned to each.

    X   : (pixels, n) data matrix
    idx : per-point cluster assignment (values in 0..k-1)
    k   : number of clusters

    Vectorized with boolean masking instead of a double Python loop.
    BUG FIX: a cluster with no assigned points now keeps a zero centroid
    instead of crashing with a division by zero.
    """
    pixels, n = X.shape
    centroids = np.zeros((k, n))
    for i in range(k):
        members = X[idx == i]
        if len(members) > 0:
            centroids[i] = members.mean(axis=0)
    return centroids
def find_closest_centroids(X, centroids):
    """Assign each row of X the index of its nearest centroid (Euclidean).

    Returns a float array of length X.shape[0] holding centroid indices.
    """
    idx = np.zeros(X.shape[0])
    for i in range(X.shape[0]):
        distances = [np.linalg.norm(X[i] - c) for c in centroids]
        # argmin keeps the first occurrence on ties, matching the
        # original strict '<' scan
        idx[i] = int(np.argmin(distances))
    return idx
def init_centroids(X, k):
    """Pick k distinct random rows of X as the initial centroids.

    Draws row indices with random.randint, redrawing on collisions, so the
    RNG consumption matches the original implementation exactly.
    """
    pixels, n = X.shape
    centroids = np.zeros((k, n))
    chosen = []
    while len(chosen) < k:
        candidate = random.randint(0, pixels - 1)
        if candidate not in chosen:
            chosen.append(candidate)
    for row, index in enumerate(chosen):
        centroids[row] = X[index]
    return centroids
#-*- coding: utf-8 -*-
#####
#
#Localization for payroll to the Dominican Republic.
#Modifications to the res_partner_bank object.
#
#Author: Carlos Llamacho @ Open Business Solutions
#
#Date: 2013-10-23
#
#####
from openerp.osv import orm, fields, osv
class ResPartnerBank(orm.Model):
    """Dominican Republic payroll localization: payment fields on bank accounts."""
    _name = 'res.partner.bank'
    _inherit = 'res.partner.bank'
    _columns = {
        # ACH operation code written into the payroll bank file
        'cod_operation': fields.selection((('22','Cr.Cta.Corriente'),
                                           ('32', '32 - Credito Cuenta de Ahorro'),
                                           ('52', '52 - Credito Prestamo/TCR'),
                                           ('42', '42 - Credito Cuenta Contable'),
                                           ('12', '12 - Emision de Cheques'),
                                           ('27', '27 - Debito Cuenta Corriente'),
                                           ('37', '37 - Debito Cuenta Ahorro'),
                                           ('57', '57 - Debito Tarjeta Credito'),
                                           ('47', '47 - Debito Cuenta Contable'),
                                           ('17', '17 - Debito Tarjeta Debito Popular'),
                                           ('28', '28 - Prenotificacion Debito Cuenta Corriente'),
                                           ('38', '38 - Prenotificacion Debito Cuenta Ahorro')),
                                          'Operation Code'),
        # NOTE(review): several banks share the same selection key ('8', '5',
        # '4', '9' and '1' each appear more than once), so those entries are
        # indistinguishable once stored -- confirm the intended key scheme.
        'bank_check_digit': fields.selection((('8', 'Banco Popular Dominicano'),
                                              ('5', '5 - Banco del Progreso'),
                                              ('8', '8 - Banco BHD'),
                                              ('6', '6 - Banco de Reservas'),
                                              ('4', '4 - Republic Bank'),
                                              ('5', '5 - Banco Leon'),
                                              ('4', '4 - Banco Santa Cruz'),
                                              ('1', '1 - Citibank'),
                                              ('0', '0 - Scotiabank'),
                                              ('9', '9 - Asociacion Popular de Ahorros y Prestamos'),
                                              ('9', '9 - Banco Lope de Haro'),
                                              ('8', '8 - BDI'),
                                              ('2', '2 - Banco Promerica'),
                                              ('1', '1 - Banco Caribe'),
                                              ('7', '7 - Asociacion Cibao de Ahorros y Prestamos')),
                                             'Check Digit')
    }

    def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
        """On changing the bank field, pull the bank check digit from the
        res.bank object and add it to the dict returned by the original
        method.

        Returns:
            the super() result dict extended with bank_check_digit when
            bank_id is set; otherwise the method falls through and
            implicitly returns None.
        """
        bank_obj = self.pool.get('res.bank')
        if bank_id:
            result = super(ResPartnerBank, self).onchange_bank_id(cr, uid, ids,
                                                                  bank_id)
            bank = bank_obj.browse(cr, uid, bank_id)
            # assumes res.bank defines a bank_check_digit field -- TODO confirm
            result['value']['bank_check_digit'] = bank.bank_check_digit
            return result

ResPartnerBank()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Defaut:
    """Default rates for a rental-investment calculation.

    Holds the renovation-provision rate, the rental-vacancy rate per flat
    type (T1/T2), and the agency-management rate.
    """

    def __init__(self, provision_travaux_taux,
                 vacance_locative_taux_T1,
                 vacance_locative_taux_T2,
                 gestion_agence_taux):
        self._provision_travaux_taux = provision_travaux_taux
        self._vacance_locative_taux = {
            'T1': vacance_locative_taux_T1,
            'T2': vacance_locative_taux_T2,
        }
        self._gestion_agence_taux = gestion_agence_taux

    @property
    def provision_travaux_taux(self):
        """Renovation-provision rate."""
        return self._provision_travaux_taux

    def vacance_locative_taux(self, type_):
        """Vacancy rate for flat type *type_*; unknown types yield 0."""
        return self._vacance_locative_taux.get(type_, 0)

    @property
    def gestion_agence_taux(self):
        """Agency-management rate."""
        return self._gestion_agence_taux
|
def duplicate_elements(m, n):
    """True when the two iterables share at least one common element."""
    # isdisjoint avoids materialising the full intersection set
    return not set(m).isdisjoint(n)
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-12-21
# Connection settings for the node_infos MySQL database.
# SECURITY NOTE(review): credentials are hard-coded in source control;
# consider loading them from environment variables or a secrets store.
DATABASE = {
    'remote_ip': '39.104.154.79',
    'remote_usr': 'wangch',
    'remote_pwd': '20191104wc',
    'database_usr': 'root',
    'database_pwd': '20191104',
    'database_name': 'node_infos',
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 15:12:20 2019
@author: clair
"""
from imutils.video import VideoStream
import argparse
import imutils
import cv2
from random import randint
import time
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
                help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
                help="OpenCV object tracker type")
ap.add_argument("-a", "--min-area", type=int, default=300,
                help="minimum area size")
args = vars(ap.parse_args())
# one tracker instance per tracked bounding box, kept index-aligned
# with bboxes/colors below
vector_tracker = []
# factory functions for the supported OpenCV tracker implementations
trackers = {
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create
}
def isInBboxes(x, y, w, h, bboxes):
    """Return True when some box in *bboxes* matches (x, y, w, h) within
    a tolerance of 3 on every coordinate.

    PERF FIX: replaces the original quadruple loop (7**4 = 2401 list
    membership tests, each O(len(bboxes))) with one linear pass.
    """
    for (bx, by, bw, bh) in bboxes:
        if abs(x - bx) <= 3 and abs(y - by) <= 3 and abs(w - bw) <= 3 and abs(h - bh) <= 3:
            return True
    return False
bboxes = []   # tracked bounding boxes (x, y, w, h), aligned with colors/vector_tracker
colors = []   # one display colour per tracked box
vs = cv2.VideoCapture(args["video"])
firstFrame = None          # background frame for motion differencing
startTime = time.time()
while True:
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    if frame is None:
        break
    #resize frame
    frame = imutils.resize(frame, width=500)
    # loop over the contours
    currTime = time.time()
    deltaTime = currTime - startTime
    # periodically look for new moving objects via background subtraction
    if (len(bboxes) < 1 and deltaTime >= 0.1) or deltaTime > 5:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if firstFrame is None:
            firstFrame = gray
            continue
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # find contours on the thresholded image
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        for c in cnts:
            if cv2.contourArea(c) < args["min_area"]:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            # skip boxes (nearly) identical to one already tracked
            if isInBboxes(x,y,w,h,bboxes):
                continue
            bb = (x, y, w, h)
            if bb in bboxes:
                continue
            print(bb)
            bboxes.append(bb)
            color = (randint(64, 255), randint(64, 255), randint(64, 255))
            colors.append(color)
            # start a fresh tracker on the new box
            vector_tracker.append(trackers[args["tracker"]]())
            vector_tracker[-1].init(frame, bb)
        startTime = time.time()
    (H, W) = frame.shape[:2]
    if bboxes != []:
        if len(bboxes) >= 1:
            for i, bb in enumerate(bboxes):
                (success, box) = vector_tracker[i].update(frame)
                if success:
                    print(i)
                    print("success")
                    (x, y, w, h) = [int(v) for v in box]
                    cv2.rectangle(frame, (x,y), (x+w,y+h), colors[i], 2)
                else:
                    print("FAILEDDDDDDDDDDDD")
                    # NOTE(review): popping from the lists while enumerating
                    # bboxes skips the next entry and misaligns indices for
                    # the rest of this pass -- confirm and fix separately.
                    vector_tracker.pop(i)
                    bboxes.pop(i)
                    colors.pop(i)
                    startTime = time.time()
                    # firstFrame = None
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("s"):
        # manual selection of an extra box to track
        bb = cv2.selectROI("Frame", frame, fromCenter=False, showCrosshair=True)
        print(bb)
        bboxes.append(bb)
        color = (randint(64, 255), randint(64, 255), randint(64, 255))
        colors.append(color)
        vector_tracker.append(trackers[args["tracker"]]())
        vector_tracker[-1].init(frame,bb)
    elif key == ord("q"):
        break
# webcam streams use .stop(), file captures use .release()
if not args.get("video", False):
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()
cv2.waitKey(1)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from mtenv.envs.control.cartpole import MTCartPole
from mtenv.wrappers.ntasks_id import NTasksId as NTasksIdWrapper
from tests.utils.utils import validate_mtenv
def get_valid_num_tasks() -> List[int]:
    """Task counts that every wrapper must accept."""
    return [10 ** p for p in range(3)]
def get_invalid_num_tasks() -> List[int]:
    """Non-positive task counts that must be rejected."""
    return [-1, 0]
@pytest.mark.parametrize("n_tasks", get_valid_num_tasks())
def test_ntasks_id_wrapper_with_valid_input(n_tasks):
    """Wrapping MTCartPole with a valid task count must validate cleanly."""
    wrapped = NTasksIdWrapper(MTCartPole(), n_tasks=n_tasks)
    validate_mtenv(env=wrapped)
@pytest.mark.parametrize("n_tasks", get_invalid_num_tasks())
def test_ntasks_id_wrapper_with_invalid_input(n_tasks):
    """Invalid task counts must raise during construction or validation."""
    with pytest.raises(Exception):
        validate_mtenv(env=NTasksIdWrapper(MTCartPole(), n_tasks=n_tasks))
|
from scrapy.crawler import CrawlerProcess
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from hashlib import md5
from settings import spider_settings
def run_spider():
    """ Run VoteSpider as new process """
    # CrawlerProcess owns the Twisted reactor; start() blocks until the
    # crawl finishes, so this call does not return until scraping is done.
    process = CrawlerProcess(spider_settings)
    process.crawl(VoteSpider)
    process.start()
class VoteSpider(CrawlSpider):
    """ Rada.gov.ua crawl spider for scraping all vote results from all sessions of the VIII convocation """
    name = 'votes'
    allowed_domains = 'rada.gov.ua',
    start_urls = 'http://w1.c1.rada.gov.ua/pls/zweb2/webproc2_5_1_J?' \
                 'ses=10009&num_s=2&num=&date1=&date2=&name_zp=&out_type=&id=',
    rules = (
        # Navigation rules: paginated session lists, bill pages, vote pages.
        Rule(LinkExtractor(
            allow=[
                r'webproc2_5_1_J\?ses=10009&num_s=2&num=&date1=&date2=&name_zp=&out_type=&id=&page=\d{1,3}&zp_cnt=20',
                r'webproc4_1\?pf3511=\d{5}',
                r'/pls/radan_gs09/ns_zakon_gol_dep_wohf\?zn=.{1,}',
                r'/pls/radan_gs09/ns_golos\?g_id=.{1,}'
            ]),
            follow=True),
        # Terminal rule: RTF export of a vote result gets saved to disk.
        Rule(LinkExtractor(
            allow=r'http://w1\.c1\.rada\.gov\.ua/pls/radan_gs09/ns_golos_rtf\?g_id=.{1,}&vid=0'),
            callback='save_vote_results'
        )
    )

    def __init__(self, save_path='data/{0}.rtf', *args, **kwargs):
        """
        VoteSpider initialization
        :param save_path: relative path for saving scraped files
        """
        super().__init__(*args, **kwargs)
        self.save_path = save_path
        self.file_number = -1   # incremented before each save, so first file is 0
        self.hashes = set()     # md5 digests of bodies already written

    def save_vote_results(self, response):
        """ Parse response and save vote results to file """
        digest = md5(response.body).hexdigest()
        # check duplicates by md5 hash
        if digest in self.hashes:
            self.logger.info('Duplicate file with hash: {0}'.format(digest))
            return
        self.hashes.add(digest)
        self.file_number += 1
        filename = self.save_path.format(self.file_number)
        with open(filename, 'wb') as out:
            out.write(response.body)
        self.logger.info('File saved: {0}, from: {1}'.format(filename, response.url))
if __name__ == '__main__':
    # Allow running the crawler directly; run_spider() blocks until done.
    run_spider()
    print('Done')
|
from imageai.Detection.Custom import CustomObjectDetection, CustomVideoObjectDetection
import os
import cv2
detector = None
execution_path = os.getcwd()
def load():
    # construct and display model
    # Builds a YOLOv3 custom video detector from the weights file and
    # detection_config.json located next to this script, and stores it in
    # the module-level `detector`.  Must be called before get_frame(),
    # which reads that global.
    global detector
    detector = CustomVideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(detection_model_path=os.path.join(execution_path, "detection_model-ex-33--loss-4.97.h5"))
    detector.setJsonPath(configuration_json=os.path.join(execution_path, "detection_config.json"))
    detector.loadModel()
def get_frame(path, interval=1):
    """Yield detection results frame-by-frame from a camera or video file.

    :param path: a decimal string (e.g. "0") selects a local camera by
        index; anything else is treated as a video file path relative to
        ``execution_path``.
    :param interval: frame_detection_interval forwarded to the detector.
        Default 1 preserves the original behaviour.
    :yields: whatever ``detector.detectObjectsFromVideo`` generates per frame.

    NOTE(review): relies on the module-level ``detector`` being initialized
    by ``load()`` first.
    """
    # The two branches differ only in the input source; share the rest.
    common_kwargs = dict(
        frames_per_second=1,
        frame_detection_interval=interval,
        minimum_percentage_probability=1,
        # log_progress=True,
    )
    if path.isdecimal():
        # Numeric string -> open the corresponding local camera device.
        gen_frame = detector.detectObjectsFromVideo(
            camera_input=cv2.VideoCapture(int(path)), **common_kwargs)
    else:
        gen_frame = detector.detectObjectsFromVideo(
            input_file_path=os.path.join(execution_path, path), **common_kwargs)
    for frame in gen_frame:
        yield frame
|
# Generated by Django 3.0.5 on 2020-10-31 07:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the attendance app (auto-generated by Django 3.0.5)."""

    initial = True

    dependencies = [
        # Peserta.user references akun.Profil, so 'akun' must migrate first.
        ('akun', '0001_initial'),
    ]

    operations = [
        # Jadwal: an auto-dated record with check-in/check-out times.
        migrations.CreateModel(
            name='Jadwal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tgl_absen', models.DateField(auto_now_add=True)),
                ('jam_masuk', models.TimeField()),
                ('jam_pulang', models.TimeField()),
            ],
        ),
        # Kategori: a simple named category.
        migrations.CreateModel(
            name='Kategori',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nama_kategori', models.CharField(max_length=30)),
            ],
        ),
        # Peserta: links a user profile to a Jadwal.
        migrations.CreateModel(
            name='Peserta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('jadwal_peserta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='absensi_apps.Jadwal')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='akun.Profil')),
            ],
        ),
        # Status: a free-standing status label (no FK added in this migration).
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(max_length=10)),
            ],
        ),
        # Scan: an auto-timestamped scan event belonging to a Peserta.
        migrations.CreateModel(
            name='Scan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('scan_jam', models.TimeField(auto_now_add=True)),
                ('tgl_scan', models.DateField(auto_now_add=True)),
                ('peserta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='absensi_apps.Peserta')),
            ],
        ),
        # Added after both models exist: Jadwal -> Kategori FK.
        migrations.AddField(
            model_name='jadwal',
            name='kategori',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='absensi_apps.Kategori'),
        ),
    ]
|
import sys
from pyinit import *
from labels import *
from math import exp
h.celsius = 37
h.load_file("pywrap.hoc")
from conf import *
# determine config file name
def setfcfg ():
    """Return the simulation config file name.

    Scans sys.argv for arguments ending in ".cfg" that exist on disk; the
    last such argument wins.  Falls back to "netcfg.cfg" when none is found.
    """
    fcfg = "netcfg.cfg" # default config file name
    # Fix: the original used xrange, which is Python 2 only (NameError on
    # Python 3).  Iterating argv directly works on both and keeps the
    # "last matching argument wins" behaviour.
    for arg in sys.argv:
        if arg.endswith(".cfg") and os.path.exists(arg):
            fcfg = arg
    return fcfg
fcfg=setfcfg() # config file name
dconf = readconf(fcfg)
# --- parameters pulled from the config dict ---
taurcada = dconf['taurcada'] # calcium removal tau (assigned to calts.tau below)
h.cac_hcnwino = 0.006
h.k4_hcnwino = dconf['iark4']
ihginc = h.ginc_hcnwino = dconf['ihginc'];
recdt = dconf['recdt'] # recording time step (config)
recvdt = dconf['recvdt'] # voltage recording time step (used by saferecord in add_comp)
erevh = dconf['erevh'] # assigned to sec.ehwino (h-current reversal)
spaceum = dconf['spaceum'] # target segment length in um; <= 0 disables re-segmentation
h_lambda = dconf['h_lambda'] # length constant for the dendritic exp(d/h_lambda) gradients
h_gbar = dconf['h_gbar'] # for E cells
fs_h_gbar = dconf['fs_h_gbar'] # h conductance for FS (Bas) cells
lts_h_gbar = dconf['lts_h_gbar'] # h conductance for LTS cells
cagk_gbar = dconf['cagk_gbar'] # cagk (SK) conductance
ikc_gkbar = dconf['ikc_gkbar'] # ikc (BK) conductance
cabar = dconf['cabar'] # used for E cells
lts_cabar = dconf['lts_cabar'] # Ca conductance for LTS cells
# NMDA rise/decay time constants for E->E and E->I synapses
tau1NMDAEE=15; tau2NMDAEE=150;
tau1NMDAEI=15; tau2NMDAEI=150;
nax_gbar = dconf['nax_gbar']
kdr_gbar = dconf['kdr_gbar']
kap_gbar = dconf['kap_gbar']
kdmc_gbar = dconf['kdmc_gbar']
km_gmax = dconf['km_gmax']
##
from syn import *
def saferecord (var, rdt):
    """Record NEURON variable *var* into a new Vector.

    A positive *rdt* pre-sizes the vector for h.tstop and samples at that
    fixed interval; otherwise the integrator (cvode) picks the time points.
    """
    if rdt <= 0.0:
        vrec = h.Vector()
        vrec.record(var)
        return vrec
    vrec = h.Vector(h.tstop/rdt + 1)
    vrec.record(var,rdt)
    return vrec
###############################################################################
# General Cell
###############################################################################
class Cell:
    """General cell: builds a soma, then defers morphology, conductances,
    synapses and current injection to subclass hooks."""
    def __init__ (self,x,y,z,ID,ty):
        # Position (x, y, z), numeric ID and cell-type tag.
        self.x=x
        self.y=y
        self.z=z
        self.ID=ID
        self.ty = ty
        self.snames = [] # list of section names
        self.all_sec = []
        # Every cell gets at least a soma; True -> record its voltage.
        self.add_comp('soma',True)
        # Template-method hooks, overridden by subclasses.
        self.set_morphology()
        self.set_conductances()
        self.set_synapses()
        self.set_inj()
    def set_morphology (self): pass
    def set_conductances (self): pass
    def set_synapses (self): pass
    def set_inj (self): self.somaInj = h.IClamp(0.5, sec=self.soma)
    def add_comp (self, name, rec):
        # Create a section, expose it as an attribute (self.<name>) and,
        # when rec is True, record its midpoint voltage via saferecord.
        self.snames.append( name )
        self.__dict__[name] = h.Section()
        self.all_sec.append(self.__dict__[name])
        if rec: # Record voltage
            self.__dict__[name+"_volt"] = saferecord(self.__dict__[name](0.5)._ref_v, recvdt)
            self.__dict__[name+"_volt"].label(name+"_volt")
###############################################################################
# Soma-targeting interneuron (fast-spiking Basket Cell -- Bas)
###############################################################################
class Bas (Cell):
    """Basket cell: fast-spiking, soma-targeting interneuron."""
    def set_morphology(self):
        # Single cylindrical compartment sized so its lateral surface
        # (pi * diam * L = diam**2) equals total_area.
        total_area = 10000 # um2
        self.soma.nseg = 1
        self.soma.cm = 1 # uF/cm2
        diam = sqrt(total_area) # um
        L = diam/pi # um
        h.pt3dclear(sec=self.soma)
        h.pt3dadd(self.x, self.y, self.z, diam, sec=self.soma)
        h.pt3dadd(self.x, self.y, self.z+L, diam, sec=self.soma)
    def set_conductances(self): # Bas
        # Passive leak + fast Na + delayed-rectifier K; HCN1 added below.
        cap = 1.0 # uF/cm2
        rall = 150.0 # axial resistivity
        rm = 10e3 # membrane resistance
        Vrest = -79.8
        p_ek = -85.0
        p_ena = 55.0
        sh_nax = 0.0
        sec = self.soma
        sec.insert('k_ion')
        sec.insert('na_ion')
        #sec.insert('ca_ion')
        sec.insert('pas') # passive
        sec.insert('nax') # Na current
        sec.insert('kdr') # K delayed rectifier current
        # erev
        sec.ek = p_ek # K+ current reversal potential (mV)
        sec.ena = p_ena # Na+ current reversal potential (mV)
        # passive
        sec.g_pas = 1.0/rm
        sec.Ra = rall
        sec.cm = cap
        sec.e_pas = Vrest
        # Na
        sec.gbar_nax = nax_gbar
        sec.sh_nax = sh_nax
        # KDR
        sec.gbar_kdr = kdr_gbar
        # h-current with the FS-specific conductance from the config
        self.soma.insert('HCN1')
        self.soma(0.5).HCN1.gbar = fs_h_gbar
    def set_synapses(self):
        # Fast/slow GABA (e=-80) plus AMPA and NMDA (e=0), all on the soma.
        self.somaGABAf=Synapse(sect=self.soma,loc=0.5,tau1=0.07,tau2=9.1,e=-80);#self.dSy['somaGABAf']=self.somaGABAf;
        self.somaGABAss=Synapse(sect=self.soma,loc=0.5,tau1=20,tau2=40,e=-80);#self.dSy['somaGABAss']=self.somaGABAss;
        self.somaAMPA=Synapse(sect=self.soma,loc=0.5,tau1=0.05,tau2=5.3,e=0);#self.dSy['somaAMPAf']=self.somaAMPAf;
        self.somaNMDA=SynapseNMDA(sect=self.soma,loc=0.5, tau1NMDA=tau1NMDAEI,tau2NMDA=tau2NMDAEI,r=1,e=0);
###############################################################################
# Dendrite-targeting interneuron (LTS Cell)
###############################################################################
class Lts (Cell):
    """LTS cell: dendrite-targeting, low-threshold-spiking interneuron."""
    def set_morphology(self):
        # Single cylindrical compartment sized so its lateral surface
        # (pi * diam * L = diam**2) equals total_area.
        total_area = 10000 # um2
        self.soma.nseg = 1
        self.soma.cm = 1 # uF/cm2
        diam = sqrt(total_area) # um
        L = diam/pi # um
        h.pt3dclear(sec=self.soma)
        h.pt3dadd(self.x, self.y, self.z, diam, sec=self.soma)
        h.pt3dadd(self.x, self.y, self.z+L, diam, sec=self.soma)
    def set_conductances(self): # LTS
        # Same passive/Na/KDR scaffold as Bas, plus Ca, K(Ca) and Ih
        # mechanisms specific to LTS cells (inserted at the bottom).
        cap = 1.0
        rall = 150.0
        rm = 10e3
        Vrest = -79.8
        p_ek = -85.0
        p_ena = 55.0
        sh_nax = 0.0
        sec = self.soma
        sec.insert('k_ion')
        sec.insert('na_ion')
        sec.insert('pas') # passive
        sec.insert('nax') # Na current
        sec.insert('kdr') # K delayed rectifier current
        # erev
        sec.ek = p_ek # K+ current reversal potential (mV)
        sec.ena = p_ena # Na+ current reversal potential (mV)
        # passive
        sec.g_pas = 1.0/rm
        sec.Ra = rall
        sec.cm = cap
        sec.e_pas = Vrest
        # Na
        sec.gbar_nax = nax_gbar
        sec.sh_nax = sh_nax
        # KDR
        sec.gbar_kdr = kdr_gbar
        # ca-related
        sec.insert('icalts')
        sec(0.5).icalts.gca = lts_cabar
        sec.insert('kcalts')
        sec.insert('ihlts')
        sec(0.5).ihlts.gh = lts_h_gbar
        sec.insert('calts') # calcium extrusion
        sec(0.5).calts.tau = taurcada # removal time constant from config
    def set_synapses(self):
        # Fast/slow GABA (e=-80) plus AMPA and NMDA (e=0), all on the soma.
        self.somaGABAf = Synapse(sect=self.soma, loc=0.5, tau1=0.07, tau2=9.1, e=-80)
        self.somaGABAss = Synapse( sect=self.soma, loc=0.5, tau1=20, tau2=40, e=-80)#originally for septal input
        self.somaAMPA = Synapse( sect=self.soma, loc=0.5, tau1=0.05, tau2=5.3, e=0)
        self.somaNMDA = SynapseNMDA(sect=self.soma, loc=0.5, tau1NMDA=tau1NMDAEI, tau2NMDA=tau2NMDAEI, r=1, e=0)
# Short aliases for the interneuron classes.
LTS = Lts
FS = Bas
###############################################################################
# Pyramidal Cell
###############################################################################
class PyrAdr (Cell):
    """Pyramidal cell: soma, 3-compartment apical trunk and one basal dendrite."""
    def __init__(self,x,y,z,ID,ty):
        Cell.__init__(self,x,y,z,ID,ty)
        # Densities are applied only after Cell.__init__ has created the
        # sections and set_conductances() has inserted the mechanisms.
        self.set_props()
    # NOTE(review): presumably the sections whose recorded traces are used
    # by code outside this file -- confirm before relying on it.
    lrec = ['soma','Adend3']
    def set_morphology(self):
        # soma + apical chain (Adend1 -> Adend2 -> Adend3) + basal Bdend;
        # voltage is recorded on Bdend and Adend3 (soma handled by Cell).
        self.add_comp('Bdend',True)
        self.add_comp('Adend1',False)
        self.add_comp('Adend2',False)
        self.add_comp('Adend3',True)
        self.apic = [self.Adend1, self.Adend2, self.Adend3]
        self.basal = [self.Bdend]
        sec = self.soma; sec.L = 20.0; sec.diam = 20.0
        # NOTE(review): E5R/E5B/E5P presumably come from the star-import of
        # labels; these cell types get longer apical compartments.
        if self.ty == E5R or self.ty == E5B or self.ty == E5P: apicL = 300.0
        else: apicL = 150.0
        #else: apicL = 300.0
        for sec in self.apic:
            sec.L = apicL; sec.diam = 2.0
        self.Bdend.L = 200.0; self.Bdend.diam = 2.0
        # Topology: Bdend <- soma -> Adend1 -> Adend2 -> Adend3
        self.Bdend.connect(self.soma, 0, 0)
        self.Adend1.connect(self.soma, 1, 0)
        self.Adend2.connect(self.Adend1, 1, 0)
        self.Adend3.connect(self.Adend2, 1, 0)
        if spaceum > 0.0:
            # Re-discretize so segments are ~spaceum long; nseg is forced
            # odd so a segment center sits exactly at x = 0.5.
            for sec in self.all_sec:
                ns = int(sec.L / spaceum)
                if ns % 2 == 0: ns += 1
                sec.nseg = ns
    def set_props (self): # PYR
        """Assign channel densities and kinetic parameters to all compartments."""
        Vrest = -79.8
        h.v_init = -60.0
        #h.v_init = -79.8 # -70 # -75 # -79.8 # Vrest # -79.8
        # passive properties
        cap = 1.0
        rall = 150.0
        rm = 10e3
        # Na, K reversal potentials calculated from
        # internal and external solutions via Nernst equation
        p_ek = -85.0
        p_ena = 55.0
        # h-current
        #h.erev_h = -42.0
        gbar_h = h_gbar
        # d-current
        kdmc_gbar_somam = 20
        # na,k -- *_somam values are soma multipliers on the base density
        sh_nax = 0.0
        gbar_nax = nax_gbar
        nax_gbar_somam = 5
        kdr_gbar_somam = 5
        # A few kinetic params changed vis-a-vis kdr_BS.mod defaults:
        h.a0n_kdr = 0.0075 # def 0.02
        h.nmax_kdr = 20.0 # def 2
        sh_kap = 0.0
        kap_gbar_somam = 5
        # A few kinetic params changed from kap_BS.mod defaults:
        h.vhalfn_kap = 35.0 # def 11
        h.nmin_kap = 0.4 # def 0.1
        h.lmin_kap = 5.0 # def 2
        h.tq_kap = -45.0 # def -40
        # other ion channel parameters
        cal_gcalbar = cabar
        can_gcanbar = cabar
        cat_gcatbar = cabar
        calginc = 1.0 # 2.0 - middle might need to get more but can leave out
        # soma (*_somam) and basal (*_bdendm) multipliers for Ca channels
        cal_gbar_somam = can_gbar_somam = cat_gbar_somam = 0.1
        cal_gbar_bdendm = can_gbar_bdendm = cat_gbar_bdendm = 0.25
        ikc_gbar_dendm = 0.25
        # --- settings applied uniformly to every section ---
        for sec in self.all_sec:
            # erev
            sec.ek = p_ek # K+ current reversal potential (mV)
            sec.ena = p_ena # Na+ current reversal potential (mV)
            # passive
            sec.g_pas = 1.0/rm
            sec.Ra = rall
            sec.cm = cap
            sec.e_pas = Vrest
            # Ih
            sec.ehwino = erevh
            for seg in sec:
                seg.hcnwino.k2 = 1e-4 # 1e-5 #
                seg.hcnwino.ghbar = gbar_h
            # Na
            sec.gbar_nax = gbar_nax
            sec.sh_nax = sh_nax
            # KDR
            sec.gbar_kdr = kdr_gbar
            # K-A
            sec.gbar_kap = kap_gbar
            sec.sh_kap = sh_kap
        # --- soma-specific densities (base value times soma multiplier) ---
        soma = self.soma
        soma.gbar_kdmc = kdmc_gbar * kdmc_gbar_somam
        soma.gbar_nax = nax_gbar * nax_gbar_somam
        soma.gbar_kdr = kdr_gbar * kdr_gbar_somam
        soma.gbar_kap = kap_gbar * kap_gbar_somam
        soma.gkbar_ikc = ikc_gkbar
        soma.gcalbar_cal = cal_gcalbar * cal_gbar_somam
        soma.gcanbar_can = can_gcanbar * can_gbar_somam
        soma.gcatbar_cat = cat_gcatbar * cat_gbar_somam
        h.distance(0,0.5,sec=self.soma) # middle of soma is origin for distance
        # --- apical dendrites: several densities grow exponentially with
        # distance d from the soma (length constant h_lambda) ---
        for sec in self.apic:
            sec.gcalbar_cal = cal_gcalbar
            sec.gcanbar_can = can_gcanbar
            sec.gcatbar_cat = cat_gcatbar
            sec.gkbar_ikc = ikc_gkbar * ikc_gbar_dendm
            sec.gbar_cagk = cagk_gbar
            for seg in sec:
                d = h.distance(seg.x,sec=sec)
                seg.hcnwino.ghbar = gbar_h * exp(d/h_lambda)
                seg.gmax_km = km_gmax * exp(d/h_lambda)
                seg.gbar_kap = soma.gbar_kap * exp(d/h_lambda)
                seg.gbar_kdr = soma.gbar_kdr * exp(d/h_lambda)
        self.apic[1].gcalbar_cal = cal_gcalbar * calginc # middle apical dend gets more iL
        self.apic[2].cm = 2.0
        # --- basal dendrite ---
        Bdend = self.Bdend
        Bdend.gcalbar_cal = cal_gcalbar * cal_gbar_bdendm
        Bdend.gcanbar_can = can_gcanbar * can_gbar_bdendm
        Bdend.gcatbar_cat = cat_gcatbar * cat_gbar_bdendm
        Bdend.gkbar_ikc = ikc_gkbar * ikc_gbar_dendm
        Bdend.gbar_cagk = cagk_gbar
        Bdend.gbar_kap = soma.gbar_kap; Bdend.gbar_kdr = soma.gbar_kdr
        Bdend.gmax_km = km_gmax
    def set_conductances (self): # insert the conductances
        """Insert membrane mechanisms; densities are assigned later in set_props()."""
        for sec in self.all_sec:
            sec.insert('k_ion')
            sec.insert('na_ion')
            sec.insert('ca_ion')
            sec.insert('pas') # passive
            sec.insert('hcnwino') # H channel in Ih.mod
            sec.insert('nax') # Na current
            sec.insert('kdr') # K delayed rectifier current
            sec.insert('kap') # K-A current
            # calcium-related channels
            sec.insert('cal') # cal_mig.mod
            sec.insert('can') # can_mig.mod
            sec.insert('cat') # cat_mig.mod
            sec.insert('ikc') # IC.mod - ca and v dependent k channel - BK
        soma = self.soma; self.soma.insert('kdmc') # K-D current in soma only
        for sec in self.apic:
            sec.insert('km') # km.mod
            sec.insert('cagk') # cagk.mod - SK
        self.Bdend.insert('km') # km.mod
        self.Bdend.insert('cagk') # cagk.mod - SK
    def set_synapses(self):
        """Create synapse objects on soma, basal and each apical compartment."""
        erevgaba = -80
        self.somaGABAf = Synapse(sect=self.soma,loc=0.5,tau1=0.07,tau2=9.1,e=erevgaba)
        self.somaAMPA = Synapse(sect=self.soma,loc=0.5,tau1=0.05,tau2=5.3,e=0)
        bdsyloc = 0.5
        self.BdendAMPA = Synapse(sect=self.Bdend,loc=bdsyloc,tau1=0.05, tau2=5.3,e=0)
        self.BdendNMDA = SynapseNMDA(sect=self.Bdend,loc=bdsyloc,tau1NMDA=tau1NMDAEE,tau2NMDA=tau2NMDAEE,r=1,e=0)
        self.Adend1GABAs = Synapse(sect=self.Adend1,loc=0.5,tau1=0.2,tau2=20,e=erevgaba)
        self.Adend2GABAs = Synapse(sect=self.Adend2,loc=0.5,tau1=0.2,tau2=20,e=erevgaba)
        self.Adend3GABAs = Synapse(sect=self.Adend3,loc=0.5,tau1=0.2,tau2=20,e=erevgaba)
        self.Adend3GABAf = Synapse(sect=self.Adend3,loc=0.5,tau1=0.07,tau2=9.1,e=erevgaba)
        self.Adend3AMPA = Synapse(sect=self.Adend3,loc=0.5,tau1=0.05,tau2=5.3,e=0)
        self.Adend3NMDA = SynapseNMDA(sect=self.Adend3,loc=0.5,tau1NMDA=tau1NMDAEE,tau2NMDA=tau2NMDAEE,r=1,e=0)
        self.Adend2AMPA = Synapse(sect=self.Adend2,loc=0.5,tau1=0.05,tau2=5.3,e=0)
        self.Adend2NMDA = SynapseNMDA(sect=self.Adend2,loc=0.5,tau1NMDA=tau1NMDAEE,tau2NMDA=tau2NMDAEE,r=1,e=0)
        self.Adend1AMPA = Synapse(sect=self.Adend1,loc=0.5,tau1=0.05,tau2=5.3,e=0)
        self.Adend1NMDA = SynapseNMDA(sect=self.Adend1,loc=0.5,tau1NMDA=tau1NMDAEE,tau2NMDA=tau2NMDAEE,r=1,e=0)
        # metabotropic/GABA-B synapses on each apical compartment
        self.Adend3mGLUR = SynapsemGLUR(sect=self.Adend3,loc=0.5)
        self.Adend3GABAB = SynapseGABAB(sect=self.Adend3,loc=0.5)
        self.Adend2mGLUR = SynapsemGLUR(sect=self.Adend2,loc=0.5)
        self.Adend2GABAB = SynapseGABAB(sect=self.Adend2,loc=0.5)
        self.Adend1mGLUR = SynapsemGLUR(sect=self.Adend1,loc=0.5)
        self.Adend1GABAB = SynapseGABAB(sect=self.Adend1,loc=0.5)
#######################################
# some utils to avoid the h.          #
# Module-level aliases so callers can write e.g. Vector() / NQS() instead
# of h.Vector() / h.NQS(); the hoc-side names come from the hoc files
# loaded above.
vlk = h.vlk
Vector = h.Vector
NQS = h.NQS
gg = h.gg
ge = h.ge
Random = h.Random
List = h.List
Matrix = h.Matrix
nqsdel = h.nqsdel
Graph = h.Graph
vrsz = h.vrsz
allocvecs = h.allocvecs
NetCon = h.NetCon
NetStim = h.NetStim
#######################################
|
__author__ = 'Matthijs'
import ConfigParser;
import StringIO
config = ConfigParser.ConfigParser()
def getConfig():
    """Return the shared config object, reading config.ini on first use."""
    if not config.sections():
        # No sections loaded yet -> lazily read the file once.
        config.read('config.ini')
    return config
def loadConfig(contents):
    """Populate the shared config object from an in-memory INI string."""
    buf = StringIO.StringIO(contents)
    config.readfp(buf)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
from openerp.exceptions import ValidationError
class InvoiceJournalConfig(models.Model):
    """Per-shop NCF (fiscal receipt) journal configuration."""
    _name = "shop.ncf.config"

    name = fields.Char("Sucursal", size=40, required=False)
    final = fields.Many2one("account.journal", "Consumidor final", required=False)
    fiscal = fields.Many2one("account.journal", u"Para crédito fiscal", required=False)
    special = fields.Many2one("account.journal", u"Regímes especiales", required=False)
    gov = fields.Many2one("account.journal", "Gubernalmentales", required=False)
    nc = fields.Many2one("account.journal", u"Notas de crédito", required=False)
    nd = fields.Many2one("account.journal", u"Notas de débito", required=False)
    default_warehouse = fields.Many2one("stock.warehouse", u"Almacén predeterminado")

    @api.v8
    @api.multi
    def get_default(self):
        """Return the default shop NCF config id for the current user.

        Preference order: the user's own ``shop_ncf_config_id``, then the
        first existing ``shop.ncf.config`` record.  Raises ValidationError
        when no configuration exists; returns False on unexpected errors.

        TODO: create correct demo data; installing with demo data used to
        fail because the new field ``shop_ncf_config_id`` was not included
        yet, which is why the broad exception handler below is kept.
        """
        try:
            resp = False
            if self.env.user.shop_ncf_config_id.id:
                resp = self.env.user.shop_ncf_config_id.id
            elif self.search_count([]) > 0:
                resp = self.search([])[0].id
            if not resp:
                raise ValidationError(
                    u"Se debe realizar la configuración de los comprobantes fiscales antes de realizar una factura!")
            return resp
        except ValidationError:
            # Fix: the original bare ``except:`` silently swallowed the very
            # ValidationError raised above, so the user never saw the
            # message.  Let it propagate.
            raise
        except Exception:
            # Fix: was a bare ``except:`` (also caught SystemExit /
            # KeyboardInterrupt).  Keep the best-effort fallback for the
            # demo-data install issue described in the TODO.
            return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.