# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import os
from datetime import timedelta
import numpy
from PIL import Image, ImageDraw, ImageFont
from crequest.middleware import CrequestMiddleware
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files import File
from django.db import models
from django.db.models import Sum, Avg, Q, Count
from django.utils import timezone
from isoweek import Week
from djanban.apps.base.auth import get_user_boards, get_member_boards, user_is_administrator
class Member(models.Model):
DEFAULT_MAX_NUMBER_OF_BOARDS = None
creator = models.ForeignKey("members.Member", related_name="created_members", null=True, default=None, blank=True)
user = models.OneToOneField(User, verbose_name=u"Associated user", related_name="member", null=True, default=None)
custom_avatar = models.ImageField(verbose_name=u"Custom avatar", blank=True, null=True, default=None)
default_avatar = models.ImageField(verbose_name=u"Default avatar", null=True, default=None)
biography = models.TextField(verbose_name=u"Biography", blank=True, default="")
is_developer = models.BooleanField(verbose_name=u"Is this member a developer?",
help_text=u"Informs if this member is a developer and hence will receive reports"
u" and other information", default=False)
on_holidays = models.BooleanField(verbose_name=u"Is this developer on holidays?",
help_text=u"If the developer is on holidays will stop receiving reports "
u"and other emails", default=False)
minimum_working_hours_per_day = models.PositiveIntegerField(
verbose_name=u"Minimum number hours this developer should complete each day",
default=None, null=True, blank=True)
minimum_working_hours_per_week = models.PositiveIntegerField(
verbose_name=u"Minimum number of hours this developer should complete per week",
default=None, null=True, blank=True)
max_number_of_boards = models.PositiveIntegerField(
verbose_name=u"Max number of boards",
help_text=u"Maximum number of boards this member can fetch. If null, unlimited number of boards",
default=None, null=True
)
is_public = models.BooleanField(
verbose_name=u"Is this member public?",
help_text=u"If checked, this user will be seen by other members and they will be able to add it to their boards",
default=False, blank=True
)
# Constructor for Member
def __init__(self, *args, **kwargs):
super(Member, self).__init__(*args, **kwargs)
# Adjust spent time
def adjust_spent_time(self, spent_time, date):
        # Check which spent time factor this spent time belongs to according to the date:
        # if the date falls in any spent time factor interval, apply that factor to
        # the spent time
spent_time_factors = self.spent_time_factors.all()
for spent_time_factor in spent_time_factors:
if (spent_time_factor.start_date is None and spent_time_factor.end_date is None) or\
(spent_time_factor.start_date <= date and spent_time_factor.end_date is None) or \
(spent_time_factor.start_date <= date <= spent_time_factor.end_date):
adjusted_value = spent_time * spent_time_factor.factor
return adjusted_value
        # If the date does not fall in any interval, return the spent time unchanged
return spent_time
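    # Illustrative example (hypothetical values): with a single SpentTimeFactor of
    # factor=0.5 whose interval covers the given date, the spent time is halved:
    #   member.adjust_spent_time(spent_time=8, date=datetime.date(2017, 6, 1))  # -> 4.0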
# Adjust spent time according to the factor specified by date intervals
def adjust_daily_spent_time(self, daily_spent_time, attribute="spent_time", spent_time_factors=None):
if spent_time_factors is None:
spent_time_factors = self.spent_time_factors.all()
return Member.adjust_daily_spent_time_from_spent_time_factors(
daily_spent_time=daily_spent_time,
spent_time_factors=spent_time_factors,
attribute=attribute
)
# Adjust spent time according to the spent time factors passed as parameters
@staticmethod
def adjust_daily_spent_time_from_spent_time_factors(daily_spent_time, spent_time_factors, attribute="spent_time"):
date = daily_spent_time.date
adjusted_value = getattr(daily_spent_time, attribute)
if adjusted_value is None:
return 0
for spent_time_factor in spent_time_factors:
if (spent_time_factor.start_date is None and spent_time_factor.end_date is None) or\
(spent_time_factor.start_date <= date and spent_time_factor.end_date is None) or \
(spent_time_factor.start_date <= date <= spent_time_factor.end_date):
original_value = getattr(daily_spent_time, attribute)
adjusted_value = original_value * spent_time_factor.factor
#print "{0} {1} * {2} = {3}".format(self.external_username, original_value, spent_time_factor.factor, adjusted_value)
return adjusted_value
return adjusted_value
# A native member is one that has no Trello profile
@property
def is_native(self):
return not self.has_trello_profile
# Inform if this member was fetched from Trello (alias method).
@property
def has_trello_profile(self):
return hasattr(self, "trello_member_profile") and self.trello_member_profile
# Inform if this member was fetched from Trello
@property
def has_trello_member_profile(self):
return self.has_trello_profile
    # Does this member have credentials to perform actions with the Trello API?
@property
def has_trello_credentials(self):
return self.has_trello_profile and self.trello_member_profile.is_initialized
# Inform if this user has an initialized profile for one of the backends
@property
def is_initialized(self):
if self.is_native:
return True
return self.has_trello_profile and self.trello_member_profile.is_initialized
@property
def uuid(self):
if self.has_trello_profile:
return self.trello_member_profile.trello_id
return self.id
# Alias very useful for now
@property
def external_username(self):
if self.has_trello_profile:
return self.trello_member_profile.username
if self.user:
return self.user.username
return "Member {0}".format(self.id)
@property
def initials(self):
if self.has_trello_profile:
return self.trello_member_profile.initials
if self.user:
return self.user.username
return "Member {0}".format(self.id)
# Return the members this member can see. That is:
# - Members of one of his/her boards.
# - Members created by this member.
# - Public members.
@property
def viewable_members(self):
boards = []
if self.user:
boards = get_user_boards(self.user)
return Member.objects.filter(Q(boards__in=boards) | Q(creator=self) | Q(is_public=True)).distinct()
# Get member companions of the same boards
@property
def team_mates(self):
return Member.get_user_team_mates(self.user)
# Get members that work with this user
@staticmethod
def get_user_team_mates(user):
boards = get_user_boards(user)
if user_is_administrator(user):
return Member.objects.all().exclude(user=user).distinct().order_by("id")
return Member.objects.filter(boards__in=boards).exclude(user=user).distinct().order_by("id")
# Get member on the same boards
# This method will always return at least one member (if user is a member)
@property
def team_members(self):
return Member.get_user_team_mates(self.user)
# Get members of the same team of this user
# This method will always return at least one member (if user is a member)
@staticmethod
def get_user_team_members(user):
boards = get_user_boards(user)
return Member.objects.filter(boards__in=boards).distinct().order_by("id")
# Resets the password of the associated user of this member
def reset_password(self, new_password=None):
        # A member without an associated user cannot have their password changed
        if not self.user:
            raise ValueError(u"This member has no associated user")
        # Automatically create a new password if none is passed
        if new_password is None:
            new_password = User.objects.make_random_password()
        # Set the new password on the associated user
self.user.set_password(new_password)
self.user.save()
return new_password
    # Returns cards that belong to this member and are currently under development
def get_current_development_cards(self, board=None):
development_cards = self.cards.filter(board__is_archived=False, is_closed=False, list__type="development")
# Filtering development cards by board
if board:
return development_cards.filter(board=board)
return development_cards
    # Returns development cards ordered descending by when they were last worked on.
def get_last_development_cards(self, board=None):
development_cards = self.get_current_development_cards(board=board)
return development_cards.order_by("-last_activity_datetime")
# Return the last notifications (10 by default)
def get_last_unread_notifications(self, number=10):
return self.received_notifications.filter(is_read=False).order_by("-creation_datetime")[:number]
    # Returns the number of hours this member has developed today
def get_today_spent_time(self, board=None):
# Getting the spent time for today
now = timezone.now()
today = now.date()
return self.get_spent_time(today, board)
    # Returns the number of adjusted hours this member has developed today
def get_today_adjusted_spent_time(self, board=None):
# Getting the adjusted spent time for today
now = timezone.now()
today = now.date()
return self.get_adjusted_spent_time(today, board)
# Returns the number of hours this member developed yesterday
def get_yesterday_spent_time(self, board=None):
now = timezone.now()
today = now.date()
yesterday = today - timedelta(days=1)
return self.get_spent_time(yesterday, board)
# Returns the number of adjusted hours this member developed yesterday
def get_yesterday_adjusted_spent_time(self, board=None):
now = timezone.now()
today = now.date()
yesterday = today - timedelta(days=1)
return self.get_adjusted_spent_time(yesterday, board)
    # Returns the number of hours this member has developed on a given date
def get_spent_time(self, date=None, board=None):
spent_time_on_date_filter = {}
# If we pass the date, filter adjusted spent time of this member by date
if date is not None:
spent_time_on_date_filter["date"] = date
# If we pass the board, only this board spent times will be given
if board is not None:
spent_time_on_date_filter["board"] = board
return self._sum_spent_time_from_filter(spent_time_on_date_filter)
    # Returns the number of adjusted hours this member has developed on a given date
def get_adjusted_spent_time(self, date=None, board=None):
spent_time_on_date_filter = {}
# If we pass the date, filter adjusted spent time of this member by date
if date is not None:
spent_time_on_date_filter["date"] = date
# If we pass the board, only this board spent times will be given
if board is not None:
spent_time_on_date_filter["board"] = board
return self._sum_adjusted_spent_time_from_filter(spent_time_on_date_filter)
    # Returns the number of hours this member has developed in a given week
def get_weekly_spent_time(self, week, year, board=None):
start_date = Week(year, week).monday()
end_date = Week(year, week).friday()
spent_time_on_week_filter = {"date__gte": start_date, "date__lte": end_date}
# If we pass the board, only this board spent times will be given
if board is not None:
spent_time_on_week_filter["board"] = board
return self._sum_spent_time_from_filter(spent_time_on_week_filter)
    # Returns the number of adjusted hours this member has developed in a given week
def get_weekly_adjusted_spent_time(self, week, year, board=None):
start_date = Week(year, week).monday()
end_date = Week(year, week).friday()
spent_time_on_week_filter = {"date__gte": start_date, "date__lte": end_date}
# If we pass the board, only this board spent times will be given
if board is not None:
spent_time_on_week_filter["board"] = board
return self._sum_adjusted_spent_time_from_filter(spent_time_on_week_filter)
    # Returns the number of hours this member has developed in a given month
def get_monthly_spent_time(self, month, year, board=None):
spent_time_on_month_filter = {"date__month": month, "date__year": year}
# If we pass the board, only this board spent times will be given
if board is not None:
spent_time_on_month_filter["board"] = board
return self._sum_spent_time_from_filter(spent_time_on_month_filter)
    # Returns the number of adjusted hours this member has developed in a given month
def get_monthly_adjusted_spent_time(self, month, year, board=None):
spent_time_on_month_filter = {"date__month": month, "date__year": year}
# If we pass the board, only this board spent times will be given
if board is not None:
spent_time_on_month_filter["board"] = board
return self._sum_adjusted_spent_time_from_filter(spent_time_on_month_filter)
    # Returns the sum of this member's spent time for the daily spent time filter passed as parameter
def _sum_spent_time_from_filter(self, daily_spent_time_filter):
daily_spent_times = self.daily_spent_times.filter(**daily_spent_time_filter)
return Member._sum_spent_time(daily_spent_times)
    # Returns the sum of this member's adjusted spent time for the daily spent time filter passed as parameter
def _sum_adjusted_spent_time_from_filter(self, daily_spent_time_filter):
daily_spent_times = self.daily_spent_times.filter(**daily_spent_time_filter)
spent_time_factors = self.spent_time_factors.all()
adjusted_spent_time_sum = 0
for daily_spent_time in daily_spent_times:
adjusted_spent_time_sum += self.adjust_daily_spent_time(
daily_spent_time, attribute="spent_time", spent_time_factors=spent_time_factors
)
return adjusted_spent_time_sum
    # Returns the number of hours this member has developed given a filter
@staticmethod
def _sum_spent_time(daily_spent_times):
spent_time = daily_spent_times. \
aggregate(sum=Sum("spent_time"))["sum"]
if spent_time is None:
return 0
return spent_time
# Destroy boards created by this member
def delete_current_data(self):
self.created_boards.all().delete()
# Mood of this member
@property
def mood(self):
happy_days = self.daily_member_moods.filter(mood="happy").count()
normal_days = self.daily_member_moods.filter(mood="normal").count()
sad_days = self.daily_member_moods.filter(mood="sad").count()
all_days = (happy_days + normal_days + sad_days)
if all_days == 0:
return 0.0
return 1.0 * (happy_days - sad_days) / all_days
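    # Illustrative example (hypothetical counts): with 3 happy, 1 normal and 1 sad
    # day recorded, mood = (3 - 1) / 5 = 0.4.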
def get_role(self, board):
try:
return self.roles.get(board=board)
except MemberRole.DoesNotExist:
member_role, created = MemberRole.objects.get_or_create(type="normal", board=board)
member_role.members.add(self)
return member_role
@property
def active_cards(self):
return self.cards.filter(board__is_archived=False, is_closed=False).order_by("position")
def all_boards_in_downtime(self):
resumed_boards = get_member_boards(self).\
annotate(num_resumed_cards=Count(
models.Case(
models.When(cards__is_closed=False, cards__list__type="development", then=1),
models.When(cards__is_closed=False, cards__list__type="ready_to_develop", then=1),
                    default=None,  # None (instead of 0) so Count only counts matching cards
output_field=models.IntegerField()
))
).\
filter(num_resumed_cards__gt=0)
return not resumed_boards.exists()
# Is the member in downtime?
@property
def is_in_downtime(self):
return not self.active_cards.filter(Q(list__type="development")|Q(list__type="ready_to_develop")).exists()
@property
def first_work_datetime(self):
try:
return self.daily_spent_times.all().order_by("id")[0].comment.creation_datetime
except (IndexError, AttributeError):
return None
@property
def last_work_datetime(self):
try:
return self.daily_spent_times.all().order_by("-id")[0].comment.creation_datetime
except (IndexError, AttributeError):
return None
@property
def number_of_cards(self):
return self.active_cards.count()
@property
def forward_movements(self):
return self.card_movements.filter(type="forward").count()
@property
def backward_movements(self):
return self.card_movements.filter(type="backward").count()
def get_forward_movements_for_board(self, board):
return self.card_movements.filter(type="forward", board=board).count()
def get_backward_movements_for_board(self, board):
return self.card_movements.filter(type="backward", board=board).count()
# Returns this member's avatar URL
@property
def avatar_url(self, size=30):
# Check if this member has a custom avatar, if that's the case, return the custom avatar
if self.custom_avatar:
return self.custom_avatar.url
# Create avatar if it doesn't exist
if not self.default_avatar:
self.create_default_avatar()
        # If the member has a user and therefore an email, get their Gravatar
if self.user:
current_request = CrequestMiddleware.get_request()
return "https://www.gravatar.com/avatar/{0}?s={1}&d={2}".format(
hashlib.md5(self.user.email.encode('utf-8')).hexdigest(),
size,
current_request.build_absolute_uri(self.default_avatar.url)
)
# Otherwise, get its default avatar URL
return self.default_avatar.url
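    # Illustrative shape of the resulting Gravatar URL (hypothetical member with an
    # email and the default size of 30px); the "d" parameter falls back to the
    # member's generated default avatar when no Gravatar exists:
    #   https://www.gravatar.com/avatar/<md5-of-email>?s=30&d=<absolute-url-of-default-avatar>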
# Create default avatar
def create_default_avatar(self):
initials = self.initials
font_size = 15
x = 4
y = 8
if len(initials) == 3:
font_size = 11
x = 1
y = 10
font = ImageFont.truetype(settings.BASE_DIR + "/fonts/vera.ttf", size=font_size)
canvas = Image.new('RGB', (30, 30), (255, 255, 255))
draw = ImageDraw.Draw(canvas)
draw.text((x, y), initials, font=font, fill=(0, 0, 0, 255))
filename = "{0}.png".format(initials)
# If tmp directory does not exist, create it
if not os.path.exists(settings.TMP_DIR):
os.mkdir(settings.TMP_DIR)
path = os.path.join(settings.TMP_DIR, "{0}".format(filename))
canvas.save(path, "PNG")
with open(path, "rb") as avatar_image:
self.default_avatar.save(filename, File(avatar_image))
# Average lead time of the cards of this member
@property
def avg_card_lead_time(self):
return self.active_cards.aggregate(avg=Avg("lead_time"))["avg"]
# Average spent time of the cards of this member
@property
def avg_card_spent_time(self):
return self.active_cards.aggregate(avg=Avg("spent_time"))["avg"]
# Average estimated time of the cards of this member
@property
def avg_card_estimated_time(self):
return self.active_cards.aggregate(avg=Avg("estimated_time"))["avg"]
# Standard deviation of the lead time of the cards of this member
@property
def std_dev_card_lead_time(self):
values = [float(card_i.lead_time) for card_i in self.active_cards.exclude(lead_time=None)]
std_dev_time = numpy.nanstd(values)
return std_dev_time
# Standard deviation of the spent time of the cards of this member
@property
def std_dev_card_spent_time(self):
values = [float(card_i.spent_time) for card_i in self.active_cards.exclude(spent_time=None)]
std_dev_time = numpy.nanstd(values)
return std_dev_time
# Standard deviation of the estimated time of the cards of this member
@property
def std_dev_card_estimated_time(self):
values = [float(card_i.estimated_time) for card_i in self.active_cards.exclude(estimated_time=None)]
std_dev_time = numpy.nanstd(values)
return std_dev_time
@property
def first_name(self):
if self.user:
return self.user.first_name
return None
@property
def last_name(self):
if self.user:
return self.user.last_name
return None
@property
def email(self):
if self.user:
return self.user.email
return None
# Spent time factors of each member
class SpentTimeFactor(models.Model):
member = models.ForeignKey("members.Member", verbose_name=u"Member", related_name="spent_time_factors")
name = models.CharField(verbose_name=u"Name of this factor", max_length=128, default="", blank=True)
start_date = models.DateField(verbose_name=u"Start date of this factor")
end_date = models.DateField(verbose_name=u"End date of this factor", null=True, default=None, blank=True)
factor = models.DecimalField(
decimal_places=2, max_digits=5,
verbose_name=u"Factor that needs to be multiplied on the spent time price for this member",
help_text=u"Modify this value whe this member cost needs to be adjusted by a factor",
default=1
)
# Role a member has in a board
class MemberRole(models.Model):
TYPE_CHOICES = (
("admin", "Administrator"),
("normal", "Normal"),
("guest", "Guest")
)
type = models.CharField(verbose_name="Role a member has in a board", default="normal", max_length=32)
members = models.ManyToManyField("members.Member", verbose_name=u"Member", related_name="roles")
board = models.ForeignKey("boards.Board", verbose_name=u"Boards", related_name="roles")
# Return the full name of the type
@property
def name(self):
return dict(MemberRole.TYPE_CHOICES)[self.type]
#
class TrelloMemberProfile(models.Model):
api_key = models.CharField(max_length=128, verbose_name=u"Trello API key", null=True, default=None, blank=True)
api_secret = models.CharField(max_length=128,
verbose_name=u"Trello API secret (obsolete)",
help_text=u"Trello API secret. Deprecated and not used. This field will be removed.",
null=True, default=None, blank=True)
token = models.CharField(max_length=128, verbose_name=u"Trello token", null=True, default=None, blank=True)
token_secret = models.CharField(max_length=128, verbose_name=u"Trello token secret", null=True, default=None, blank=True)
trello_id = models.CharField(max_length=128, verbose_name=u"Trello member id", unique=True)
username = models.CharField(max_length=128, verbose_name=u"Trello username")
initials = models.CharField(max_length=8, verbose_name=u"User initials in Trello")
member = models.OneToOneField(Member, verbose_name=u"Associated member", related_name="trello_member_profile", null=True, default=None)
# Informs if this member is initialized, that is, it has the credentials needed for connecting to trello.com
@property
def is_initialized(self):
return self.api_key and self.api_secret and self.token and self.token_secret
@property
def user(self):
if self.member:
return self.member.user
return None
|
from rest_framework.routers import DefaultRouter
from app.api import views
router = DefaultRouter(trailing_slash=False)
router.register("users", views.UserViewSet, basename="user")
urlpatterns = router.urls
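# A minimal sketch of the serializer and ViewSet this router assumes (illustrative
# only; the actual app.api.views.UserViewSet may differ):
#
#   from django.contrib.auth.models import User
#   from rest_framework import serializers, viewsets
#
#   class UserSerializer(serializers.ModelSerializer):
#       class Meta:
#           model = User
#           fields = ("id", "username", "email")
#
#   class UserViewSet(viewsets.ReadOnlyModelViewSet):
#       queryset = User.objects.all()
#       serializer_class = UserSerializer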
|
# Generated by Django 2.2.17 on 2020-12-22 19:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('battles', '0002_auto_20201222_1644'),
]
operations = [
migrations.AddField(
model_name='battle',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
for testcase in range(int(input())):
    n = int(input())
    # Count occurrences of each value
    counts = {}
    comb = 1
    m = (10**9)+7
    for x in input().split():
        value = int(x)
        counts[value] = counts.get(value, 0) + 1
    # Keep only the multiplicities, ordered by descending value
    counts = [item[1] for item in sorted(counts.items(), key=lambda item: item[0], reverse=True)]
    for ind in range(len(counts)):
        if counts[ind] == 0:
            continue
        if counts[ind] % 2 == 0:
            # Even group: multiply by (c-1)*(c-3)*...*3 pairings within the group
            for j in range(counts[ind]-1, 2, -2):
                comb = (comb*j) % m
        else:
            # Odd group: pair within the group, then pair the leftover element
            # with one element of the next (smaller-valued) group
            for j in range(counts[ind], 2, -2):
                comb = (comb*j) % m
            comb = (comb*counts[ind+1]) % m
            counts[ind+1] -= 1
    print(comb)
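# The counting step could also be written with collections.Counter (an equivalent
# sketch of the manual counting above; `values` stands for the integers read for
# the test case):
#
#   from collections import Counter
#   counts = [c for _, c in sorted(Counter(values).items(), reverse=True)]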
|
"""This is a file for processing the data in the abdomenCT-1k dataset.
Essentially the masks are in a .nii.gz format, and there is 1 mask for the whole CT scan.
The mask is then processed and converted into a numpy array corresponding to each image in the CT scan.
The numpy array follows the standard semantic segmentation GT format - N*3*H*W
"""
import os
import numpy as np
import nibabel as nib
from glob import glob
from PIL import Image
from os.path import basename
ROOT_DIR = '/scratche/users/sansiddh/abdomenCT-1k/'
all_masks = glob(ROOT_DIR+'Masks/*.nii.gz')
CLASSES = [0, 1, 2, 3, 4]
print(len(all_masks))
for mask_path in all_masks:
mask = nib.load(mask_path).get_fdata()
for i in range(mask.shape[2]):
mask_img = mask[:, :, i]
mask_np = np.repeat(mask_img[:, :, np.newaxis], len(CLASSES), axis=2)
for idx, class_val in enumerate(CLASSES):
bool_arr = (mask_img == class_val)
mask_np[:, :, idx][bool_arr] = 1
mask_np[:, :, idx][~bool_arr] = 0
        scan_id = basename(mask_path).split('_')[1].split('.')[0]
        masks_dir = f'{ROOT_DIR}Masks/{scan_id}/'
os.makedirs(masks_dir, exist_ok=True)
with open(masks_dir + '%05d.npy' % i, 'wb') as f:
np.save(f, mask_np)
print(f'{mask_path} done')
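# Illustrative check of one saved slice (hypothetical scan id and slice index):
#   gt = np.load(ROOT_DIR + 'Masks/0001/00000.npy')
#   assert gt.shape[2] == len(CLASSES)  # one binary channel per class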
|
#!/usr/bin/python3
'''
Copyright [02/2019] Vaclav Alt, vaclav.alt@utf.mff.cuni.cz
'''
import pandas as pd
import sqlite3 as sq
import csv
import configparser, os, time, sys
from datetime import datetime
from math import isnan
from shutil import copyfile
from optmgr import OptMaster
class SvodMaster:
def __init__(self, filename):
self.cfg = configparser.ConfigParser()
self.cfg.read(filename)
self.wd = self._createFolder()
copyfile("opts.ini", os.path.join(self.wd, "opts.ini"))
dbpath = os.path.join(self.wd, self.cfg["database"]["sql_filename"])
self._initDb(dbpath)
self.opt = OptMaster()
self.opt.load()
def _initDb(self, dbpath):
self.db = sq.connect(dbpath)
self.c = self.db.cursor()
self._createTable()
self.insertQuery = self._insertQueryTemplate()
def _createFolder(self):
mydir = os.path.join(
os.getcwd(),
datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
os.makedirs(mydir)
return mydir
def download(self):
task_count = self.opt.getTaskCount()
error = False
error_path = os.path.join(self.wd, self.cfg["database"]["error_filename"])
with open(error_path, 'w', newline='') as logfile:
i = 1
for x in self.opt.optIterator():
print("Stahování %d/%d..." % (i, task_count), end='')
opts = self.opt._getUrlOpts(x)
url = self.opt._getUrl(opts)
try:
table = self._downloadYearTable(url)
except:
csv_out = csv.writer(logfile)
if not error:
header = list(opts.keys())
                        for header_idx in range(len(header)):
                            header[header_idx] = self.opt.opt_names[header[header_idx]]
header.append("url")
csv_out.writerow(header)
error= True
values = list(opts.values())
values.append(url)
csv_out.writerow(values)
continue
self._changeFormats(opts)
for index, row in table.iterrows():
if isnan(row['Rok']):
continue
opts["c_rok"] = row['Rok']
opts["c_inc"] = row['Incidence']
opts["c_mor"] = row['Mortalita']
self._saveToDb(opts)
self.db.commit()
print("hotovo")
i += 1
self.writeCsv()
if error:
print("Došlo k chybám. Pro konfigurace v errors.csv se nepodařilo stáhnout žádná data.")
input("Stisknutím klávesy Enter ukončíte chod programu")
def writeCsv(self):
csv_path = os.path.join(self.wd, self.cfg["database"]["csv_filename"])
print("Ukládám %s" % self.cfg["database"]["csv_filename"])
sql3_cursor = self.db.cursor()
sql3_cursor.execute('SELECT * FROM %s' % self.cfg["database"]["tablename"])
with open(csv_path,'w', newline='') as out_csv_file:
csv_out = csv.writer(out_csv_file)
csv_out.writerow([d[0] for d in sql3_cursor.description])
for result in sql3_cursor:
csv_out.writerow(result)
def _changeFormats(self, opts):
opts["c_vek"] = self._vekFormat(opts["c_vek"])
opts["c_gen"] = self._pohlFormat(opts["c_gen"])
def _vekFormat(self, i):
return (int(i) - 1) * 5
def _pohlFormat(self, pohl):
if (pohl == "m"):
return 1
elif (pohl == "z"):
return 2
else:
return "NULL"
def _insertQueryTemplate(self):
query = "insert into %s (" % self.cfg["database"]["tablename"]
for col in self.cfg.options("database.columns"):
query += "%s, " % self.cfg["database.columns"][col]
query = query[:-2]
query += ") values ("
for col in self.cfg.options("database.columns"):
query += "'{%s}', " % col
query = query[:-2]
query += ")"
return query
def _composeQuery(self, opts):
for index, val in opts.items():
if val == '':
opts[index] = "NULL"
return (self.insertQuery.format(**opts))
def _createTable(self):
query = "create table %s (" % self.cfg["database"]["tablename"]
query += "id INTEGER PRIMARY KEY"
for col in self.cfg.options("database.columns"):
query += ", %s %s" % (self.cfg["database.columns"][col], self.cfg["database.types"][col])
query += ")"
self.c.execute(query)
self.db.commit()
def _saveToDb(self, opts):
sql_query = self._composeQuery(opts)
self.c.execute(sql_query)
def _parseSingleYearTable(self, tables):
df = tables[0].transpose()
df = pd.DataFrame(df.values[1:,4:])
return (df.values[0,0], df.values[0,1])
def _downloadYearTable(self, url):
tables = pd.read_html(url, skiprows=[3,7])
df = tables[0].transpose()
headers = df.iloc[0,:3]
df1 = pd.DataFrame(df.values[1:,:3], columns=headers)
df2 = pd.DataFrame(df.values[1:,3:], columns=headers)
df = df1.append(df2).reset_index(drop=True)
return df
def _processTable(self, table):
for index, row in table.iterrows():
rowDict = {
"rok" : row['Rok'],
"incidence" : row['Incidence'],
"mortalita" : row['Mortalita']
}
print(rowDict)
def main():
svod = SvodMaster("config.ini")
svod.download()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import yaml
from tabulate import tabulate
from IPython import get_ipython
from IPython.core.display import display, HTML
def load_config(yamlfile):
"""load yaml to a dict"""
with open(yamlfile, 'r') as stream:
_dict = yaml.safe_load(stream)
return _dict
def innotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def print_dict(input_dict):
"""tabulated HTML output of dictionary"""
if innotebook():
outstr = tabulate({
'name': list(input_dict.keys()),
'description': list(input_dict.values()),
}, headers="keys", tablefmt='html')
display(HTML(outstr))
else:
print(tabulate({
'name': list(input_dict.keys()),
'description': list(input_dict.values()),
}, headers="keys"))
def repeat_exp(plan_func, n=1):
"""
Quick wrapper to repeat certain experiment, e.g.
    >>> RE(repeat_exp(tomo_scan('tomo_scan_config.yml'), n=2))
"""
for _ in range(n):
yield from plan_func
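# Minimal usage sketch of print_dict (the dictionary contents are hypothetical):
if __name__ == '__main__':
    print_dict({
        'tomo_scan': 'tomography scan plan (assumed example)',
        'repeat_exp': 'repeat a given plan n times',
    })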
|
import re
from time import perf_counter
from unittest.mock import patch
import pytest
from stixcore.data.test import test_data
from stixcore.idb.manager import IDBManager
from stixcore.io.fits.processors import FitsL0Processor, FitsL1Processor
from stixcore.io.soc.manager import SOCPacketFile
from stixcore.products.level0.housekeepingL0 import MaxiReport as MaxiReportL0
from stixcore.products.level0.housekeepingL0 import MiniReport as MiniReportL0
from stixcore.products.level1.housekeepingL1 import MaxiReport as MaxiReportL1
from stixcore.products.level1.housekeepingL1 import MiniReport as MiniReportL1
from stixcore.products.levelb.binary import LevelB
from stixcore.products.product import Product
from stixcore.util.logging import get_logger
logger = get_logger(__name__)
testpackets = [(test_data.tmtc.TM_3_25_1, MiniReportL0, MiniReportL1, 'mini',
'0660010031:51424', '0660010031:51424', 1),
(test_data.tmtc.TM_3_25_2, MaxiReportL0, MaxiReportL1, 'maxi',
'0660258881:33104', '0660258881:33104', 1)]
@pytest.fixture
def idbm():
return IDBManager(test_data.idb.DIR)
@patch('stixcore.products.levelb.binary.LevelB')
@pytest.mark.parametrize('packets', testpackets, ids=[f[0].stem for f in testpackets])
def test_housekeeping(levelb, packets):
hex_file, cl_l0, cl_l1, name, beg, end, size = packets
with hex_file.open('r') as file:
hex = file.readlines()
levelb.data.__getitem__.return_value = [re.sub(r"\s+", "", h) for h in hex]
levelb.control = {'raw_file': 'raw.xml', 'packet': 0}
hk_l0 = cl_l0.from_levelb(levelb)
assert hk_l0.level == 'L0'
assert hk_l0.name == name
assert hk_l0.scet_timerange.start.to_string() == beg
assert hk_l0.scet_timerange.end.to_string() == end
assert len(hk_l0.data) == size
hk_l1 = cl_l1.from_level0(hk_l0)
assert hk_l1.level == 'L1'
@patch('stixcore.products.levelb.binary.LevelB')
def test_calibration_hk(levelb, idbm, tmp_path):
with test_data.tmtc.TM_3_25_2.open('r') as file:
hex = file.readlines()
levelb.data.__getitem__.return_value = [re.sub(r"\s+", "", h) for h in hex]
levelb.control = {'raw_file': 'raw.xml', 'packet': 0}
hkl0 = MaxiReportL0.from_levelb(levelb)
hkl0.control['parent'] = ['parent.fits']
hkl0.control['raw_file'] = ['raw.xml']
hkl1 = MaxiReportL1.from_level0(hkl0)
fits_procl1 = FitsL1Processor(tmp_path)
fits_procl1.write_fits(hkl1)[0]
assert True
def test_calibration_hk_many(idbm, tmp_path):
idbm.download_version("2.26.35", force=True)
tstart = perf_counter()
prod_lb_p1 = list(LevelB.from_tm(SOCPacketFile(test_data.io.HK_MAXI_P1)))[0]
prod_lb_p1.control['raw_file'] = ['raw.xml']
hk_p1 = MaxiReportL0.from_levelb(prod_lb_p1)
hk_p1.control['raw_file'] = ['raw.xml']
hk_p1.control['parent'] = ['parent.fits']
fits_procl0 = FitsL0Processor(tmp_path)
filename = fits_procl0.write_fits(hk_p1)[0]
hk_p1_io = Product(filename)
prod_lb_p2 = LevelB.from_tm(SOCPacketFile(test_data.io.HK_MAXI_P2))
hk_p2 = MaxiReportL0.from_levelb(list(prod_lb_p2)[0])
# fake a idb change on the same day
hk_p2.idb_versions["2.26.35"] = hk_p2.idb_versions["2.26.34"]
del hk_p2.idb_versions["2.26.34"]
hkl0 = hk_p1_io + hk_p2
hkl0.control['raw_file'] = ['raw.xml']
hkl0.control['parent'] = ['parent.fits']
hkl1 = MaxiReportL1.from_level0(hkl0, idbm=idbm)
hkl1.control['raw_file'] = ['raw.xml']
hkl1.control['parent'] = ['parent.fits']
fits_procl1 = FitsL1Processor(tmp_path)
filename = fits_procl1.write_fits(hkl1)[0]
tend = perf_counter()
logger.info('Time taken %f', tend - tstart)
if __name__ == '__main__':
test_calibration_hk_many(IDBManager(test_data.idb.DIR))
|
import socket
def get_open_ports(target, port_range, verbose = False):
open_ports = []
try:
ip_addr = socket.gethostbyname(target)
    except socket.gaierror:
if target.replace('.', '').isnumeric():
return "Error: Invalid IP address"
else:
return "Error: Invalid hostname"
try:
hostname = socket.gethostbyaddr(ip_addr)[0]
no_host = False
    except (socket.herror, socket.gaierror):
no_host = True
for i in range(port_range[0], port_range[1] + 1):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
if s.connect_ex((str(ip_addr), i)) == 0:
open_ports.append(i)
s.close()
if verbose:
if no_host:
output = "Open ports for {IP}\nPORT SERVICE".format(IP=ip_addr)
else:
output = "Open ports for {URL} ({IP})\nPORT SERVICE".format(URL=hostname, IP=ip_addr)
for i in open_ports:
output += "\n{PORT}{SERVICE}".format(PORT=str(i).ljust(9), SERVICE=socket.getservbyport(i))
return(output)
else:
return(open_ports)
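# Illustrative usage (target host and port range are arbitrary examples):
if __name__ == '__main__':
    print(get_open_ports('www.freecodecamp.org', (75, 85)))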
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import logging.config
import time
from acc_utils.attrdict import AttrDict
from acc_utils.errors import *
from acc_utils.model_utils import *
from config import cfg
from generic_op import *
class Stats():
def __init__(self):
self.global_stats = None
self.local_stats = None
self.layer_stats = None
self.description = self.init_description()
logging.config.dictConfig(cfg.LOGGING_CONFIG_DICT)
self.logger = logging.getLogger()
def init(self):
if self.global_stats is not None:
del self.global_stats
if self.layer_stats is not None:
for layer in self.layer_stats:
del self.layer_stats[layer]
del self.layer_stats
self.global_stats = self.create_branch()
self.local_stats = self.create_branch()
self.layer_stats = {}
def create_branch(self):
branch = AttrDict()
branch.SIM_TIME = time.time()
branch.CLOCK = 0
branch.WRITE = AttrDict()
branch.WRITE.DRAM = 0
branch.RUN = AttrDict()
branch.READ = AttrDict()
branch.READ.DRAM = 0
branch.READ.DRAM2FMEM = 0
branch.READ.DRAM2WMEM = 0
branch.READ.FMEM = 0
branch.READ.WMEM = 0
branch.MACs = 0
branch.DELAY = AttrDict()
branch.DELAY.DRAM = 0
branch.DELAY.READ_WMEM = 0
branch.DELAY.READ_FMEM = 0
branch.DELAY.WRITE_DRAM = 0
branch.DELAY.WRITE_FMEM = 0
branch.DRAM = AttrDict()
branch.DRAM.BUS_BUSY_TIME = 0
return branch
def init_description(self):
__DESCRIPTION = self.create_branch()
__DESCRIPTION.SIM_TIME = "Elapsed time"
__DESCRIPTION.CLOCK = "Simulated Cycle"
__DESCRIPTION.WRITE.DRAM = "# of DRAM write"
__DESCRIPTION.READ.DRAM = "# of DRAM read"
__DESCRIPTION.READ.DRAM2FMEM = "data size of DRAM -> FMEM"
__DESCRIPTION.READ.DRAM2WMEM = "data size of DRAM -> WMEM"
__DESCRIPTION.READ.FMEM = "# of FMEM read"
__DESCRIPTION.READ.WMEM = "# of WMEM read"
__DESCRIPTION.MACs = "MACs"
__DESCRIPTION.DELAY.DRAM = "Total memory delay"
__DESCRIPTION.DELAY.READ_WMEM = "WMEM Read delay"
__DESCRIPTION.DELAY.READ_FMEM = "FMEM Read delay"
__DESCRIPTION.DELAY.WRITE_DRAM = "DRAM Write Delay"
__DESCRIPTION.DELAY.WRITE_FMEM = "LATENCY due to the write operation bottleneck"
__DESCRIPTION.DRAM.BUS_BUSY_TIME = "DRAM busy time"
return __DESCRIPTION
def update(self, layer_name, print_stats=True):
for attr in self.global_stats:
if attr == 'SIM_TIME':
continue
if isinstance(self.global_stats[attr], dict):
for key in self.global_stats[attr]:
self.global_stats[attr][key] += self.local_stats[attr][key]
else:
self.global_stats[attr] += self.local_stats[attr]
self.local_stats.SIM_TIME = time.time() - self.local_stats.SIM_TIME
stat = self.local_stats
if print_stats:
self.logger.info(" LAYER {} - Elapsed Time(s): {}, Simulated cycles: {}, MACs: {}.".format(
layer_name,
stat.SIM_TIME,
stat.CLOCK,
stat.MACs)
)
self.layer_stats[layer_name] = self.local_stats
self.local_stats = self.create_branch()
def end_simulation(self):
self.global_stats.SIM_TIME = time.time() - self.global_stats.SIM_TIME
stat = self.global_stats
self.logger.info(""" SIMULATION FINISHED. SUMMARY:
SIMULATION TIME(s): {}
MACs: {}
SIMULATED CYCLE(cylce): {}
DRAM Delay, Write Delay: {}, {}
DRAM_READ, WRITE: {}, {}
DRAM2FMEM, DRAM2WMEM: {}, {}
FMEM_READ: {}
WMEM_READ: {}
""".format(stat.SIM_TIME,
stat.MACs,
stat.CLOCK,
stat.DELAY.DRAM, stat.DELAY.WRITE_FMEM,
stat.READ.DRAM, stat.WRITE.DRAM,
stat.READ.DRAM2FMEM, stat.READ.DRAM2WMEM,
stat.READ.FMEM,
stat.READ.WMEM)
)
def dram_read(self, cnt=1):
self.local_stats.READ.DRAM += cnt
def read_dram2fmem(self, cnt=1):
self.local_stats.READ.DRAM2FMEM += cnt
self.dram_read(cnt)
def read_dram2wmem(self, cnt=1):
self.local_stats.READ.DRAM2WMEM += cnt
self.dram_read(cnt)
def fmem_read(self, cnt=1):
self.local_stats.READ.FMEM += cnt
def wmem_read(self, cnt=1):
self.local_stats.READ.WMEM += cnt
def dram_write(self, cnt=1):
self.local_stats.WRITE.DRAM += cnt
def total_cycle(self):
return self.local_stats.CLOCK + self.global_stats.CLOCK
def current_cycle(self):
return self.local_stats.CLOCK
def increase_cycle(self, t=1):
self.local_stats.CLOCK += t
def memory_latency(self):
return self.local_stats.DELAY.DRAM
def wait_memory(self, t=1):
self.local_stats.DELAY.DRAM += t
self.local_stats.CLOCK += t
def wait_dram2fmem(self, t=1):
self.local_stats.DELAY.READ_FMEM += t
self.wait_memory(t)
def wait_dram2wmem(self, t=1):
self.local_stats.DELAY.READ_WMEM += t
self.wait_memory(t)
def wait_write_dram(self, t=1):
self.local_stats.DELAY.WRITE_DRAM += t
def wait_writing(self, t=1):
self.local_stats.DELAY.WRITE_FMEM += t
self.local_stats.CLOCK += t
def use_dram_bus(self, t):
self.local_stats.DRAM.BUS_BUSY_TIME += t
def set_macs(self, size):
self.local_stats.MACs = size
    def diff_static_and_simulate(self, path_info, static_info):
for layer in path_info:
if isinstance(layer.main_op, ConvOp) or isinstance(layer.main_op, PoolOp):
stats_layer = self.layer_stats[layer.name]
profile_value = stats_layer['CLOCK'] - stats_layer['DELAY.DRAM']
static_cycle = static_info[layer.name]['cycle']
static_in_size = static_info[layer.name]['in_size']
static_out_size = static_info[layer.name]['out_size']
print("Layer: {:>16s} {:^12s}\tKern: {:1d}x{:1d}\tSimulated: {:>8d}\tStatic Calc: {:>8d}\tDiff. Rate: {:>.2f}\tDRAM: {:>8d}\tFMEM: {:>8d} ({:>5d}, {:>5d})\tTensor: {:>8d} -> {:>8d}".format(
layer.name, "(" + layer.main_op.type + ")", layer.main_op.k_w, layer.main_op.k_h, profile_value, static_cycle,
(static_cycle - profile_value) / profile_value * 100, stats_layer['DELAY.DRAM'], stats_layer['WRITE']['FMEM'], stats_layer['DELAY.READ_FMEM'], stats_layer['DELAY.WRITE_DRAM'], static_in_size, static_out_size))
elif layer.name in self.layer_stats:
stats_layer = self.layer_stats[layer.name]
print("Layer: {:>16s} {:^12s}\tDRAM: {:>8d}\tFMEM: {:>8d} ({:>5d}, {:>5d})".format(
layer.name, "(" + layer.main_op.type + ")", stats_layer['DELAY.DRAM'], stats_layer['WRITE']['FMEM'], stats_layer['DELAY.READ_FMEM'], stats_layer['DELAY.WRITE_DRAM']))
def print_result(self, path_info, model):
import math
name = []
dram = []
fmem_dram = []
wmem_dram = []
total_delay = []
fmem_delay = []
wmem_delay = []
cycle = []
mac = []
fps = []
utilization = []
dram_busy_time = []
dram_utilization = []
dram_is = []
dram_ws = []
# for conv
conv_clock = 0
conv_mac = 0
conv_delay = 0
# for fc
fc_delay = 0
# for residual
residual_delay = 0
num_mac_units = cfg.MIDAP.SYSTEM_WIDTH * cfg.MIDAP.WMEM.NUM
cps = cfg.SYSTEM.FREQUENCY * 1.0e6
for layer in path_info:
stats_layer = self.layer_stats[layer.name]
it = layer.input[0]
input_size = it.shape[0] * it.shape[1] * it.shape[2]
weight_size = 0
name.append(layer.name)
dram.append(stats_layer['READ']['DRAM'] + stats_layer['WRITE']['DRAM'])
wmem_dram.append(stats_layer['READ']['DRAM2WMEM'])
fmem_dram.append(stats_layer['READ']['DRAM2FMEM'])
total_delay.append(stats_layer['DELAY']['DRAM'])
fmem_delay.append(stats_layer['DELAY']['READ_FMEM'])
wmem_delay.append(stats_layer['DELAY']['READ_WMEM'])
mac.append(stats_layer['MACs'])
cycle.append(stats_layer['CLOCK'])
dram_busy_time.append(stats_layer['DRAM']['BUS_BUSY_TIME'])
dram_utilization.append(dram_busy_time[-1]/cycle[-1])
fps.append(cps / stats_layer['CLOCK'])
utilization.append(stats_layer['MACs'] / (stats_layer['CLOCK'] * num_mac_units))
main_op = layer.modules[0].op
            if isinstance(main_op, ConvOp) and main_op.type == 'FC':
                fc_delay += stats_layer['DELAY']['READ_WMEM']
                weight_size = main_op.orig_weight_size
            elif isinstance(main_op, ConvOp):
                conv_clock += stats_layer['CLOCK']
                conv_mac += stats_layer['MACs']
                conv_delay += stats_layer['DELAY']['READ_WMEM']
                weight_size = main_op.orig_weight_size
            elif isinstance(main_op, ArithmeticOp):
                residual_delay += stats_layer['DELAY']['READ_WMEM']
dram_is.append(input_size + int(math.ceil(input_size / (cfg.MIDAP.FMEM.NUM_ENTRIES * cfg.MIDAP.FMEM.NUM * cfg.SYSTEM.DATA_SIZE))) * weight_size)
dram_ws.append(weight_size + max(int(math.ceil(weight_size / (cfg.MIDAP.WMEM.NUM_ENTRIES * cfg.MIDAP.WMEM.NUM * cfg.SYSTEM.DATA_SIZE))), 1) * input_size)
print("{}\tDRAM_Access\tDRAM_Delay\tDRAM_Access(FMEM)\tDRAM_Access(WMEM)\tDRAM_Delay(FMEM)\t\
DRAM_Delay(WMEM)\tMACs\tCYCLE\tDRAM_BUSY_TIME\tDRAM_Utilization\tFPS\tUtilization\tDRAM_Access(IS)\tDRAM_Access(WS)\tUtil.(Conv)\tResidual_Delay(WMEM)\t\
FC_Delay(WMEM)\tConv_Delay(WMEM)\tDRAM_Dealy_Ratio\tDRAM_Delay_Ratio(FMEM)\tDRAM_Delay_Ratio(WMEM)\tDRAM_Delay_Ratio(WMEM, Conv)".format(model))
for v in zip(name, dram, total_delay, fmem_dram, wmem_dram, fmem_delay, wmem_delay, mac, cycle, dram_busy_time, dram_utilization, fps, utilization, dram_is, dram_ws):
print("{}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:.4f}\t{:.0f}\t{:.4f}\t{}\t{}".format(*v))
# dram, fmem_dram, wmem_dram, total_delay, fmem_delay, wmem_delay, mac, cycle, fps, utilization
# conv util, residual delay, fc delay, conv delay, dram delay ratio, fmem ratio, wmem ratio, wmem ratio (only conv)
global_stat = self.global_stats
print("Total\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t{:,}\t\
\t{:.4f}\t{:.0f}\t{:.4f}\t{}\t{}\t{:.4f}\t{}\t{}\t\
{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}".format(global_stat.READ.DRAM + global_stat.WRITE.DRAM, global_stat.DELAY.DRAM,
global_stat.READ.DRAM2FMEM,
global_stat.READ.DRAM2WMEM,
global_stat.DELAY.READ_FMEM, global_stat.DELAY.READ_WMEM,
global_stat.MACs, global_stat.CLOCK, global_stat.DRAM.BUS_BUSY_TIME,
global_stat.DRAM.BUS_BUSY_TIME/global_stat.CLOCK, cps / global_stat.CLOCK,
global_stat.MACs / (global_stat.CLOCK * num_mac_units), sum(dram_is), sum(dram_ws),
conv_mac / (conv_clock * num_mac_units), residual_delay, fc_delay, conv_delay,
global_stat.DELAY.DRAM / global_stat.CLOCK,
global_stat.DELAY.READ_FMEM / global_stat.CLOCK,
global_stat.DELAY.READ_WMEM / global_stat.CLOCK,
conv_delay / global_stat.CLOCK))
def get_dram_delay(self):
return self.global_stats.DELAY.DRAM
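# Minimal usage sketch (illustrative; assumes cfg.LOGGING_CONFIG_DICT is configured):
#   stats = Stats()
#   stats.init()
#   stats.increase_cycle(100)
#   stats.read_dram2fmem(16)
#   stats.wait_dram2fmem(4)
#   stats.set_macs(1024)
#   stats.update('conv1')
#   stats.end_simulation()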
|
# -*- coding: UTF-8 -*-
#
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""
web application for presenter server.
"""
import os
import re
import random
import base64
import threading
import time
import logging
import tornado.ioloop
import tornado.web
import tornado.gen
import tornado.websocket
import tornado.httpserver
import face_detection.src.config_parser as config_parser
from common.channel_manager import ChannelManager
class WebApp:
"""
web application
"""
__instance = None
def __init__(self):
"""
init method
"""
self.channel_mgr = ChannelManager(["image", "video"])
self.request_list = set()
self.lock = threading.Lock()
def __new__(cls, *args, **kwargs):
# if instance is None than create one
if cls.__instance is None:
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def add_channel(self, channel_name):
"""
add channel
@param channel_name name of channel
@return: return add status and message (for error status)
"""
ret = {"ret":"error", "msg":""}
        # validate channel_name:
        # it cannot be None or empty
if channel_name is None:
logging.info("Channel name is None , add channel failed")
ret["msg"] = "Channel name can not be empty"
return ret
# strip channel name
channel_name = channel_name.strip()
        # check whether channel_name is empty
if channel_name == "":
logging.info("Channel name is emtpy , add channel failed")
ret["msg"] = "Channel name can not be empty"
return ret
        # length of channel name cannot exceed 25
if len(channel_name) > 25:
logging.info("Length of channel name %s > 25 , add channel failed", channel_name)
ret["msg"] = "Length of channel name should less than 25"
return ret
# define pattern support a-z A-Z and /
pattern = re.compile(r"[a-z]|[A-Z]|[0-9]|/")
tmp = pattern.findall(channel_name)
        # check whether the filtered result matches the original name
if len(tmp) != len(channel_name):
logging.info("%s contain invalidate character, add channel failed", channel_name)
ret["msg"] = "Channel name only support 0-9, a-z, A-Z /"
return ret
# register channel
flag = self.channel_mgr.register_one_channel(channel_name)
# check register result
if self.channel_mgr.err_code_too_many_channel == flag:
logging.info("Only supports up to 10 channels, add channel failed")
ret["msg"] = "Only supports up to 10 channels"
elif self.channel_mgr.err_code_repeat_channel == flag:
logging.info("%s already exist, add channel failed", channel_name)
ret["msg"] = "Channel %s already exist" % channel_name
else:
logging.info("add channel %s succeed", channel_name)
ret["ret"] = "success"
return ret
def del_channel(self, names):
"""
delete channel
@param names: channel name to be deleted, separated by ','
@return: return add status and message (for error status)
"""
# init ret for return
ret = {"ret":"error", "msg":""}
# check length of names
if names.strip() == "":
logging.info("Channel name is empty, delete channel failed")
ret["msg"] = "Channel name should not be empty"
return ret
# split name for multi name
listname = names.split(",")
# unregister name
for item in listname:
item = item.strip()
            # if name is empty, continue
if item == "":
continue
self.channel_mgr.unregister_one_channel(item)
logging.info("delete channel %s succeed", item)
ret["ret"] = "success"
return ret
def list_channels(self):
"""
list all channels information
"""
# list register channels
ret = self.channel_mgr.list_channels()
# id for every channel item , start with 1
idx = 1
# set id for channel
for item in ret:
item['id'] = idx
idx = idx + 1
return ret
def is_channel_exists(self, name):
"""
        check whether a channel with the given name exists.
@param name : channel name
@return return True if exists. otherwise return False.
"""
return self.channel_mgr.is_channel_exist(name)
def add_requst(self, request):
"""
add request
        @param request: request item to be stored
@note: request can not be same with other request.
request is identified by (channel name ,random number)
so this method do not return value.
"""
with self.lock:
self.request_list.add(request)
def has_request(self, request):
"""
whether request exist or not
@param request: request to be checked.
@return: return True if exists, otherwise return False.
"""
with self.lock:
for item in self.request_list:
# check request equal
if item[0] == request[0] and item[1] == request[1]:
return True
return False
def get_media_data(self, channel_name):
"""
get media data by channel name
@param channel_name: channel to be quest data.
        @return: a dictionary with four items:
type: identify channel type, for image or video.
image: data to be returned.
fps: just for video type
status: can be error, ok, or loading.
"""
# channel exists or not
if self.is_channel_exists(channel_name) is False:
return {'type': 'unkown', 'image':'', 'fps':0, 'status':'error'}
image_data = self.channel_mgr.get_channel_image(channel_name)
# only for image type.
if image_data is not None:
image_data = base64.b64encode(image_data).decode('utf-8')
return {'type': 'image', 'image':image_data, 'fps':0, 'status':'ok'}
fps = 0 # fps for video
image = None # image for video & image
rectangle_list = None
handler = self.channel_mgr.get_channel_handler_by_name(channel_name)
if handler is not None:
media_type = handler.get_media_type()
# if type is image then get image data
if media_type == "image":
image = handler.get_image_data()
# for video
else:
frame_info = handler.get_frame()
image = frame_info[0]
fps = frame_info[1]
rectangle_list = frame_info[4]
status = "loading"
# decode binary to utf-8 when image is not None
if image is not None:
status = "ok"
image = base64.b64encode(image).decode('utf-8')
return {'type': media_type, 'image':image, 'fps':fps, 'status':status, 'rectangle_list':rectangle_list}
else:
return {'type': 'unkown', 'image':None, 'fps':0, 'status':'loading'}
# pylint: disable=abstract-method
class BaseHandler(tornado.web.RequestHandler):
"""
base handler.
"""
# pylint: disable=abstract-method
class HomeHandler(BaseHandler):
"""
handler index request
"""
@tornado.web.asynchronous
def get(self, *args, **kwargs):
"""
handle home or index request only for get
"""
self.render("home.html", listret=G_WEBAPP.list_channels())
# pylint: disable=abstract-method
class AddHandler(BaseHandler):
"""
handler add request
"""
@tornado.web.asynchronous
def post(self, *args, **kwargs):
"""
        handle request for add channel
"""
channel_name = self.get_argument('name', '')
self.finish(G_WEBAPP.add_channel(channel_name))
# pylint: disable=abstract-method
class DelHandler(BaseHandler):
"""
handler delete request
"""
@tornado.web.asynchronous
def post(self, *args, **kwargs):
"""
        handle request for delete channel
"""
channel_name = self.get_argument('name', '')
self.finish(G_WEBAPP.del_channel(channel_name))
# pylint: disable=abstract-method
class ViewHandler(BaseHandler):
"""
handler view request
"""
@tornado.web.asynchronous
def get(self, *args, **kwargs):
"""
        handle request for view channel
"""
channel_name = self.get_argument('name', '')
if G_WEBAPP.is_channel_exists(channel_name):
req_id = str(random.random())
G_WEBAPP.add_requst((req_id, channel_name))
self.render('view.html', channel_name=channel_name, req=req_id)
else:
raise tornado.web.HTTPError(404)
class WebSocket(tornado.websocket.WebSocketHandler):
"""
    web socket for web page socket requests
"""
def open(self, *args, **kwargs):
"""
called when client request by ws or wss
"""
self.req_id = self.get_argument("req", '', True)
self.channel_name = self.get_argument("name", '', True)
# check request valid or not.
if not G_WEBAPP.has_request((self.req_id, self.channel_name)):
self.close()
@staticmethod
def send_message(obj, message, binary=False):
"""
send message to client.
"""
# check socket exist or not
if not obj.ws_connection or not obj.ws_connection.stream.socket:
return False
ret = False
try:
obj.write_message(message, binary)
ret = True
except tornado.websocket.WebSocketClosedError:
ret = False
return ret
def on_close(self):
"""
called when closed web socket
"""
@tornado.web.asynchronous
@tornado.gen.coroutine
def on_message(self, message):
"""
On recv message from client.
"""
if message == "next":
self.run_task()
def run_task(self):
"""
send image to client
"""
# check channel valid
if not G_WEBAPP.is_channel_exists(self.channel_name) or \
not G_WEBAPP.has_request((self.req_id, self.channel_name)):
self.close()
return
result = G_WEBAPP.get_media_data(self.channel_name)
        # sleep 100ms if status is not ok, to avoid overly frequent queries
if result['status'] != 'ok':
time.sleep(0.1)
# if channel not exist close websocket.
if result['status'] == "error":
self.close()
# send message to client
else:
            # close websocket when sending fails or for an image channel.
ret = WebSocket.send_message(self, result)
if not ret or result['type'] == "image":
self.close()
def get_webapp():
"""
    create the web application and return the HTTP server
"""
# get template file and static file path.
templatepath = os.path.join(config_parser.ConfigParser.get_rootpath(), "ui/templates")
staticfilepath = os.path.join(config_parser.ConfigParser.get_rootpath(), "ui/static")
# create application object.
app = tornado.web.Application(handlers=[(r"/", HomeHandler),
(r"/index", HomeHandler),
(r"/add", AddHandler),
(r"/del", DelHandler),
(r"/view", ViewHandler),
(r"/static/(.*)",
tornado.web.StaticFileHandler,
{"path": staticfilepath}),
(r"/websocket", WebSocket)],
template_path=templatepath)
# create server
http_server = tornado.httpserver.HTTPServer(app)
return http_server
def start_webapp():
"""
start webapp
"""
http_server = get_webapp()
config = config_parser.ConfigParser()
http_server.listen(config.web_server_port, address=config.web_server_ip)
print("Please visit http://" + config.web_server_ip + ":" +
str(config.web_server_port) + " for faceemotion")
tornado.ioloop.IOLoop.instance().start()
def stop_webapp():
"""
stop web app
"""
tornado.ioloop.IOLoop.instance().stop()
global G_WEBAPP
G_WEBAPP = WebApp()
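# Illustrative entry point (the presenter server normally starts this elsewhere):
#   if __name__ == "__main__":
#       start_webapp()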
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import unittest
import botocore.exceptions
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.operators.batch import AwsBatchOperator
from tests.compat import mock
JOB_NAME = "51455483-c62c-48ac-9b88-53a6a725baa3"
JOB_ID = "8ba9d676-4108-4474-9dca-8bbac1da9b19"
RESPONSE_WITHOUT_FAILURES = {
"jobName": JOB_NAME,
"jobId": JOB_ID,
}
class TestAwsBatchOperator(unittest.TestCase):
MAX_RETRIES = 2
STATUS_RETRIES = 3
@mock.patch("airflow.providers.amazon.aws.operators.batch.AwsHook")
def setUp(self, aws_hook_mock):
self.aws_hook_mock = aws_hook_mock
self.batch = AwsBatchOperator(
task_id="task",
job_name=JOB_NAME,
job_queue="queue",
job_definition="hello-world",
max_retries=self.MAX_RETRIES,
status_retries=self.STATUS_RETRIES,
parameters=None,
overrides={},
array_properties=None,
aws_conn_id=None,
region_name="eu-west-1",
)
def test_init(self):
self.assertEqual(self.batch.job_name, JOB_NAME)
self.assertEqual(self.batch.job_queue, "queue")
self.assertEqual(self.batch.job_definition, "hello-world")
self.assertEqual(self.batch.max_retries, self.MAX_RETRIES)
self.assertEqual(self.batch.status_retries, self.STATUS_RETRIES)
self.assertEqual(self.batch.parameters, None)
self.assertEqual(self.batch.overrides, {})
self.assertEqual(self.batch.array_properties, {})
self.assertEqual(self.batch.region_name, "eu-west-1")
self.assertEqual(self.batch.aws_conn_id, None)
self.assertEqual(self.batch.hook, self.aws_hook_mock.return_value)
self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)
def test_template_fields_overrides(self):
self.assertEqual(self.batch.template_fields, ("job_name", "overrides", "parameters",))
@mock.patch.object(AwsBatchOperator, "_wait_for_task_ended")
@mock.patch.object(AwsBatchOperator, "_check_success_task")
def test_execute_without_failures(self, check_mock, wait_mock):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
client_mock.submit_job.return_value = RESPONSE_WITHOUT_FAILURES
self.batch.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with(
"batch", region_name="eu-west-1"
)
client_mock.submit_job.assert_called_once_with(
jobQueue="queue",
jobName=JOB_NAME,
containerOverrides={},
jobDefinition="hello-world",
arrayProperties={},
parameters=None,
)
wait_mock.assert_called_once_with()
check_mock.assert_called_once_with()
self.assertEqual(self.batch.jobId, JOB_ID)
def test_execute_with_failures(self):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
client_mock.submit_job.return_value = ""
with self.assertRaises(AirflowException):
self.batch.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with(
"batch", region_name="eu-west-1"
)
client_mock.submit_job.assert_called_once_with(
jobQueue="queue",
jobName=JOB_NAME,
containerOverrides={},
jobDefinition="hello-world",
arrayProperties={},
parameters=None,
)
def test_wait_end_tasks(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
self.batch._wait_for_task_ended()
client_mock.get_waiter.assert_called_once_with("job_execution_complete")
client_mock.get_waiter.return_value.wait.assert_called_once_with(jobs=[JOB_ID])
self.assertEqual(sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)
@mock.patch("airflow.providers.amazon.aws.operators.batch.randint")
def test_poll_job_status_success(self, mock_randint):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
mock_randint.return_value = 0 # don't pause in unit tests
client_mock.get_waiter.return_value.wait.side_effect = ValueError()
client_mock.describe_jobs.return_value = {
"jobs": [{"jobId": JOB_ID, "status": "SUCCEEDED"}]
}
self.batch._wait_for_task_ended()
client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
@mock.patch("airflow.providers.amazon.aws.operators.batch.randint")
def test_poll_job_status_running(self, mock_randint):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
mock_randint.return_value = 0 # don't pause in unit tests
client_mock.get_waiter.return_value.wait.side_effect = ValueError()
client_mock.describe_jobs.return_value = {
"jobs": [{"jobId": JOB_ID, "status": "RUNNING"}]
}
self.batch._wait_for_task_ended()
# self.assertEqual(client_mock.describe_jobs.call_count, self.STATUS_RETRIES)
client_mock.describe_jobs.assert_called_with(jobs=[JOB_ID])
self.assertEqual(client_mock.describe_jobs.call_count, self.MAX_RETRIES)
@mock.patch("airflow.providers.amazon.aws.operators.batch.randint")
def test_poll_job_status_hit_api_throttle(self, mock_randint):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
mock_randint.return_value = 0 # don't pause in unit tests
client_mock.describe_jobs.side_effect = botocore.exceptions.ClientError(
error_response={"Error": {"Code": "TooManyRequestsException"}},
operation_name="get job description",
)
with self.assertRaises(Exception) as e:
self.batch._poll_for_task_ended()
self.assertIn("Failed to get job description", str(e.exception))
client_mock.describe_jobs.assert_called_with(jobs=[JOB_ID])
self.assertEqual(client_mock.describe_jobs.call_count, self.STATUS_RETRIES)
def test_check_success_tasks_raises(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.describe_jobs.return_value = {"jobs": []}
with self.assertRaises(Exception) as e:
self.batch._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("Failed to get job description", str(e.exception))
def test_check_success_tasks_raises_failed(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.describe_jobs.return_value = {
"jobs": [
{
"jobId": JOB_ID,
"status": "FAILED",
"statusReason": "This is an error reason",
"attempts": [{"exitCode": 1}],
}
]
}
with self.assertRaises(Exception) as e:
self.batch._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("Job ({}) failed with status ".format(JOB_ID), str(e.exception))
def test_check_success_tasks_raises_pending(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.describe_jobs.return_value = {
"jobs": [{"jobId": JOB_ID, "status": "RUNNABLE"}]
}
with self.assertRaises(Exception) as e:
self.batch._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("Job ({}) is still pending".format(JOB_ID), str(e.exception))
def test_check_success_tasks_raises_multiple(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.describe_jobs.return_value = {
"jobs": [
{
"jobId": JOB_ID,
"status": "FAILED",
"statusReason": "This is an error reason",
"attempts": [{"exitCode": 1}, {"exitCode": 10}],
}
]
}
with self.assertRaises(Exception) as e:
self.batch._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("Job ({}) failed with status ".format(JOB_ID), str(e.exception))
def test_check_success_task_not_raises(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.describe_jobs.return_value = {
"jobs": [{"jobId": JOB_ID, "status": "SUCCEEDED"}]
}
self.batch._check_success_task()
client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
def test_check_success_task_raises_without_jobs(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.describe_jobs.return_value = {"jobs": []}
with self.assertRaises(Exception) as e:
self.batch._check_success_task()
client_mock.describe_jobs.assert_called_with(jobs=[JOB_ID])
self.assertEqual(client_mock.describe_jobs.call_count, self.STATUS_RETRIES)
self.assertIn("Failed to get job description", str(e.exception))
def test_kill_job(self):
client_mock = mock.Mock()
self.batch.jobId = JOB_ID
self.batch.client = client_mock
client_mock.terminate_job.return_value = {}
self.batch.on_kill()
client_mock.terminate_job.assert_called_once_with(
jobId=JOB_ID, reason="Task killed by the user"
)
if __name__ == "__main__":
unittest.main()
|
"""Main module."""
import inspect
import typing
from datetime import date, datetime, time
import graphene
BASIC_TYPE_MAPPINGS = {
str: graphene.String,
int: graphene.Int,
bool: graphene.Boolean,
float: graphene.Float,
date: graphene.Date,
datetime: graphene.DateTime,
time: graphene.Time,
}
UNSUPORTED_TYPES = (list, dict, tuple, set)
class UnsupportedAnnotationError(Exception):
"""Raised on unsupported annotation resolve attempt."""
class NoAnnotationsError(Exception):
"""Raised when no annotations have been found (or all are excluded)."""
class SchemaClashError(Exception):
"""Raised when there are two different schema classes with the same name."""
class ObjectType(graphene.ObjectType):
"""
Base class for type annotated graphene schemas.
The subclass still has to be decorated, the purpose of this class is to provide
hints for special graphanno attributes and those inherited from ObjectType.
"""
__model__: typing.Any = None
__excluded_fields__: typing.Iterable = tuple()
__ignore_unsupported__: bool = False
def graph_annotations( # pylint: disable=dangerous-default-value
cls, cached_objects={}
):
"""Prepare GraphQL schema based on the type annotations."""
attributes = {}
target_class = cls.__model__ if hasattr(cls, "__model__") else cls
ignore_unsupported = getattr(cls, "__ignore_unsupported__", False)
excluded_keys = getattr(cls, "__excluded_fields__", tuple())
cached = None
if hasattr(cls, "__name__"):
cached = _get_cached(
cached_objects.get(cls.__name__, (None, None, None)),
target_class,
excluded_keys,
)
if cached:
return cached
annotations = _get_annotations_data(cls, excluded_keys, target_class)
for name, annotation in annotations.items():
if annotation in UNSUPORTED_TYPES:
if ignore_unsupported:
continue
raise UnsupportedAnnotationError(
f"The type annotation: {annotation} is not supported."
)
type_, args = _get_type_from_annotation(annotation)
attributes[name] = type_(*args)
superclasses = (
(cls,) if issubclass(cls, graphene.ObjectType) else (cls, graphene.ObjectType)
)
result = type(cls.__name__, superclasses, attributes)
cached_objects[result.__name__] = (
result,
target_class,
hasattr(cls, "__model__") or set(annotations.keys()),
)
return result
def _get_annotations_data(cls, excluded_keys, target_class):
annotations = dict(**getattr(target_class, "__annotations__", {}))
annotations.update(getattr(cls, "__annotations__", {}))
annotations.update(_get_property_annotations(target_class))
private_keys = tuple(key for key in annotations.keys() if key.startswith("_"))
for key in excluded_keys + private_keys:
annotations.pop(key)
if not annotations:
raise NoAnnotationsError(
f'No included annotations for class '
f'{cls.__name__ if hasattr(cls, "__name__") else cls}.'
)
return annotations
def _get_cached(cache_data, target, excluded_keys):
cached, original, annotated = cache_data
if cached:
if (target.__module__, target) != (original.__module__, original):
raise SchemaClashError(
f'The schema with name "{target.__name__}" already exists, '
f"and bases on another class:\n"
f"\t- Current: {target.__module__}.{target.__name__}\n"
f"\t- Existing: {original.__module__}.{original.__name__}"
)
if annotated is True:
return cached
for key in excluded_keys:
delattr(cached, key)
annotated.remove(key)
if not annotated:
raise NoAnnotationsError(
f"No included annotations for class {target.__name__}."
)
return cached
def _get_type_from_annotation(annotation, type_only=False):
basic_type = BASIC_TYPE_MAPPINGS.get(annotation)
if basic_type:
return basic_type if type_only else (basic_type, tuple())
if str(annotation).startswith("typing.List"):
return graphene.List, (_get_type_from_annotation(annotation.__args__[0], True),)
if hasattr(annotation, "__origin__"):
if annotation.__origin__ is typing.Union:
if len(annotation.__args__) == 2 and any(
isinstance(None, arg) for arg in annotation.__args__
):
type_ = next(
arg for arg in annotation.__args__ if not isinstance(None, arg)
)
return _get_type_from_annotation(type_, type_only)
type_ = graph_annotations(annotation)
return type_ if type_only else (graphene.Field, (type_,))
def _get_property_annotations(cls):
property_annotations = {}
properties = inspect.getmembers(cls, lambda o: isinstance(o, property))
for name, property_ in properties:
members = {
key: value
for key, value in inspect.getmembers(
property_.fget, lambda o: isinstance(o, dict)
)
}
annotation = members.get("__annotations__", {}).get("return")
if annotation:
property_annotations[name] = annotation
return property_annotations
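# Usage sketch (assumption: graph_annotations is applied as a class decorator;
# the Person class below is hypothetical and not part of this module):
#
#   @graph_annotations
#   class Person:
#       name: str
#       age: int
#       nickname: typing.Optional[str]
#
# The resulting class is a graphene.ObjectType whose fields are resolved via
# BASIC_TYPE_MAPPINGS (String, Int and String here), so it can be plugged into
# a graphene.Schema like any hand-written type.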
|
import pytest
import mock
from elasticsearch_metrics import exceptions
from elasticsearch_metrics.management.commands.check_metrics import Command
from elasticsearch_metrics.registry import registry
@pytest.fixture()
def mock_check_index_template():
with mock.patch(
"elasticsearch_metrics.metrics.Metric.check_index_template"
) as patch:
yield patch
def test_exits_with_error_if_out_of_sync(run_mgmt_command, mock_check_index_template):
mock_check_index_template.side_effect = exceptions.IndexTemplateNotFoundError(
"Index template does not exist", client_error=None
)
with pytest.raises(SystemExit):
run_mgmt_command(Command, ["check_metrics"])
def test_exits_with_success(run_mgmt_command, mock_check_index_template):
mock_check_index_template.return_value = True
run_mgmt_command(Command, ["check_metrics"])
assert mock_check_index_template.call_count == len(registry.get_metrics())
|
from django.conf.urls import url
from .views import Home
urlpatterns = [
url(r'^$', Home.as_view(), name='home'),
]
|
from flask import Flask, request, jsonify, send_from_directory
from Models.hardware import Hardware
from auth import Auth, User
from database import init_db
from Models.project import ProjectSchema, Project
from auth import check_auth
from os import environ
from bson.objectid import ObjectId
from dotenv import load_dotenv
app = Flask(__name__, static_folder="frontend/build", static_url_path="")
@app.route('/api/auth/', methods=["PUT", "POST"])
def handle_auth():
json_data = request.get_json()
auth_handler = Auth(json_data["uname"], json_data["pass"])
if request.method == "POST":
try:
uid = auth_handler.signup()
respObject = jsonify({"message": "Signed Up!"})
respObject.set_cookie("uid", uid)
return respObject, 200
except Exception as e:
if str(e) == "User Exists":
return jsonify({"message": str(e)}), 409
else:
print(e)
return jsonify({"message": "Internal Server Error"}), 500
else:
try:
can_login, uid = auth_handler.login()
if can_login:
respObject = jsonify({"message": "Logged In!"})
respObject.set_cookie("uid", uid)
return respObject, 200
else:
return jsonify({"message": "Invalid Credentials"}), 405
except Exception as e:
if str(e) == "No Account":
return jsonify({"message": str(e)}), 409
else:
print(e)
return jsonify({"message": "Internal Server Error"}), 500
@app.route("/api/auth/signout/", methods=["POST"])
@check_auth
def handle_signout():
Auth.logout()
respObject = jsonify({"message": "Logged Out!"})
return respObject, 200
@app.route("/api/users/", methods=["GET"])
@check_auth
def handle_users():
if request.args.get('query') is None:
return jsonify({"message": "Missing Query"}), 400
users = User.get_users(request.args.get('query'))
respObject = jsonify({"users": users})
return respObject, 200
@app.route('/api/projects/<pid>/invite/', methods=["PUT", "GET"])
@check_auth
def handle_invite(pid):
project = Project(id=pid, creator=request.user)
if request.method == "PUT":
json_data = request.get_json()
project.members = json_data["members"]
try:
project.set_users()
return jsonify({"message": "Invited!"}), 200
except Exception as e:
if str(e) == "User Lacks Permissions":
return jsonify({"message": str(e)}), 403
else:
print(e)
return jsonify({"message": str(e)}), 500
else:
try:
return jsonify({"users": project.get_users()}), 200
except Exception as e:
if str(e) == "User Lacks Permissions":
return jsonify({"message": str(e)}), 403
else:
print(e)
return jsonify({"message": str(e)}), 500
@app.route('/api/projects/<pid>/', methods=('GET', 'PUT', 'DELETE'))
@check_auth
def handle_specific_project(pid: str):
schema = ProjectSchema()
if request.method == 'GET':
project = Project(id=pid, creator=request.user)
try:
project_data = project.get_project()
return jsonify({"projectData": project_data}), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
elif request.method == 'PUT':
project = schema.load(request.json, partial=True, unknown="INCLUDE")
project.id = ObjectId(pid)
project.creator = ObjectId(request.user)
try:
project.update_project()
return jsonify({"message": "Project Updated"}), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
elif request.method == 'DELETE':
project = Project(id=pid, creator=request.user)
try:
return jsonify({"projectData": project.delete_project()}), 200
except Exception as e:
return jsonify({"error": str(e)}), 500
@app.route('/api/projects/', methods=('GET', 'POST'))
@check_auth
def handle_projects():
schema = ProjectSchema()
if request.method == 'GET':
if request.args.get('offset') is None:
return jsonify({"message": "Missing Offset"}), 400
project = Project(creator=request.user)
project_data, count = project.get_projects(int(request.args.get('offset')))
return jsonify({"projectData": project_data, "totalProjects": count}), 200
elif request.method == 'POST':
project = schema.load(request.json)
project.creator = ObjectId(request.user)
project.create_project()
return jsonify({"message": "Created Successfully"}), 201
# TODO handle authorization versus server errors
@app.route("/api/hardware/", methods = ("GET", "PUT"))
def handle_hardware():
if request.method == "GET":
if request.args.get('offset') is None:
return jsonify({"message": "Missing Offset"}), 400
hardware_data, count = Hardware.get_all_hardware(int(request.args.get('offset')))
return jsonify({"hardwareData":hardware_data, "totalHardware": count}), 200
else:
try:
return jsonify({"projectData": Hardware.update_hardware(request.json)}), 200
except Exception as e:
if str(e) == "Invalid Amount":
return jsonify({"message": str(e)}), 400
else:
return jsonify({"message": str(e)}), 500
@app.route("/api/hardware/<id>/", methods = ["GET"])
def handle_specific_hardware(id: str):
hardware = Hardware(id=id)
return jsonify({"hardwareData": hardware.get_hardware()}), 200
@app.errorhandler(404)
def catch_all(element):
return send_from_directory(app.static_folder, "index.html")
load_dotenv()
init_db()
app.config.update(
SESSION_COOKIE_HTTPONLY=False,
SECRET_KEY=environ.get("SECRET_KEY")
)
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=4s)
"""
import os
import unittest
import numpy
import pandas
from pyquickhelper.pycode import ExtTestCase
from pandas_streaming.df import dataframe_hash_columns
class TestDataFrameHelpers(ExtTestCase):
def test_hash_columns(self):
df = pandas.DataFrame([dict(a=1, b="e", c=5.6, ind="a1", ai=1),
dict(b="f", c=5.7, ind="a2", ai=2),
dict(a=4, b="g", ind="a3", ai=3),
dict(a=8, b="h", c=5.9, ai=4),
dict(a=16, b="i", c=6.2, ind="a5", ai=5)])
df2 = dataframe_hash_columns(df)
self.assertEqual(df2.shape, df.shape)
for j in range(df.shape[1]):
self.assertEqual(df.columns[j], df2.columns[j])
self.assertEqual(df.dtypes[j], df2.dtypes[j])
for i in range(df.shape[0]):
v1 = df.iloc[i, j]
v2 = df2.iloc[i, j]
if isinstance(v1, float):
if numpy.isnan(v1):
self.assertTrue(numpy.isnan(v2))
else:
self.assertEqual(type(v1), type(v2))
else:
self.assertEqual(type(v1), type(v2))
def test_hash_columns_bigger(self):
data = os.path.join(os.path.dirname(__file__), "data")
name = os.path.join(data, "buggy_hash.csv")
df = pandas.read_csv(name, sep="\t", encoding="utf-8")
df2 = dataframe_hash_columns(df)
self.assertEqual(df.shape, df2.shape)
if __name__ == "__main__":
unittest.main()
|
import setuptools
import mazikeen.version
with open("README.rst", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open("LICENSE", "r", encoding="utf-8") as fh:
LICENSE = fh.read()
setuptools.setup(
name="mazikeen",
version=mazikeen.version.__version__,
author="Neaga Septimiu",
author_email="neagas@gmail.com",
description="Test enviroment for CLI application",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/hanniballar/mazikeen",
project_urls={
"Bug Tracker": "https://github.com/hanniballar/mazikeen/issues",
},
license_file = LICENSE,
license="MIT",
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Testing",
"Topic :: Utilities"
],
packages=["mazikeen"],
install_requires=["junit_xml>=1.8", "pyyaml>=5.4.1"],
extras_require={
'testing': [
"xmldiff==2.4"
]
},
entry_points={"console_scripts": ["mazikeen=mazikeen.__main__:main"]},
)
|
"""
2.7 – Removing whitespace characters from names: Store a person's name and include some whitespace characters at the beginning and end of the name. Remember to use each of the character combinations "\t" and "\n" at least once.
"""
nome = " \t Francisco \n"
print(nome)
# Using rstrip()
print(nome.rstrip())
# Using lstrip()
print(nome.lstrip())
# Using strip()
print(nome.strip())
|
import cv2  # required for cv2.imwrite below

from Prepocessing_word_char import Preprocess
def main(input_dir: str, output_dir: str):
preprocess = Preprocess()
# Lines segmenting
lines = preprocess.segment_all(input_dir)
# Words segmenting
words = preprocess.crop_word_from_line(lines)
# Characters segmenting
chars = preprocess.crop_char_from_word(words)
# Export character images
line_num = 0
for line in chars:
word_num = 0
for word in line:
j = 0
for char in word:
cv2.imwrite(output_dir + "/line{0}_word{1}_char{2}.png".format(line_num, word_num, str(j)), char)
j += 1
word_num += 1
line_num += 1
if __name__ == '__main__':
input_dir = input("Image path: ")
output_dir = input("Output path: ")
main(input_dir, output_dir)
|
import os
from . import BaseCommand
from ..i18n import _
class Command(BaseCommand):
name = os.path.splitext(os.path.basename(__file__))[0]
description = _('a complete list of group name aliases')
quiet_fields = {
'aliasName': _('alias name'),
'groupName': _('group name'),
}
def fill_arguments(self):
pass
def main():
Command().run()
|
from datetime import datetime, timedelta, time
from typing import List, Dict, Any, Union, Set
import firebase_admin
from firebase_admin import firestore
from google.cloud.firestore_v1 import Query
from google.cloud.firestore_v1.document import DocumentSnapshot
max_date = datetime(9999, 12, 30, 12, 0, 0)
DailysData = Dict[str, Any] # Just the "data" part of a DailysEntry
DailysEntry = Dict[str, Any] # A full dailys entry, with data, source, stat name, and date
DailysEntries = List[DailysEntry] # A list of dailys entries
DailysDate = Union[datetime, str] # A date, can be a datetime object, or "earliest", "latest", "static"
class CantUpdate(Exception):
pass
class DataSource:
def __init__(self):
firebase_admin.initialize_app()
self.data_source = firestore.client().collection('Dailys stats')
def get_unique_stat_names(self) -> Set[str]:
unique_names = set()
for stat in self.data_source.get():
if stat.get("stat_name"):
unique_names.add(stat.get("stat_name"))
return unique_names
def get_entries_for_stat(self, stat_name: str) -> DailysEntries:
return [
x.to_dict()
for x
in self.data_source.where("stat_name", "==", stat_name).order_by("date").get()
]
def get_documents_for_stat_on_date(self, stat_name: str, view_date: DailysDate) -> List[DocumentSnapshot]:
data_partial = self.data_source.where("stat_name", "==", stat_name)
if view_date == "latest":
data_partial = data_partial\
.where("date", "<=", max_date)\
.order_by("date", direction=Query.DESCENDING)\
.limit(1)
elif view_date == "static":
data_partial = data_partial.where("date", "==", "static")
else:
start_datetime = datetime.combine(view_date, time(0, 0, 0))
end_datetime = datetime.combine(view_date + timedelta(days=1), time(0, 0, 0))
data_partial = data_partial.where("date", ">=", start_datetime).where("date", "<", end_datetime)
return list(data_partial.get())
def get_entries_for_stat_on_date(self, stat_name: str, view_date: DailysDate) -> DailysEntries:
return [x.to_dict() for x in self.get_documents_for_stat_on_date(stat_name, view_date)]
def get_entries_over_range(self, start_date: DailysDate, end_date: DailysDate) -> DailysEntries:
data_partial = self.data_source
# Filter start date
if start_date != "earliest":
start_datetime = datetime.combine(start_date, time(0, 0, 0))
data_partial = data_partial.where("date", ">=", start_datetime)
# Filter end date
if end_date != "latest":
end_datetime = datetime.combine(end_date + timedelta(days=1), time(0, 0, 0))
data_partial = data_partial.where("date", "<=", end_datetime)
# Collapse data to dicts
stat_list = [x.to_dict() for x in data_partial.order_by("date").get()]
# If date range is unbounded, filter out static data
if start_date == "earliest" and end_date == "latest":
stat_list = [x for x in stat_list if x['date'] != 'static']
return stat_list
def get_entries_for_stat_over_range(self, stat_name: str, start_date: DailysDate, end_date: DailysDate) -> DailysEntries:
data_partial = self.data_source.where("stat_name", "==", stat_name)
# Filter start date
if start_date != "earliest":
start_datetime = datetime.combine(start_date, time(0, 0, 0))
data_partial = data_partial.where("date", ">=", start_datetime)
# Filter end date
if end_date != "latest":
end_datetime = datetime.combine(end_date + timedelta(days=1), time(0, 0, 0))
data_partial = data_partial.where("date", "<=", end_datetime)
# Collapse data to dicts
data = [x.to_dict() for x in data_partial.order_by("date").get()]
# If date range is unbounded, filter out static data
if start_date == "earliest" and end_date == "latest":
data = [x for x in data if x['date'] != 'static']
return data
def update_entry_for_stat_on_date(
self,
stat_name: str,
update_date: DailysDate,
new_data: DailysData,
source: str) -> DailysEntry:
# Construct new data object
total_data = {'stat_name': stat_name}
if update_date == "latest":
raise CantUpdate("Can't update data on latest")
elif update_date == "static":
total_data['date'] = "static"
else:
total_data['date'] = datetime.combine(update_date, time(0, 0, 0))
total_data['source'] = source or "Unknown [via API]"
total_data['data'] = new_data
# See if data exists
data = self.get_documents_for_stat_on_date(stat_name, update_date)
if len(data) == 1:
self.data_source.document(data[0].id).set(total_data)
else:
self.data_source.add(total_data)
return total_data
def get_latest_n_entries_for_stat(self, stat_name: str, n: int) -> List[DailysData]:
raw_data = self.data_source.where("stat_name", "==", stat_name)\
.where("date", "<", max_date)\
.order_by("date", direction=Query.DESCENDING).limit(n).get()
sleeps = [x.to_dict()['data'] for x in raw_data]
return sleeps
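# Minimal usage sketch (assumes Firebase credentials are available in the
# environment; the stat name "sleep" is illustrative only):
#
#   source = DataSource()
#   latest = source.get_entries_for_stat_on_date("sleep", "latest")
#   last_week = source.get_latest_n_entries_for_stat("sleep", 7)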
|
result = first(lambda: "second")
|
import pytest
import numpy as np
from dltranz.data_load.iterable_processing.id_filter import IdFilter
def get_data(id_type):
return [{'client_id': id_type(i)} for i in range(1, 10)]
def test_int():
i_filter = IdFilter('client_id', [1, 5, 9])
data = i_filter(get_data(int))
data = [x['client_id'] for x in data]
assert data == [1, 5, 9]
def test_np():
i_filter = IdFilter('client_id', np.array([1, 5, 9]).astype(np.int16))
data = i_filter(get_data(np.int16))
data = [x['client_id'] for x in data]
assert data == [1, 5, 9]
def test_type_mismatch_int_str():
i_filter = IdFilter('client_id', [1, 5, 9])
data = i_filter(get_data(str))
with pytest.raises(TypeError):
_ = [x['client_id'] for x in data]
|
from django.apps import AppConfig
class AddressbookConfig(AppConfig):
name = 'AddressBook'
|
"""
Otter Service tornado server
"""
import os
import json
import yaml
import hashlib
import jwt
import tornado.options
import queries
import stdio_proxy
from io import BytesIO
from datetime import datetime
from binascii import hexlify
from tornado.httpserver import HTTPServer
from tornado.web import Application, RequestHandler
from tornado.auth import GoogleOAuth2Mixin
from tornado.ioloop import IOLoop
from tornado.queues import Queue
from concurrent.futures import ThreadPoolExecutor
from .utils import connect_db
from ..grade.containers import grade_assignments
OTTER_SERVICE_DIR = "/otter-service"
ARGS = None
SUBMISSION_QUEUE = Queue()
CONN = None
EXECUTOR = ThreadPoolExecutor()
class BaseHandler(tornado.web.RequestHandler):
"""Base login handler"""
def get_current_user(self):
"""
Gets secure user cookie for personal authentication
"""
return self.get_secure_cookie("user")
class LoginHandler(BaseHandler):
"""
Default auth handler
A login handler that requires instructors to setup users and passwords in database beforehand,
allowing students to auth within the notebook.
"""
async def get(self):
"""
GET request handler for personal/default authentication login
"""
username = self.get_argument('username', True)
password = self.get_argument('password', True)
pw_hash = hashlib.sha256(password.encode()).hexdigest()
account_check = await self.db.query(
"""
SELECT * FROM users
WHERE username = %s AND password = %s
""",
[username, pw_hash]
)
if len(account_check) > 0:
print("Logged in user {} and generating API key".format(username))
account_check.free()
api_key = hexlify(os.urandom(32)).decode("utf-8")
self.write(api_key)
results = await self.db.query(
"""
INSERT INTO users (api_keys, username, password) VALUES (%s, %s, %s)
ON CONFLICT (username)
DO UPDATE SET api_keys = array_append(users.api_keys, %s)
""",
[[api_key], username, pw_hash, api_key]
)
results.free()
else:
print("Failed login attempt for user {}".format(username))
account_check.free()
self.clear()
self.set_status(401)
self.finish()
@property
def db(self):
return self.application.db
class GoogleOAuth2LoginHandler(RequestHandler, GoogleOAuth2Mixin):
async def get(self):
"""
GET request handler for Google OAuth
Handler for authenticating users with Google OAuth. Requires that user sets environment
variables containing their client key and secret. Provides users with an API key that they
can enter in the notebook by way of authenticating.
"""
if not self.get_argument('code', False):
print("Redirecting user to Google OAuth")
return self.authorize_redirect(
redirect_uri=self.settings['auth_redirect_uri'],
client_id = ARGS.google_key if ARGS.google_key else self.settings['google_oauth']['key'],
client_secret = ARGS.google_secret if ARGS.google_secret else self.settings['google_oauth']['secret'],
scope=['email', 'profile'],
response_type='code',
extra_params={'approval_prompt': 'auto'}
)
else:
resp = await self.get_authenticated_user(
redirect_uri=self.settings['auth_redirect_uri'],
code=self.get_argument('code')
)
api_key = hexlify(os.urandom(32)).decode("utf-8")
email = jwt.decode(resp['id_token'], verify=False)['email']
print("Generating API key for user {} from Google OAuth".format(email))
results = await self.db.query(
"""
INSERT INTO users (api_keys, email) VALUES (%s, %s)
ON CONFLICT (email)
DO UPDATE SET api_keys = array_append(users.api_keys, %s)
""",
[[api_key], email, api_key]
)
results.free()
self.render("templates/api_key.html", key=api_key)
@property
def db(self):
return self.application.db
class SubmissionHandler(RequestHandler):
"""
Processes and validates student submission
Handler for processing and validating a student's submission. Ensure that notebook is present
and valid, checks API key, and implements rate limiting to prevent spamming the autograder.
Queues submission for grading by ``EXECUTOR``.
"""
async def get(self):
"""
GET request handler. Route warns users that this is a POST-only route.
"""
self.write("This is a POST-only route; you probably shouldn't be here.")
self.finish()
async def post(self):
"""
POST request handler. Validates JSON params and queues submission for grading.
"""
self.submission_id = None
try:
# check request params
request = tornado.escape.json_decode(self.request.body)
assert 'nb' in request.keys(), 'submission contains no notebook'
assert 'api_key' in request.keys(), 'missing api key'
notebook = request['nb']
api_key = request['api_key']
# run through submission
await self.submit(notebook, api_key)
except Exception as e:
print(e)
self.finish()
# if submission successful, queue notebook for grading
if self.submission_id is not None:
SUBMISSION_QUEUE.put(self.submission_id)
async def validate(self, notebook, api_key):
"""
        Ensures a submission is valid by checking user credentials, submission frequency, and
        the validity of the notebook file.
Arguments:
notebook (``dict``): notebook in JSON form
api_key (``str``): API key generated during submission
Returns:
``tuple``: submission information
"""
# authenticate user with api_key
results = await self.db.query("SELECT user_id, username, email FROM users WHERE %s=ANY(api_keys) LIMIT 1", [api_key])
user_id = results.as_dict()['user_id'] if len(results) > 0 else None
        username = (results.as_dict()['username'] or results.as_dict()['email']) if len(results) > 0 else None
results.free()
assert user_id, 'invalid API key: {}'.format(api_key)
# get assignment and class id from notebook metadata
assert all(key in notebook for key in ['metadata', 'nbformat', 'cells']), 'invalid Jupyter notebook'
assert 'assignment_id' in notebook['metadata'], 'missing required metadata attribute: assignment_id'
assert 'class_id' in notebook['metadata'], 'missing required metadata attribute: class_id'
assignment_id = notebook['metadata']['assignment_id']
class_id = notebook['metadata']['class_id']
# rate limiting
results = await self.db.query(
"""
SELECT timestamp
FROM submissions
WHERE user_id = %s AND assignment_id = %s AND class_id = %s
ORDER BY timestamp DESC
LIMIT 1
""",
(user_id, assignment_id, class_id)
)
last_submitted = results.as_dict()['timestamp'] if len(results) > 0 else None
results.free()
if last_submitted:
delta = datetime.utcnow() - last_submitted
# rate_limit = 120
if delta.seconds < ARGS.rate_limit:
self.write_error(429, message='Please wait {} second(s) before re-submitting.'.format(ARGS.rate_limit - delta.seconds))
return
# check that assignment exists
results = await self.db.query("SELECT * FROM assignments WHERE assignment_id=%s LIMIT 1", [assignment_id])
assert results, 'assignment_id {} not found on server'.format(assignment_id)
assignment = results.as_dict()
results.free()
return (user_id, username, assignment['class_id'], assignment_id, assignment['assignment_name'])
async def submit(self, notebook, api_key):
"""
If valid submission, inserts notebook into submissions table in database and queues it
for grading.
Arguments:
notebook (``dict``): notebook in JSON form
api_key (``str``): API key generated during submission
"""
try:
user_id, username, class_id, assignment_id, assignment_name = await self.validate(notebook, api_key)
except TypeError as e:
print("Submission failed for user with API key {}: ".format(api_key, e))
return
except AssertionError as e:
print("Submission failed for user with API key {} due to due to client error: {}".format(api_key, e))
self.write_error(400, message=e)
return
# fetch next submission id
results = await self.db.query("SELECT nextval(pg_get_serial_sequence('submissions', 'submission_id')) as id")
submission_id = results.as_dict()['id']
results.free()
print("Successfully received submission {} from user {}".format(submission_id, username))
# save notebook to disk
dir_path = os.path.join(
self.settings['notebook_dir'],
'class-{}'.format(class_id),
'assignment-{}'.format(assignment_id),
'submission-{}'.format(submission_id)
)
file_path = os.path.join(dir_path, '{}.ipynb'.format(assignment_name))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(file_path, 'w') as f:
json.dump(notebook, f)
print("Successfully saved submission {} at {}".format(submission_id, file_path))
# store submission to database
results = await self.db.query("INSERT INTO submissions (submission_id, assignment_id, class_id, user_id, file_path, timestamp) VALUES (%s, %s, %s, %s, %s, %s)",
[submission_id, assignment_id, class_id, user_id, file_path, datetime.utcnow()])
assert results, 'submission failed'
results.free()
self.submission_id = submission_id
self.write('Submission {} received.'.format(submission_id))
@property
def db(self):
return self.application.db
def write_error(self, status_code, **kwargs):
"""
Writes an error message to response
Args:
status_code (``int``): the response status
message (``str``): message to include in the response
"""
if 'message' in kwargs:
self.write('Submission failed: {}'.format(kwargs['message']))
else:
self.write('Submission failed.')
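# Illustrative shape of the request body handled by SubmissionHandler above
# (placeholder values, not real keys):
#   POST /submit
#   {
#       "nb": { ...full Jupyter notebook JSON, whose metadata must include
#               "assignment_id" and "class_id"... },
#       "api_key": "<key obtained from /auth or /auth/google>"
#   }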
def grade_submission(submission_id):
"""
Grades a single submission with id ``submission_id``
Args:
submission_id (``str``): the id of the submission to grade
Returns:
``tuple``: grading message and results dataframe for printing
"""
global CONN
cursor = CONN.cursor()
cursor.execute(
"""
SELECT user_id, submission_id, assignment_id, class_id, file_path
FROM submissions
WHERE submission_id = %s
ORDER BY timestamp DESC
LIMIT 1
""",
(submission_id, )
)
user_record = cursor.fetchall()
assert len(user_record) == 1, "Submission {} not found".format(submission_id)
row = user_record[0]
user_id = int(row[0])
submission_id = int(row[1])
assignment_id = str(row[2])
class_id = str(row[3])
file_path = str(row[4])
cursor.execute(
"""
SELECT seed
FROM assignments
WHERE assignment_id = %s AND class_id = %s
""",
(assignment_id, class_id)
)
assignment_record = cursor.fetchall()
assert len(assignment_record) == 1, "Assignment {} for class {} not found".format(assignment_id, class_id)
seed = int(assignment_record[0][0]) if assignment_record[0][0] else None
cursor.execute(
"""
SELECT username, email
FROM users
WHERE user_id = %s
LIMIT 1
""",
(user_id, )
)
user_record = cursor.fetchall()
row = user_record[0]
username = str(row[0] or row[1])
# Run grading function in a docker container
stdout = BytesIO()
stderr = BytesIO()
try:
with stdio_proxy.redirect_stdout(stdout), stdio_proxy.redirect_stderr(stderr):
df = grade_assignments(
tests_dir=None,
notebooks_dir=file_path,
id=assignment_id,
image=class_id + "-" + assignment_id,
debug=True,
verbose=True,
seed=seed
)
message = "Graded submission {} from user {}".format(submission_id, username)
df_json_str = df.to_json()
# Insert score into submissions table
cursor.execute(
"""
UPDATE submissions
SET score = %s
WHERE submission_id = %s
""",
(df_json_str, submission_id)
)
finally:
stdout = stdout.getvalue().decode("utf-8")
stderr = stderr.getvalue().decode("utf-8")
with open(os.path.join(os.path.split(file_path)[0], "GRADING_STDOUT"), "w+") as f:
f.write(stdout)
with open(os.path.join(os.path.split(file_path)[0], "GRADING_STDERR"), "w+") as f:
f.write(stderr)
cursor.close()
return message, df
async def start_grading_queue(shutdown=False):
"""
    Pops submission ids off ``SUBMISSION_QUEUE`` and sends them to ``EXECUTOR`` to be graded
Args:
shutdown (``bool``): whether or not to shutdown EXECUTOR after processing queue; default
``False``
"""
global SUBMISSION_QUEUE
async for submission_id in SUBMISSION_QUEUE:
future = EXECUTOR.submit(
grade_submission,
submission_id
)
future.add_done_callback(lambda f: print(f.result()[0], "\n", f.result()[1]))
# Set task done in queue
SUBMISSION_QUEUE.task_done()
if shutdown:
EXECUTOR.shutdown(wait=True)
class Application(tornado.web.Application):
"""
Otter Service tornado application
"""
def __init__(self):
"""
Initialize tornado server for receiving/grading submissions
"""
endpoint = ARGS.endpoint or os.environ.get("OTTER_ENDPOINT", None)
assert endpoint is not None, "no endpoint address provided"
assert os.path.isdir(OTTER_SERVICE_DIR), "{} does not exist".format(OTTER_SERVICE_DIR)
settings = dict(
google_oauth={
"key": ARGS.google_key or os.environ.get("GOOGLE_CLIENT_KEY", None),
"secret": ARGS.google_secret or os.environ.get("GOOGLE_CLIENT_SECRET", None)
},
notebook_dir = os.path.join(OTTER_SERVICE_DIR, "submissions"),
auth_redirect_uri = os.path.join(endpoint, "auth/callback")
)
handlers = [
(r"/submit", SubmissionHandler),
(r"/auth/google", GoogleOAuth2LoginHandler),
(r"/auth/callback", GoogleOAuth2LoginHandler),
(r"/auth", LoginHandler)
]
tornado.web.Application.__init__(self, handlers, **settings)
# Initialize database session
self.db = queries.TornadoSession(queries.uri(
host=ARGS.db_host,
port=ARGS.db_port,
dbname='otter_db',
user=ARGS.db_user,
password=ARGS.db_pass
))
def main(cli_args):
"""
Starts Otter Service tornado server
Args:
cli_args (``argparse.Namespace``): parsed command-line arguments
"""
# if args.missing_packages:
# raise ImportError(
# "Missing some packages required for otter service. "
# "Please install all requirements at "
# "https://raw.githubusercontent.com/ucbds-infra/otter-grader/master/requirements.txt"
# )
global CONN
global ARGS
ARGS = cli_args
CONN = connect_db(ARGS.db_host, ARGS.db_user, ARGS.db_pass, ARGS.db_port)
port = ARGS.port
tornado.options.parse_command_line()
    # make submissions folder
if not os.path.isdir(OTTER_SERVICE_DIR):
os.makedirs(os.path.join(OTTER_SERVICE_DIR))
server = HTTPServer(Application())
server.listen(port)
print("Listening on port {}".format(port))
IOLoop.current().add_callback(start_grading_queue)
IOLoop.current().start()
|
# Generated by Django 3.1.3 on 2020-11-12 16:20
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Event",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=255, unique=True)),
("slug", models.SlugField(default="", editable=False, max_length=255)),
("image", models.ImageField(upload_to="")),
(
"description",
ckeditor_uploader.fields.RichTextUploadingField(
blank=True, null=True
),
),
("date", models.DateField()),
("is_active", models.BooleanField(default=True)),
],
options={
"ordering": ["-date"],
},
),
]
|
from setuptools import setup
setup(
name = 'gym-nats',
packages = ['gym_nats'],
version = '0.1.2',
license='MIT',
description='OpenAI Gym environment interfacing with NATS.io',
    long_description = 'Implements an OpenAI gym environment using NATS.io. Using this environment, '
                       'a reinforcement learning agent can use the environment to learn from any source '
                       'of data since NATS.io can feed arbitrary data into the environment. '
                       'For further information on the interface check out the README file.',
author = 'Moritz Pascal Stephan',
author_email = 'moritz.stephan@gmx.at',
url = 'https://github.com/austrian-code-wizard/gym-nats',
download_url = 'https://github.com/austrian-code-wizard/gym-nats/archive/v_01.tar.gz',
keywords = ['nats', 'reinforcement learning', 'openai'],
install_requires=['gym', 'asyncio-nats-client', 'numpy'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
)
|
#!/usr/bin/env python3
import collections
import os
import json
import unicodedata
import sys
import sqlite3
import re
import operator
VERSION = 1
MAX_RESULTS = 20
INDEX_FILE_NAME = os.path.expanduser(
'~/Library/Caches/unicode_names.{0}.cache'.format(
'.'.join(map(str, sys.version_info))
)
)
STOPWORDS = ['of', 'with', 'the', 'a', 'an']
def tokenize(string):
words = re.split(r'[\s_-]+', string.lower())
non_stop_words = [w for w in words if w not in STOPWORDS]
if non_stop_words:
return non_stop_words
return words
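# Worked examples for tokenize (illustrative, not part of the original script):
#   tokenize("LATIN SMALL LETTER A") -> ["latin", "small", "letter"]
#       ("a" is dropped because it is listed in STOPWORDS)
#   tokenize("a") -> ["a"]
#       (if every word is a stopword, the unfiltered words are returned so the
#        query can still match something)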
def ngrams(tokens, n):
if len(tokens) < n:
return
    for start in range(0, len(tokens) - n + 1):
lst = []
for idx in range(n):
lst.append(tokens[start + idx])
yield lst
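# Worked example for ngrams (illustrative):
#   list(ngrams(["white", "smiling", "face"], 2))
#       -> [["white", "smiling"], ["smiling", "face"]]
#   list(ngrams(["white", "smiling", "face"], 3))
#       -> [["white", "smiling", "face"]]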
def build_index(db):
db.execute('DROP TABLE IF EXISTS tokens')
db.execute('CREATE TABLE tokens (token VARCHAR(32), codepoint INT)')
db.execute('DROP TABLE IF EXISTS bigrams')
db.execute('CREATE TABLE bigrams (token1 VARCHAR(32), token2 VARCHAR(32), codepoint INT)')
db.execute('CREATE INDEX idx_bigrams_on_tokens ON bigrams (token1, token2)')
db.execute('DROP TABLE IF EXISTS trigrams')
db.execute('CREATE TABLE trigrams (token1 VARCHAR(32), token2 VARCHAR(32), token3 VARCHAR(32), codepoint INT)')
db.execute('CREATE INDEX idx_trigrams_on_tokens ON trigrams (token1, token2, token3)')
db.execute('DROP TABLE IF EXISTS original_names')
db.execute('CREATE TABLE original_names (codepoint INT PRIMARY KEY, name VARCHAR(128))')
db.execute('CREATE INDEX idx_original_names_on_name ON original_names (name)')
db.execute('PRAGMA journal_mode = WAL')
num_codepoints = 0
for codepoint in range(32, 0x2FA1D):
with db:
char = chr(codepoint)
try:
name = unicodedata.name(char)
except ValueError:
continue
num_codepoints += 1
words = tokenize(name)
for word in sorted(words):
db.execute('INSERT INTO tokens(token, codepoint) VALUES(?, ?)', (word, codepoint))
for bigram in ngrams(words, 2):
db.execute('INSERT INTO bigrams(token1, token2, codepoint) VALUES(?, ?, ?)', (bigram[0], bigram[1], codepoint))
for trigram in ngrams(words, 3):
db.execute(
'INSERT INTO trigrams(token1, token2, token3, codepoint) VALUES(?, ?, ?, ?)',
(trigram[0], trigram[1], trigram[2], codepoint)
)
db.execute('INSERT INTO original_names(codepoint, name) VALUES (?, ?)',
(codepoint, name.lower()))
db.execute(
'CREATE TABLE IF NOT EXISTS build_meta ('
'build_version INT PRIMARY KEY,'
'unidata_version STRING,'
'distinct_tokens BIGINT,'
'distinct_bigrams BIGINT,'
'distinct_trigrams BIGINT,'
'codepoints BIGINT)'
)
with db:
cursor = db.cursor()
cursor.execute('SELECT COUNT(DISTINCT token) FROM tokens')
distinct_tokens = cursor.fetchall()[0][0]
cursor.execute('SELECT token1, token2, COUNT(*) FROM bigrams GROUP BY 1, 2')
distinct_bigrams = 0
for row in cursor:
distinct_bigrams += 1
cursor.execute('SELECT token1, token2, token3, COUNT(*) FROM trigrams GROUP BY 1, 2, 3')
distinct_trigrams = 0
for row in cursor:
distinct_trigrams += 1
        db.execute(
            'INSERT INTO build_meta(build_version, unidata_version, distinct_tokens, distinct_bigrams, distinct_trigrams, codepoints) '
            'VALUES(?, ?, ?, ?, ?, ?)',
            (VERSION, unicodedata.unidata_version, distinct_tokens, distinct_bigrams, distinct_trigrams, num_codepoints)
        )
db.execute('PRAGMA wal_checkpoint(TRUNCATE)')
return db
def open_or_create_db():
db = sqlite3.connect(INDEX_FILE_NAME)
try:
cursor = db.cursor()
cursor.execute('SELECT build_version, unidata_version FROM build_meta ORDER BY build_version DESC LIMIT 1')
for row in cursor:
if row[0] != VERSION:
raise ValueError('Too old!')
if row[1] != unicodedata.unidata_version:
raise ValueError('Unidata too old')
return db
except Exception:
pass
db.close()
try:
os.unlink(INDEX_FILE_NAME)
except Exception:
pass
db = sqlite3.connect(INDEX_FILE_NAME)
build_index(db)
return db
def main():
query = ' '.join(sys.argv[1:])
db = open_or_create_db()
cursor = db.cursor()
cursor.execute('SELECT * FROM build_meta ORDER BY build_version DESC LIMIT 1')
for row in cursor:
_, _, distinct_tokens, distinct_bigrams, distinct_trigrams, distinct_codepoints = row
break
matches = collections.Counter()
tokens = tokenize(query)
def score_codepoints(ngram):
codepoint_matches = []
matching_codepoints = 0
for row in cursor:
codepoint_matches.append(row[0])
matching_codepoints += 1
for row in codepoint_matches:
matches[row] += ngram / matching_codepoints
for word in tokens:
cursor.execute('SELECT codepoint FROM tokens WHERE token=?', (word,))
score_codepoints(1)
for token1, token2 in ngrams(tokens, 2):
cursor.execute('SELECT codepoint FROM bigrams WHERE token1=? AND token2=?', (token1, token2))
score_codepoints(2)
for token1, token2, token3 in ngrams(tokens, 3):
cursor.execute('SELECT codepoint FROM trigrams WHERE token1=? AND token2=? AND token3=?', (token1, token2, token3))
score_codepoints(3)
cursor.execute('SELECT codepoint FROM original_names WHERE name=?', (query.lower(),))
for row in cursor:
matches[row[0]] += 10
results = []
for codepoint, score in sorted(matches.items(), key=operator.itemgetter(1), reverse=True):
char = chr(codepoint)
name = unicodedata.name(char)
subtitle = 'U+{0} {1}'.format(hex(codepoint)[2:].upper(), name)
results.append({
'title': char,
'subtitle': subtitle,
'autocomplete': name,
'type': 'default',
'arg': char,
'extra': {
'score': score,
'codepoint': codepoint,
},
'mods': {
'shift': {
'arg': subtitle,
},
'cmd': {
'arg': subtitle,
},
},
'text': {
'copy': char,
'largetype': '{0} {1}'.format(char, name)
}
})
if len(results) >= MAX_RESULTS:
break
results.sort(key=lambda r: (-r['extra']['score'], len(r['subtitle']), r['extra']['codepoint']))
print(json.dumps({'items': results}))
if __name__ == '__main__':
main()
|
'''Server that echoes data back until there is no more.'''
import sys, socket
size, host, port = 1024, '', int(sys.argv[1])
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(True)
conn, addr = s.accept()
print 'Connected by', addr
result = ''
while True:
data = conn.recv(size)
print '...server:', `data`
if not data:
break
result += data
print 'server saw', `result`
conn.close()
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
BLUE = [255,0,0]
img1 = cv2.imread('image.png')
replicate = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REPLICATE)
reflect = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT)
reflect101 = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT_101)
wrap = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_WRAP)
constant= cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_CONSTANT,value=BLUE)
plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL')
plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')
plt.subplot(233),plt.imshow(reflect,'gray'),plt.title('REFLECT')
plt.subplot(234),plt.imshow(reflect101,'gray'),plt.title('REFLECT_101')
plt.subplot(235),plt.imshow(wrap,'gray'),plt.title('WRAP')
plt.subplot(236),plt.imshow(constant,'gray'),plt.title('CONSTANT')
plt.show()
|
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import pickle
cols = """
duration,
protocol_type,
service,
flag,
src_bytes,
dst_bytes,
land,
wrong_fragment,
urgent,
hot,
num_failed_logins,
logged_in,
num_compromised,
root_shell,
su_attempted,
num_root,
num_file_creations,
num_shells,
num_access_files,
num_outbound_cmds,
is_host_login,
is_guest_login,
count,
srv_count,
serror_rate,
srv_serror_rate,
rerror_rate,
srv_rerror_rate,
same_srv_rate,
diff_srv_rate,
srv_diff_host_rate,
dst_host_count,
dst_host_srv_count,
dst_host_same_srv_rate,
dst_host_diff_srv_rate,
dst_host_same_src_port_rate,
dst_host_srv_diff_host_rate,
dst_host_serror_rate,
dst_host_srv_serror_rate,
dst_host_rerror_rate,
dst_host_srv_rerror_rate"""
cols = [c.strip() for c in cols.split(",") if c.strip()]
cols.append('target')
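# Sanity note (illustrative): the block above lists the 41 KDD Cup '99 feature
# names; after splitting, stripping and appending 'target' there are 42 column
# names, matching the raw kddcup.data layout.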
kdd = pd.read_csv("./data/kddcup.data",names=cols)
# attacks_type = {
# 'normal': 'normal',
# 'back': 'dos',
# 'buffer_overflow': 'u2r',
# 'ftp_write': 'r2l',
# 'guess_passwd': 'r2l',
# 'imap': 'r2l',
# 'ipsweep': 'probe',
# 'land': 'dos',
# 'loadmodule': 'u2r',
# 'multihop': 'r2l',
# 'neptune': 'dos',
# 'nmap': 'probe',
# 'perl': 'u2r',
# 'phf': 'r2l',
# 'pod': 'dos',
# 'portsweep': 'probe',
# 'rootkit': 'u2r',
# 'satan': 'probe',
# 'smurf': 'dos',
# 'spy': 'r2l',
# 'teardrop': 'dos',
# 'warezclient': 'r2l',
# 'warezmaster': 'r2l',
# }
kdd.head(5)
kdd_std=kdd.std()
kdd_std=kdd_std.sort_values(ascending=True)
kdd.drop(["service","is_host_login","num_outbound_cmds"],axis=1,inplace=True)
encoder = preprocessing.LabelEncoder()
for c in kdd.columns:
if str(kdd[c].dtype) == 'object':
kdd[c] = encoder.fit_transform(kdd[c])
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
kdd[['dst_bytes','src_bytes']] = scaler.fit_transform(kdd[['dst_bytes','src_bytes']])
with open('./data/kdd_df.pth','wb') as f:
pickle.dump(kdd,f)
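# To reload the preprocessed frame later (illustrative snippet mirroring the
# dump above):
#   with open('./data/kdd_df.pth', 'rb') as f:
#       kdd = pickle.load(f)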
|
#!/usr/bin/python
# Simple demo of of the PCA9685 PWM servo/LED controller library.
# This will move channel 0 from min to max position repeatedly.
# Author: Tony DiCola
# License: Public Domain
from __future__ import division
import time
import sys
# Import the PCA9685 module.
import Adafruit_PCA9685
# Uncomment to enable debug output.
#import logging
#logging.basicConfig(level=logging.DEBUG)
# Initialise the PCA9685 using the default address (0x40).
pwm = Adafruit_PCA9685.PCA9685()
# Alternatively specify a different address and/or bus:
#pwm = Adafruit_PCA9685.PCA9685(address=0x41, busnum=2)
# Configure min and max servo pulse lengths
servo_min = 150 # Min pulse length out of 4096
servo_max = 600 # Max pulse length out of 4096
servo_tilt = 15
servo_pan = 14
# Set frequency to 60hz, good for servos.
pwm.set_pwm_freq(60)
def angle(val):
'''calculate the pwm for requested angle
angle goes from -90 to +90 deg'''
    return int(round(servo_min + (90.0 - val) * (servo_max - servo_min) / 180.0))
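# Worked check of the mapping above (illustrative):
#   angle(+90) -> 150  (servo_min)
#   angle(0)   -> 375  (midpoint of 150 and 600)
#   angle(-90) -> 600  (servo_max)
# i.e. +90 degrees maps to the minimum pulse length and -90 to the maximum.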
def parse(args=None):
from argparse import ArgumentParser
if args is None:
args = sys.argv[:]
parser = ArgumentParser(usage="goto.py -t 90 -p0 ",
description=None,
epilog=None)
parser.add_argument("-t", "--tilt", dest="tilt", type=float,
help="Move up/down", default=0)
parser.add_argument("-p", "--pan", dest="pan", type=float,
help="Move right/down", default=0)
options = parser.parse_args()
return options.tilt, options.pan
tilt, pan = parse()
print('Moving servos of tilt-pan camera to tilt=%.1f pan=%.1f' % (tilt, pan))
pwm.set_pwm(servo_pan, 0, angle(pan))
pwm.set_pwm(servo_tilt, 0, angle(tilt))
|
from distutils.core import setup
setup(
name = 'config',
version = "VERSION",
packages = [''],
)
|
from typing import Any
import pytest
from hw.vadim_maletski.func6 import level_06
from .common import azaza
from .common import validate_data
from .common import validate_errors
happy_data = [ # noqa: ECE001
pytest.param(arg, expected, id=name)
for name, (arg, expected) in {
"empty-1": ("", {}),
"empty-2": (azaza("", bs=[str]), {}),
"v-0": ("xx", {"xx": [""]}),
"v-1": ("xx=", {"xx": [""]}),
"v-2": ("xx=&yy=", {"xx": [""], "yy": [""]}),
"v-3": ("xx=1&yy=2&yy=3", {"xx": ["1"], "yy": ["2", "3"]}),
"v-4": ("xx=xx&yy=yy&yy=yy", {"xx": ["xx"], "yy": ["yy", "yy"]}),
"v-5": ("xx=xx&yy=yy&yy=", {"xx": ["xx"], "yy": ["yy", ""]}),
}.items()
]
@pytest.mark.parametrize("arg,expected", happy_data)
def test_task_06_happy(arg: Any, expected: Any) -> None:
outcome = level_06(arg)
validate_data(outcome)
data = outcome["data"]
assert data == expected
unhappy_data = [
pytest.param(arg, id=name)
for name, arg in {
"invalid-type": azaza(),
}.items()
]
@pytest.mark.parametrize("arg", unhappy_data)
def test_task_06_unhappy(arg: Any) -> None:
outcome = level_06(arg)
validate_errors(outcome)
|
import json
import os
import sys
import time
from queue import Queue, Empty
from threading import Thread, Event
import tensorflow as tf
# Script used to evaluate the MaskRCNN ablation experiment
# Requires https://github.com/matterport/Mask_RCNN
if os.name == 'nt':
COCO_MODEL_PATH = os.path.join('D:/Skola/PhD/code/Mask_RCNN_v2', "mask_rcnn_coco.h5")
sys.path[0:0] = [os.path.join(sys.path[0], '../../Mask_RCNN_v2')]
else:
COCO_MODEL_PATH = os.path.join('/home/k/kocur15/code/Mask_RCNN', "mask_rcnn_coco.h5")
sys.path[0:0] = [os.path.join(sys.path[0], '../../Mask_RCNN')]
print(sys.path)
import coco as coco
import cv2
import numpy as np
import model as modellib
from dataset_utils.geometry import tangent_point_poly
from dataset_utils.tracker import Tracker
from dataset_utils.writer import Writer
from dataset_utils.warper import get_transform_matrix, get_transform_matrix_with_criterion
from dataset_utils.geometry import line, intersection, computeCameraCalibration
ROOT_DIR = os.getcwd()
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.0
def get_single_box_mask(image, M, vp, im_w, im_h):
image = cv2.warpPerspective(np.array(200 * image), M, (im_w, im_h), borderMode=cv2.BORDER_CONSTANT)
_, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if len(contours) == 0:
return None
cnt = contours[0]
x_min, y_min, w, h = cv2.boundingRect(cnt)
x_max = x_min + w
y_max = y_min + h
if x_max < vp[0]:
        # box on the left
cls = 1
elif x_min > vp[0]:
        # box on the right
cls = 3
else:
        # box in the middle
cls = 2
hull = cv2.convexHull(cnt)
V = [p[0].tolist() for p in hull]
rt, lt = tangent_point_poly(vp, V, im_h)
# image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
# image = cv2.line(image,tuple(rt),tuple(vp),(0,255,0))
# image = cv2.line(image,tuple(lt),tuple(vp),(0,0,255))
# image = cv2.rectangle(image,(x_min,y_min),(x_max,y_max),(255,0,0),2)
if cls == 1:
cy1 = intersection(line([x_min, y_min], [x_min, y_max]), line(vp, lt))
if vp[1] < 0:
cx = intersection(line([x_min, y_max], [x_max, y_max]), line(vp, rt))
cy2 = intersection(line(cx, [cx[0], cx[1] + 1]), line(vp, [x_max, y_min]))
else:
cx = intersection(line([x_min, y_min], [x_max, y_min]), line(vp, rt))
cy2 = intersection(line(cx, [cx[0], cx[1] + 1]), line(vp, [x_max, y_max]))
if cls == 3:
cy1 = intersection(line([x_max, y_min], [x_max, y_max]), line(vp, rt))
if vp[1] < 0:
cx = intersection(line([x_min, y_max], [x_max, y_max]), line(vp, lt))
cy2 = intersection(line(cx, [cx[0], cx[1] + 1]), line(vp, [x_min, y_min]))
else:
cx = intersection(line([x_min, y_min], [x_max, y_min]), line(vp, lt))
cy2 = intersection(line(cx, [cx[0], cx[1] + 1]), line(vp, [x_min, y_max]))
if cls == 2:
cy1 = intersection(line([x_max, y_min], [x_max, y_max]), line(vp, rt))
cy2 = intersection(line([x_min, y_min], [x_min, y_max]), line(vp, lt))
# image = cv2.circle(image,tuple(cy1),2,(0,255,0))
# image = cv2.circle(image,tuple(cy2),2,(0,0,255))
# cv2.imshow("Detects", image)
# cv2.waitKey(0)
cy = min(cy1[1], cy2[1])
centery = (cy - y_min) / (y_max - y_min)
if centery < 0:
centery = 0
elif centery > 1:
centery = 1
# cv2.imshow("Debug", image)
# cv2.waitKey(0)
box = np.array([cls, x_min, y_min, x_max, y_max, centery])
return box
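# Box layout produced above (summarised from the code): [cls, x_min, y_min,
# x_max, y_max, centery], where cls is 1/2/3 for a box left of / straddling /
# right of the transformed vanishing point, and centery is a normalised
# vertical position inside the box derived from the vanishing-point tangents,
# clipped to [0, 1].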
def get_boxes_mask(y_pred, M, vp1_t, im_w, im_h):
boxes = []
for idx in range(len(y_pred['class_ids'])):
if y_pred['class_ids'][idx] in [3]:
box = get_single_box_mask(y_pred['masks'][:, :, idx].astype(np.uint8), M, vp1_t, im_w, im_h)
if box is not None:
boxes.append(box)
return boxes
def test_video(model, video_path, json_path, im_w, im_h, batch, name, out_path=None, online=True):
with open(json_path, 'r+') as file:
# with open(os.path.join(os.path.dirname(json_path), 'system_retinanet_first.json'), 'r+') as file:
structure = json.load(file)
camera_calibration = structure['camera_calibration']
vp1, vp2, vp3, _, _, _ = computeCameraCalibration(camera_calibration["vp1"], camera_calibration["vp2"],
camera_calibration["pp"])
vp1 = vp1[:-1] / vp1[-1]
vp2 = vp2[:-1] / vp2[-1]
vp3 = vp3[:-1] / vp3[-1]
cap = cv2.VideoCapture(os.path.join(video_path, 'video.avi'))
mask = cv2.imread(os.path.join(video_path, 'video_mask.png'), 0)
M, IM = get_transform_matrix_with_criterion(vp3, vp2, mask, im_w, im_h)
vp1_t = np.array([vp1], dtype="float32")
vp1_t = np.array([vp1_t])
vp1_t = cv2.perspectiveTransform(vp1_t, M)
vp1_t = vp1_t[0][0]
ret, frame = cap.read()
if out_path is not None:
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
out = cv2.VideoWriter(out_path, fourcc, 25.0, (frame.shape[1], frame.shape[0]))
q_frames = Queue(10)
q_images = Queue(10)
q_predict = Queue(10)
e_stop = Event()
vid_name = os.path.basename(os.path.normpath(video_path))
def read():
while (cap.isOpened() and not e_stop.isSet()):
# read_time = time.time()
images = []
frames = []
for _ in range(batch):
ret, frame = cap.read()
if not ret:
cap.release()
                    break
frames.append(frame)
image = cv2.bitwise_and(frame, frame, mask=mask)
images.append(image)
# print("Read FPS: {}".format(batch / (time.time() - read_time)))
q_images.put(images)
q_frames.put(frames)
def read_offline():
while (cap.isOpened() and not e_stop.isSet()):
# read_time = time.time()
images = []
for _ in range(batch):
for _ in range(150):
_ = cap.read()
ret, frame = cap.read()
if not ret:
cap.release()
                    break
image = cv2.bitwise_and(frame, frame, mask=mask)
images.append(image[:, :, ::-1])
# print("Read FPS: {}".format(batch / (time.time() - read_time)))
q_images.put(images)
def inference():
while (not e_stop.isSet()):
try:
images = q_images.get(timeout=100)
except Empty:
break
gpu_time = time.time()
# cv2.imshow('t_frame', images[0])
# if cv2.waitKey(1) & 0xFF == ord('q'):
# e_stop.set()
# with graph.as_default():
y_pred = model.detect(images, verbose=0)
# cv2.imshow("Images", images[0])
# cv2.waitKey(1)
print(y_pred)
q_predict.put(y_pred)
print("GPU FPS: {}".format(batch / (time.time() - gpu_time)))
def postprocess():
tracker = Tracker(json_path, M, IM, vp1, vp2, vp3, im_w, im_h, name, pair='23', threshold=0.2)
counter = 0
total_time = time.time()
while not e_stop.isSet():
try:
y_pred = q_predict.get(timeout=100)
frames = q_frames.get(timeout=100)
except Empty:
tracker.write()
break
# post_time = time.time()
for i in range(len(frames)):
boxes = get_boxes_mask(y_pred[i], M, vp1_t, im_w, im_h)
image_b = tracker.process(boxes, frames[i])
if out_path is not None:
out.write(image_b)
cv2.imshow('frame', image_b)
counter += 1
cv2.imwrite('frames/frame_{}_{}_{}.png'.format(vid_name, name, counter), image_b)
if cv2.waitKey(1) & 0xFF == ord('q'):
e_stop.set()
# break
# print("Post FPS: {}".format(batch / (time.time() - post_time)))
# print("Total FPS: {}".format(batch / (time.time() - total_time)))
# total_time = time.time()
def postprocess_offline():
writer = Writer(json_path, name)
total_time = time.time()
frame_cnt = 1
while not e_stop.isSet():
try:
y_pred = q_predict.get(timeout=100)
except Empty:
writer.write()
break
for i in range(len(y_pred)):
boxes = get_boxes_mask(y_pred[i], M, vp1_t, im_w, im_h)
print(boxes)
writer.process(boxes)
frame_cnt += 1
# print("Total FPS: {}".format(batch / (time.time() - total_time)))
print("Video: {} at frame: {}, FPS: {}".format(vid_name, frame_cnt, frame_cnt / (time.time() - total_time)))
# total_time = time.time()
inferencer = Thread(target=inference)
if online:
reader = Thread(target=read)
postprocesser = Thread(target=postprocess)
else:
reader = Thread(target=read_offline)
postprocesser = Thread(target=postprocess_offline)
reader.start()
inferencer.start()
postprocesser.start()
reader.join()
inferencer.join()
postprocesser.join()
if out_path is not None:
out.release()
def track_detections(json_path, video_path, im_w, im_h, name, threshold):
print('Tracking: {} for t = {}'.format(name, threshold))
with open(json_path, 'r+') as file:
structure = json.load(file)
camera_calibration = structure['camera_calibration']
vp1, vp2, vp3, _, _, _ = computeCameraCalibration(camera_calibration["vp1"], camera_calibration["vp2"],
camera_calibration["pp"])
mask = cv2.imread(os.path.join(video_path, 'video_mask.png'), 0)
vp1 = vp1[:-1] / vp1[-1]
vp2 = vp2[:-1] / vp2[-1]
vp3 = vp3[:-1] / vp3[-1]
frame = np.zeros([1080, 1920])
M, IM = get_transform_matrix_with_criterion(vp3, vp2, mask, im_w, im_h)
tracker = Tracker(json_path, M, IM, vp1, vp2, vp3, im_w, im_h, name, threshold=threshold, pair='23')
tracker.read()
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = InferenceConfig()
config.display()
# from tensorflow.python.framework.ops import disable_eager_execution
# disable_eager_execution()
# model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# model.load_weights(COCO_MODEL_PATH, by_name=True)
if os.name == 'nt':
vid_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/dataset'
results_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/results/'
else:
vid_path = '/home/k/kocur15/data/2016-ITS-BrnoCompSpeed/dataset/'
results_path = '/home/k/kocur15/data/2016-ITS-BrnoCompSpeed/results/'
vid_list = []
calib_list = []
for i in range(4, 7):
dir_list = ['session{}_center'.format(i), 'session{}_left'.format(i), 'session{}_right'.format(i)]
vid_list.extend([os.path.join(vid_path, d) for d in dir_list])
calib_list.extend(
[os.path.join(results_path, d, 'system_SochorCVIU_Edgelets_BBScale_Reg.json') for d in dir_list])
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
name = 'mask_ablation'
config = InferenceConfig()
config.display()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(COCO_MODEL_PATH, by_name=True)
# global graph
# graph = tf.get_default_graph()
# model._make_predict_function()
for vid, calib in zip(vid_list, calib_list):
test_video(model, vid, calib, 640, 360, 1, name, out_path=None, online=False)
thresholds = [0.5]
for calib, vid in zip(calib_list, vid_list):
for threshold in thresholds:
track_detections(calib, vid, 640, 360, name, threshold)
|
import iterm2
import os
from datetime import datetime
FILE = "/tmp/.pomoout"
async def main(connection):
component = iterm2.StatusBarComponent(
short_description="Pomo Timer",
detailed_description="Displays the contents of your pomo timer.",
knobs=[],
exemplar="🔥25m",
update_cadence=10,
identifier="com.iterm2.pomo",
)
@iterm2.StatusBarRPC
async def coro(knobs):
        with open(FILE) as pomo_file:
            contents = pomo_file.read()
        return contents
# Register the component.
await component.async_register(connection, coro)
iterm2.run_forever(main)
|
import json
from pathlib import Path
def parse_config(parser, config_path, root=Path('config')):
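    # Load argparse defaults from a JSON file under `root`: nested objects are expanded as a group
    # of defaults, scalar entries are applied as individual key/value defaults.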
_config_path = root/config_path
with open(_config_path) as f:
configs = json.load(f)
        for k, v in configs.items():
            if isinstance(v, dict):
                parser.set_defaults(**v)
            else:
                parser.set_defaults(**{k: v})
return parser
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--train_ds')
# Parse saved configuration to arguments
parser = parse_config(parser, 'sample.json', root=Path('../../sample'))
args = parser.parse_args()
print(args)
|
from django_fortumo.settings.base import *
FORTUMO_SECRET = 'bad54c617b3a51230ac7cc3da398855e'
SECRET_KEY = '0_6iq7a%wez2ibbrt07#g&hj1v#pnt9)!^0t)sk3vy72)p%87@'
|
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import os
import dataFormat
import filterData as fd
import numpy as np
import keras
import wget
import zipfile
class Config():
def __init__(self, load=True):
if load:
url = 'http://nlp.stanford.edu/data/glove.6B.zip'
filename = wget.download(url)
if not os.path.exists('glove.6B'):
os.mkdir('glove.6B')
with zipfile.ZipFile(filename,'r') as zf:
zf.extractall('glove.6B')
self.load()
def load(self):
pre_data = fd.DataHandler()
D_format = dataFormat.DataFormat()
self.word_to_index, self.index_to_embed = pre_data.load_embedding_from_file(self.glove_name)
if not os.path.exists(self.source_file_path):
print("Generating data from corpus")
pre_data.set_tokens(self.orig_text_folder_path)
pre_data.write_tokens_to_file(self.source_file_path)
all_text = D_format.load_token_data(self.source_file_path)
tags = D_format.tags
words = D_format.uni_word
self.n_tags = len(tags)
self.n_words = len(words)
print('The number of unique words is {}'.format(self.n_words))
self.embedding_matrix = np.zeros((self.n_words, self.EMBEDDING))
        for i, aw in enumerate(words):
            try:
                idx = self.word_to_index[aw]
                self.embedding_matrix[i] = list(self.index_to_embed[idx])
            except KeyError:
                # word missing from the pre-trained vocabulary: report it and fall back to a random vector
                print(aw)
                self.embedding_matrix[i] = np.random.randn(self.EMBEDDING)
tag2idx = {t: i + 1 for i, t in enumerate(tags)}
tag2idx["PAD"] = 0
self.idx2tag = {i: w for w, i in tag2idx.items()}
print("word to idx finished")
self.X = [[self.word_to_index[w[0]] for w in s] for s in all_text]
self.X = pad_sequences(maxlen=self.MAX_LEN, sequences=self.X, padding='post', value=0)
self.Y = [[tag2idx[w[1]] for w in s] for s in all_text]
self.Y = pad_sequences(maxlen=self.MAX_LEN, sequences=self.Y, padding="post", value=tag2idx["PAD"])
self.Y = [to_categorical(i, num_classes=self.n_tags + 1) for i in self.Y]
self.X_tr, self.X_te, self.y_tr, self.y_te = train_test_split(self.X, self.Y, test_size=self.test_size)
BATCH_SIZE = 64 # Number of examples used in each iteration
EPOCHS = 100 # Number of passes through entire dataset
MAX_LEN = 50 # Max length of Sentence (in words)
EMBEDDING = 50 # Dimension of word embedding vector
orig_text_folder_path = 'tokenized'
glove_name = 'glove.6B/glove.6B.{}d.txt'.format(EMBEDDING)
source_file_path = 'token_text_with_label.txt'
model_save_path = 'final_model_0421-50.h5'
test_size = 0.1
|
from model.mapping.order import Order
from model.dao.dao import DAO
from model.dao.dao_error_handler import dao_error_handler
from model.mapping.product import Product
class OrderDAO(DAO):
"""
Order Mapping DAO
"""
# Get a specific order by its id
@dao_error_handler
def get(self, id):
return self._database_session.query(Order).filter_by(id=id).one()
# Get all orders ordered by their client
@dao_error_handler
def getAll(self):
return self._database_session.query(Order).order_by(Order.client).all()
# Get all orders from a specific client
@dao_error_handler
def getByClient(self, client: str):
return self._database_session.query(Order).filter_by(client=client).order_by(Order.id).all()
# Get all orders from a specific restaurant
@dao_error_handler
def getByRestaurant(self, restaurant: str):
return self._database_session.query(Order).filter_by(restaurant=restaurant).order_by(Order.id).all()
# Create an order
@dao_error_handler
def create(self, data: dict):
order = Order(client=data.get('client'), restaurant=data.get('restaurant'), address=data.get('address'), price=0.0)
self._database_session.add(order)
for product in data['products']:
product_obj = self._database_session.query(Product).filter_by(id=product['id']).one()
order.add_product(product_obj, self._database_session)
self._database_session.flush()
return order
####################################################################################################################################################
#####                                             Questionable operations                                                                         #####
#####                                 An order should not normally be updated or deleted                                                          #####
####################################################################################################################################################
# Update an order
@dao_error_handler
def update(self, order: Order, data: dict):
if 'restaurant' in data:
order.restaurant = data['restaurant']
if 'address' in data:
order.address = data['address']
if 'products' in data:
order.price = 0
order.products.clear()
for product in data['products']:
order.products.append(product)
order.price += product['price']
self._database_session.merge(order)
self._database_session.flush()
return order
# Delete an order
@dao_error_handler
def delete(self, entity):
self._database_session.delete(entity)
|
import pygame
import sys
import os
import ftb_functions as fft
from pygame.sprite import Sprite
class Player():
"""储存球员信息的类"""
def __init__(self,screen):
"""储存一些基本信息"""
self.screen = screen
        self.location = os.path.dirname(os.path.abspath(__file__)) + os.sep+'images'+os.sep+'player-s.png' # path to the player image
#print(self.location)
self.image = pygame.image.load(self.location)
        self.rect = self.image.get_rect() # get the player's rect
self.screen_rect = screen.get_rect()
        self.group = 1 # team this player belongs to
        # place the player centered horizontally at the top edge of the screen at the start
self.rect.centerx = self.screen_rect.centerx
#self.rect.bottom = self.screen_rect.bottom
self.rect.centery = self.screen_rect.top
        # store the position as floats so fractional movement is possible
self.img_centerx = float(self.rect.centerx)
self.img_centery = float(self.rect.centery)
self.img_x = float(self.rect.x)
self.img_y = float(self.rect.y)
self.moving = False
def mov(self,allplyr,allplyr2,number):
"""让球员随着鼠标移动"""
self.position = pygame.mouse.get_pos()
self.text_intersect = False
        for i in range(0,11): # check for overlap with team-1 players
if i==number and self.group == 1:
continue
else:
if(self.position[0]>=allplyr[i].img_x and \
self.position[0]<=allplyr[i].img_x+allplyr[i].rect.w\
and self.position[1]>=allplyr[i].img_y and \
self.position[1]<=allplyr[i].img_y+allplyr[i].rect.h):
self.text_intersect = True
break
        for i in range(0,11): # check for overlap with team-2 players
if i==number and self.group == 2:
continue
else:
if(self.position[0]>=allplyr2[i].img_x and \
self.position[0]<=allplyr2[i].img_x+allplyr2[i].rect.w\
and self.position[1]>=allplyr2[i].img_y and \
self.position[1]<=allplyr2[i].img_y+allplyr2[i].rect.h):
self.text_intersect = True
break
if(self.position[0]>=self.img_x and self.position[0]<=self.img_x+self.rect.w\
and self.position[1]>=self.img_y and self.position[1]<=self.img_y+self.rect.h\
            and self.text_intersect==False): # move the player to the pointer position
self.img_centerx = self.position[0]
self.img_centery = self.position[1]
self.rect.centerx = self.img_centerx
self.rect.centery = self.img_centery
self.img_x = self.rect.x
self.img_y = self.rect.y
def blitme(self):
"""绘制球员"""
self.screen.blit(self.image,self.rect)
class Player2(Player):
"""储存对方球员信息的类"""
def __init__(self,screen):
"""储存一些基本信息"""
self.screen = screen
        self.location = os.path.dirname(os.path.abspath(__file__)) + os.sep+'images'+os.sep+'player-1-s.png' # path to the player image
#print(self.location)
self.image = pygame.image.load(self.location)
        self.rect = self.image.get_rect() # get the player's rect
self.screen_rect = screen.get_rect()
        self.group = 2 # team this player belongs to
        # place the player centered horizontally at the top edge of the screen at the start
self.rect.centerx = self.screen_rect.centerx
#self.rect.bottom = self.screen_rect.bottom
self.rect.centery = self.screen_rect.top
        # store the position as floats so fractional movement is possible
self.img_centerx = float(self.rect.centerx)
self.img_centery = float(self.rect.centery)
self.img_x = float(self.rect.x)
self.img_y = float(self.rect.y)
self.moving = False
|
from datetime import datetime
from bamboo.lib.datetools import recognize_dates
from bamboo.lib.schema_builder import DATETIME, SIMPLETYPE, Schema
from bamboo.tests.test_base import TestBase
class TestDatetools(TestBase):
def setUp(self):
TestBase.setUp(self)
self.dframe = self.get_data('good_eats.csv')
def test_recognize_dates(self):
dframe = self.get_data('soil_samples.csv')
with_dates = recognize_dates(dframe)
for field in with_dates['single_letter']:
self.assertTrue(isinstance(field, basestring))
def test_recognize_dates_as_dates(self):
df_with_dates = recognize_dates(self.dframe)
for field in df_with_dates['submit_date']:
self.assertTrue(isinstance(field, datetime))
def test_recognize_dates_from_schema(self):
schema = Schema({
'submit_date': {
SIMPLETYPE: DATETIME
}
})
df_with_dates = recognize_dates(self.dframe, schema)
for field in df_with_dates['submit_date']:
self.assertTrue(isinstance(field, datetime))
|
class Solution(object):
def findSecretWord(self, wordlist, master):
def pair_matches(a, b): # count the number of matching characters
return sum(c1 == c2 for c1, c2 in zip(a, b))
def most_overlap_word():
counts = [[0 for _ in range(26)] for _ in range(6)] # counts[i][j] is nb of words with char j at index i
for word in candidates:
for i, c in enumerate(word):
counts[i][ord(c) - ord("a")] += 1
best_score = 0
for word in candidates:
score = 0
for i, c in enumerate(word):
score += counts[i][ord(c) - ord("a")] # all words with same chars in same positions
if score > best_score:
best_score = score
best_word = word
return best_word
candidates = wordlist[:] # all remaining candidates, initially all words
while candidates:
s = most_overlap_word() # guess the word that overlaps with most others
matches = master.guess(s)
if matches == 6:
return
candidates = [w for w in candidates if pair_matches(s, w) == matches] # filter words with same matches
|
s = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz_{}"
def _l(idx, s):
return s[idx:] + s[:idx]
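# Double-keyed additive cipher over the alphabet `s`: each output character is
# s[(pos(char) + pos(k1[i1]) + pos(k2[i2])) % len(s)], i.e. a Vigenere variant with two keys.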
def decrypt(ct, k1, k2):
s = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz_{}"
t = [[_l((i + j) % len(s), s) for j in range(len(s))] for i in range(len(s))]
i1 = 0
i2 = 0
decrypted = ""
for a in ct:
for c in s:
if t[s.find(c)][s.find(k1[i1])][s.find(k2[i2])] == a:
decrypted += c
break
i1 = (i1 + 1) % len(k1)
i2 = (i2 + 1) % len(k2)
return decrypted
def encrypt(p, k1, k2):
t = [[_l((i + j) % len(s), s) for j in range(len(s))] for i in range(len(s))]
i1 = 0
i2 = 0
c = ""
for a in p:
c += t[s.find(a)][s.find(k1[i1])][s.find(k2[i2])]
i1 = (i1 + 1) % len(k1)
i2 = (i2 + 1) % len(k2)
return c
def recover_key(known_prefix, ciphertext):
final_key = ['*'] * 14
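    # The same key is used forwards (k1) and reversed (k2), so ciphertext position `pos` only
    # depends on the sum of the alphabet positions of key[pos] and key[13 - pos]; any single
    # character placed in both slots that reproduces the known-prefix ciphertext is an
    # equivalent key for decryption.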
for pos in range(7):
for c in s:
partial_candidate_key = ['*'] * 14
partial_candidate_key[pos] = c
partial_candidate_key[13 - pos] = c
key = "".join(partial_candidate_key)
res = encrypt(known_prefix, key, key[::-1])
            if res[pos] == ciphertext[pos]:
final_key[pos] = c
final_key[13 - pos] = c
print "".join(final_key)
return "".join(final_key)
def main():
ciphertext = "POR4dnyTLHBfwbxAAZhe}}ocZR3Cxcftw9"
key = recover_key("SECCON{", ciphertext)
flag = decrypt(ciphertext, key, key[::-1])
print(flag)
main()
|
import re
from os import listdir
from os.path import join
from django.test import TestCase
from funfactory.manage import path
class MigrationTests(TestCase):
"""Sanity checks for the SQL migration scripts."""
@staticmethod
def _migrations_path():
"""Return the absolute path to the migration script folder."""
return path('migrations')
def test_unique(self):
"""Assert that the numeric prefixes of the DB migrations are unique."""
leading_digits = re.compile(r'^\d+')
seen_numbers = set()
path = self._migrations_path()
for filename in listdir(path):
match = leading_digits.match(filename)
if match:
number = match.group()
if number in seen_numbers:
self.fail('There is more than one migration #%s in %s.' %
(number, path))
seen_numbers.add(number)
def test_innodb_and_utf8(self):
"""Make sure each created table uses the InnoDB engine and UTF-8."""
# Heuristic: make sure there are at least as many "ENGINE=InnoDB"s as
# "CREATE TABLE"s. (There might be additional "InnoDB"s in ALTER TABLE
# statements, which are fine.)
path = self._migrations_path()
for filename in sorted(listdir(path)):
with open(join(path, filename)) as f:
contents = f.read()
creates = contents.count('CREATE TABLE')
engines = contents.count('ENGINE=InnoDB')
encodings = (contents.count('CHARSET=utf8') +
contents.count('CHARACTER SET utf8'))
assert engines >= creates, ("There weren't as many "
'occurrences of "ENGINE=InnoDB" as of "CREATE TABLE" in '
'migration %s.' % filename)
assert encodings >= creates, ("There weren't as many "
'UTF-8 declarations as "CREATE TABLE" occurrences in '
'migration %s.' % filename)
|
# this is auto-generated by swagger-marshmallow-codegen
from __future__ import annotations
from marshmallow import (
Schema,
fields,
INCLUDE,
)
import re
from marshmallow.validate import Regexp
from ._lazy import _usePet
class Owner(Schema):
id = fields.String(required=True, description='ObjectId', validate=[Regexp(regex=re.compile('[0-9a-f]{24}'))])
name = fields.String(required=True)
pets = fields.List(fields.Nested(_usePet))
class Meta:
unknown = INCLUDE
|
class MinStack:
def __init__(self):
self.A = []
self.M = []
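        # M mirrors A but stores the running minimum, so getMin() is O(1).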
def push(self, x):
self.A.append(x)
M = self.M
M.append( x if not M else min(x,M[-1]) )
def pop(self):
self.A.pop()
self.M.pop()
def top(self):
return self.A[-1]
def getMin(self):
return self.M[-1]
|
# Generated by Django 3.0.8 on 2020-07-18 05:54
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
import users.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('phone_number', models.CharField(max_length=11, unique=True, validators=[django.core.validators.RegexValidator(message='شماره تلفن خود را به صورت 11 رقمی مثل 09123334455 وارد کنید', regex='^(09)\\d{9}$')], verbose_name='phone')),
('full_name', models.CharField(blank=True, max_length=130, verbose_name='full name')),
('city', models.CharField(max_length=100, verbose_name='city name')),
('email', models.EmailField(max_length=70)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('phone_number_verified', models.BooleanField(default=False)),
('change_pw', models.BooleanField(default=True)),
('verification_code', models.CharField(default='535485', max_length=6, verbose_name='verification code')),
('last_sent_vcode_time', models.DateTimeField(default=django.utils.timezone.now)),
('is_staff', models.BooleanField(default=False, verbose_name='is_staff')),
('is_active', models.BooleanField(default=True, verbose_name='is_active')),
('is_admin', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'ordering': ('phone_number',),
},
managers=[
('objects', users.manager.UserManager()),
],
),
]
|
STATUS = {}
|
from keras.models import Sequential
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Lambda
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.merge import concatenate
def simple_CNN(n_timesteps, n_features):
''' A simple CNN model, should probably add some dropout layers. '''
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=32, kernel_size=5, activation='relu', input_shape=(n_timesteps,n_features)))
model.add(Conv1D(filters=32, kernel_size=5, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(1))
return model, 1
def complex_CNN(n_timesteps, n_features):
''' A three-way CNN with different kernel sizes for different levels of generalisation. '''
# head 1
inputs1 = Input(shape=(n_timesteps,n_features))
conv1 = Conv1D(filters=64, kernel_size=3, activation='relu')(inputs1)
conv12 = Conv1D(filters=64, kernel_size=3, activation='relu')(conv1)
pool1 = MaxPooling1D(pool_size=2)(conv12)
conv13 = Conv1D(filters=64, kernel_size=3, activation='relu')(pool1)
conv14 = Conv1D(filters=64, kernel_size=3, activation='relu')(conv13)
pool1 = MaxPooling1D(pool_size=2)(conv14)
flat1 = Flatten()(pool1)
# head 2
inputs2 = Input(shape=(n_timesteps,n_features))
conv2 = Conv1D(filters=64, kernel_size=5, activation='relu')(inputs2)
conv22 = Conv1D(filters=64, kernel_size=5, activation='relu')(conv2)
pool2 = MaxPooling1D(pool_size=2)(conv22)
conv23 = Conv1D(filters=64, kernel_size=5, activation='relu')(pool2)
conv24 = Conv1D(filters=64, kernel_size=5, activation='relu')(conv23)
pool2 = MaxPooling1D(pool_size=2)(conv24)
flat2 = Flatten()(pool2)
# head 3
inputs3 = Input(shape=(n_timesteps,n_features))
conv3 = Conv1D(filters=64, kernel_size=11, activation='relu')(inputs3)
conv32 = Conv1D(filters=64, kernel_size=11, activation='relu')(conv3)
pool3 = MaxPooling1D(pool_size=2)(conv32)
conv33 = Conv1D(filters=64, kernel_size=11, activation='relu')(pool3)
conv34 = Conv1D(filters=64, kernel_size=11, activation='relu')(conv33)
pool3 = MaxPooling1D(pool_size=2)(conv34)
flat3 = Flatten()(pool3)
# merge
merged = concatenate([flat1, flat2, flat3])
# interpretation
dense1 = Dense(100)(merged)
dense2 = Dense(1)(dense1)
outputs = LeakyReLU(alpha=0.25)(dense2)
model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
return model, 3
def simple_CNN_double_output(n_timesteps, n_features):
''' Last layer has two nodes, for when both DYS and CA have to be predicted. '''
model = Sequential()
model.add(Conv1D(filters=32, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(2))
return model, 1
def simple_LSTM(n_timesteps, n_features):
''' A simple LSTM model, no good results yet. '''
model = Sequential()
model.add(LSTM(100, input_shape=(n_timesteps,n_features)))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dense(1))
return model, 1
def complex_LSTM(n_timesteps, n_features):
''' Multiple LSTM layers, better results. '''
model = Sequential()
model.add(LSTM(100, input_shape=(n_timesteps,n_features), return_sequences=True))
model.add(LSTM(100, return_sequences=True))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100, return_sequences=True))
model.add(LSTM(100, return_sequences=True))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(1))
return model, 1
|
from django.contrib import admin
from daterange_filter.filter import DateRangeFilter
class Inline(admin.TabularInline):
extra = 0
ordering = ['-created_date', 'name',]
readonly_fields = ['created_date', 'last_update_date',]
class ModelAdmin(admin.ModelAdmin):
list_display = ('name', 'member', 'created_date', 'last_update_date',)
list_filter = (('created_date', DateRangeFilter),)
search_fields = ('member__user__username', 'member__user__first_name',
'member__user__last_name', 'name',)
raw_id_fields = ('member',)
def has_delete_permission(self, request, obj=None):
return False
|
from django.http import JsonResponse
from django.db.models import Avg, Count, Func
from ..models import Movie, Rating, Comment
def new_movie(request):
    if request.method != 'POST':
        return JsonResponse({
            'status': 'fail',
            'data': {'message': 'Only POST requests are allowed'}
        }, status=405)
# get movie id and title
id = request.POST.get('id', '')
title = request.POST.get('title', '')
# save new movie
m = Movie(source_id = id, title = title)
try:
m.save()
except Exception as e:
return JsonResponse({
'status': 'fail',
'data': {
'message': str(e) if type(e) == ValueError else 'Error while saving movie'
}
}, status=500)
return JsonResponse({
'status': 'success',
'data': {
'title': m.title
}
})
def movie_details(request, movie_id):
    if request.method != 'GET':
        return JsonResponse({
            'status': 'fail',
            'data': {'message': 'Only GET requests are allowed'}
        }, status=405)
# get movie
try:
m = Movie.objects.get(source_id=movie_id)
except Movie.DoesNotExist:
return JsonResponse({
'status': 'success',
'data': {
'rating': {
'avg': None,
'comments': None
}
}
})
# get rating
r = Rating.objects.filter(movie=m)\
.values('rating')\
.aggregate(
avg_rating=Avg('rating'),
rating_count=Count('rating')
)
avg_rating = r['avg_rating']
rating_count = r['rating_count']
# get comments
c = Comment.objects.filter(movie=m).values('body', 'username')
return JsonResponse({
'status': 'success',
'data': {
'rating': {
'avg': '{:.1f}'.format(avg_rating) if avg_rating is not None else None,
'count': rating_count
},
'comments': list(c)
}
})
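# SQL ROUND(expr, 1) expression used to round the average rating to one decimal place.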
class Round(Func):
function = 'ROUND'
template='%(function)s(%(expressions)s, 1)'
def movies_summary(request):
    if request.method != 'GET':
        return JsonResponse({
            'status': 'fail',
            'data': {'message': 'Only GET requests are allowed'}
        }, status=405)
# get all requested movie ids
movie_ids = request.GET.get('ids', '').split(',')
m = Movie.objects.filter(source_id__in=movie_ids).annotate(
avg_rating=Round(Avg('rating__rating')), # avg on rating column of rating table
comment_count=Count('comment', distinct=True)
).values()
movies = {}
for movie in list(m):
movies[movie.get('source_id')] = movie
return JsonResponse({
'status': 'success',
'data': {
'movies': movies
}
})
|
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.tools.graph_transforms import TransformGraph
import argparse
def load_graph(checkpoint_path, mb, seq_len):
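    # Restore the checkpoint into a fresh graph, then strip training-only nodes so the graph
    # can be frozen with fixed minibatch / sequence-length input shapes.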
init_all_op = tf.initialize_all_variables()
graph2 = tf.Graph()
with graph2.as_default():
with tf.Session(graph=graph2) as sess:
saver = tf.train.import_meta_graph(checkpoint_path + '.meta')
saver.restore(sess, checkpoint_path)
print("Restored structure...")
saver.restore(sess, checkpoint_path)
print("Restored params...")
'''
# input_names = ["IteratorGetNext"]
input_names = ["IteratorGetNext:0", "IteratorGetNext:1", "IteratorGetNext:4"]
output_names = ["loss/LogSoftmax"]
transforms = ['strip_unused_nodes(type=int32, shape="4,128")']
graph2 = TransformGraph(graph2.as_graph_def(), input_names, output_names, transforms)
# graph2 = TransformGraph(graph2, input_names, output_names, transforms)
# graph2 = tf.graph_util.remove_training_nodes(input_graph=graph2.as_graph_def())
graph2 = tf.graph_util.remove_training_nodes(input_graph=graph2)
'''
'''
input_names = ["IteratorGetNext:0", "IteratorGetNext:1", "IteratorGetNext:4"]
output_names = ["loss/LogSoftmax"]
transforms = ['strip_unused_nodes(type=int32, shape="4,128")']
# graph2 = TransformGraph(graph2.as_graph_def(), input_names, output_names, transforms)
graph2 = TransformGraph(graph2.as_graph_def(), inputs=input_names, outputs=output_names, transforms=transforms)
'''
'''
#2019-02-27 00:36:31.079753: I tensorflow/tools/graph_transforms/transform_graph.cc:317] Applying strip_unused_nodes
# terminate called after throwing an instance of 'std::out_of_range'
# what(): map::at
# Aborted
input_names = ["IteratorV2"] #Same result with "IteratorV2:0"
output_names = ["loss/LogSoftmax"]
transforms = ['strip_unused_nodes(type=resource)']
graph2 = TransformGraph(graph2.as_graph_def(), inputs=input_names, outputs=output_names, transforms=transforms)
'''
# input_names = ["IteratorGetNext", "IteratorGetNext:1", "IteratorGetNext:4"]
input_names = []
# output_names = ["loss/LogSoftmax"]
output_names = ["loss/Softmax"]
transforms = ['strip_unused_nodes(type=int32, shape="' + str(mb) + ',' + str(seq_len) + '")']
# graph2 = TransformGraph(graph2.as_graph_def(), input_names, output_names, transforms)
graph2 = TransformGraph(graph2.as_graph_def(), inputs=input_names, outputs=output_names, transforms=transforms)
# for op in graph2.get_operations():
# print(op.name)
return graph2
parser = argparse.ArgumentParser(description='Freeze BERT model')
parser.add_argument('--minibatch', help='Minibatch size', default=4)
parser.add_argument('--seq_length', help='Sequence length', default=128)
parser.add_argument('--input_dir', help='Input directory for model', default="/TF_Graphs/mrpc_output/")
parser.add_argument('--ckpt', help='Checkpoint filename in input dir', default="model.ckpt-2751")
args = parser.parse_args()
mb = int(args.minibatch)
seq_len = int(args.seq_length)
print("minibatch: ", mb)
print("seq_length: ", seq_len)
print("input_dir: ", args.input_dir)
print("checkpoint: ", args.ckpt)
dirIn = args.input_dir
dirOut = dirIn + "frozen/"
ckpt = args.ckpt
graph = load_graph(dirIn + ckpt, mb, seq_len)
txtName = "bert_export_mb" + str(mb) + "_len" + str(seq_len) + ".pb.txt"
txtPath = dirOut + txtName
tf.train.write_graph(graph, dirOut, txtName, True)
output_graph = dirOut + "bert_frozen_mb" + str(mb) + "_len" + str(seq_len) + ".pb"
print("Freezing Graph...")
freeze_graph.freeze_graph(
input_graph=txtPath,
input_checkpoint=dirIn+ckpt,
input_saver="",
output_graph=output_graph,
input_binary=False,
# output_node_names="loss/LogSoftmax", #This is log(prob(x))
output_node_names="loss/Softmax", #This is log(prob(x))
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
clear_devices=True,
initializer_nodes="")
print("Freezing graph complete...")
|
Note: the data item is a tuple.
1) "Domain DS"
For calculating the Domain:
list(index: data_item) -> set(possible_values)
    Domain - -> For each data_item -> the set of possible values
2) "Source List"
For calculating alpha and p(C_wdv=1):
2d-List:
list - -> DataItems/Sources as index -> set(values)
list[Source][Data_item]=set(value)
This removes the extractor and only includes each value once.
3) "p(V_d | X_d) List"
For storing the p(V_d=v | X_d):
list - -> data_item as index - -> map(value): p(V_d=v | X_d)
4) "C_wdv List"
For storing C_wdv
2d-List:
# In notes, something about "List of p(C_wdv = 1)"?
list - -> DataItems/Sources as index -> map(value) -> p(C_wdv=1)
list[Source][Data_item][Value]=p(C_wdv=1)
Extends the source list to include the probability of C_wdv
5) "Accuracy List"
Storing A_w - - 2D list
list[source][A_w value] # Check this one - "Web Source 2D List"
6) "Alpha List"
Storing the alpha value
2D list - -> Web Source - -> Data item - -> Map(value)=alpha value
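
A minimal Python sketch of these containers (sizes and names below are illustrative
assumptions, not taken from the notes):

    num_sources, num_data_items = 3, 5  # example sizes
    # 1) Domain DS: data_item index -> set of possible values
    domain_ds = [set() for _ in range(num_data_items)]
    # 2) Source List: [source][data_item] -> set of values that source provides
    source_list = [[set() for _ in range(num_data_items)] for _ in range(num_sources)]
    # 3) "p(V_d | X_d) List": [data_item] -> {value: p(V_d = v | X_d)}
    value_prob = [dict() for _ in range(num_data_items)]
    # 4) C_wdv List: [source][data_item] -> {value: p(C_wdv = 1)}
    c_wdv = [[dict() for _ in range(num_data_items)] for _ in range(num_sources)]
    # 5) Accuracy List: [source] -> A_w (kept one-dimensional here for simplicity)
    accuracy = [0.0 for _ in range(num_sources)]
    # 6) Alpha List: [source][data_item] -> {value: alpha}
    alpha = [[dict() for _ in range(num_data_items)] for _ in range(num_sources)]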
|
# -*- coding: utf-8 -*-
"""Function to increase the tracking column in a workflow."""
from typing import Dict, Optional
from django.contrib.auth import get_user_model
from django.core import signing
from django.utils.translation import ugettext
from ontask import models
from ontask.dataops import sql
class ExecuteIncreaseTrackCount:
"""Process the increase track count in a workflow."""
def __init__(self):
"""Assign default fields."""
super().__init__()
self.log_event = models.Log.ACTION_EMAIL_READ
def execute_operation(
self,
user,
workflow: Optional[models.Workflow] = None,
action: Optional[models.Action] = None,
payload: Optional[Dict] = None,
log_item: Optional[models.Log] = None,
):
"""Process track requests asynchronously.
:param user: User object
:param workflow: Optional workflow object
:param action: Optional action object
:param payload: has fields method and get_dict with the request
method and the get dictionary.
:param log_item: Optional logitem object.
:return: Nothing
"""
del user, workflow, action, log_item
method = payload.get('method')
if method != 'GET':
# Only GET requests are accepted
raise Exception(ugettext('Non-GET request received in Track URL'))
get_dict = payload.get('get_dict')
if get_dict is None:
raise Exception(ugettext('No dictionary in Track URL'))
# Obtain the track_id from the request
track_id = get_dict.get('v')
if not track_id:
raise Exception(ugettext('No track_id found in request'))
# If the track_id is not correctly signed, finish.
try:
track_id = signing.loads(track_id)
except signing.BadSignature:
raise Exception(ugettext('Bad signature in track_id'))
# The request is legit and the value has been verified. Track_id has
# now the dictionary with the tracking information
# Get the objects related to the ping
user = get_user_model().objects.filter(
email=track_id['sender']).first()
if not user:
            raise Exception(
                ugettext('Incorrect user email %s') % track_id['sender'])
action = models.Action.objects.filter(pk=track_id['action']).first()
if not action:
            raise Exception(
                ugettext('Incorrect action id %s') % track_id['action'])
# Extract the relevant fields from the track_id
column_dst = track_id.get('column_dst', '')
column_to = track_id.get('column_to', '')
msg_to = track_id.get('to', '')
column = action.workflow.columns.filter(name=column_dst).first()
if not column:
# If the column does not exist, we are done
            raise Exception(ugettext('Column %s does not exist') % column_dst)
log_payload = {
'to': msg_to,
'email_column': column_to,
'column_dst': column_dst}
# If the track comes with column_dst, the event needs to be reflected
# back in the data frame
if column_dst:
try:
# Increase the relevant cell by one
sql.increase_row_integer(
action.workflow.get_data_frame_table_name(),
column_dst,
column_to,
msg_to)
except Exception as exc:
log_payload['EXCEPTION_MSG'] = str(exc)
else:
# Get the tracking column and update all the conditions in the
# actions that have this column as part of their formulas
# FIX: Too aggressive?
track_col = action.workflow.columns.get(name=column_dst)
for action in action.workflow.actions.all():
action.update_n_rows_selected(track_col)
# Record the event
action.log(user, self.log_event, **log_payload)
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, division
import onnx
import pytest
from onnx.helper import make_node, make_graph, make_tensor_value_info
from ngraph.frontends.onnx.onnx_importer.model_wrappers import NodeWrapper, GraphWrapper
from ngraph.frontends.onnx.onnx_importer.utils.conv import get_pads
def test_get_pads():
def wrap_node(node):
graph = make_graph([node], 'test_graph',
[make_tensor_value_info('X', onnx.TensorProto.FLOAT, (1, 1, 1, 1)),
make_tensor_value_info('Y', onnx.TensorProto.FLOAT, (1, 1, 1, 1))],
[make_tensor_value_info('Z', onnx.TensorProto.FLOAT, ())])
return NodeWrapper(node, GraphWrapper(graph))
node = wrap_node(make_node('Conv', ['X', 'Y'], ['Z'], pads=(1, 2, 3, 1, 2, 3)))
assert get_pads(node) == (1, 2, 3)
with pytest.raises(NotImplementedError):
node = wrap_node(make_node('Conv', ['X', 'Y'], ['Z'], pads=(1, 1, 2, 4)))
assert get_pads(node) == (1, 1, 0)
node = wrap_node(make_node('Conv', ['X', 'Y'], ['Z'], auto_pad='VALID', kernel_shape=(5, 5)))
assert get_pads(node) == (0, 0, 0)
node = wrap_node(make_node('Conv', ['X', 'Y'], ['Z'],
auto_pad='SAME_UPPER', kernel_shape=(5, 5)))
assert get_pads(node) == (2, 2, 0)
node = wrap_node(make_node('Conv', ['X', 'Y'], ['Z'],
auto_pad='SAME_UPPER', kernel_shape=(7, 7, 7)))
assert get_pads(node) == (3, 3, 3)
with pytest.raises(NotImplementedError):
node = wrap_node(make_node('Conv', ['X', 'Y'], ['Z'],
auto_pad='SAME_UPPER', kernel_shape=(6, 6)))
assert get_pads(node) == (2, 2, 0)
|
"""
Simulate strategic voters for point voter model.
Types
-----
Compromising
************
Voter insincerely ranks/rates an alternative higher in the hope of
getting it elected.
1. Run honest election, determine top two candidates.
2. Throw maximum support behind preferred top candidate.
Burying
**********
Insincerely rank/rate an alternative lower in hopes of defeating it.
1. Run honest election, determine top two candidates.
2. Throw minimum support behind unpreferred top candidate.
Bullet Voting / Truncation
******************************
Voter shows no support for less-preferred candidates
Min-Maxing
***********
Voter gives maximal support to some candidates
Compromising-Compression
***************************
Insincerely give two candidates equal ranking or equal rating.
Compromise-reversal
*********************
Insincerely reverse the order of two candidates on the ballot
"""
# -*- coding: utf-8 -*-
import copy
import itertools
import numpy as np
import sympy
from sympy.utilities.iterables import multiset_permutations
import votesim
from votesim.models.spatial.base import Voters, Candidates, Election, _RandomState
from votesim import utilities
STRATEGIC_BASE_SEED = 5
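# A minimal sketch of the "compromising" strategy described in the module docstring,
# assuming score ballots shaped (n_voters, n_candidates) and per-voter preference orders
# given as lists of candidate indices; the helper name is illustrative, not part of votesim.
def _compromising_scores_sketch(scores, honest_top_two, preference_orders, scoremax):
    """Give maximal support to whichever of the two honest front-runners each voter prefers."""
    strategic = np.array(scores, dtype=float)
    a, b = honest_top_two
    for i, prefs in enumerate(preference_orders):
        # the front-runner this voter ranks higher receives the maximum score
        preferred = a if prefs.index(a) < prefs.index(b) else b
        strategic[i, preferred] = scoremax
    return strategic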
def all_ranks(cnum):
"""Construct all possible permutations of rank ballosts, including
zeroing out candidates"""
a = np.arange(1, cnum+1)
new = []
for p in multiset_permutations(a):
new.append(p)
return np.array(new)
def all_scores(cnum, maxscore):
a = np.arange(0, maxscore+1)
iter1 = itertools.product(a, repeat=cnum)
new = [i for i in iter1]
return np.array(new)
def random_scores(cnum, scoremax, num=1000, rs=None):
"""Generate random scored ballots
"""
if rs is None:
rs = np.random.RandomState(None)
d = rs.rand(num, cnum) * 2 - 1.
d[d < 0] = 0
dimax = np.max(d, axis=1)
dimax[dimax == 0] = 1
d = d / dimax[:, None]
d = d * scoremax
d = np.round(d)
zballot = np.zeros((1, cnum))
d = np.vstack((d, zballot))
return d
def all_score_minmax(cnum, scoremax):
    """All min-max score ballots: every candidate receives either 0 or the maximum score."""
    a = np.array([0, scoremax])
    iter1 = itertools.product(a, repeat=cnum)
    new = [i for i in iter1]
    return np.array(new)
def random_ranks(cnum, num=1000, rs=None, allow_zeros=True):
"""Generate random ranked ballots
"""
if rs is None:
rs = np.random.RandomState(None)
distances = rs.rand(num, cnum)
ranks = np.argsort(distances, axis=1) + 1
if allow_zeros:
threshold = rs.rand(num, 1) * cnum + 1
ranks[ranks > threshold] = 0
zballot = np.zeros((1, cnum))
ranks = np.vstack((ranks, zballot))
return ranks
def all_votes(cnum):
"""Generate every possible combination of ballots for single-mark ballot"""
ballots = np.zeros((cnum + 1, cnum), dtype=int)
for i in range(cnum):
ballots[i, i] = 1
return ballots
class StrategicElection(object):
def __init__(self, election:Election, etype, seed=None):
self.etype = etype
self.election = election
self._election_honest = copy.deepcopy(election)
self.init(seed=seed, etype=etype)
return
@property
def election_honest(self):
return self._election_honest
@utilities.recorder.record_actions(replace=True)
def init(self, seed, etype):
"""Initialize some election properties"""
self._set_seed(seed)
self.election_honest.run(etype)
self.btype = self.election_honest.btype
return
def _set_seed(self, seed):
""" Set pseudorandom seed """
if seed is None:
self._seed = None
self._randomstate = _RandomState(None)
else:
self._seed = (seed, STRATEGIC_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return
@utilities.lazy_property
def _voters_unique(self):
u, inv, c = np.unique(self.election.voters.voters,
axis=0,
return_inverse=True,
return_counts=True)
return u, inv, c
@property
def voters_unique(self):
"""Retrieve unique voter coordinates"""
return self._voters_unique[0]
@utilities.lazy_property
def group_indices(self):
"""
list of int arrays shaped (a,)
Index locations of each group
"""
unique_num = len(self.voters_unique)
unique_locs = self._voters_unique[1]
locations = []
for i in range(unique_num):
locs = np.where(unique_locs == i)[0]
locations.append(locs)
return locations
@property
def voters_unique_num(self):
"""Retrieve number of voters per unique voter group"""
return self._voters_unique[2]
@utilities.lazy_property
def ballot_combos(self):
"""Generate combinations of potential ballots"""
e = self.election
cnum = self.election.candidates.candidates.shape[0]
bnum = 1000
btype = e.btype
scoremax = e.scoremax
rs = self._randomstate
if btype == 'rank':
stratballots = random_ranks(cnum, num=bnum, rs=rs)
elif btype == 'score':
stratballots1 = random_scores(cnum,
num=bnum,
scoremax=scoremax,
rs=rs)
stratballots2 = all_score_minmax(cnum, scoremax=scoremax)
stratballots = np.vstack((stratballots1, stratballots2))
elif btype == 'vote':
stratballots = all_votes(cnum)
return stratballots
@utilities.lazy_property
def ballot_combos_num(self):
"""Number of random ballot combinations generated for this election"""
return len(self.ballot_combos)
@utilities.lazy_property
def honest_ballots(self):
"""Save honest ballots from the election here"""
return self.election_honest.ballots.copy()
@utilities.lazy_property
def honest_regrets(self):
"""Save honest voter regrets for each voter group"""
e = self.election_honest
w = e.winners
regrets = []
for group, gindex in enumerate(self.group_indices):
g_regret = e.voters.distances[gindex[0], w]
regrets.append(g_regret)
return np.array(regrets)
def get_regrets(self):
"""Retrieve voter regrets for each voter group"""
e = self.election_honest
w = e.winners
regrets = []
for group, gindex in enumerate(self.group_indices):
g_regret = e.voters.distances[gindex[0], w]
regrets.append(g_regret)
return np.array(regrets)
def group_indices_strat(self, group, strat_ratio=1):
"""Retrieve ballot indexes for a voter group"""
vindex = self.group_indices[group]
imin = vindex.min()
imax = vindex.max()
inum = (imax - imin + 1)
imax2 = imin + int(strat_ratio*inum)
jj = (vindex <= imax2)
return vindex[jj]
def run_rand(self, strat_ratios=1, num_elections=5000):
"""
Run random ballots.
Find ballots that manage to result in a superior regret than an
honest ballot.
"""
rs = self._randomstate
groupnum = len(self.group_indices)
s_ratios = np.ones(groupnum) * strat_ratios
combos_num = self.ballot_combos_num
# Obtain voter regrets for honest voting as benchmark
# Get voter indices who will vote strategically
group_locs = []
for ii in range(groupnum):
gii = self.group_indices_strat(ii, s_ratios[ii])
group_locs.append(gii)
# Select which ballots will be used for all elections
combo_indices = rs.choice(combos_num, size=(num_elections, groupnum))
for ii in range(num_elections):
ballots = self.honest_ballots.copy()
for jj in range(groupnum):
cindex = combo_indices[ii, jj]
ballot = self.ballot_combos[cindex]
group_loc = group_locs[jj]
ballots[group_loc] = ballot
self.run(ballots)
            # TODO: collect regrets for this run (e.g. via self.get_regrets()) and compare with honest_regrets
return
def run(self, ballots):
"""re-run election with new ballots"""
if self.btype == 'rank':
self.election.ranks = ballots
elif self.btype == 'score':
self.election.scores = ballots
elif self.btype == 'vote':
self.election.votes = ballots
self.election.run(etype=self.etype)
return
@utilities.recorder.record_actions()
def run_iter(self, voter_groups, strat_ratio):
"""
Run strategic iteration of a single voter group.
Parameters
-----------
        voter_groups : list of int
            Index locations of the point voter groups to strategize
        strat_ratio : float
            Ratio of strategic voters in each voter group, from [0.0 to 1.0]
"""
newballots = self.honest_ballots.copy()
vindexes = []
vnums = []
for voter_group in voter_groups:
vindex = self.group_indices[voter_group]
vnum = self.voters_unique_num[voter_group]
vindexes.append(vindex)
vnums.append(vnum)
# Get honest results
dnets = []
winners = self.election.winners
distances = self.election.voters.distances[vindex , winners] / vnum
dnet = np.sum(distances)
dnets.append(dnet)
for group in range(len(self.group_indices)):
for ballot in self.ballot_combos:
vindex = self.group_indices[group]
newballots[vindex] = ballot
if self.btype == 'rank':
self.election.ranks = newballots
elif self.btype == 'score':
self.election.scores = newballots
elif self.btype == 'vote':
self.election.votes = newballots
self.election.run(etype=self.etype)
winners = self.election.winners
print(self.election.votes.sum(axis=0))
print(winners)
distances = self.election.voters.distances[vindex , winners] / vnum
dnet = np.sum(distances)
dnets.append(dnet)
dnets = np.array(dnets)
return dnets
|
# -*- coding: utf-8 -*-
# Python3.4*
from Bot.Game import Piece
class Parser:
def __init__(self, game):
self._game = game
self._playerNames = []
def parse(self, line):
parts = line.split()
if parts[0] == 'settings':
self.set(parts[1:])
elif parts[0] == 'update':
self.update(parts[1:])
def set(self, values):
if values[0] == 'player_names':
self._playerNames = values[1].split(',')
elif values[0] == 'your_bot':
self._game.me.name = values[1]
self._playerNames.remove(values[1])
self._game.enemy.name = self._playerNames[0]
elif values[0] == 'field_width':
self._game.me.field.width = int(values[1])
self._game.enemy.field.width = int(values[1])
elif values[0] == 'field_height':
self._game.me.field.height = int(values[1])
self._game.enemy.field.height = int(values[1])
elif values[0] == 'timebank':
self._game.timebank = values[1]
elif values[0] == 'time_per_move':
self._game.timePerMove = values[1]
def update(self, values):
if values[0] == 'game':
self.updateGame(values[1:])
else:
self.updatePlayer(values[0], values[1:])
def updateGame(self, values):
if values[0] == 'this_piece_position':
# self._game.piecePosition = tuple(map(lambda x: int(x), values[1].split(',')))
self._game.piecePosition = tuple([int(x) for x in values[1].split(',')])
elif values[0] == 'this_piece_type':
self._game.piece = Piece.create(values[1])
elif values[0] == 'next_piece_type':
self._game.nextPiece = Piece.create(values[1])
elif values[0] == 'round':
self._game.round = int(values[1])
def updatePlayer(self, playerName, values):
if playerName != self._game.me.name:
player = self._game.enemy
else:
player = self._game.me
if values[0] == 'field':
# player.field.updateField(map(lambda r: map(lambda x: int(x), r.split(',')), values[1].split(';')))
player.field.updateField([[int(x) for x in r.split(',')] for r in values[1].split(';')])
elif values[0] == 'combo':
player.combo = values[1]
elif values[0] == 'row_points':
player.combo = values[1]
|
import tensorflow as tf
import os
import pandas as pd
import nltk
import numpy as np
from parametrs import *
import time
from collections import Counter
import re
def get_trainable_variables_num():
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
# print(shape)
# print(len(shape))
variable_parameters = 1
for dim in shape:
# print(dim)
variable_parameters *= dim.value
# print(variable_parameters)
total_parameters += variable_parameters
return total_parameters
def check_restore_parameters(sess, saver, path):
""" Restore the previously trained parameters if there are any. """
ckpt = tf.train.get_checkpoint_state(os.path.dirname(path))
if ckpt and ckpt.model_checkpoint_path:
print('Restoring parameters')
saver.restore(sess, ckpt.model_checkpoint_path)
def load_data_from_csv(data_path, length=None):
# print('aaaaa')
# df2 = pd.read_csv(data_path)
# t = time.time()
with open(data_path, mode='r', encoding='utf-8') as f:
lines = f.readlines()
# print('file read in ', time.time() - t)
# print('len lines', len(lines))
# print('len lines[0]', len(lines[0]))
# print('len line[1] splitted', len(lines[1].split(',')))
# print('line0', lines[0][:100])
# print('------------------------------')
# print('line1', lines[1][:100])
# for line in lines:
# print(line+"\n")
# cols2 = df2.columns
        token_num = len(lines)
        # print('token_num', token_num)
        lines = [l.split(',') for l in lines]
        # number of records = number of comma-separated fields per line
        records_num = len(lines[0])
# print(len(lines[0]))
# print(lines[0][1:10])
# print(lines[1][1:10])
# print(lines[70][1:10])
# print(lines[71][1:10])
data = []
if length is not None:
for i in range(records_num):
if i > length and length is not None:
break
line = []
for j in range(1, token_num):
line.append(lines[j][i])
data.append(line)
return data
def load_vocab_from_csv(vocab_path):
df = pd.read_csv(vocab_path)
cols = df.columns
dict = {}
dict_rev = {}
for i, token in enumerate(cols[1:]):
# print(df[token][0] , ' --- ', token)
# if(df[token][0] == 15575):
# exit()
# print('token',token)
dict[df[token][0]] = token
dict_rev[token] = df[token][0]
return dict, dict_rev
def pad_sentence(sen, length):
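    # Prepend the start token, truncate to at most `length` - 1 tokens and pad with end tokens
    # so every returned sentence has exactly `length` tokens.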
sen_ = [start_token] + sen.split()
sen_ = sen_[:min(length, len(sen_)) - 1]
for i in range(len(sen_), length):
sen_.append(end_token)
return sen_
def get_sentence_back(sen, vocab):
sent = ""
for token in sen:
# print(token)
# print(token)
sent += vocab[token + 1] + " "
if vocab[token + 1] == end_token:
return sent
return sent
def BLEU_score(ref_ans, output_ans):
return nltk.translate.bleu_score.sentence_bleu([ref_ans], output_ans)
def get_one_hot(idx, vocab_size):
# print('idx ', idx)
vec = np.zeros(vocab_size)
vec[idx] = 1
return vec
# def get_start_token_index(dict_rev):
# return [dict_rev[start_token]]
def get_token_index(dict_rev, token):
return [dict_rev[token]]
def sentence_by_id(sen, dic_rev):
li = []
for token in sen:
if token in dic_rev:
li.append(dic_rev[token])
else:
li.append(dic_rev[unknown_token])
return li
|
import numpy as np
from rlscore.utilities.reader import read_sparse
from rlscore.utilities.cross_validation import map_ids
def print_stats():
X_train = read_sparse("train_2000_x.txt")
Y_train = np.loadtxt("train_2000_y.txt")
ids = np.loadtxt("train_2000_qids.txt", dtype=int)
folds = map_ids(ids)
print("Parse data set characteristics")
print("Training set: %d instances, %d features" %X_train.shape)
print("Instances grouped into %d sentences" %len(folds))
if __name__=="__main__":
print_stats()
|
'''
The base Controller API
Provides the BaseController class for subclassing.
@author: carny
'''
import logging
from datetime import datetime
import time
from pylons import session
from cyberweb import model
from cyberweb.model import JobState
from cyberweb.model.meta import Session
log = logging.getLogger(__name__)
class BaseJob:
'''
    The base job wraps the Job model and JobState. It is kept separate from the models because...
@todo: fill in more of the docs
'''
def __init__(self):
self._username = session.get('user')
if not self._username:
log.critical('Non-user wants to run a job. This should not be allowed.')
            raise Exception('No authenticated user in session; cannot create a job.')
try: self._user = Session.query(model.User).filter(model.User.username == self._username).one()
except Exception as _:
log.critical('User does not exist.')
raise
def _createname(self, name=None):
idpart = '_%s' % self.id if self.id else ''
name = '_%s' % name if name else ''
return ''.join([time.strftime('%Y%m%d_%H%M%S'), name, idpart])
def create(self, service_id, name=None, parent=None):
'''
method creates a job in the database and returns its id and name
'''
self._job = model.Job(self._user.id, service_id, name, parent)
self._job.name = ''
self._job.state = JobState.setup
# Save job to get ID
try:
Session.add(self._job)
Session.commit()
except Exception as _:
log.warn('Couldn\'t commit job')
Session.rollback()
return None, None
# Save job name
self._job.name = self._createname(name)
try:
Session.add(self._job)
Session.commit()
except Exception as _:
log.warn('Couldn\'t commit job')
return self.id, self._job.name
def load(self, idStr):
try:
id = int(idStr)
except Exception:
log.error('Invalid id %s', idStr)
return None
try: jobs = Session.query(model.Job).filter(model.Job.id == id)
except Exception as _: pass
if not jobs.count() == 1:
log.error('No jobs found with id: %d', id)
return None
self._job = jobs.one()
self._state = self._job.state
def __repr__(self):
if self._job.id:
return '%06d' % self._job.id
else:
return ''
###
# Getter functions (readonly)
###
@property
def state(self):
try: return self._job._state
except AttributeError as _: pass
return JobState.unknown
@property
def statename(self):
return JobState.get_name(self.state)
@property
def id(self):
try: return '%06d' % self._job.id
except Exception as _: pass
return None
@property
def userid(self):
return self._user.id
@property
def name(self):
return self._job.name
###
# Functions to change state
###
def queued(self):
self._change_state(JobState.queued)
def start(self):
if JobState.is_finished(self._state):
log.warn('Job %s wants to go from finished state to running. Start a new job.' % self)
self._change_state(JobState.running)
def finish(self):
self._change_state(JobState.finished)
log.debug('TODO: Move job to job history table.')
def error(self):
self._change_state(JobState.error)
def _change_state(self, state):
if self._job.state == state:
log.debug('Job %s already in this state. Do nothing' % self)
return
if state == JobState.running:
self._job.start_time = datetime.now()
elif state == JobState.finished or state == JobState.error:
self._job.end_time = datetime.now()
try:
self._job.state = state
Session.add(self._job)
Session.commit()
except:
log.warn('Couldn\'t commit job')
else:
self._state = state
log.debug('Job %s changed state.' % self)
|
import scipy as sp
from scipy.optimize import minimize
import time
class SupportVectorMachine(object):
def __init__(self, name="svm", debug=False):
self.name = name
self.training_data = None
self.testing_data = None
self.GramMatrix = None
self.IsSet_GramMatrix = False
self.debug = debug
self.N = None
self.D = None
self.testN = None
self.testD = None
self.missClassRateS = None
self.missClassRateB = None
self.missClassRate = None
self.a = None
self.w = None
self.b = None
self.xi = None
self.C = None
self.aTol = 1e-6
self.IsSet_C = False
self.Kernel = self.DotProdKernel
self.KernelParameters = None
self.UserSet_Kernel = False
if (debug):
self.msg("Hi I'm " + name + ". Nice to meet you.")
def __str__(self):
return "--- " + self.name + " : "
def Debug(self, message):
if self.debug:
self.msg(message)
def msg(self, message):
print(str(self) + str(message))
def SetTrainingData(self, training_data):
self.training_data = sp.array(training_data)
self.N = self.training_data.shape[0]
self.D = self.training_data.shape[1] - 1
self.Debug("Training data set to N={} data points (of D={}).".format(self.N, self.D))
def SetTestingData(self, testing_data):
self.testing_data = sp.array(testing_data)
self.testN = self.testing_data.shape[0]
self.testD = self.testing_data.shape[1] - 1
self.Debug("Testing data set to N={} data points (of D={}).".format(self.testN, self.testD))
def PrepareTraining(self):
self.Debug("Preparing training...")
if not self.IsSet_C:
self.SetC()
self.GramMatrix = self.GetGramMatrix(self.Kernel)
def SetC(self, C=1.):
self.Debug("Setting penalty coefficient C = {}".format(C))
self.C = C
self.IsSet_C = True
def Setup(self, options):
try:
self.w = sp.array(options["w"])
self.b = options["b"]
        except Exception:
            self.msg("Couldn't setup classifier with options " + str(options))
def GetGramMatrix(self, kfunc=None, pars=None):
if not self.IsSet_GramMatrix:
self.Debug("GramMatrix not set, attempting to set it now...")
if kfunc is None:
self.Debug("No user supplied kernel function, using the default.")
kfunc = self.Kernel
            self.CalculateGramMatrix(kfunc, pars=pars)
self.Debug("GramMatrix is now set (it might not have been before). Returning it.")
return self.GramMatrix
def CalculateGramMatrix(self, kfunc, pars=None):
self.Debug("Calculating GramMatrix...")
self.GramMatrix = sp.array(sp.empty((self.N, self.N)))
for i in range(self.N):
for j in range(i, self.N):
xn = self.training_data[i,0:-1]
xm = self.training_data[j,0:-1]
k = kfunc(xn, xm, pars)
self.GramMatrix[i,j] = k
self.GramMatrix[j,i] = k
self.IsSet_GramMatrix = True
self.Debug("GramMatrix appears to have been calculated properly.")
def DotProdKernel(self, x, xprim, pars=None):
return sp.dot(x,xprim)
def SetKernelFunction(self, func, pars=None):
self.Debug("Setting user supplied kernel. MAKE SURE IT IS SYMMETRIC! I will not check that for you...")
self.Kernel = func
self.KernelParameters = pars
self.UserSet_Kernel = True
self.Debug("Kernel set to user supplied function.")
if self.IsSet_GramMatrix:
self.Debug("GramMatrix already calculated, but kernel is set by user. Will recalulate...")
self.CalculateGramMatrix(self.Kernel, self.KernelParameters)
def DualLagrangian(self, a, t, K):
l1 = 0.
l2 = 0.
for n in range(self.N):
for m in range(self.N):
l2 += a[n]*a[m]*t[n]*t[m]*K[n,m]
l1 += a[n]
return 0.5*l2 - l1
#def CostFuntion(self, W
def TrainMethodDual(self):
self.Debug("Starting training with dual Lagrangian...")
a = sp.zeros((self.N))
a = sp.random.uniform(0.,self.C,self.N)
opts = {"disp":False}
#if self.debug:
# opts["disp"] = True
cons = (
{"type":"ineq", "fun":lambda a: a},
{"type":"ineq", "fun":lambda a: self.C - a},
{"type":"eq", "fun":lambda a,t: sp.dot(a,t), "args":[self.training_data[:,-1]]}
)
func = self.DualLagrangian
res = minimize(func, a, constraints=cons, args=(self.training_data[:,-1], self.GramMatrix), options=opts, method="SLSQP")
if not res.success:
self.Debug(res.message + " (Status: {:d})".format(res.status))
self.Debug("nfev={:d}".format(res.nfev))
self.Debug("nit={:d}".format(res.nit))
self.a = res.x
self.xi = sp.zeros((self.N))
self.w = sp.zeros((self.D))
for d in range(self.D):
for n in range(self.N):
self.w[d] += self.a[n]*self.training_data[n,-1]*self.training_data[n,d]
Ns = 0
s = 0.
for n in range(self.N):
if self.a[n] > self.aTol and self.a[n] < self.C:
s2 = 0.
Ns += 1
for m in range(self.N):
if self.a[m] > self.aTol:
s2 += self.a[m]*self.training_data[m,-1]*self.GramMatrix[n,m]
s += self.training_data[n,-1] - s2
try:
self.b = s/Ns
except ZeroDivisionError as e:
self.msg("ZeroDivisionError: {}".format(e))
self.b = None
self.msg("Ns={}".format(Ns))
print("a=", self.a)
pass
def TrainMethodCanonical(self):
self.Debug("Starting training with canonical hyperplanes...")
#W = sp.zeros(self.D + 1 + self.N)
W = sp.random.uniform(0., 1., self.N + self.D + 1)
opts = {"disp":False}
#if self.debug:
# opts["disp"] = True
cons = []
#self.C =
for n in range(self.N):
cons.append(
{
"type":"ineq",
"fun":lambda W,x,t,m: t*(sp.dot(W[1:self.D+1],x) + W[0]) - 1 + W[self.D+1:][m],
"args":[self.training_data[n,:-1], self.training_data[n,-1], n]
}
)
cons.append(
{
"type":"ineq",
"fun":lambda W: W[self.D+1:]
}
)
func = lambda W: 0.5*sp.dot(W[1:self.D+1],W[1:self.D+1]) + self.C*sp.sum(W[self.D+1:])
res = minimize(func, W, constraints=cons, options=opts, method="SLSQP")
if not res.success:
self.Debug(res.message + " (Status: {:d})".format(res.status))
self.Debug("nfev={:d}".format(res.nfev))
self.Debug("nit={:d}".format(res.nit))
self.w = res.x[1:self.D+1]
self.xi = res.x[self.D+1:]
self.b = res.x[0]
self.a = sp.zeros((self.N))
def Train(self, method="auto"):
self.msg("Starting training...")
tstart = time.time()
        cstart = time.process_time()  # time.clock() was removed in Python 3.8
if method == "auto":
self.Debug("Determining fastest training method...")
if self.UserSet_Kernel:
self.Debug("It appears the user has defined the kernel. Will train with dual Lagrangian (to be safe).")
self.TrainMethodDual()
elif self.D < self.N:
self.Debug("Since D < N, I will use canonical hyperplanes to get complexity ~ O(D^3).")
self.TrainMethodCanonical()
else:
self.Debug("Since D >= N, I will use dual Lagrangian to get complexity ~ O(N^3).")
self.TrainMethodDual()
elif method == "canonical":
self.TrainMethodCanonical()
elif method == "dual":
self.TrainMethodDual()
        cstop = time.process_time()
tstop = time.time()
elapsed = tstop-tstart
celapsed = (cstop-cstart)
self.msg("Training done (Real: {:.3f}s CPU: {:.3f}s ).".format(elapsed, celapsed))
def EvalPoint(self, x):
y = sp.dot(self.w, x) + self.b
return y
def EvalSet(self, X):
Y = sp.zeros((self.testN))
for n in range(self.testN):
Y[n] = self.EvalPoint(X[n])
return Y
def Classify(self, X):
self.msg("Classifying data set...")
Y = self.EvalSet(X)
self.msg("Classification done.")
return Y
def Test(self):
self.msg("Testing classifier...")
Y = self.EvalSet(self.testing_data[:,:-1])
self.missClassRateS = 0.
self.missClassRateB = 0.
self.missClassRate = 0.
Nsignal = 0
for n in range(self.testN):
if self.testing_data[n,-1] == 1:
Nsignal += 1
if Y[n]*self.testing_data[n,-1] < 0:
self.missClassRate += 1.
if self.testing_data[n,-1] == 1:
self.missClassRateS += 1.
else:
self.missClassRateB += 1.
self.missClassRateS = self.missClassRateS/Nsignal
self.missClassRateB = self.missClassRateB/(self.testN - Nsignal)
self.missClassRate = self.missClassRate/self.testN
self.msg("Testing done with missclassifitation rate (S,B,Tot) = ({}, {}, {})"
.format(self.missClassRateS, self.missClassRateB, self.missClassRate))
return Y
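# --- Hedged usage sketch (not part of the original class) ---
# Train and test the classifier on a tiny, linearly separable toy set.
# Rows are [x1, x2, label] with labels in {-1, +1}, matching the layout
# assumed by SetTrainingData/SetTestingData above; the data values are
# illustrative only.
if __name__ == "__main__":
    train = [[1.0, 1.0, 1], [2.0, 1.5, 1], [-1.0, -1.0, -1], [-2.0, -1.5, -1]]
    test = [[1.5, 1.0, 1], [-1.5, -1.0, -1]]
    svm = SupportVectorMachine(name="toy_svm", debug=True)
    svm.SetTrainingData(train)
    svm.SetTestingData(test)
    svm.PrepareTraining()        # sets C and the Gram matrix
    svm.Train(method="dual")
    svm.Test()                   # prints the misclassification rates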
|
# OpenWeatherMap API Key
weather_api_key = ""
# Google API Key
g_key = ""
|
import numpy as np
import random
class Genetic:
def natural_selection(self, population):
fitness_sum = sum([c.fitness for c in population])
        selection_probs = [c.fitness / fitness_sum for c in population]
        parent = population[np.random.choice(len(population), p=selection_probs)]
return parent
def crossover(self, parent_1, parent_2):
point_selected = random.randint(10, len(parent_1) // 2)
offspring_1 = np.concatenate(
(parent_1[:point_selected], parent_2[point_selected:])
)
offspring_2 = np.concatenate(
(parent_2[:point_selected], parent_1[point_selected:])
)
return offspring_1, offspring_2
def mutation(self, offspring):
for i in range(len(offspring)):
if np.random.random() < 0.01:
offspring[i] = np.random.normal(0, 1 / 4)
return offspring
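# --- Hedged usage sketch (not part of the original class) ---
# A minimal generation loop built around the operators above. The Individual
# wrapper and the toy fitness function are illustrative assumptions; the
# operators only require members that expose a `fitness` attribute and
# support len() and slicing like a 1-D array.
if __name__ == "__main__":
    class Individual:
        """Toy genome: a 1-D gene array plus a fitness value."""
        def __init__(self, genes):
            self.genes = np.asarray(genes, dtype=float)
            self.fitness = 1.0 / (1.0 + np.sum(self.genes ** 2))
        def __len__(self):
            return len(self.genes)
        def __getitem__(self, item):
            return self.genes[item]

    ga = Genetic()
    population = [Individual(np.random.normal(0, 1, 30)) for _ in range(20)]
    for _ in range(10):
        parent_1 = ga.natural_selection(population)
        parent_2 = ga.natural_selection(population)
        child_1, child_2 = ga.crossover(parent_1, parent_2)
        population += [Individual(ga.mutation(child_1)), Individual(ga.mutation(child_2))]
    print("best fitness:", max(ind.fitness for ind in population))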
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from expressions.symbolic.basic import Add, Div, Mul, Sub
from ..dim import Dim
from .base import (CanFuseToExpression, Parameters, SameNumberOfDimensionsForInputs,
Transposable, expression_op, cls_op_name)
LOG = logging.getLogger("nntool." + __name__)
#pylint: disable=abstract-method
class Broadcastable(Parameters):
def __init__(self, *args, **kwargs) -> None:
super(Broadcastable, self).__init__(*args, **kwargs)
self.broadcast = None
self.axis_masks = None
self._is_broadcasted = False
def set_broadcast(self, shapes):
max_len = max(len(shape) for shape in shapes)
self.broadcast = [([1] * max(max_len - len(shape), 0)) + shape for shape in shapes]
self.axis_masks = [([None] * max(max_len - len(shape), 0)) + list(range(len(shape))) for shape in shapes]
self._is_broadcasted = not all(
self.broadcast[idx] == shape for idx, shape in enumerate(shapes))
@property
def is_broadcasted(self):
return self._is_broadcasted
@staticmethod
def strip_ones(shape):
return [dim for dim in shape if dim != 1]
def verify_broadcast(self, shape, broadcast):
if self.strip_ones(shape) != self.strip_ones(broadcast):
raise ValueError(f'invalid broadcast {shape} -> {broadcast} on {self.name}')
def broadcast_inputs(self, input_tensors):
if self.broadcast is None:
raise ValueError(f'broadcast has not been set on {self.name}')
res = []
for idx, input_tensor in enumerate(input_tensors):
self.verify_broadcast(input_tensor.shape, self.broadcast[idx])
res.append(np.reshape(input_tensor.copy(), self.broadcast[idx]))
return res
@staticmethod
def transpose_mask(mask, trans):
out_idx = 0
res = []
for axis in trans:
if mask[axis] is None:
res.append(None)
else:
res.append(out_idx)
out_idx += 1
return tuple(res)
def transpose_broadcast(self, transpose):
# # broadcasted input could have included batch dimensions
# # just expand the transpose not moving those axes
# while len(transpose) < max(len(shape) for shape in self.broadcast):
# transpose = [0] + [dim + 1 for dim in transpose]
self.broadcast = [tuple([shape[idx] for idx in transpose]) for shape in self.broadcast]
self.axis_masks = [self.transpose_mask(shape, transpose) for shape in self.axis_masks]
def derive_transpose(self, full_transpose, in_idx):
mask = self.axis_masks[in_idx]
return tuple([mask[idx] for idx in full_transpose if mask[idx] is not None])
def delete_transpose(self, in_idx, trans):
#TODO - Implement
pass
class MatrixBroadcastedLinearOpParameters(CanFuseToExpression, Transposable,
SameNumberOfDimensionsForInputs, Broadcastable):
def __init__(self, name, *args, **kwargs):
super(MatrixBroadcastedLinearOpParameters, self).__init__(name, *args, **kwargs)
self.at_options.valid_options['PARALLELFEATURES'] = int
self.at_options.valid_options['TILEORIENTATION'] = int
@property
def can_equalize(self):
return False
def get_parameter_size(self):
return 0
def compute_load(self):
return self.out_dims[0].size() * 2
def get_output_size(self, in_dims):
if self.transpose_in:
in_dims = [dim.calc_transpose(trans) if trans is not None else dim
for dim, trans in zip(in_dims, self.transpose_in)]
if self.broadcast is None:
self.set_broadcast([dim.shape for dim in in_dims])
out_dim = Dim.broadcast(in_dims)
if self.transpose_out and self.transpose_out[0]:
out_dim.transpose(self.transpose_out[0])
return [out_dim]
def should_fuse(self, node_set, qrec=None):
for transpose in [self.transpose_in, self.transpose_out]:
if transpose is None:
continue
if any(trans is not None for trans in transpose):
return False
return True
def __str__(self):
return "{} {} {}".format(self.op_name, Transposable.__str__(self), self.at_options)
@expression_op(Add)
@cls_op_name('add')
class MatrixAddParameters(MatrixBroadcastedLinearOpParameters):
TEST_MODE = False
def __init__(self, name, *args, **kwargs):
super(MatrixAddParameters, self).__init__(name, *args, **kwargs)
self._force_quantized_index = None
# This property is used in the padded add where the input scaled must always be the idx=0
@property
def force_quantized_index(self):
return self._force_quantized_index
@force_quantized_index.setter
def force_quantized_index(self, val):
self._force_quantized_index = val
def should_fuse(self, node_set, qrec=None):
# add should fuse into an expression if there are several adds or the input
# shapes don't match since we don't have broadcasted kernels in the AT gens
if self.TEST_MODE:
return True
return super().should_fuse(node_set, qrec=qrec) and (len(node_set) > 1 or self.in_dims[0].layout_shape != self.in_dims[1].layout_shape)
@expression_op(Mul)
@cls_op_name('mul')
class MatrixMulParameters(MatrixBroadcastedLinearOpParameters):
pass
@cls_op_name('sub')
@expression_op(Sub)
class MatrixSubParameters(MatrixBroadcastedLinearOpParameters):
pass
@cls_op_name('div')
@expression_op(Div)
class MatrixDivParameters(MatrixBroadcastedLinearOpParameters):
pass
@cls_op_name('matmul')
class MatMulOpParameters(Transposable):
def __init__(self, name, *args, **kwargs):
super(MatMulOpParameters, self).__init__(name, *args, **kwargs)
self.at_options.valid_options['PARALLELFEATURES'] = int
self.at_options.valid_options['TILEORIENTATION'] = int
# axes of extra channel dimension on either operand
@property
def can_equalize(self):
return False
def get_parameter_size(self):
return 0
def compute_load(self):
return self.out_dims[0].size() * 2
def get_output_size(self, in_dims):
if self.transpose_in:
in_dims = [dim.calc_transpose(trans) if trans is not None else dim
for dim, trans in zip(in_dims, self.transpose_in)]
x_shape = list(in_dims[0].shape).copy()
y_shape = list(in_dims[1].shape).copy()
if len(x_shape) == 1:
x_shape = [1] + x_shape
remove_first = True
else:
remove_first = False
if len(y_shape) == 1:
y_shape = y_shape + [1]
remove_last = True
else:
remove_last = False
x_chans = x_shape[:-2:]
y_chans = y_shape[:-2:]
out_chans = Dim.npbroadcast([x_chans, y_chans])
x = [] if remove_first else [x_shape[-2]]
y = [] if remove_last else [y_shape[-1]]
out_dim = Dim.unnamed(out_chans + x + y)
if self.transpose_out and self.transpose_out[0]:
out_dim.transpose(self.transpose_out[0])
return [out_dim]
def __str__(self):
return "{} {} {}".format(self.op_name, Transposable.__str__(self), self.at_options)
|
"""
.. module: lemur.authorizations.models
:platform: unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Netflix Secops <secops@netflix.com>
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy_utils import JSONType
from lemur.database import db
from lemur.plugins.base import plugins
class Authorization(db.Model):
__tablename__ = "pending_dns_authorizations"
id = Column(Integer, primary_key=True, autoincrement=True)
account_number = Column(String(128))
domains = Column(JSONType)
dns_provider_type = Column(String(128))
options = Column(JSONType)
@property
def plugin(self):
return plugins.get(self.plugin_name)
def __repr__(self):
return "Authorization(id={id})".format(id=self.id)
def __init__(self, account_number, domains, dns_provider_type, options=None):
self.account_number = account_number
self.domains = domains
self.dns_provider_type = dns_provider_type
self.options = options
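# --- Hedged usage sketch (not part of the original module) ---
# Constructing a pending DNS authorization record; the account number,
# domains and provider type below are illustrative only, and persisting the
# object would still require the usual Lemur database session handling.
def _example_pending_authorization():
    return Authorization(
        account_number="123456789012",
        domains=["example.com", "www.example.com"],
        dns_provider_type="route53",
        options=None,
    )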
|
# Generated by Django 3.0.5 on 2020-05-20 00:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('responder', '0002_responder_location'),
('reporter', '0015_auto_20200520_0035'),
]
operations = [
migrations.RenameModel(
old_name='IncidentLocation',
new_name='Place',
),
]
|
from aiohttp import web
async def json(data, status=200, headers=None):
response = web.json_response(data, status=status, headers=headers, content_type='application/json')
return response
async def raw_json(data, status=200, headers=None):
response = web.Response(text=data, status=status, headers=headers, content_type='application/json')
return response
async def raw(data, status=200, headers=None):
response = web.Response(text=data, status=status, headers=headers, content_type='text/plain')
return response
async def stream():
return web.StreamResponse(
status=200,
reason='OK',
headers={'Content-Type': 'text/plain'},
)
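# --- Hedged usage sketch (not part of the original module) ---
# How these helpers might be wired into an aiohttp application; the route
# path and handler name are illustrative only.
async def example_handler(request):
    return await json({"status": "ok"})

def create_app():
    app = web.Application()
    app.router.add_get("/health", example_handler)
    return app

# web.run_app(create_app())  # uncomment to serve on the default port 8080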
|
### This program is the manifestation of inspiration, in some sense ###
print("Welcome to \'GUESS WHAT?!\', version 1.1\n")
guess = int(input("Please enter your \'GUESS WHAT?!\' value: "))
if(guess%2 == 0):
print("\nEven Steven")
else:
print("\nOdd Nod")
|
import random
# |>
# #=====================================#
# | Python Browser Compatibility Layer \
# \ Copyright 2016 © Sebastian Silva |
# #====================================#
# | <sebastian@fuentelibre.org> \
# \ Choose your license: BSD2 Apache2 |
# +---------------------------------+
#
# Compatibility artifacts
from compat import _print as ՐՏ_print
from compat import stdlib
from compat import _new
width, height = 800, 300
#
# Game objects
#
class Bola:
def __init__(self, director):
self.director = director
self.to_delete = False
self.sprite = director.game.circle(10, colors.vibe_light)
self.sprite.x = width / 2
self.sprite.y = height / 2
self.sprite.vy = random.choice([-5, +5])
self.sprite.vx = random.choice([-1, +1])
self.recolor()
def recolor(self):
self.sprite.fillStyle = random.choice([colors.vibe_light, colors.vibe,
colors.mute, colors.mute_light])
def destroy(self):
self.to_delete = True
self.sprite.visible = False
self.director.game.remove(self.sprite)
self.sprite.destroy()
def play(self):
if self.sprite.visible:
if self.sprite.y > height - self.sprite.height:
self.sprite.vy *= -1
if self.sprite.x > width - self.sprite.width:
# self.sprite.vx *= -1
self.destroy()
return
if self.sprite.y < 0:
self.sprite.vy *= -1
if self.sprite.x < 0:
# self.sprite.vx *= -1
self.destroy()
return
self.director.game.move(self.sprite)
#
# Scene Director
#
class Director:
def __init__(self):
self.game = hexi(width, height, self.setup)
self.game.backgroundColor = "ffffff"
self.game.fps = 25
        self.tick = False
        self.bgcolor = None  # updated to the palette background on first play()
        self.actors = []
def setup(self):
self.game.state = self.play
# self.game.pointer.tap = self.make_bola
if self.tick is False:
self.tick = window.setInterval(self.make_bola, 250)
def recolor(self):
styles = document.styleSheets[document.styleSheets.length - 1]
styles.insertRule("#__terminal__ { color: " + colors.vibe_light + " }", 0)
self.game.backgroundColor = colors.mute_dark
for actor in self.actors:
actor.recolor()
self.rescale()
def make_bola(self):
self.actors.append(Bola(self))
def play(self):
if self.bgcolor != colors.mute_dark:
self.bgcolor = colors.mute_dark
self.recolor()
        for actor in self.actors[:]:
            if actor.to_delete is False:
                actor.play()
            elif actor.to_delete is True:
                self.actors.remove(actor)
def pause(self):
if self.tick:
window.clearInterval(self.tick)
self.tick = False
self.game.pause()
def resume(self):
if not self.tick:
self.tick = window.setInterval(self.make_bola, 250)
self.game.resume()
def rescale(self):
self.scale = self.game.scaleToWindow(colors.vibe_dark)
#
# Interface to Vibrant.js
#
class Palette:
def __init__(self, asset):
if asset:
v = _new(Vibrant, asset)
if v:
v.getPalette(self.parse)
self.asset = asset
self.vibe = "#335533"
self.vibe_light = "#656565"
self.vibe_dark = "#0f1f0f"
self.mute = "#111111"
self.mute_light = "#333333"
self.mute_dark = "#222222"
def parse(self, err, palette=""):
self.palette = palette
if palette:
self.vibe = palette.Vibrant.getHex()
self.vibe_light = palette.LightVibrant.getHex()
self.vibe_dark = palette.DarkVibrant.getHex()
self.mute = palette.Muted.getHex()
self.mute_light = palette.LightMuted.getHex()
self.mute_dark = palette.DarkMuted.getHex()
# Entry point
def main():
if window.educajuego:
return
if window.transpiler=='Transcrypt':
colors = Palette('docs/images/monk_transcribing_logo.png')
elif window.transpiler=='Rapydscript':
colors = Palette('docs/images/rs_logo_tiny.png')
else:
colors = Palette()
educajuego = Director()
educajuego.game.start()
window.onblur = educajuego.pause
window.onfocus = educajuego.resume
window.onresize = educajuego.rescale
window.colors = colors
window.educajuego = educajuego
# This is defined in common.py which is loaded
# from compat module differently for each compiler.
window.start_ide()
main()
|
#!/usr/bin/env python
#
# pytrader.py
#
#
# Project page
# https://github.com/jmkangkr/pyTrader
#
# To clone the project
# git clone https://github.com/jmkangkr/pyTrader.git
#
# Please send bugs to
# Jaemin Kang
# jmkangkr@gmail.com
#
import win32com.client
import pythoncom
import getpass
from queue import Queue, Empty
import logging
import sys
import datetime
import os
import sqlite3
import time
VERSION = (0, 1)
RES_DIRECTORY = "C:\\eBEST\\xingAPI\\Res"
NO_OCCURS = 0
logger = None
class Escape(Exception):
pass
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
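# --- Hedged usage sketch (not part of the original script) ---
# Singleton is used below via `metaclass=Singleton` (see XAResResources).
# A minimal self-check, kept in a function so it only runs when called:
def _singleton_example():
    class Config(metaclass=Singleton):
        pass
    assert Config() is Config()  # every construction returns the same instance
    return Config()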
def setup_logger():
global logger
logger = logging.getLogger()
formatter = logging.Formatter(fmt='%(asctime)s (%(levelname)5s) %(message)s', datefmt='%Y%m%d %H:%M:%S')
file_handler = logging.FileHandler('{}.log'.format(os.path.splitext(sys.argv[0])[0]))
stream_handler = logging.StreamHandler()
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def valid_date(s):
try:
date_parsed = datetime.datetime.strptime(s, "%Y%m%d").date()
except ValueError:
return None
return date_parsed
def simple_encode(key, clear):
enc = []
for i in range(len(clear)):
key_c = key[i % len(key)]
enc_c = chr((ord(clear[i]) + ord(key_c)) % 256)
enc.append(enc_c)
encoded = "".join(enc)
return encoded
def simple_decode(key, enc):
dec = []
for i in range(len(enc)):
key_c = key[i % len(key)]
dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)
dec.append(dec_c)
return "".join(dec)
def encrypt_login_information():
(user_id, user_ps, user_pw) = get_login_information()
key = getpass.getpass("Enter encryption key: ")
login_string = '\t'.join((user_id, user_ps, user_pw))
encoded = simple_encode(key, login_string)
f = open('ud', "wb")
f.write(encoded.encode('utf-8'))
f.close()
def get_login_information():
user_id = input("Enter user id: ")
user_ps = getpass.getpass("Enter password for {}: ".format(user_id))
user_pw = getpass.getpass("Enter password for certificate: ")
return (user_id, user_ps, user_pw)
def preprocess_options():
start_date = None
end_date = None
logging_level = logging.ERROR
help = """\
usage: {} [-h] [-v] [-s] start_date [end_date]
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-s, --login-setup creates user login data for easy login
-lc, -le, -lw, -li, -ld logging level (CRITICAL, ERROR, WARNING, INFO, DEBUG)
start_date trading start date (format YYYYMMDD)
end_date trading end date (format YYYYMMDD)\
""".format(sys.argv[0])
for option in sys.argv[1:]:
if option == '--help' or option == '-h':
print(help)
exit(0)
elif option == '--version' or option == '-v':
print("""\
{} version {}\
""".format(sys.argv[0], '.'.join(map(str, VERSION))))
exit(0)
elif option == '--login-setup' or option == '-s':
encrypt_login_information()
print("Login data created")
exit(0)
elif option == '-lc':
logging_level = logging.CRITICAL
elif option == '-le':
logging_level = logging.ERROR
elif option == '-lw':
logging_level = logging.WARNING
elif option == '-li':
logging_level = logging.INFO
elif option == '-ld':
logging_level = logging.DEBUG
else:
date_parsed = valid_date(option)
if date_parsed:
if start_date:
end_date = date_parsed
else:
start_date = date_parsed
else:
print("Not a valid date format.")
exit(0)
if not start_date or not end_date:
print(help)
exit(0)
logger.setLevel(logging_level)
return (start_date, end_date)
class Logger(object):
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARN = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
def __init__(self):
super(Logger, self).__init__()
def log(self, level, message):
logger.log(level, '{:<18s}: {}'.format(self.__class__.__name__, message))
class XAScheduler(Logger):
__message_queue = Queue()
__timers = []
def __init__(self):
super(XAScheduler, self).__init__()
self.__runnables = []
def registerRunnable(self, runnable):
self.log(Logger.DEBUG, 'Registering runnable: {}'.format(runnable))
self.__runnables.append(runnable)
@classmethod
def registerTimer(cls, runnable, parameter, seconds):
runnable.log(Logger.DEBUG, 'Registering {} seconds timer'.format(seconds))
XAScheduler.__timers.append((time.time(), runnable, parameter, seconds))
@classmethod
def sendMessage(cls, target, message, outparam, inparam, sender):
sender.log(Logger.DEBUG, 'MSG Snd: to({}), msg({})'.format(target.__class__.__name__, message))
XAScheduler.__message_queue.put((target, message, outparam, inparam, sender))
def run(self):
try:
while len(self.__runnables):
for runnable in list(self.__runnables):
if runnable.state == XARunnable.STAT_INIT:
self.log(Logger.DEBUG, 'MSG Rev: target({}), msg({})'.format(runnable.__class__.__name__, XARunnable.MSG_STARTED))
runnable.onMessage(XARunnable.MSG_STARTED, None, None, self)
runnable.state = XARunnable.STAT_RUNNING
elif runnable.state == XARunnable.STAT_PAUSED:
self.log(Logger.DEBUG, 'MSG Rev: target({}), msg({})'.format(runnable.__class__.__name__, XARunnable.MSG_PAUSED))
runnable.onMessage(XARunnable.MSG_PAUSED, None, None, self)
elif runnable.state == XARunnable.STAT_STOPPED:
self.log(Logger.DEBUG, 'MSG Rev: target({}), msg({})'.format(runnable.__class__.__name__, XARunnable.MSG_STOPPED))
runnable.onMessage(XARunnable.MSG_STOPPED, None, None, self)
runnable.state = XARunnable.STAT_DEAD
self.__runnables.remove(runnable)
for timer in list(XAScheduler.__timers):
(trigger, target, parameter, seconds) = timer
if seconds <= (time.time() - trigger):
XAScheduler.__timers.remove(timer)
XAScheduler.sendMessage(target, XARunnable.MSG_TIMER, None, parameter, self)
try:
(target, message, outparam, inparam, sender) = XAScheduler.__message_queue.get(False)
self.log(Logger.DEBUG, 'MSG Rev: to({}), msg({})'.format(target.__class__.__name__, message))
target.onMessage(message, outparam, inparam, sender)
except Empty:
pythoncom.PumpWaitingMessages()
except KeyboardInterrupt:
exit(0)
class XARunnable(Logger):
STAT_INIT = 0
STAT_RUNNING = 1
STAT_PAUSED = 2
STAT_STOPPED = 3
STAT_DEAD = 4
MSG_STARTED = 'MSG_STARTED'
MSG_PAUSED = 'MSG_PAUSED'
MSG_STOPPED = 'MSG_STOPPED'
MSG_TIMER = 'MSG_TIMER'
def __init__(self):
super(XARunnable, self).__init__()
self._state = XARunnable.STAT_INIT
def sendMessage(self, target, message, outparam, inparam):
XAScheduler.sendMessage(target, message, outparam, inparam, self)
def sleep(self, seconds, param):
XAScheduler.registerTimer(self, param, seconds)
def onMessage(self, message, outparam, inparam, sender):
raise NotImplementedError
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
class XASessionEvents(Logger):
MSG_LOGGED_ON = 'MSG_LOGGED_ON'
MSG_LOGGED_OUT = 'MSG_LOGGED_OUT'
MSG_DISCONNECTED = 'MSG_DISCONNECTED'
def __init__(self):
super(XASessionEvents, self).__init__()
self.__listener = None
def postInitialize(self, listener):
self.__listener = listener
def OnLogin(self, errorCode, errorDesc):
self.log(Logger.DEBUG, 'XASessionEvents:OnLogin')
if errorCode != '0000':
self.log(Logger.ERROR, "Login failed: {}".format(errorDesc))
XAScheduler.sendMessage(self.__listener, XASessionEvents.MSG_LOGGED_ON, errorCode == '0000', None, self)
def OnLogout(self):
self.log(Logger.DEBUG, 'XASessionEvents:OnLogout')
XAScheduler.sendMessage(self.__listener, XASessionEvents.MSG_LOGGED_OUT, None, None, self)
def OnDisconnect(self):
self.log(Logger.DEBUG, 'XASessionEvents:OnDisconnect')
XAScheduler.sendMessage(self.__listener, XASessionEvents.MSG_DISCONNECTED, None, None, self)
class Res(object):
def __init__(self, resName, resDescription, resAttributes, resBlocks):
super(Res, self).__init__()
self.__resName = resName
self.__resDescription = resDescription
self.__resAttributes = resAttributes
self.__resBlocks = resBlocks
@property
def resName(self):
return self.__resName
@property
def resDescription(self):
return self.__resDescription
@property
def resAttributes(self):
return self.__resAttributes
@property
def resBlocks(self):
return self.__resBlocks
@resBlocks.setter
def resBlocks(self, blocks):
self.__resBlocks = blocks
def __str__(self):
return '{},{},{}\n{}'.format(self.__resDescription, self.__resName, self.__resAttributes, "".join(map(str, self.__resBlocks)))
class Block(object):
def __init__(self, blockName, blockDescription, blockAttributes, blockVariables):
super(Res.Block, self).__init__()
self.__blockName = blockName
self.__blockDescription = blockDescription
self.__blockAttributes = blockAttributes
self.__blockVariables = blockVariables
@property
def blockName(self):
return self.__blockName
@property
def blockDescription(self):
return self.__blockDescription
@property
def blockAttributes(self):
return self.__blockAttributes
@property
def blockVariables(self):
return self.__blockVariables
@blockVariables.setter
def blockVariables(self, variables):
self.__blockVariables = variables
def __str__(self):
return '\t{},{},{}\n{}\n'.format(self.__blockName, self.__blockDescription, self.__blockAttributes, "\n".join(map(str, self.__blockVariables)))
class Variable(object):
def __init__(self, varName, varDescription, varDataType, varDataPrecision):
super(Res.Block.Variable, self).__init__()
self.__varName = varName
self.__varDescription = varDescription
self.__varDataType = varDataType
self.__varDataPrecision = varDataPrecision
@property
def varName(self):
return self.__varName
@property
def varDescription(self):
return self.__varDescription
@property
def varDataType(self):
return self.__varDataType
@property
def varDataPrecision(self):
return self.__varDataPrecision
def __str__(self):
return '\t\t{},{},{},{}'.format(self.__varDescription, self.__varName, self.__varDataType, self.__varDataPrecision)
class XAResResources(Logger, metaclass=Singleton):
__res = {}
def __init__(self):
super(XAResResources, self).__init__()
def inBlocksOf(self, baseName):
return self.__blocksOf(baseName, 'input')
def outBlocksOf(self, baseName):
return self.__blocksOf(baseName, 'output')
def __blocksOf(self, baseName, attribute):
if not baseName in XAResResources.__res:
self.__parseResFile(baseName)
res = XAResResources.__res[baseName]
blocks = [block for block in res.resBlocks if attribute in block.blockAttributes]
return blocks
def block(self, blockName):
baseName = self.baseNameOf(blockName)
res = XAResResources.__res[baseName]
for block in res.resBlocks:
if block.blockName == blockName:
return block
raise ValueError
def baseNameOf(self, blockName):
baseName = blockName.split('InBlock')[0]
baseName = baseName.split('OutBlock')[0]
return baseName
def resFileNameOf(self, baseName):
return '{}.res'.format(baseName)
def __parseResFile(self, baseName):
resFilePath = os.path.join(RES_DIRECTORY, self.resFileNameOf(baseName))
with open(resFilePath, 'r') as resFileHandle:
rawLines = resFileHandle.readlines()
lines = [line.strip().rstrip(';') for line in rawLines if line.strip().rstrip(';')]
blocks = []
res = None
for i, line in enumerate(lines):
marker = line.split(',')[0]
if marker == 'BEGIN_DATA_MAP':
tokens = lines[i-1].split(',')
(func, desc, name, *attributes) = tokens
res = Res(name, desc, attributes, None)
elif marker == 'begin':
tokens = lines[i-1].split(',')
(name, desc, *attributes) = tokens
block = Res.Block(name, desc, attributes, None)
blocks.append(block)
beginIndex = i
elif marker == 'end':
variables = []
for lline in lines[beginIndex+1:i]:
tokens = lline.split(',')
(desc, name, name2, type, precision) = tokens
variable = Res.Block.Variable(name, desc, type, precision)
variables.append(variable)
block.blockVariables = variables
elif marker == 'END_DATA_MAP':
res.resBlocks = blocks
break
XAResResources.__res[baseName] = res
print(res)
class XADataset(object):
def __init__(self, xatype, data):
super(XADataset, self).__init__()
        # Recover the field names for this block from the parsed .res metadata
        resources = XAResResources()
        resources.outBlocksOf(resources.baseNameOf(xatype))  # ensure the .res file has been parsed
        self.__varNames = [var.varName for var in resources.block(xatype).blockVariables]
if len(self.__varNames) != len(data):
raise ValueError
self.__vars = {}
for index, name in enumerate(self.__varNames):
self.__vars[name] = data[index]
def __getitem__(self, index):
return self.__vars[index]
class XAQueryEvents(Logger):
MSG_DATA_RECEIVED = 'MSG_DATA_RECEIVED'
def __init__(self):
super(XAQueryEvents, self).__init__()
self.__listener = None
self.__param = None
def postInitialize(self, listener, param):
self.__listener = listener
self.__param = param
def OnReceiveData(self, szTrCode):
self.log(Logger.DEBUG, "XAQueryEvents:OnReceiveData - szTrCode({})".format(szTrCode))
XAScheduler.sendMessage(self.__listener, XAQueryEvents.MSG_DATA_RECEIVED, None, self.__param, self)
def OnReceiveMessage(self, systemError, messageCode, message):
self.log(Logger.DEBUG, "XAQueryEvents:OnReceiveMessage - systemError({}), messageCode({}), message({})".format(systemError, messageCode, message))
class XAServerTransaction(XARunnable):
__FORCED_DELAY_BETWEEN_REQUESTS = {'t1305' : 1.0}
__timeOfLastRequest = {}
def __init__(self):
        super(XAServerTransaction, self).__init__()
def request(self, inBlockName, params):
if 'InBlock' in inBlockName:
raise ValueError
resParser = XAResResources()
baseName = resParser.baseNameOf(inBlockName)
block = resParser.block(inBlockName)
if len(block.blockVariables) != len(params):
raise ValueError
xaquery = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery", XAQueryEvents)
xaquery.postInitialize(self, None)
xaquery.LoadFromResFile(os.path.join(RES_DIRECTORY, resParser.resFileNameOf(baseName)))
for index, var in enumerate(block.blockVariables):
xaquery.SetFieldData(inBlockName, var.varName, NO_OCCURS, params[index])
        timeToSleep = XAServerTransaction.__FORCED_DELAY_BETWEEN_REQUESTS[baseName] - (time.time() - self.__timeOfLastRequest.get(baseName, 0.0))
if timeToSleep > 0:
self.log(Logger.DEBUG, "Delaying request by {} second".format())
time.sleep(timeToSleep)
result = xaquery.Request(0)
self.__timeOfLastRequest[baseName] = time.time()
if result < 0:
self.log(Logger.ERROR, "Request error: {}".format(result))
return False
return True
def onMessage(self, message, outparam, inparam, sender):
if message == XAQueryEvents.MSG_DATA_RECEIVED:
xaquery = sender
class XADataRetrievalDay(XARunnable):
MSG_DATA_RETRIEVED = 'MSG_DATA_RETRIEVED'
TIME_SENTINEL_ZERO = 0.0
T1305_REQUEST_TIME_LIMIT = 1.0
def __init__(self):
super(XADataRetrievalDay, self).__init__()
self.__timeLastRequest = XADataRetrievalDay.TIME_SENTINEL_ZERO
self.__xaQueries = []
def __del__(self):
pass
def __waitAndRequest(self, xaquery):
time_to_sleep = XADataRetrievalDay.T1305_REQUEST_TIME_LIMIT - (time.time() - self.__timeLastRequest)
if time_to_sleep > 0:
self.log(Logger.DEBUG, "Delaying request by {} second".format(time_to_sleep))
time.sleep(time_to_sleep)
result = xaquery.Request(0)
self.__timeLastRequest = time.time()
if result < 0:
self.log(Logger.ERROR, "Request error: {}".format(result))
return False
return True
def retrieve(self, stock, days, callback, param):
self.log(Logger.INFO, 'XADataRetrievalDay:retrieve called')
xaquery = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery", XAQueryEvents)
xaquery.postInitialize(self, (stock, days, callback, param))
xaquery.LoadFromResFile(os.path.join(RES_DIRECTORY, 't1305.res'))
xaquery.SetFieldData('t1305InBlock', 'shcode', NO_OCCURS, stock)
xaquery.SetFieldData('t1305InBlock', 'dwmcode', NO_OCCURS, 1)
xaquery.SetFieldData('t1305InBlock', 'cnt', NO_OCCURS, days)
self.log(Logger.INFO, "Requesting stock {} data for {} days".format(stock, days))
if not self.__waitAndRequest(xaquery):
return False
self.__xaQueries.append(xaquery)
return True
def onMessage(self, message, outparam, inparam, sender):
if message == XAQueryEvents.MSG_DATA_RECEIVED:
(stock, days, callback, param) = inparam
xaquery = sender
datasets = []
for i in range(0, days):
val_date = xaquery.GetFieldData("t1305OutBlock1", "date", i)
val_open = xaquery.GetFieldData("t1305OutBlock1", "open", i)
val_high = xaquery.GetFieldData("t1305OutBlock1", "high", i)
val_low = xaquery.GetFieldData("t1305OutBlock1", "low", i)
val_close = xaquery.GetFieldData("t1305OutBlock1", "close", i)
val_sign = xaquery.GetFieldData("t1305OutBlock1", "sign", i)
val_change = xaquery.GetFieldData("t1305OutBlock1", "change", i)
val_diff = xaquery.GetFieldData("t1305OutBlock1", "diff", i)
val_volume = xaquery.GetFieldData("t1305OutBlock1", "volume", i)
val_diff_vol = xaquery.GetFieldData("t1305OutBlock1", "diff_vol", i)
val_chdegree = xaquery.GetFieldData("t1305OutBlock1", "chdegree", i)
val_sojinrate = xaquery.GetFieldData("t1305OutBlock1", "sojinrate", i)
val_changerate = xaquery.GetFieldData("t1305OutBlock1", "changerate", i)
val_fpvolume = xaquery.GetFieldData("t1305OutBlock1", "fpvolume", i)
val_covolume = xaquery.GetFieldData("t1305OutBlock1", "covolume", i)
val_shcode = xaquery.GetFieldData("t1305OutBlock1", "shcode", i)
val_value = xaquery.GetFieldData("t1305OutBlock1", "value", i)
val_ppvolume = xaquery.GetFieldData("t1305OutBlock1", "ppvolume", i)
val_o_sign = xaquery.GetFieldData("t1305OutBlock1", "o_sign", i)
val_o_change = xaquery.GetFieldData("t1305OutBlock1", "o_change", i)
val_o_diff = xaquery.GetFieldData("t1305OutBlock1", "o_diff", i)
val_h_sign = xaquery.GetFieldData("t1305OutBlock1", "h_sign", i)
val_h_change = xaquery.GetFieldData("t1305OutBlock1", "h_change", i)
val_h_diff = xaquery.GetFieldData("t1305OutBlock1", "h_diff", i)
val_l_sign = xaquery.GetFieldData("t1305OutBlock1", "l_sign", i)
val_l_change = xaquery.GetFieldData("t1305OutBlock1", "l_change", i)
val_l_diff = xaquery.GetFieldData("t1305OutBlock1", "l_diff", i)
val_marketcap = xaquery.GetFieldData("t1305OutBlock1", "marketcap", i)
dataset = XADataset('t1305OutBlock1', (val_date, val_open, val_high, val_low, val_close, val_sign, val_change, val_diff, val_volume, val_diff_vol, val_chdegree, val_sojinrate, val_changerate, val_fpvolume, val_covolume, val_shcode, val_value, val_ppvolume, val_o_sign, val_o_change, val_o_diff, val_h_sign, val_h_change, val_h_diff, val_l_sign, val_l_change, val_l_diff, val_marketcap))
datasets.append(dataset)
self.__xaQueries.remove(sender)
self.sendMessage(callback, XADataRetrievalDay.MSG_DATA_RETRIEVED, datasets, param)
class XADatabaseDay(XARunnable):
MSG_DATABASE_UPDATED = 'MSG_DATABASE_UPDATED'
BEGINNING = datetime.date(2008, 1, 1)
DAYS_IN_WEEK = 7.0
WEEKDAYS_IN_WEEK = 5.0
def __init__(self, stocks):
super(XADatabaseDay, self).__init__()
self.__stocks = stocks
self.__conn = sqlite3.connect('{}.db'.format(os.path.splitext(sys.argv[0])[0]))
self.__cur = self.__conn.cursor()
self.__server = XADataRetrievalDay()
def __del__(self):
self.__conn.close()
def _createDatabase(self):
for stock in self.__stocks:
sqlcommand = "CREATE TABLE IF NOT EXISTS t1305_{} (" \
"date TEXT UNIQUE, "\
"open INTEGER, "\
"high INTEGER, "\
"low INTEGER, "\
"close INTEGER, "\
"sign TEXT, "\
"change INTEGER, "\
"diff REAL, "\
"volume INTEGER, "\
"diff_vol REAL, "\
"chdegree REAL, "\
"sojinrate REAL, "\
"changerate REAL, "\
"fpvolume INTEGER, "\
"covolume INTEGER, "\
"shcode TEXT, "\
"value INTEGER, "\
"ppvolume INTEGER, "\
"o_sign TEXT, "\
"o_change INTEGER, "\
"o_diff REAL, "\
"h_sign TEXT, "\
"h_change INTEGER, "\
"h_diff REAL, "\
"l_sign TEXT, "\
"l_change INTEGER, "\
"l_diff REAL, "\
"marketcap INTEGER "\
")".format(stock)
self.__cur.execute(sqlcommand)
self.__conn.commit()
def updateDatabase(self, callback, param):
self.log(Logger.INFO, "XADatabaseDay:updateDatabase called")
self._createDatabase()
for stock in self.__stocks:
last_data = self.__lastData(stock)
if not last_data:
last_date = XADatabaseDay.BEGINNING
else:
last_date = datetime.datetime.strptime(last_data['date'], "%Y%m%d").date()
days_to_request = int((datetime.date.today() - last_date).days * (XADatabaseDay.WEEKDAYS_IN_WEEK / XADatabaseDay.DAYS_IN_WEEK) + XADatabaseDay.DAYS_IN_WEEK)
self.log(Logger.INFO, "Updating database - stock({}), last date({}), requesting {} days of data".format(stock, last_date, days_to_request))
success = self.__server.retrieve(stock, days_to_request, self, (stock, days_to_request, callback, param))
if not success:
self.log(Logger.ERROR, "Retrieval failed")
def onMessage(self, message, outparam, inparam, sender):
if message == XADataRetrievalDay.MSG_DATA_RETRIEVED:
datasets = outparam
(stock, days_to_request, callback, param) = inparam
insert_count = 0
for dataset in datasets:
date = datetime.datetime.strptime(dataset['date'], "%Y%m%d").date()
if date >= XADatabaseDay.BEGINNING:
sqlcommand = "INSERT OR IGNORE INTO t1305_{} (date, open, high, low, close, sign, change, diff, volume, diff_vol, chdegree, sojinrate, changerate, fpvolume, covolume, shcode, value, ppvolume, o_sign, o_change, o_diff, h_sign, h_change, h_diff, l_sign, l_change, l_diff, marketcap) VALUES ({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(stock, dataset['date'], dataset['open'], dataset['high'], dataset['low'], dataset['close'], dataset['sign'], dataset['change'], dataset['diff'], dataset['volume'], dataset['diff_vol'], dataset['chdegree'], dataset['sojinrate'], dataset['changerate'], dataset['fpvolume'], dataset['covolume'], dataset['shcode'], dataset['value'], dataset['ppvolume'], dataset['o_sign'], dataset['o_change'], dataset['o_diff'], dataset['h_sign'], dataset['h_change'], dataset['h_diff'], dataset['l_sign'], dataset['l_change'], dataset['l_diff'], dataset['marketcap'])
self.__cur.execute(sqlcommand)
insert_count += 1
self.log(Logger.INFO, "{} row inserted".format(insert_count))
self.__conn.commit()
self.sendMessage(callback, XADatabaseDay.MSG_DATABASE_UPDATED, None, param)
def __lastData(self, stock):
sqlcommand = "SELECT * FROM t1305_{} ORDER BY date DESC LIMIT 1".format(stock)
self.__cur.execute(sqlcommand)
result = self.__cur.fetchone()
if not result:
return None
return XADataset('t1305OutBlock1', result)
def data(self, stock, date):
date_string = datetime.datetime.strftime(date, "%Y%m%d")
sqlcommand = "SELECT * FROM t1305_{} WHERE date LIKE {}".format(stock, date_string)
self.__cur.execute(sqlcommand)
result = self.__cur.fetchone()
if not result:
return None
return XADataset('t1305OutBlock1', result)
def initFetch(self, stock, start):
self.log(Logger.INFO, 'XADatabaseDay:initFetch called')
date_string = datetime.datetime.strftime(start, "%Y%m%d")
sqlcommand = "SELECT * FROM t1305_{} WHERE date > {} ORDER BY date ASC".format(stock, date_string)
return self.__cur.execute(sqlcommand)
def fetch(self):
self.log(Logger.INFO, 'XADatabaseDay:fetch called')
fetched = self.__cur.fetchone()
if not fetched:
return None
return XADataset('t1305OutBlock1', fetched)
class XADataFeederBase(XARunnable):
def __init__(self):
super(XADataFeederBase, self).__init__()
def startFeed(self):
raise NotImplementedError
def nextFeed(self, callback, param):
raise NotImplementedError
class XADataFeederDay(XADataFeederBase):
MSG_DATA_FED = 'MSG_DATA_FED'
MSG_DATA_FED_END = 'MSG_DATA_FED_END'
def __init__(self, stock, start, end):
super(XADataFeederDay, self).__init__()
self.__stock = stock
self.__start = start
self.__end = end
self.__current = None
self.__database = None
self.__server = None
self.__databaseUpdated = False
def startFeed(self):
self.log(Logger.INFO, "XADataFeederDay:startFeed called")
self.__current = self.__start
self.__database = XADatabaseDay([self.__stock])
self.__server = XADataRetrievalDay()
self.__database.updateDatabase(self, None)
self.__databaseUpdated = False
return self
def nextFeed(self, callback, param):
self.log(Logger.INFO, "XADataFeederDay:nextFeed called")
if not self.__databaseUpdated:
self.sleep(1, (callback, param))
return
if self.__current > self.__end:
self.sendMessage(callback, XADataFeederDay.MSG_DATA_FED_END, False, param)
return
dataset = self.__database.fetch()
if dataset:
date = datetime.datetime.strptime(dataset['date'], "%Y%m%d").date()
self.__current = date + datetime.timedelta(days=1)
if date > self.__end:
self.sendMessage(callback, XADataFeederDay.MSG_DATA_FED_END, False, param)
else:
self.sendMessage(callback, XADataFeederDay.MSG_DATA_FED, dataset, param)
else:
days_to_request = int((datetime.date.today() - self.__current).days * (XADatabaseDay.WEEKDAYS_IN_WEEK / XADatabaseDay.DAYS_IN_WEEK) + XADatabaseDay.DAYS_IN_WEEK)
success = self.__server.retrieve(self.__stock, days_to_request, self, (callback, param))
if not success:
self.log(Logger.ERROR, 'Data feed error')
self.sendMessage(callback, XADataFeederDay.MSG_DATA_FED_END, True, param)
return
def onMessage(self, message, outparam, inparam, sender):
if message == XADatabaseDay.MSG_DATABASE_UPDATED:
self.__databaseUpdated = True
self.__database.initFetch(self.__stock, self.__start)
elif message == XADataRetrievalDay.MSG_DATA_RETRIEVED:
datasets = outparam
(callback, param) = inparam
dataset_found = None
for dataset in reversed(datasets):
date = datetime.datetime.strptime(dataset['date'], "%Y%m%d").date()
if date >= self.__current:
dataset_found = dataset
break
if dataset_found:
date = datetime.datetime.strptime(dataset_found['date'], "%Y%m%d").date()
self.__current = date + datetime.timedelta(days=1)
if date > self.__end:
self.sendMessage(callback, XADataFeederDay.MSG_DATA_FED_END, False, param)
return
self.sendMessage(callback, XADataFeederDay.MSG_DATA_FED, dataset_found, param)
return
self.log(Logger.INFO, "Data is not available. Waiting some time and will try it later. Sleeping.")
self.sleep(3600, (callback, param))
elif message == XARunnable.MSG_TIMER:
(callback, param) = inparam
self.nextFeed(callback, param)
class XAStrategyBase(XARunnable):
def __init__(self, feeder):
super(XAStrategyBase, self).__init__()
self.__xasession = None
self.__feeder = feeder
def onLoggedOn(self):
raise NotImplementedError
def onLoggedOut(self):
raise NotImplementedError
def onDisconnected(self):
raise NotImplementedError
def onBar(self, dataset):
raise NotImplementedError
def onMessage(self, message, outparam, inparam, sender):
if message == XARunnable.MSG_STARTED:
success = self.__login()
if not success:
self.log(Logger.ERROR, "Login request was not made successfully.")
elif message == XARunnable.MSG_PAUSED:
pass
elif message == XARunnable.MSG_STOPPED:
pass
elif message == XASessionEvents.MSG_LOGGED_ON:
success = outparam
if not success:
self.log(Logger.ERROR, "Login was not successful. Try it again.")
self.__login()
return
self.__feeder.startFeed()
self.__feeder.nextFeed(self, None)
self.onLoggedOn()
elif message == XASessionEvents.MSG_LOGGED_OUT:
self.onLoggedOut()
elif message == XASessionEvents.MSG_DISCONNECTED:
self.onDisconnected()
elif message == XADataFeederDay.MSG_DATA_FED:
dataset = outparam
self.onBar(dataset)
self.__feeder.nextFeed(self, None)
elif message == XADataFeederDay.MSG_DATA_FED_END:
error = outparam
if error:
raise AssertionError
self.onBar(None)
return
def __login(self):
server_addr_demo = "demo.ebestsec.co.kr"
server_addr_real = "hts.ebestsec.co.kr"
server_port = 20001
server_type = 0
try:
cipher = open('ud', 'rb')
encoded = cipher.read()
key = getpass.getpass("Enter decryption key: ")
decoded = simple_decode(key, encoded.decode('utf-8'))
(user_id, user_ps, user_pw) = decoded.split('\t')
except IOError:
(user_id, user_ps, user_pw) = get_login_information()
self.__xasession = win32com.client.DispatchWithEvents("XA_Session.XASession", XASessionEvents)
self.__xasession.postInitialize(self)
success = self.__xasession.ConnectServer(server_addr_real, server_port)
if not success:
errorCode = self.__xasession.GetLastError()
errorDesc = self.__xasession.GetErrorMessage(errorCode)
logger.error("Error {}: {}".format(errorCode, errorDesc))
return False
success = self.__xasession.Login(user_id, user_ps, user_pw, server_type, 0)
if not success:
errorCode = self.__xasession.GetLastError()
errorDesc = self.__xasession.GetErrorMessage(errorCode)
logger.error("Error {}: {}".format(errorCode, errorDesc))
return False
return True
class MyStrategy(XAStrategyBase):
def __init__(self, feeder):
super(MyStrategy, self).__init__(feeder)
def onLoggedOn(self):
print('Logged on')
def onLoggedOut(self):
print('Logged out')
def onDisconnected(self):
print('Disconnected')
def onBar(self, dataset):
if not dataset:
print("End of data")
return
print("{} - open({:8}), high({:8}), low({:8}), close({:8}), diff({:3.2f})".format(dataset['date'], dataset['open'], dataset['high'], dataset['low'], dataset['close'], dataset['diff']))
return
def main():
setup_logger()
(start_date, end_date) = preprocess_options()
sched = XAScheduler()
feeder = XADataFeederDay("000150", start_date, end_date)
strategy = MyStrategy(feeder)
sched.registerRunnable(strategy)
sched.run()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os.path import join as join_path
from fuel_plugin_builder.validators.schemas import SchemaV5
from fuel_plugin_builder.validators import ValidatorV4
class ValidatorV5(ValidatorV4):
schema = SchemaV5()
def __init__(self, *args, **kwargs):
super(ValidatorV5, self).__init__(*args, **kwargs)
self.bond_config_path = join_path(self.plugin_path, 'bond_config.yaml')
self.nic_config_path = join_path(self.plugin_path, 'nic_config.yaml')
self.node_config_path = join_path(self.plugin_path, 'node_config.yaml')
@property
def basic_version(self):
return '9.0'
def check_schemas(self):
super(ValidatorV5, self).check_schemas()
self.check_node_attributes_schema()
self.check_interface_attributes_schema(self.bond_config_path)
self.check_interface_attributes_schema(self.nic_config_path)
def check_node_attributes_schema(self):
self.validate_file_by_schema(self.schema.node_attributes_schema,
self.node_config_path,
allow_not_exists=True)
def check_interface_attributes_schema(self, file_path):
self.validate_file_by_schema(self.schema.node_nic_attributes_schema,
file_path,
allow_not_exists=True)
|
# Generated by Django 3.0.5 on 2020-05-21 14:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('messdiener', '0010_auto_20200415_1445'),
]
operations = [
migrations.CreateModel(
name='Plan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dateFrom', models.DateField()),
('dateTo', models.DateField()),
],
),
migrations.AddField(
model_name='mass',
name='plan',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='masses', to='messdiener.Plan'),
),
]
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
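# Each row i of `inds` encodes output component i of the Hamilton product:
# abs(value) selects which component of q2 multiplies the matching component
# of q1, and the sign of the value gives the sign of that term. 0.01 is added
# before np.sign so that the entry 0 keeps a positive sign.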
inds = torch.LongTensor([0, -1, -2, -3, 1, 0, 3, -2, 2, -3, 0, 1, 3, 2, -1, 0]).view(4, 4)
def hamilton_product(q1, q2):
q_size = q1.size()
# q1 = q1.view(-1, 4)
# q2 = q2.view(-1, 4)
q1_q2_prods = []
for i in range(4):
q2_permute_0 = q2[:, :, np.abs(inds[i][0])]
q2_permute_0 = q2_permute_0 * np.sign(inds[i][0] + 0.01)
q2_permute_1 = q2[:, :, np.abs(inds[i][1])]
q2_permute_1 = q2_permute_1 * np.sign(inds[i][1] + 0.01)
q2_permute_2 = q2[:, :, np.abs(inds[i][2])]
q2_permute_2 = q2_permute_2 * np.sign(inds[i][2] + 0.01)
q2_permute_3 = q2[:, :, np.abs(inds[i][3])]
q2_permute_3 = q2_permute_3 * np.sign(inds[i][3] + 0.01)
q2_permute = torch.stack([q2_permute_0, q2_permute_1, q2_permute_2, q2_permute_3], dim=2)
q1q2_v1 = torch.sum(q1 * q2_permute, dim=2, keepdim=True)
q1_q2_prods.append(q1q2_v1)
# print(q1_q2_prods[0].shape)
q_ham = torch.cat(q1_q2_prods, dim=2)
# q_ham = q_ham.view(q_size)
return q_ham
def quat_conjugate(quat):
# quat = quat.view(-1, 4)
q0 = quat[:, :, 0]
q1 = -1 * quat[:, :, 1]
q2 = -1 * quat[:, :, 2]
q3 = -1 * quat[:, :, 3]
q_conj = torch.stack([q0, q1, q2, q3], dim=2)
return q_conj
def quat_rot_module(points, quats):
quatConjugate = quat_conjugate(quats)
mult = hamilton_product(quats, points)
mult = hamilton_product(mult, quatConjugate)
return mult[:, :, 1:4]
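# --- Hedged usage sketch (not part of the original module) ---
# Rotate a batch of 3D points by 90 degrees about the z-axis using the
# quaternion helpers above. Points are padded with a zero scalar part so
# they can be treated as pure quaternions; the values are illustrative only.
if __name__ == "__main__":
    angle = np.pi / 2
    # unit quaternion (w, x, y, z) for a rotation of `angle` about the z-axis
    quat = torch.tensor([[[np.cos(angle / 2), 0.0, 0.0, np.sin(angle / 2)]]], dtype=torch.float32)
    points = torch.tensor([[[1.0, 0.0, 0.0]]])                # shape (batch, n_points, 3)
    pure = torch.cat([torch.zeros(1, 1, 1), points], dim=2)   # prepend zero scalar part
    rotated = quat_rot_module(pure, quat)
    print(rotated)  # expected to be close to (0, 1, 0)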
|
import pandas as pd
def __read_excel_file(filepath: str) -> pd.DataFrame:
return pd.read_excel(filepath, index_col=None, header=None)
def __remove_first_n_rows_from_df(df: pd.DataFrame, num_rows: int) -> pd.DataFrame:
return df.iloc[num_rows:]
def __get_col(df: pd.DataFrame, colNum: int):
return df.iloc[:,colNum]
def __lower(series: pd.Series) -> pd.Series:
return series.str.lower()
def mark_attendance(rollcallfile: str, attendeefile: str, sessiondate: str):
rollCallDf: pd.DataFrame = __remove_first_n_rows_from_df(__read_excel_file(rollcallfile), 3)
attendeeDf: pd.DataFrame = __remove_first_n_rows_from_df(__read_excel_file(attendeefile), 5)
roll_num_from_roll_call = __lower(__get_col(rollCallDf, 2))
name_from_attendee_df = __lower(__get_col(attendeeDf, 1))
email_from_attendee_df = __lower(__get_col(attendeeDf, 2))
    # Mark present if the roll number appears either in the attendee name or the email
    found_in_name = roll_num_from_roll_call.apply(lambda s: name_from_attendee_df.str.contains(s).any())
    found_in_email = roll_num_from_roll_call.apply(lambda s: email_from_attendee_df.str.contains(s).any())
    rollCallDf[sessiondate] = found_in_name | found_in_email
rollCallDf.to_csv(rollcallfile+"_results")
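# --- Hedged usage sketch (not part of the original script) ---
# The file names below are illustrative. Both files are expected to be Excel
# sheets: roll numbers in the third column of the roll-call file (after three
# header rows) and attendee names/emails in the second and third columns of
# the attendance export (after five header rows). Results are written to
# "<rollcallfile>_results" as CSV.
if __name__ == "__main__":
    mark_attendance("roll_call.xlsx", "attendees.xlsx", "2020-05-21")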
|
import itertools
import random
import re
import gspread
import gspread.utils as utils
from .test import I18N_STR, GspreadTest
class WorksheetTest(GspreadTest):
"""Test for gspread.Worksheet."""
def setUp(self):
super().setUp()
self.spreadsheet = self.gc.open(self.get_temporary_spreadsheet_title())
# NOTE(msuozzo): Here, a new worksheet is created for each test.
# This was determined to be faster than reusing a single sheet and
# having to clear its contents after each test.
# Basically: Time(add_wks + del_wks) < Time(range + update_cells)
self.sheet = self.spreadsheet.add_worksheet("wksht_test", 20, 20)
def tearDown(self):
self.spreadsheet.del_worksheet(self.sheet)
super().tearDown()
def test_acell(self):
cell = self.sheet.acell("A1")
self.assertTrue(isinstance(cell, gspread.cell.Cell))
def test_cell(self):
cell = self.sheet.cell(1, 1)
self.assertTrue(isinstance(cell, gspread.cell.Cell))
def test_range(self):
cell_range1 = self.sheet.range("A1:A5")
cell_range2 = self.sheet.range(1, 1, 5, 1)
for c1, c2 in zip(cell_range1, cell_range2):
self.assertTrue(isinstance(c1, gspread.cell.Cell))
self.assertTrue(isinstance(c2, gspread.cell.Cell))
self.assertTrue(c1.col == c2.col)
self.assertTrue(c1.row == c2.row)
self.assertTrue(c1.value == c2.value)
def test_update_acell(self):
sg = self._sequence_generator()
value = next(sg)
self.sheet.update_acell("A2", value)
self.assertEqual(self.sheet.acell("A2").value, value)
def test_update_cell(self):
sg = self._sequence_generator()
value = next(sg)
self.sheet.update_cell(1, 2, value)
self.assertEqual(self.sheet.cell(1, 2).value, value)
self.sheet.update_cell(1, 2, 42)
self.assertEqual(self.sheet.cell(1, 2).value, "42")
self.sheet.update_cell(1, 2, "0042")
self.assertEqual(self.sheet.cell(1, 2).value, "42")
self.sheet.update_cell(1, 2, 42.01)
self.assertEqual(self.sheet.cell(1, 2).value, "42.01")
self.sheet.update_cell(1, 2, "Артур")
self.assertEqual(self.sheet.cell(1, 2).value, "Артур")
def test_update_cell_multiline(self):
sg = self._sequence_generator()
value = next(sg)
value = "{}\n{}".format(value, value)
self.sheet.update_cell(1, 2, value)
self.assertEqual(self.sheet.cell(1, 2).value, value)
def test_update_cell_unicode(self):
self.sheet.update_cell(1, 1, I18N_STR)
cell = self.sheet.cell(1, 1)
self.assertEqual(cell.value, I18N_STR)
def test_update_cells(self):
sg = self._sequence_generator()
list_len = 10
value_list = [next(sg) for i in range(list_len)]
# Test multiline
value_list[0] = "{}\n{}".format(value_list[0], value_list[0])
range_label = "A1:A%s" % list_len
cell_list = self.sheet.range(range_label)
for c, v in zip(cell_list, value_list):
c.value = v
self.sheet.update_cells(cell_list)
cell_list = self.sheet.range(range_label)
for c, v in zip(cell_list, value_list):
self.assertEqual(c.value, v)
def test_update_cells_unicode(self):
cell = self.sheet.cell(1, 1)
cell.value = I18N_STR
self.sheet.update_cells([cell])
cell = self.sheet.cell(1, 1)
self.assertEqual(cell.value, I18N_STR)
def test_update_cells_noncontiguous(self):
sg = self._sequence_generator()
num_rows = 6
num_cols = 4
rows = [[next(sg) for j in range(num_cols)] for i in range(num_rows)]
cell_list = self.sheet.range("A1:D6")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# Re-fetch cells
cell_list = self.sheet.range("A1:D6")
test_values = [c.value for c in cell_list]
top_left = cell_list[0]
bottom_right = cell_list[-1]
top_left.value = top_left_value = next(sg) + " top_left"
bottom_right.value = bottom_right_value = next(sg) + " bottom_right"
self.sheet.update_cells([top_left, bottom_right])
cell_list = self.sheet.range("A1:D6")
read_values = [c.value for c in cell_list]
test_values[0] = top_left_value
test_values[-1] = bottom_right_value
self.assertEqual(test_values, read_values)
def test_update_cell_objects(self):
test_values = ["cell row 1, col 2", "cell row 2 col 1"]
cell_list = [
gspread.cell.Cell(1, 2, test_values[0]),
gspread.cell.Cell(2, 1, test_values[1]),
]
self.sheet.update_cells(cell_list)
# Re-fetch cells
cell_list = (self.sheet.cell(1, 2), self.sheet.cell(2, 1))
read_values = [c.value for c in cell_list]
self.assertEqual(test_values, read_values)
def test_resize(self):
add_num = 10
new_rows = self.sheet.row_count + add_num
def get_grid_props():
sheets = self.sheet.spreadsheet.fetch_sheet_metadata()["sheets"]
return utils.finditem(
lambda x: x["properties"]["sheetId"] == self.sheet.id, sheets
)["properties"]["gridProperties"]
self.sheet.add_rows(add_num)
grid_props = get_grid_props()
self.assertEqual(grid_props["rowCount"], new_rows)
new_cols = self.sheet.col_count + add_num
self.sheet.add_cols(add_num)
grid_props = get_grid_props()
self.assertEqual(grid_props["columnCount"], new_cols)
new_rows -= add_num
new_cols -= add_num
self.sheet.resize(new_rows, new_cols)
grid_props = get_grid_props()
self.assertEqual(grid_props["rowCount"], new_rows)
self.assertEqual(grid_props["columnCount"], new_cols)
def test_sort(self):
rows = [
["Apple", "2012", "4"],
["Banana", "2013", "3"],
["Canada", "2007", "1"],
["Dinosaur", "2013", "6"],
["Elephant", "2019", "2"],
["Fox", "2077", "5"],
]
self.sheet.resize(6, 3)
cell_list = self.sheet.range("A1:C6")
for c, v in zip(cell_list, itertools.chain(*rows)):
c.value = v
self.sheet.update_cells(cell_list)
specs = [
(3, "asc"),
]
self.sheet.sort(*specs, range="A1:C6")
rows = sorted(rows, key=lambda x: int(x[2]), reverse=False)
self.assertEqual(self.sheet.get_all_values(), rows)
specs = [
(1, "des"),
]
self.sheet.sort(*specs, range="A1:C6")
rows = sorted(rows, key=lambda x: x[0], reverse=True)
self.assertEqual(self.sheet.get_all_values(), rows)
specs = [
(2, "asc"),
(3, "asc"),
]
self.sheet.sort(*specs, range="A1:C6")
rows = sorted(rows, key=lambda x: (x[1], int(x[2])), reverse=False)
self.assertEqual(self.sheet.get_all_values(), rows)
specs = [
(3, "asc"),
]
self.sheet.sort(*specs)
rows = sorted(rows, key=lambda x: int(x[2]), reverse=False)
self.assertEqual(self.sheet.get_all_values(), rows)
specs = [
(3, "des"),
]
self.sheet._properties["gridProperties"]["frozenRowCount"] = 1
self.sheet.sort(*specs)
rows = [rows[0]] + sorted(rows[1:], key=lambda x: int(x[2]), reverse=True)
self.assertEqual(self.sheet.get_all_values(), rows)
def test_freeze(self):
freeze_cols = 1
freeze_rows = 2
def get_grid_props():
sheets = self.sheet.spreadsheet.fetch_sheet_metadata()["sheets"]
return utils.finditem(
lambda x: x["properties"]["sheetId"] == self.sheet.id, sheets
)["properties"]["gridProperties"]
self.sheet.freeze(freeze_rows)
grid_props = get_grid_props()
self.assertEqual(grid_props["frozenRowCount"], freeze_rows)
self.sheet.freeze(cols=freeze_cols)
grid_props = get_grid_props()
self.assertEqual(grid_props["frozenColumnCount"], freeze_cols)
self.sheet.freeze(0, 0)
grid_props = get_grid_props()
self.assertTrue("frozenRowCount" not in grid_props)
self.assertTrue("frozenColumnCount" not in grid_props)
def test_basic_filters(self):
def get_sheet():
sheets = self.sheet.spreadsheet.fetch_sheet_metadata()["sheets"]
return utils.finditem(
lambda x: x["properties"]["sheetId"] == self.sheet.id, sheets
)
def get_basic_filter_range():
return get_sheet()["basicFilter"]["range"]
self.sheet.resize(20, 20)
self.sheet.set_basic_filter()
filter_range = get_basic_filter_range()
self.assertEqual(filter_range["startRowIndex"], 0)
self.assertEqual(filter_range["startColumnIndex"], 0)
self.assertEqual(filter_range["endRowIndex"], 20)
self.assertEqual(filter_range["endColumnIndex"], 20)
self.sheet.set_basic_filter("B1:C2")
filter_range = get_basic_filter_range()
self.assertEqual(filter_range["startRowIndex"], 0)
self.assertEqual(filter_range["startColumnIndex"], 1)
self.assertEqual(filter_range["endRowIndex"], 2)
self.assertEqual(filter_range["endColumnIndex"], 3)
self.sheet.set_basic_filter(1, 2, 2, 3)
filter_range = get_basic_filter_range()
self.assertEqual(filter_range["startRowIndex"], 0)
self.assertEqual(filter_range["startColumnIndex"], 1)
self.assertEqual(filter_range["endRowIndex"], 2)
self.assertEqual(filter_range["endColumnIndex"], 3)
self.sheet.clear_basic_filter()
self.assertTrue("basicFilter" not in get_sheet())
def test_find(self):
sg = self._sequence_generator()
value = next(sg)
self.sheet.update_cell(2, 10, value)
self.sheet.update_cell(2, 11, value)
cell = self.sheet.find(value)
self.assertEqual(cell.value, value)
value2 = next(sg)
value = "{}o_O{}".format(value, value2)
self.sheet.update_cell(2, 11, value)
o_O_re = re.compile("[a-z]_[A-Z]%s" % value2)
cell = self.sheet.find(o_O_re)
self.assertEqual(cell.value, value)
not_found = self.sheet.find("does not exists")
self.assertIs(
not_found, None, "find should return 'None' when value is not found"
)
def test_findall(self):
list_len = 10
range_label = "A1:A%s" % list_len
cell_list = self.sheet.range(range_label)
sg = self._sequence_generator()
value = next(sg)
for c in cell_list:
c.value = value
self.sheet.update_cells(cell_list)
result_list = self.sheet.findall(value)
self.assertEqual(list_len, len(result_list))
for c in result_list:
self.assertEqual(c.value, value)
cell_list = self.sheet.range(range_label)
value = next(sg)
for c in cell_list:
char = chr(random.randrange(ord("a"), ord("z")))
c.value = "{}{}_{}{}".format(c.value, char, char.upper(), value)
self.sheet.update_cells(cell_list)
o_O_re = re.compile("[a-z]_[A-Z]%s" % value)
result_list = self.sheet.findall(o_O_re)
self.assertEqual(list_len, len(result_list))
def test_get_all_values(self):
self.sheet.resize(4, 4)
        # put in new values, one list per row
rows = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "D4"],
]
cell_list = self.sheet.range("A1:D1")
cell_list.extend(self.sheet.range("A2:D2"))
cell_list.extend(self.sheet.range("A3:D3"))
cell_list.extend(self.sheet.range("A4:D4"))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# read values with get_all_values, get a list of lists
read_data = self.sheet.get_all_values()
# values should match with original lists
self.assertEqual(read_data, rows)
def test_get_all_values_title_is_a1_notation(self):
self.sheet.resize(4, 4)
        # rename the sheet so its title looks like an A1 cell reference
self.sheet.update_title("D3")
        # put in new values, one list per row
rows = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "d4"],
]
cell_list = self.sheet.range("A1:D1")
cell_list.extend(self.sheet.range("A2:D2"))
cell_list.extend(self.sheet.range("A3:D3"))
cell_list.extend(self.sheet.range("A4:D4"))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# read values with get_all_values, get a list of lists
read_data = self.sheet.get_all_values()
# values should match with original lists
self.assertEqual(read_data, rows)
def test_get_all_records(self):
self.sheet.resize(4, 4)
        # put in new values, one list per row
rows = [
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4],
]
cell_list = self.sheet.range("A1:D4")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# first, read empty strings to empty strings
read_records = self.sheet.get_all_records()
d0 = dict(zip(rows[0], rows[1]))
d1 = dict(zip(rows[0], rows[2]))
d2 = dict(zip(rows[0], rows[3]))
self.assertEqual(read_records[0], d0)
self.assertEqual(read_records[1], d1)
self.assertEqual(read_records[2], d2)
# then, read empty strings to zeros
read_records = self.sheet.get_all_records(empty2zero=True)
d1 = dict(zip(rows[0], (0, 0, 0, 0)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to None
read_records = self.sheet.get_all_records(default_blank=None)
d1 = dict(zip(rows[0], (None, None, None, None)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to something else
read_records = self.sheet.get_all_records(default_blank="foo")
d1 = dict(zip(rows[0], ("foo", "foo", "foo", "foo")))
self.assertEqual(read_records[1], d1)
def test_get_all_records_different_header(self):
self.sheet.resize(6, 4)
        # put in new values, one list per row
rows = [
["", "", "", ""],
["", "", "", ""],
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4],
]
cell_list = self.sheet.range("A1:D6")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# first, read empty strings to empty strings
read_records = self.sheet.get_all_records(head=3)
d0 = dict(zip(rows[2], rows[3]))
d1 = dict(zip(rows[2], rows[4]))
d2 = dict(zip(rows[2], rows[5]))
self.assertEqual(read_records[0], d0)
self.assertEqual(read_records[1], d1)
self.assertEqual(read_records[2], d2)
# then, read empty strings to zeros
read_records = self.sheet.get_all_records(empty2zero=True, head=3)
d1 = dict(zip(rows[2], (0, 0, 0, 0)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to None
read_records = self.sheet.get_all_records(default_blank=None, head=3)
d1 = dict(zip(rows[2], (None, None, None, None)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to something else
read_records = self.sheet.get_all_records(default_blank="foo", head=3)
d1 = dict(zip(rows[2], ("foo", "foo", "foo", "foo")))
self.assertEqual(read_records[1], d1)
def test_get_all_records_value_render_options(self):
self.sheet.resize(2, 4)
        # put in new values, one list per row
rows = [
["=4/2", "2020-01-01", "string", 53],
["=3/2", 0.12, "1999-01-02", ""],
]
cell_list = self.sheet.range("A1:D2")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list, value_input_option="USER_ENTERED")
# default, formatted read
read_records = self.sheet.get_all_records()
expected_keys = ["2", "2020-01-01", "string", "53"]
expected_values = [3 / 2, 0.12, "1999-01-02", ""]
d0 = dict(zip(expected_keys, expected_values))
self.assertEqual(read_records[0], d0)
# unformatted read
read_records = self.sheet.get_all_records(
value_render_option="UNFORMATTED_VALUE"
)
expected_keys = [2, 43831, "string", 53]
expected_values = [3 / 2, 0.12, 36162, ""]
d0 = dict(zip(expected_keys, expected_values))
self.assertEqual(read_records[0], d0)
# formula read
read_records = self.sheet.get_all_records(value_render_option="FORMULA")
expected_keys = ["=4/2", 43831, "string", 53]
expected_values = ["=3/2", 0.12, 36162, ""]
d0 = dict(zip(expected_keys, expected_values))
self.assertEqual(read_records[0], d0)
def test_get_all_records_numericise_unformatted(self):
self.sheet.resize(2, 4)
        # put in new values, one list per row
rows = [
["A", "", "C", "3_1_0"],
["=3/2", 0.12, "", "3_2_1"],
]
cell_list = self.sheet.range("A1:D2")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list, value_input_option="USER_ENTERED")
read_records = self.sheet.get_all_records(
default_blank="empty",
allow_underscores_in_numeric_literals=True,
value_render_option="UNFORMATTED_VALUE",
)
expected_values = [3 / 2, 0.12, "empty", 321]
d0 = dict(zip(rows[0], expected_values))
self.assertEqual(read_records[0], d0)
def test_append_row(self):
sg = self._sequence_generator()
value_list = [next(sg) for i in range(10)]
self.sheet.append_row(value_list)
read_values = self.sheet.row_values(1)
self.assertEqual(value_list, read_values)
def test_append_row_with_empty_value(self):
sg = self._sequence_generator()
value_list = [next(sg) for i in range(3)]
value_list[1] = "" # Skip one cell to create two "tables" as in #537
self.sheet.append_row(value_list)
# Append it again
self.sheet.append_row(value_list)
# This should produce a shift in rows as in #537
shifted_value_list = ["", ""] + value_list
read_values = self.sheet.row_values(2)
self.assertEqual(shifted_value_list, read_values)
def test_append_row_with_empty_value_and_table_range(self):
sg = self._sequence_generator()
value_list = [next(sg) for i in range(3)]
value_list[1] = "" # Skip one cell to create two "tables" as in #537
self.sheet.append_row(value_list)
# Append it again
self.sheet.append_row(value_list, table_range="A1")
# This should produce no shift in rows
# contrary to test_append_row_with_empty_value
read_values = self.sheet.row_values(2)
self.assertEqual(value_list, read_values)
def test_insert_row(self):
sg = self._sequence_generator()
num_rows = 6
num_cols = 4
rows = [[next(sg) for j in range(num_cols)] for i in range(num_rows)]
cell_list = self.sheet.range("A1:D6")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
new_row_values = [next(sg) for i in range(num_cols + 4)]
self.sheet.insert_row(new_row_values, 2)
read_values = self.sheet.row_values(2)
self.assertEqual(new_row_values, read_values)
formula = "=1+1"
self.sheet.update_acell("B2", formula)
values = [next(sg) for i in range(num_cols + 4)]
self.sheet.insert_row(values, 1)
b3 = self.sheet.acell("B3", value_render_option="FORMULA")
self.assertEqual(b3.value, formula)
def test_delete_row(self):
sg = self._sequence_generator()
for i in range(5):
value_list = [next(sg) for i in range(10)]
self.sheet.append_row(value_list)
prev_row = self.sheet.row_values(1)
next_row = self.sheet.row_values(3)
self.sheet.delete_row(2)
self.assertEqual(self.sheet.row_values(1), prev_row)
self.assertEqual(self.sheet.row_values(2), next_row)
def test_clear(self):
rows = [
["", "", "", ""],
["", "", "", ""],
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4],
]
cell_list = self.sheet.range("A1:D6")
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
self.sheet.clear()
self.assertEqual(self.sheet.get_all_values(), [])
def test_update_and_get(self):
values = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "D4"],
]
self.sheet.update("A1", values)
read_data = self.sheet.get("A1:D4")
self.assertEqual(
read_data, [["A1", "B1", "", "D1"], ["", "b2"], [], ["A4", "B4", "", "D4"]]
)
def test_batch_get(self):
values = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "D4"],
]
self.sheet.update("A1", values)
value_ranges = self.sheet.batch_get(["A1:B1", "B4:D4"])
self.assertEqual(value_ranges, [[["A1", "B1"]], [["B4", "", "D4"]]])
self.assertEqual(value_ranges[0].range, "wksht_test!A1:B1")
self.assertEqual(value_ranges[1].range, "wksht_test!B4:D4")
self.assertEqual(value_ranges[0].first(), "A1")
def test_batch_update(self):
self.sheet.batch_update(
[
{
"range": "A1:D1",
"values": [["A1", "B1", "", "D1"]],
},
{
"range": "A4:D4",
"values": [["A4", "B4", "", "D4"]],
},
]
)
data = self.sheet.get("A1:D4")
self.assertEqual(data, [["A1", "B1", "", "D1"], [], [], ["A4", "B4", "", "D4"]])
def test_format(self):
cell_format = {
"backgroundColor": {"green": 1, "blue": 1},
"horizontalAlignment": "CENTER",
"textFormat": {
"foregroundColor": {
"red": 1,
"green": 1,
},
"fontSize": 12,
"bold": True,
},
}
self.maxDiff = None
self.sheet.format("A2:B2", cell_format)
data = self.spreadsheet._spreadsheets_get(
{
"includeGridData": False,
"ranges": ["wksht_test!A2"],
"fields": "sheets.data.rowData.values.userEnteredFormat",
}
)
uef = data["sheets"][0]["data"][0]["rowData"][0]["values"][0][
"userEnteredFormat"
]
del uef["backgroundColorStyle"]
del uef["textFormat"]["foregroundColorStyle"]
self.assertEqual(uef, cell_format)
def test_reorder_worksheets(self):
w = self.spreadsheet.worksheets()
w.reverse()
self.spreadsheet.reorder_worksheets(w)
self.assertEqual(
[i.id for i in w], [i.id for i in self.spreadsheet.worksheets()]
)
def test_worksheet_update_index(self):
w = self.spreadsheet.worksheets()
last_sheet = w[-1]
last_sheet.update_index(0)
w = self.spreadsheet.worksheets()
self.assertEqual(w[0].id, last_sheet.id)
def test_worksheet_notes(self):
w = self.spreadsheet.worksheets()[0]
        # will raise an exception if anything goes wrong
self.assertEqual(w.get_note("A1"), "")
test_note_string = "This is a test note"
w.insert_note("A1", test_note_string)
self.assertEqual(w.get_note("A1"), test_note_string)
w.clear_note("A1")
self.assertEqual(w.get_note("A1"), "")
        # each invalid note body should raise TypeError on its own
        with self.assertRaises(TypeError):
            w.insert_note("A1", 42)
        with self.assertRaises(TypeError):
            w.insert_note("A1", ["asddf", "asdfqwebn"])
        with self.assertRaises(TypeError):
            w.insert_note("A1", w)
def test_batch_clear(self):
w = self.spreadsheet.sheet1
# make sure cells are empty
self.assertListEqual(w.get_values("A1:B1"), [])
self.assertListEqual(w.get_values("C2:E2"), [])
# fill the cells
w.update("A1:B1", [["12345", "ThisIsText"]])
w.update("C2:E2", [["5678", "Second", "Text"]])
# confirm the cells are not empty
self.assertNotEqual(w.get_values("A1:B1"), [])
self.assertNotEqual(w.get_values("C2:E2"), [])
# empty both cell range at once
w.batch_clear(["A1:B1", "C2:E2"])
        # confirm the cells are empty again
self.assertListEqual(w.get_values("A1:B1"), [])
self.assertListEqual(w.get_values("C2:E2"), [])
|
from sys import version_info
if version_info[0] == 2:
from urllib import quote, quote_plus, unquote, urlencode
from urlparse import parse_qs, parse_qsl, urljoin, urlparse, urlunparse, ParseResult
else:
from urllib.parse import (
parse_qs,
parse_qsl,
quote,
quote_plus,
unquote,
urlencode,
urljoin,
urlparse,
urlunparse,
ParseResult
)
__export__ = (parse_qs,
parse_qsl,
quote,
quote_plus,
unquote,
urlencode,
urljoin,
urlparse,
urlunparse,
ParseResult)
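# Usage sketch (not part of the original shim): a quick self-check that the
# re-exported names behave identically on Python 2 and 3.
if __name__ == "__main__":
    parts = urlparse("https://example.com/search?q=hello&lang=en")
    assert parts.netloc == "example.com"
    assert parse_qs(parts.query)["q"] == ["hello"]
    print(urlencode({"q": "hello world"}))  # prints: q=hello+world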
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from app_metrics.models import (
Metric,
MetricSet,
MetricItem,
MetricDay,
MetricWeek,
MetricMonth,
MetricYear,
)
class MetricAdmin(admin.ModelAdmin):
list_display = ("__str__", "slug", "num")
list_filter = ["metric__name"]
def slug(self, obj):
return obj.metric.slug
admin.site.register(Metric)
admin.site.register(MetricSet)
admin.site.register(MetricDay, MetricAdmin)
admin.site.register(MetricWeek, MetricAdmin)
admin.site.register(MetricMonth, MetricAdmin)
admin.site.register(MetricYear, MetricAdmin)
admin.site.register(MetricItem, MetricAdmin)
|
from sklearn.datasets import make_classification
from featureband.feature_band import FeatureBand
from featureband.util.metrics_util import load_clf
FINAL_CLASSIFIER = "knn" # ["knn", "logistic", "decision_tree"]
selected_num = 10
x, y = make_classification(n_samples=10000, n_features=20, n_informative=10,
n_redundant=10, n_classes=2, random_state=42)
clf = load_clf(FINAL_CLASSIFIER)
fb = FeatureBand(r0=80, n0=1000, clf=clf, k=selected_num, population_size=5, max_iter=20)
fb.fit(x, y, "accuracy")
x_selected = fb.transform(x)
print("clf:", FINAL_CLASSIFIER, "selected_num:", selected_num)
|
default_app_config = 'tg_utils.health_check.checks.phantomjs.apps.HealthCheckConfig'
|
from docx import Document
from typing import List
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
class DocxIngestor(IngestorInterface):
"""
A class that inherits from IngestorInterface and creates a list of quotes.
...
Attributes
----------
path : str
file path
Methods
-------
parse(path):
Receives the quote file path and returns list of QuoteModel objects that contains the quote body and author.
"""
file_types = ['docx']
@classmethod
def parse(cls, path: str) -> List[QuoteModel]:
"""
Parses an accepted file and returns a list of QuoteModel objects that contains the quote body and author.
Parameters
----------
path : str
file path
"""
if not cls.can_ingest(path):
raise Exception(f'Cannot ingest {path}')
quotes = []
doc = Document(path)
        for para in doc.paragraphs:
            if para.text != "":
                parse = para.text.split(' - ')
                if len(parse) < 2:
                    # Skip paragraphs that do not follow the "<body> - <author>" format
                    continue
                new_quote = QuoteModel(parse[0], parse[1])
                quotes.append(new_quote)
return quotes
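# Usage sketch (not part of the original module): "quotes.docx" is a placeholder
# path to a document whose non-empty paragraphs follow the "<body> - <author>" format.
if __name__ == "__main__":
    for quote in DocxIngestor.parse("quotes.docx"):
        print(quote)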
|
retention_time_pipeline_parameters = {
"model_params": {"seq_length": 30},
"data_params": {
"seq_length": 30,
},
"trained_model_path": "../pretrained_models/retention_time/example_rtmodel/",
"trained_model_zipfile_name": "rtmodel.zip",
"trained_model_stats": [0.0, 1.0],
}
retention_time_pipeline_parameters.update(
    {
        # Note: str.strip("..") strips leading/trailing "." characters (it is not a
        # prefix removal), turning "../pretrained_models/.../" into "/pretrained_models/.../".
        "trained_model_url": (
            "https://raw.githubusercontent.com/wilhelm-lab/dlomix/develop"
            + retention_time_pipeline_parameters["trained_model_path"].strip("..")
            + retention_time_pipeline_parameters["trained_model_zipfile_name"]
        ),
    }
)
ALPHABET_UNMOD = {
"A": 1,
"C": 2,
"D": 3,
"E": 4,
"F": 5,
"G": 6,
"H": 7,
"I": 8,
"K": 9,
"L": 10,
"M": 11,
"N": 12,
"P": 13,
"Q": 14,
"R": 15,
"S": 16,
"T": 17,
"V": 18,
"W": 19,
"Y": 20,
}
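# Sketch (not part of the original module): one way the integer alphabet above
# could be used to encode an unmodified peptide to the configured seq_length.
# `encode_unmodified_sequence` is a hypothetical helper name, not a dlomix API.
def encode_unmodified_sequence(sequence, alphabet=ALPHABET_UNMOD, seq_length=30):
    """Map amino-acid letters to integers, padding with 0 up to seq_length."""
    encoded = [alphabet.get(residue, 0) for residue in sequence[:seq_length]]
    return encoded + [0] * (seq_length - len(encoded))

# Example: encode_unmodified_sequence("ACDEF") -> [1, 2, 3, 4, 5, 0, 0, ..., 0]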
|
import pytest
from models.domain.resource_template import ResourceTemplate, ResourceType
from models.schemas.workspace_template import WorkspaceTemplateInCreate
@pytest.fixture
def input_workspace_template():
return WorkspaceTemplateInCreate(
name="my-tre-workspace",
version="0.0.1",
current=True,
json_schema={
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json",
"type": "object",
"title": "My Workspace Template Custom Parameters",
"description": "These parameters are specific to my workspace template",
"required": [],
"properties": {}
})
@pytest.fixture
def basic_resource_template(input_workspace_template):
return ResourceTemplate(
id="1234-5678",
name=input_workspace_template.name,
description=input_workspace_template.json_schema["description"],
        version=input_workspace_template.version,
resourceType=ResourceType.Workspace,
current=True,
required=input_workspace_template.json_schema["required"],
properties=input_workspace_template.json_schema["properties"]
)
|
from sqlalchemy import Column, Integer, func
from sqlalchemy.dialects.mysql import TIMESTAMP, VARCHAR, TEXT
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
created_at_default = func.current_timestamp()
updated_at_default = func.current_timestamp()
class UserMessage(Base):
__tablename__ = "user_message"
id = Column("row_id", Integer, primary_key=True, autoincrement=True)
source = Column("source", VARCHAR(128), nullable=False)
name = Column("name", VARCHAR(128), nullable=True)
address = Column("address", VARCHAR(128), nullable=True)
email = Column("email", VARCHAR(128), nullable=True)
phone_no = Column("phone_no", VARCHAR(64), nullable=True)
message_type = Column("message_type", VARCHAR(64), nullable=False)
subject = Column("subject", VARCHAR(512), nullable=True)
message = Column("message", TEXT, nullable=False)
created_at = Column("created_at", TIMESTAMP, server_default=created_at_default, nullable=False)
updated_at = Column("updated_at", TIMESTAMP, server_default=updated_at_default, nullable=False)
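# Sketch (not part of the original module): print the MySQL DDL generated for the
# table above without connecting to a database, as a quick sanity check.
if __name__ == "__main__":
    from sqlalchemy.dialects import mysql
    from sqlalchemy.schema import CreateTable

    print(CreateTable(UserMessage.__table__).compile(dialect=mysql.dialect()))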
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers common to multiple models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensor2tensor import expert_utils as eu
import tensorflow as tf
from tensorflow.python.framework import function
# This is a global setting. When turned off, no @function.Defun is used.
allow_defun = True
def saturating_sigmoid(x):
"""Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
with tf.name_scope("saturating_sigmoid", [x]):
y = tf.sigmoid(x)
return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1))
def hard_sigmoid(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
x_shifted = 0.5 * x + 0.5
return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost
def hard_tanh(x, saturation_limit=0.9):
saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost
def inverse_exp_decay(max_step, min_value=0.01):
"""Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
inv_base = tf.exp(tf.log(min_value) / float(max_step))
step = tf.to_float(tf.contrib.framework.get_global_step())
return inv_base**tf.maximum(float(max_step) - step, 0.0)
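# Worked example (comment only): with min_value=0.01 and max_step=100,
# inv_base = exp(ln(0.01) / 100) ~= 0.955, so the returned scalar is
# 0.955**100 = 0.01 at step 0, grows by a factor of ~1/0.955 per step, and
# saturates at 0.955**0 = 1.0 once step >= max_step.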
def standardize_images(x):
"""Image standardization on batches (tf.image.per_image_standardization)."""
with tf.name_scope("standardize_images", [x]):
x = tf.to_float(x)
x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keep_dims=True)
x_variance = tf.reduce_mean(
tf.square(x - x_mean), axis=[1, 2, 3], keep_dims=True)
num_pixels = tf.to_float(tf.shape(x)[1] * tf.shape(x)[2] * 3)
x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
# TODO(lukaszkaiser): remove hack below, needed for greedy decoding for now.
if x.shape and len(x.shape) == 4 and x.shape[3] == 1:
x = tf.concat([x, x, x], axis=3) # Not used, just a dead tf.cond branch.
x.set_shape([None, None, None, 3])
return x
def image_augmentation(images, do_colors=False):
"""Image augmentation: cropping, flipping, and color transforms."""
images = tf.random_crop(images, [299, 299, 3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = tf.shape(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
# Preserve static shapes when available.
xshape_static = x.get_shape()
result.set_shape([xshape_static[0], None, xshape_static[3]])
return result
def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):
"""Embed x of type int64 into dense vectors, reducing to max 4 dimensions."""
with tf.variable_scope(
name, default_name="embedding", values=[x], reuse=reuse):
embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
# On the backwards pass, we want to convert the gradient from
# an indexed-slices to a regular tensor before sending it back to the
# parameter server. This avoids excess computation on the parameter server.
embedding_var = eu.ConvertGradientToTensor(embedding_var)
emb_x = tf.gather(embedding_var, x)
if multiplier != 1.0:
emb_x *= multiplier
shape, static_shape = tf.shape(emb_x), emb_x.shape.as_list()
if not static_shape or len(static_shape) < 5:
return emb_x
# If we had extra channel dimensions, assume it's 1, i.e. shape[3] == 1.
assert len(static_shape) == 5
return tf.reshape(emb_x, [shape[0], shape[1], shape[2], static_shape[4]])
def shift_left(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :]
return shifted_targets
def shift_left_3d(x, pad_value=None):
"""Shift the second dimension of x right by one."""
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]
return shifted_targets
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
"""Use a strided convolution to downsample x by 2, `nbr_steps` times.
We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
As detailed in http://distill.pub/2016/deconv-checkerboard/.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: number of halving downsample rounds to apply
output_filters: an int specifying the filter count for the convolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
`[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
if nbr_steps == 0:
out = conv(x, output_filters, (1, 1))
return out, [out]
hidden_layers = [x]
for i in xrange(nbr_steps):
hidden_layers.append(
conv(
hidden_layers[-1],
output_filters, (2, 2),
strides=2,
activation=tf.nn.relu,
name="conv" + str(i)))
return hidden_layers[-1], hidden_layers
def deconv_stride2_multistep(x,
nbr_steps,
output_filters,
name=None,
reuse=None):
"""Use a deconvolution to upsample x by 2**`nbr_steps`.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: an int specifying the number of doubling upsample rounds to
apply.
output_filters: an int specifying the filter count for the deconvolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or
`[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps),
output_filters]`
"""
with tf.variable_scope(
name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse):
def deconv1d(cur, i):
cur_shape = tf.shape(cur)
thicker = conv(
cur,
output_filters * 2, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv1d" + str(i))
return tf.reshape(thicker,
[cur_shape[0], cur_shape[1] * 2, 1, output_filters])
def deconv2d(cur, i):
thicker = conv(
cur,
output_filters * 4, (1, 1),
padding="SAME",
activation=tf.nn.relu,
name="deconv2d" + str(i))
return tf.depth_to_space(thicker, 2)
cur = x
for i in xrange(nbr_steps):
if cur.get_shape()[2] == 1:
cur = deconv1d(cur, i)
else:
cur = tf.cond(
tf.equal(tf.shape(cur)[2], 1),
lambda idx=i: deconv1d(cur, idx),
lambda idx=i: deconv2d(cur, idx))
return cur
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
"""Conditional conv_fn making kernel 1d or 2d depending on inputs shape."""
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4.")
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
# Add support for left padding.
if "padding" in kwargs and kwargs["padding"] == "LEFT":
dilation_rate = (1, 1)
if "dilation_rate" in kwargs:
dilation_rate = kwargs["dilation_rate"]
assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
cond_padding = tf.cond(
tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding)
kwargs["padding"] = "VALID"
force2d = False # Special argument we use to force 2d kernels (see below).
if "force2d" in kwargs:
force2d = kwargs["force2d"]
def conv2d_kernel(kernel_size_arg, name_suffix):
"""Call conv2d but add suffix to name."""
if "name" in kwargs:
original_name = kwargs["name"]
name = kwargs.pop("name") + "_" + name_suffix
else:
original_name = None
name = "conv_" + name_suffix
original_force2d = None
if "force2d" in kwargs:
original_force2d = kwargs.pop("force2d")
result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
if original_name is not None:
kwargs["name"] = original_name # Restore for other calls.
if original_force2d is not None:
kwargs["force2d"] = original_force2d
return result
# Manually setting the shape to be unknown in the middle two dimensions so
# that the `tf.cond` below won't throw an error based on the convolution
# kernels being too large for the data.
  inputs._shape = tf.TensorShape([static_shape[0], None, None, static_shape[3]])  # pylint: disable=protected-access
if kernel_size[1] == 1 or force2d:
# Avoiding the cond below can speed up graph and gradient construction.
return conv2d_kernel(kernel_size, "single")
return tf.cond(
tf.equal(tf.shape(inputs)[2],
1), lambda: conv2d_kernel((kernel_size[0], 1), "small"),
lambda: conv2d_kernel(kernel_size, "std"))
def conv(inputs, filters, kernel_size, **kwargs):
return conv_internal(tf.layers.conv2d, inputs, filters, kernel_size, **kwargs)
def conv1d(inputs, filters, kernel_size, **kwargs):
return tf.squeeze(
conv(tf.expand_dims(inputs, 2), filters, (kernel_size, 1), **kwargs), 2)
def separable_conv(inputs, filters, kernel_size, **kwargs):
return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size,
**kwargs)
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution. If separability == 0 it's a separable_conv."""
def conv_fn(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution, splits into separability-many blocks."""
separability = None
if "separability" in kwargs:
separability = kwargs.pop("separability")
if separability:
parts = []
abs_sep = separability if separability > 0 else -1 * separability
for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
with tf.variable_scope("part_%d" % split_idx):
if separability > 0:
parts.append(
tf.layers.conv2d(split, filters // separability, kernel_size, **
kwargs))
else:
parts.append(
tf.layers.separable_conv2d(split, filters // abs_sep,
kernel_size, **kwargs))
if separability > 1:
result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1))
elif abs_sep == 1: # If we have just one block, return it.
assert len(parts) == 1
result = parts[0]
else:
result = tf.concat(parts, axis=3)
else:
result = tf.layers.separable_conv2d(inputs, filters, kernel_size,
**kwargs)
if separability is not None:
kwargs["separability"] = separability
return result
return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def layer_norm_compute_python(x, epsilon, scale, bias):
"""Layer norm raw computation."""
mean = tf.reduce_mean(x, axis=[-1], keep_dims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * scale + bias
@function.Defun(compiled=True)
def layer_norm_compute_grad(x, epsilon, scale, bias, dy):
y = layer_norm_compute_python(x, epsilon, scale, bias)
dx = tf.gradients(ys=[y], xs=[x, epsilon, scale, bias], grad_ys=[dy])
return dx
@function.Defun(
compiled=True,
separate_compiled_gradients=True,
grad_func=layer_norm_compute_grad)
def layer_norm_compute(x, epsilon, scale, bias):
return layer_norm_compute_python(x, epsilon, scale, bias)
def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalize the tensor x, averaging over the last dimension."""
if filters is None:
filters = x.get_shape()[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
scale = tf.get_variable(
"layer_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"layer_norm_bias", [filters], initializer=tf.zeros_initializer())
if allow_defun:
result = layer_norm_compute(x, tf.constant(epsilon), scale, bias)
result.set_shape(x.get_shape())
else:
result = layer_norm_compute_python(x, epsilon, scale, bias)
return result
def noam_norm(x, name=None):
"""One version of layer normalization."""
with tf.name_scope(name, default_name="noam_norm", values=[x]):
shape = x.get_shape()
ndims = len(shape)
return (tf.nn.l2_normalize(x, ndims - 1, epsilon=1.0) *
tf.sqrt(tf.to_float(shape[-1])))
def residual_function(hparams):
"""Returns a function for combining layer input and layer output.
The returned function on x (layer input) and y (layer output) computes:
    norm_function(x + dropout(y, 1.0 - residual_dropout))
Args:
hparams: model hyperparameters
Returns:
a function from x=<layer input> and y=<layer output> to computed output
"""
def residual_fn(x, y):
return hparams.norm_function(x + tf.nn.dropout(
y, 1.0 - hparams.residual_dropout))
return residual_fn
def conv_block_internal(conv_fn,
inputs,
filters,
dilation_rates_and_kernel_sizes,
first_relu=True,
use_elu=False,
separabilities=None,
**kwargs):
"""A block of convolutions.
Args:
conv_fn: convolution function, e.g. conv or separable_conv.
inputs: a Tensor
filters: an Integer
dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
first_relu: whether to do a relu at start (defaults to True)
use_elu: whether to use ELUs instead of ReLUs (defaults to False)
separabilities: list of separability factors (per-layer).
**kwargs: additional arguments (e.g., pooling)
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
mask = kwargs.pop("mask") if "mask" in kwargs else None
norm = kwargs.pop("normalizer_fn") if "normalizer_fn" in kwargs else None
if norm is None and "normalizer_fn" not in kwargs:
norm = lambda x, name: layer_norm(x, filters, name=name)
with tf.variable_scope(name, "conv_block", [inputs]):
cur, counter = inputs, -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if first_relu or counter > 0:
cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
if mask is not None:
cur *= mask
if separabilities:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
separability=separabilities[counter],
**kwargs)
else:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
**kwargs)
if norm is not None:
cur = norm(cur, name="conv_block_norm_%d" % counter)
return cur
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard convolutions."""
return conv_block_internal(conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(separable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes,
**kwargs):
"""A block of separable convolutions."""
return conv_block_internal(subseparable_conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs)
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
"""Pooling (supports "LEFT")."""
with tf.name_scope("pool", [inputs]):
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4.")
# Add support for left padding.
if padding == "LEFT":
assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
if len(static_shape) == 3:
width_padding = 2 * (window_size[1] // 2)
padding_ = [[0, 0], [width_padding, 0], [0, 0]]
else:
height_padding = 2 * (window_size[0] // 2)
cond_padding = tf.cond(
tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (window_size[1] // 2)))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding_)
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
padding = "VALID"
window_size_small = (window_size[0], 1)
strides_small = (strides[0], 1)
# Manually setting the shape to be unknown in the middle two dimensions so
# that the `tf.cond` below won't throw an error based on the convolution
# kernels being too large for the data.
    inputs._shape = tf.TensorShape(  # pylint: disable=protected-access
        [static_shape[0], None, None, static_shape[3]])
return tf.cond(
tf.equal(tf.shape(inputs)[2], 1),
lambda: tf.nn.pool( # pylint: disable=g-long-lambda
inputs, window_size_small, pooling_type, padding,
strides=strides_small),
lambda: tf.nn.pool( # pylint: disable=g-long-lambda
inputs, window_size, pooling_type, padding, strides=strides))
def conv_block_downsample(x,
kernel,
strides,
padding,
separability=0,
name=None,
reuse=None):
"""Implements a downwards-striding conv block, like Xception exit flow."""
with tf.variable_scope(
name, default_name="conv_block_downsample", values=[x], reuse=reuse):
hidden_size = int(x.get_shape()[-1])
res = conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
strides=strides,
name="res_conv")
x = subseparable_conv_block(
x,
hidden_size, [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv0")
x = subseparable_conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv1")
x = pool(x, kernel, "MAX", padding, strides=strides)
x += res
x = subseparable_conv_block(
x,
2 * hidden_size, [((1, 1), kernel)],
first_relu=False,
padding=padding,
separability=separability,
name="conv2")
x = subseparable_conv_block(
x,
int(2.5 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv3")
return x
def decompress_seqcnn(x,
targets,
targets_vocab_size,
dilations_and_kernels,
block_size,
is_2d=False,
embedding_var=None,
name=None,
reuse=None):
"""Decompress x into targets size using a Sequence CNN at every element."""
with tf.variable_scope(
name,
default_name="decompress_batch_seqcnn",
values=[x, targets],
reuse=reuse):
# We assume targets are [batch x block_size * N x block_size * N x C] if
# is_2d=True or [batch, block_size * N, 1, C] otherwise, and C is static.
# Let's shift targets to depth and embed.
targets_shape, targets_shape_static = tf.shape(targets), targets.get_shape()
channels = int(targets_shape_static[-1])
hidden_size = int(x.get_shape()[-1])
if is_2d:
depth_targets = tf.space_to_depth(targets, block_size)
factor = channels * block_size * block_size
else:
depth_targets = tf.reshape(targets, [
targets_shape[0], targets_shape[1] // block_size, 1,
channels * block_size
])
factor = channels * block_size
if embedding_var is None:
embedding_var = tf.get_variable("targets_embedding",
[targets_vocab_size, hidden_size])
targets_emb = tf.gather(embedding_var, depth_targets)
# Flatten x and embedded targets. Flat targets are factor* larger on axis=1.
flat_x = tf.reshape(x, [-1, 1, 1, hidden_size])
flat_targets = tf.reshape(targets_emb, [-1, factor, 1, hidden_size])
shifted_targets = shift_left(flat_targets)
# Run a SeqCNN large-batch to produce factor outputs out of every target.
flat_x += tf.zeros_like(shifted_targets) # Broadcast on axis=1.
flat_outputs = conv_block(
tf.concat([flat_x, shifted_targets], axis=3),
hidden_size,
dilations_and_kernels,
padding="LEFT")
# Reshape back to embedded targets shape.
outputs = tf.reshape(flat_outputs, [
tf.shape(targets_emb)[0],
tf.shape(targets_emb)[1],
tf.shape(targets_emb)[2], factor * hidden_size
])
# Move depth back to target space.
if is_2d:
outputs = tf.depth_to_space(outputs, 2)
else:
outputs = tf.reshape(outputs, [
tf.shape(outputs)[0], block_size * tf.shape(outputs)[1], 1,
hidden_size
])
# Final reshape before prediction to ensure target size.
outputs = tf.reshape(outputs, [
targets_shape[0], targets_shape[1], targets_shape[2], channels,
hidden_size
])
return tf.layers.dense(outputs, targets_vocab_size)
def moe_layer(data_parallelism,
ps_devices,
xs,
train,
model_hidden_size,
expert_hidden_size,
n1,
n2,
loss_coef,
autoscale=True,
name=None):
"""A mixture of experts layer.
Args:
data_parallelism: a expert_utils.Parallelism object.
ps_devices: a list of strings
xs: a list of input tensors.
train: a boolean scalar.
model_hidden_size: an integer (input/output size for this layer)
expert_hidden_size: an integer (size of each expert's hidden layer)
n1: an integer - number of experts (or # of groups for hierarchical MoE)
n2: optional integer - size of each group of experts for hierarchical MoE
loss_coef: a scalar - multiplier on load-balancing losses
autoscale: a boolean
name: a string
Returns:
    ys: a list of tensors
extra_training_loss: a scalar
"""
dp = data_parallelism
with tf.variable_scope(name, default_name="moe"):
# Set up the hyperparameters for the gating networks.
primary_gating_hp = eu.NoisyTopKGatingParams()
primary_gating_hp.num_experts = n1
if n2:
# hierarchical MoE containing moe_n1 groups of moe_n2 experts.
assert n2 > 1
secondary_gating_hp = eu.NoisyTopKGatingParams()
secondary_gating_hp.num_experts = n2
else:
# flat mixture of moe_n1 experts.
secondary_gating_hp = None
# Set up the hyperparameters for the expert networks.
# Each expert contains a hidden RELU layer of size filter_size
expert_hp = eu.FeedForwardExpertParams()
expert_hp.autoscale = autoscale
expert_hp.hidden_layer_sizes = [expert_hidden_size]
# Create the mixture of experts.
moe = eu.DistributedMixtureOfExperts(primary_gating_hp, secondary_gating_hp,
expert_hp, model_hidden_size,
model_hidden_size, ps_devices, "moe")
# MoE expects input tensors to be 2d.
# Flatten out spatial dimensions.
xs_2d = dp(tf.reshape, xs, [[-1, model_hidden_size]] * dp.n)
# Call the MoE
moe_out_2d, importance, load, _, _ = moe.Eval(
dp.devices, xs_2d, train, identifiers=None, summaries=True)
# Reshape the output to the original shape.
moe_out = dp(tf.reshape, moe_out_2d, dp(tf.shape, xs))
# These losses encourage equal load on the different experts.
loss = loss_coef * (eu.CVSquared(importance) + eu.CVSquared(load))
return moe_out, loss
def simple_attention(target, source, bias=None, summaries=True):
"""A simple attention function.
Args:
target: a `Tensor` with shape `[batch, target_timesteps, depth]` or
`[batch, target_timesteps_1, target_timesteps_2, depth]`
source: a `Tensor` with shape `[batch, source_timesteps, depth]` or
`[batch, source_timesteps_1, source_timesteps_2, depth]`
bias: an optional `Tensor` with shape `[batch, timesteps, 1, 1]` used
to mask the attention to not attend to padding of input.
summaries: Boolean, whether to output summaries.
Returns:
a `Tensor` with same shape as `target`
"""
with tf.name_scope("simple_attention", [target, source]):
target_shape = tf.shape(target)
source_shape = tf.shape(source)
target = tf.reshape(target, [
target_shape[0], target_shape[1] * target_shape[2], target_shape[3]
])
source = tf.reshape(source, [
source_shape[0], source_shape[1] * source_shape[2], source_shape[3]
])
attention = tf.matmul(target, source, transpose_b=True)
attention *= tf.rsqrt(tf.to_float(tf.shape(target)[2]))
if bias is not None:
attention += tf.expand_dims(tf.squeeze(bias, axis=[2, 3]), axis=1)
attention = tf.nn.softmax(attention)
if summaries and not tf.get_variable_scope().reuse:
tf.summary.image("attention", tf.expand_dims(attention, 3), max_outputs=5)
attended = tf.matmul(attention, source)
return tf.reshape(attended, target_shape)
def multiscale_conv_sum(inputs, output_size, dilation_rates_and_kernel_sizes,
pooling_type, **kwargs):
"""Sum of several dilated convolutions.
For all convolutions with dilation_rate > 1, we first pool the input with
width dilation_rate.
Args:
inputs: a Tensor
output_size: an Integer
dilation_rates_and_kernel_sizes: a list of pairs (dilation, kernel_size)
pooling_type: "AVG" or "MAX"
    **kwargs: additional keyword args for conv
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "multiscale_conv_sum", [inputs]):
padding = kwargs["padding"]
results, counter = [], -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if dilation_rate > 1:
pooled = pool(inputs, kernel_size, pooling_type, padding)
else:
pooled = inputs
results.append(
conv(
pooled,
output_size,
kernel_size,
dilation_rate=dilation_rate,
name="conv_layer%d" % counter,
**kwargs))
return tf.add_n(results) * (len(results)**-0.5)
def multiscale_conv_and_attention(x,
padding,
hparams,
source=None,
summaries=True):
"""A common part of t2t layers.
First, do a linear multiscale convolution
Second, do attention (if source is not None)
Applies residuals and normalization on both steps.
Args:
x: a Tensor.
padding: a padding type
hparams: hyperparameters for model
source: optional source tensor for attention. (encoder output)
summaries: Boolean, whether to output summaries.
Returns:
a Tensor.
"""
# TODO(noam): The number of different scales should be a hyperparameter.
conv_sum = multiscale_conv_sum(
x,
hparams.hidden_size, [((hparams.kernel_height**i, hparams.kernel_width**
i), (hparams.kernel_height, hparams.kernel_width))
for i in xrange(3)],
"AVG",
padding=padding)
  # For the residual connection, rescale x with a 1x1 conv if the channel counts differ.
if x.get_shape().as_list()[-1] != conv_sum.get_shape().as_list()[-1]:
x = conv(x, hparams.hidden_size, (1, 1))
x = noam_norm(x + conv_sum)
if source is not None:
x = noam_norm(x + simple_attention(x, source, summaries=summaries))
return x
def conv_with_pools(inputs, output_size, kernel_size, pool_sizes, pooling_type,
**kwargs):
"""Convolution plus 1x1 convolution applied to specified pools.
For example we might do a regular convolution with kernel size (3, 1),
and pools of sizes [(9, 1), (27, 1)].
Args:
inputs: a Tensor
output_size: an Integer
kernel_size: a tuple of integers
pool_sizes: a list of tuples of integers.
pooling_type: "AVG" or "MAX"
**kwargs: additional keyword args for conv
Returns:
a Tensor.
"""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_with_pools", [inputs]):
padding = kwargs["padding"]
results = []
results.append(conv(inputs, output_size, kernel_size, **kwargs))
for i, pool_size in enumerate(pool_sizes):
pooled = pool(inputs, pool_size, pooling_type, padding)
results.append(
conv(pooled, output_size, (1, 1), name="pool_%d" % i, **kwargs))
return tf.add_n(results) * (len(results)**-0.5)
def conv_with_pools_and_attention(x,
padding,
hparams,
source=None,
summaries=True):
"""A common part of t2t layers.
First, do conv_with_pools
Second, do attention (if source is not None)
Applies residuals and normalization on both steps.
Args:
x: a Tensor.
padding: a padding type
hparams: hyperparameters for model
source: optional source tensor for attention. (encoder output)
summaries: Boolean, whether to output summaries.
Returns:
a Tensor.
"""
conv_sum = conv_with_pools(
x,
hparams.hidden_size, (hparams.kernel_height, hparams.kernel_width),
hparams.pool_sizes,
"AVG",
padding=padding)
if x.get_shape().as_list()[-1] == conv_sum.get_shape().as_list()[-1]:
conv_sum += x
x = noam_norm(conv_sum)
if source is not None:
x = noam_norm(x + simple_attention(x, source, summaries=summaries))
return x
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
"""
positions = tf.to_float(tf.range(length))
log_timescale_increment = (math.log(max_timescale / min_timescale) /
(num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
This allows attention to learn to use absolute and relative positions.
The timing signal should be added to some precursor of both the source
and the target of the attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
  expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the depth dimension, padded with zeros to be the same depth as the input,
and added into input.
Args:
x: a Tensor with shape [?, length, ?, depth]
min_timescale: a float
max_timescale: a float
num_timescales: an int <= depth/2
Returns:
a Tensor the same shape as x.
"""
length = tf.shape(x)[1]
depth = tf.shape(x)[3]
signal = get_timing_signal(length, min_timescale, max_timescale,
num_timescales)
padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]])
return x + tf.reshape(padded_signal, [1, length, 1, depth])
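# Sketch (not part of the original module): a NumPy re-derivation of
# get_timing_signal that mirrors the formulas above, useful for inspecting the
# positional signal without building a TensorFlow graph. It reuses the
# module-level `np` and `math` imports; `_timing_signal_numpy` is a
# hypothetical helper name.
def _timing_signal_numpy(length, min_timescale=1.0, max_timescale=1e4,
                         num_timescales=16):
  positions = np.arange(length, dtype=np.float32)
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * np.exp(
      np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
  scaled_time = positions[:, None] * inv_timescales[None, :]
  # Shape (length, 2 * num_timescales), matching get_timing_signal.
  return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)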
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
"""
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True))
def mask_leq(target_length, source_length):
"""A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere.
Args:
target_length: an integer
source_length: an integer
Returns:
a Tensor with shape [1, target_length, source_length]
"""
return tf.expand_dims(
tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0)
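# Worked example (comment only): mask_leq(3, 4)[0] evaluates to
#   [[1., 0., 0., 0.],
#    [1., 1., 0., 0.],
#    [1., 1., 1., 0.]]
# i.e. target position i may attend to source positions j <= i.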
def attention_1d_v0(source,
target,
attention_size,
output_size,
num_heads,
mask=None,
transform_source=True,
transform_target=True,
transform_output=True,
summaries=True,
name=None):
"""multi-headed attention.
TODO(noam): this could probably be extended to 2d.
Args:
source: a Tensor of shape [batch, source_length, source_depth]
target: a Tensor of shape [batch, target_length, target_depth]
attention_size: an integer
output_size: an integer
num_heads: an integer divisor of attention_size
mask: a float32 Tensor of shape [batch, target_length, source_length]
1.0 means can-see; 0.0 means can't-see.
Any dimension can be 1 (supports broadcasting).
transform_source: a boolean
transform_target: a boolean
transform_output: a boolean
summaries: a boolean
name: an optional string
Returns:
a Tensor of shape [batch, length, output_size]
"""
with tf.variable_scope(name, default_name="attention", values=[target]):
source_length = tf.shape(source)[1]
target_length = tf.shape(target)[1]
batch = tf.shape(source)[0]
def _maybe_transform(t, size, should_transform, name):
if should_transform:
return conv1d(t, size, 1, name=name)
else:
assert t.get_shape()[-1] == size
return t
source_attention = _maybe_transform(source, attention_size,
transform_source, "source_attention")
target_attention = _maybe_transform(target, attention_size,
transform_target, "target_attention")
assert attention_size % num_heads == 0
size_per_head = attention_size // num_heads
source_attention = tf.reshape(
source_attention, [batch, source_length, num_heads, size_per_head])
target_attention = tf.reshape(
target_attention, [batch, target_length, num_heads, size_per_head])
# [batch, num_heads, length, size_per_head]
source_attention = tf.transpose(source_attention, [0, 2, 1, 3])
target_attention = tf.transpose(target_attention, [0, 2, 1, 3])
# [batch, num_heads, target_length, source_length]
attention = tf.matmul(target_attention, source_attention, transpose_b=True)
attention *= size_per_head**-0.5
if mask is not None:
mask = tf.expand_dims(mask, 1)
mask = (1.0 - mask) * -1e9
attention += mask
attention = tf.nn.softmax(attention)
if summaries and not tf.get_variable_scope().reuse:
# Compute a color image summary.
image = tf.reshape(attention,
[batch, num_heads, target_length, source_length])
image = tf.transpose(image, [0, 2, 3, 1])
image = tf.pow(image, 0.2) # for high-dynamic-range
# Each head will correspond to one of RGB.
# pad the heads to be a multiple of 3
extra_heads = -num_heads % 3
      image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, extra_heads]])
image = tf.reshape(image, [
batch, target_length, source_length, 3, (num_heads + extra_heads) // 3
])
image = tf.reduce_max(image, 4)
tf.summary.image("local_attention", image, max_outputs=1)
# output: [batch, num_heads, target_length, size_per_head]
output = tf.matmul(attention, source_attention)
output = tf.transpose(output, [0, 2, 1, 3])
output = tf.reshape(output, [batch, target_length, attention_size])
output = _maybe_transform(output, output_size, transform_output,
"attention_output")
return output
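# Shape-level usage sketch of attention_1d_v0 (illustration only, not executed
# at import time). The sizes are arbitrary; summaries is disabled so no image
# summary is emitted, and the mask comes from mask_leq above.
def _example_attention_1d_v0():
  batch, source_length, target_length, depth = 2, 9, 5, 64
  source = tf.zeros([batch, source_length, depth])
  target = tf.zeros([batch, target_length, depth])
  # Causal-style mask, broadcast over the batch dimension.
  mask = mask_leq(target_length, source_length)
  out = attention_1d_v0(source, target,
                        attention_size=64, output_size=64, num_heads=8,
                        mask=mask, summaries=False,
                        name="example_attention")
  # out: [batch, target_length, output_size]
  return out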
def relu_density_logit(x, reduce_dims):
"""logit(density(x)).
Useful for histograms.
Args:
    x: a Tensor, typically the output of tf.relu
reduce_dims: a list of dimensions
Returns:
a Tensor
"""
frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
return scaled
def conv_hidden_relu(inputs,
hidden_size,
output_size,
kernel_size=(1, 1),
summaries=True,
dropout=0.0,
**kwargs):
"""Hidden layer with RELU activation followed by linear projection."""
name = kwargs.pop("name") if "name" in kwargs else None
with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
if inputs.get_shape().ndims == 3:
is_3d = True
inputs = tf.expand_dims(inputs, 2)
else:
is_3d = False
h = conv(
inputs,
hidden_size,
kernel_size,
activation=tf.nn.relu,
name="conv1",
**kwargs)
if dropout != 0.0:
h = tf.nn.dropout(h, 1.0 - dropout)
if summaries and not tf.get_variable_scope().reuse:
tf.summary.histogram("hidden_density_logit",
relu_density_logit(
h, list(range(inputs.shape.ndims - 1))))
ret = conv(h, output_size, (1, 1), name="conv2", **kwargs)
if is_3d:
ret = tf.squeeze(ret, 2)
return ret
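# Shape-level sketch of conv_hidden_relu (illustration only, not executed at
# import time). 3-d inputs are expanded to 4-d internally and squeezed back.
def _example_conv_hidden_relu():
  x = tf.zeros([2, 10, 128])
  y = conv_hidden_relu(x, hidden_size=256, output_size=128, summaries=False)
  # y: [2, 10, 128]
  return y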
def conv_gru(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional GRU in 1 dimension."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start, padding):
return conv(
args,
filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate,
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="conv_gru", values=[x], reuse=reuse):
reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding))
gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding))
return gate * x + (1 - gate) * candidate
def conv_lstm(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional LSTM in 1 dimension."""
with tf.variable_scope(
name, default_name="conv_lstm", values=[x], reuse=reuse):
gates = conv(
x,
4 * filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate)
g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
return tf.sigmoid(g[2]) * tf.tanh(new_cell)
def diagonal_conv_gru(x,
kernel_size,
filters,
train,
dropout=0.0,
name=None,
reuse=None):
"""Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start):
return conv(
args,
filters,
kernel_size,
padding="SAME",
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))
# Dropout if training.
if dropout > 0.0 and train:
candidate = tf.nn.dropout(candidate, 1.0 - dropout)
# Diagonal shift.
shift_filters = filters // 3
base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
[[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters)
shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
x_shifted = tf.nn.depthwise_conv2d(
x, shift_filter, [1, 1, 1, 1], padding="SAME")
# Return the gated result and cost.
total_cost_avg = 0.5 * (reset_cost + gate_cost)
return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
"""Pad tensors x and y on axis 1 so that they have the same length."""
if axis not in [1, 2]:
raise ValueError("Only axis=1 and axis=2 supported for now.")
with tf.name_scope("pad_to_same_length", [x, y]):
x_length = tf.shape(x)[axis]
y_length = tf.shape(y)[axis]
max_length = tf.maximum(x_length, y_length)
if final_length_divisible_by > 1:
# Find the nearest larger-or-equal integer divisible by given number.
max_length += final_length_divisible_by - 1
max_length //= final_length_divisible_by
max_length *= final_length_divisible_by
length_diff1 = max_length - x_length
length_diff2 = max_length - y_length
def padding_list(length_diff, arg):
if axis == 1:
return [[[0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
return [[[0, 0], [0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
res_x = tf.pad(x, paddings1)
res_y = tf.pad(y, paddings2)
# Static shapes are the same except for axis=1.
x_shape = x.shape.as_list()
x_shape[axis] = None
res_x.set_shape(x_shape)
y_shape = y.shape.as_list()
y_shape[axis] = None
res_y.set_shape(y_shape)
return res_x, res_y
def pad_with_zeros(logits, labels):
"""Pad labels on the length dimension to match logits length."""
with tf.name_scope("pad_with_zeros", [logits, labels]):
logits, labels = pad_to_same_length(logits, labels)
if len(labels.shape.as_list()) == 3: # 2-d labels.
logits, labels = pad_to_same_length(logits, labels, axis=2)
return labels
def weights_nonzero(labels):
"""Assign weight 1.0 to all labels except for padding (id=0)."""
return tf.to_float(tf.not_equal(labels, 0))
def weights_all(labels):
"""Assign weight 1.0 to all labels."""
return tf.ones_like(labels, dtype=tf.float32)
def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
"""
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0],
[0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
return ret
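# Plain-Python reference for the weighting rule described above (illustration
# only; it is not used by the TF code). Token id 1 plays the role of the ID1
# end symbol, and all other ids are arbitrary.
def _concatenated_weights_reference(ids):
  """Per-token weights for one example, e.g.
  _concatenated_weights_reference([5, 6, 7, 1, 8, 9, 10, 11, 1])
  == [0., 0., 0., 0., 0., 0., 1., 1., 1.]"""
  weights = []
  eos_seen = 0             # how many ID1 symbols precede the current position
  tokens_into_segment = 0  # position within the current source/target segment
  for token in ids:
    in_target = (eos_seen % 2 == 1)
    past_boilerplate = (tokens_into_segment >= 2)  # skip e.g. "target French"
    weights.append(1.0 if (in_target and past_boilerplate) else 0.0)
    if token == 1:
      eos_seen += 1
      tokens_into_segment = 0
    else:
      tokens_into_segment += 1
  return weights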
def padded_cross_entropy(logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True):
"""Compute cross-entropy assuming 0s are padding.
Computes a loss numerator (the sum of losses), and loss denominator
(the number of non-padding tokens).
Args:
logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
Returns:
loss_numerator: a `Scalar`. Sum of losses.
    loss_denominator: a `Scalar`. The number of non-padding target tokens.
"""
confidence = 1.0 - label_smoothing
vocab_size = tf.shape(logits)[-1]
with tf.name_scope("padded_cross_entropy", [logits, labels]):
pad_labels = pad_with_zeros(logits, labels)
xent = smoothing_cross_entropy(logits, pad_labels, vocab_size, confidence)
weights = weights_fn(pad_labels)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights)
def smoothing_cross_entropy(logits, labels, vocab_size, confidence):
"""Cross entropy with label smoothing to limit over-confidence."""
with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
# Normalizing constant is the best cross-entropy value with soft targets.
# We subtract it just for readability, makes no difference on learning.
normalizing = -(confidence * tf.log(confidence) + tf.to_float(
vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))
# Soft targets.
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=soft_targets)
return xentropy - normalizing
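# Minimal usage sketch of padded_cross_entropy on dummy data (illustration
# only, not executed at import time); shapes and the smoothing value are
# arbitrary examples.
def _example_padded_cross_entropy():
  batch, timesteps, vocab_size = 2, 6, 10
  logits = tf.zeros([batch, timesteps, vocab_size])
  labels = tf.ones([batch, timesteps], dtype=tf.int32)
  num, denom = padded_cross_entropy(logits, labels, label_smoothing=0.1)
  # num / denom is the average per-token loss over non-padding positions.
  return num / denom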
|
#!/usr/bin/env python3
import collections
import itertools
import json
import random
import tempfile
import time
import unittest
import docker
import mock
from arvados_docker import cleaner
MAX_DOCKER_ID = (16 ** 64) - 1
def MockDockerId():
return '{:064x}'.format(random.randint(0, MAX_DOCKER_ID))
def MockContainer(image_hash):
return {'Id': MockDockerId(),
'Image': image_hash['Id']}
def MockImage(*, size=0, vsize=None, tags=[]):
if vsize is None:
vsize = random.randint(100, 2000000)
return {'Id': MockDockerId(),
'ParentId': MockDockerId(),
'RepoTags': list(tags),
'Size': size,
'VirtualSize': vsize}
class MockEvent(dict):
ENCODING = 'utf-8'
event_seq = itertools.count(1)
def __init__(self, status, docker_id=None, **event_data):
if docker_id is None:
docker_id = MockDockerId()
super().__init__(self, **event_data)
self['status'] = status
self['id'] = docker_id
self.setdefault('time', next(self.event_seq))
def encoded(self):
return json.dumps(self).encode(self.ENCODING)
class MockException(docker.errors.APIError):
def __init__(self, status_code):
response = mock.Mock(name='response')
response.status_code = status_code
super().__init__("mock exception", response)
class DockerImageTestCase(unittest.TestCase):
def test_used_at_sets_last_used(self):
image = cleaner.DockerImage(MockImage())
image.used_at(5)
self.assertEqual(5, image.last_used)
def test_used_at_moves_forward(self):
image = cleaner.DockerImage(MockImage())
image.used_at(6)
image.used_at(8)
self.assertEqual(8, image.last_used)
def test_used_at_does_not_go_backward(self):
image = cleaner.DockerImage(MockImage())
image.used_at(4)
image.used_at(2)
self.assertEqual(4, image.last_used)
class DockerImagesTestCase(unittest.TestCase):
def setUp(self):
self.mock_images = []
def setup_mock_images(self, *vsizes):
self.mock_images.extend(MockImage(vsize=vsize) for vsize in vsizes)
def setup_images(self, *vsizes, target_size=1000000):
self.setup_mock_images(*vsizes)
images = cleaner.DockerImages(target_size)
for image in self.mock_images:
images.add_image(image)
return images
def test_has_image(self):
images = self.setup_images(None)
self.assertTrue(images.has_image(self.mock_images[0]['Id']))
self.assertFalse(images.has_image(MockDockerId()))
def test_del_image(self):
images = self.setup_images(None)
images.del_image(self.mock_images[0]['Id'])
self.assertFalse(images.has_image(self.mock_images[0]['Id']))
def test_del_nonexistent_image(self):
images = self.setup_images(None)
images.del_image(MockDockerId())
self.assertTrue(images.has_image(self.mock_images[0]['Id']))
def test_one_image_always_kept(self):
# When crunch-job starts a job, it makes sure each compute node
# has the Docker image loaded, then it runs all the tasks with
# the assumption the image is on each node. As long as that's
# true, the cleaner should avoid removing every installed image:
# crunch-job might be counting on the most recent one to be
# available, even if it's not currently in use.
images = self.setup_images(None, None, target_size=1)
for use_time, image in enumerate(self.mock_images, 1):
user = MockContainer(image)
images.add_user(user, use_time)
images.end_user(user['Id'])
self.assertEqual([self.mock_images[0]['Id']],
list(images.should_delete()))
def test_images_under_target_not_deletable(self):
# The images are used in this order. target_size is set so it
# could hold the largest image, but not after the most recently
# used image is kept; then we have to fall back to the previous one.
images = self.setup_images(20, 30, 40, 10, target_size=45)
for use_time, image in enumerate(self.mock_images, 1):
user = MockContainer(image)
images.add_user(user, use_time)
images.end_user(user['Id'])
self.assertEqual([self.mock_images[ii]['Id'] for ii in [0, 2]],
list(images.should_delete()))
def test_images_in_use_not_deletable(self):
images = self.setup_images(None, None, target_size=1)
users = [MockContainer(image) for image in self.mock_images]
images.add_user(users[0], 1)
images.add_user(users[1], 2)
images.end_user(users[1]['Id'])
self.assertEqual([self.mock_images[1]['Id']],
list(images.should_delete()))
def test_image_deletable_after_unused(self):
images = self.setup_images(None, None, target_size=1)
users = [MockContainer(image) for image in self.mock_images]
images.add_user(users[0], 1)
images.add_user(users[1], 2)
images.end_user(users[0]['Id'])
self.assertEqual([self.mock_images[0]['Id']],
list(images.should_delete()))
def test_image_not_deletable_if_user_restarts(self):
images = self.setup_images(None, target_size=1)
user = MockContainer(self.mock_images[-1])
images.add_user(user, 1)
images.end_user(user['Id'])
images.add_user(user, 2)
self.assertEqual([], list(images.should_delete()))
def test_image_not_deletable_if_any_user_remains(self):
images = self.setup_images(None, target_size=1)
users = [MockContainer(self.mock_images[0]) for ii in range(2)]
images.add_user(users[0], 1)
images.add_user(users[1], 2)
images.end_user(users[0]['Id'])
self.assertEqual([], list(images.should_delete()))
def test_image_deletable_after_all_users_end(self):
images = self.setup_images(None, None, target_size=1)
users = [MockContainer(self.mock_images[ii]) for ii in [0, 1, 1]]
images.add_user(users[0], 1)
images.add_user(users[1], 2)
images.add_user(users[2], 3)
images.end_user(users[1]['Id'])
images.end_user(users[2]['Id'])
self.assertEqual([self.mock_images[-1]['Id']],
list(images.should_delete()))
def test_images_suggested_for_deletion_by_lru(self):
images = self.setup_images(10, 10, 10, target_size=1)
users = [MockContainer(image) for image in self.mock_images]
images.add_user(users[0], 3)
images.add_user(users[1], 1)
images.add_user(users[2], 2)
for user in users:
images.end_user(user['Id'])
self.assertEqual([self.mock_images[ii]['Id'] for ii in [1, 2]],
list(images.should_delete()))
def test_adding_user_without_image_does_not_implicitly_add_image(self):
images = self.setup_images(10)
images.add_user(MockContainer(MockImage()), 1)
self.assertEqual([], list(images.should_delete()))
def test_nonexistent_user_removed(self):
images = self.setup_images()
images.end_user('nonexistent')
# No exception should be raised.
def test_del_image_effective_with_users_present(self):
images = self.setup_images(None, target_size=1)
user = MockContainer(self.mock_images[0])
images.add_user(user, 1)
images.del_image(self.mock_images[0]['Id'])
images.end_user(user['Id'])
self.assertEqual([], list(images.should_delete()))
def setup_from_daemon(self, *vsizes, target_size=1500000):
self.setup_mock_images(*vsizes)
docker_client = mock.MagicMock(name='docker_client')
docker_client.images.return_value = iter(self.mock_images)
return cleaner.DockerImages.from_daemon(target_size, docker_client)
def test_images_loaded_from_daemon(self):
images = self.setup_from_daemon(None, None)
for image in self.mock_images:
self.assertTrue(images.has_image(image['Id']))
def test_target_size_set_from_daemon(self):
images = self.setup_from_daemon(20, 10, 5, target_size=15)
user = MockContainer(self.mock_images[-1])
images.add_user(user, 1)
self.assertEqual([self.mock_images[0]['Id']],
list(images.should_delete()))
class DockerImageUseRecorderTestCase(unittest.TestCase):
TEST_CLASS = cleaner.DockerImageUseRecorder
TEST_CLASS_INIT_KWARGS = {}
def setUp(self):
self.images = mock.MagicMock(name='images')
self.docker_client = mock.MagicMock(name='docker_client')
self.events = []
self.recorder = self.TEST_CLASS(self.images, self.docker_client,
self.encoded_events, **self.TEST_CLASS_INIT_KWARGS)
@property
def encoded_events(self):
return (event.encoded() for event in self.events)
def test_unknown_events_ignored(self):
self.events.append(MockEvent('mock!event'))
self.recorder.run()
# No exception should be raised.
def test_fetches_container_on_create(self):
self.events.append(MockEvent('create'))
self.recorder.run()
self.docker_client.inspect_container.assert_called_with(
self.events[0]['id'])
def test_adds_user_on_container_create(self):
self.events.append(MockEvent('create'))
self.recorder.run()
self.images.add_user.assert_called_with(
self.docker_client.inspect_container(), self.events[0]['time'])
def test_unknown_image_handling(self):
# The use recorder should not fetch any images.
self.events.append(MockEvent('create'))
self.recorder.run()
self.assertFalse(self.docker_client.inspect_image.called)
def test_unfetchable_containers_ignored(self):
self.events.append(MockEvent('create'))
self.docker_client.inspect_container.side_effect = MockException(404)
self.recorder.run()
self.assertFalse(self.images.add_user.called)
def test_ends_user_on_container_destroy(self):
self.events.append(MockEvent('destroy'))
self.recorder.run()
self.images.end_user.assert_called_with(self.events[0]['id'])
class DockerImageCleanerTestCase(DockerImageUseRecorderTestCase):
TEST_CLASS = cleaner.DockerImageCleaner
def test_unknown_image_handling(self):
# The image cleaner should fetch and record new images.
self.images.has_image.return_value = False
self.events.append(MockEvent('create'))
self.recorder.run()
self.docker_client.inspect_image.assert_called_with(
self.docker_client.inspect_container()['Image'])
self.images.add_image.assert_called_with(
self.docker_client.inspect_image())
def test_unfetchable_images_ignored(self):
self.images.has_image.return_value = False
self.docker_client.inspect_image.side_effect = MockException(404)
self.events.append(MockEvent('create'))
self.recorder.run()
self.docker_client.inspect_image.assert_called_with(
self.docker_client.inspect_container()['Image'])
self.assertFalse(self.images.add_image.called)
def test_deletions_after_destroy(self):
delete_id = MockDockerId()
self.images.should_delete.return_value = [delete_id]
self.events.append(MockEvent('destroy'))
self.recorder.run()
self.docker_client.remove_image.assert_called_with(delete_id)
self.images.del_image.assert_called_with(delete_id)
def test_failed_deletion_handling(self):
delete_id = MockDockerId()
self.images.should_delete.return_value = [delete_id]
self.docker_client.remove_image.side_effect = MockException(500)
self.events.append(MockEvent('destroy'))
self.recorder.run()
self.docker_client.remove_image.assert_called_with(delete_id)
self.assertFalse(self.images.del_image.called)
class DockerContainerCleanerTestCase(DockerImageUseRecorderTestCase):
TEST_CLASS = cleaner.DockerImageCleaner
TEST_CLASS_INIT_KWARGS = {'remove_containers_onexit': True}
def test_container_deletion_deletes_volumes(self):
cid = MockDockerId()
self.events.append(MockEvent('die', docker_id=cid))
self.recorder.run()
self.docker_client.remove_container.assert_called_with(cid, v=True)
@mock.patch('arvados_docker.cleaner.logger')
def test_failed_container_deletion_handling(self, mockLogger):
cid = MockDockerId()
self.docker_client.remove_container.side_effect = MockException(500)
self.events.append(MockEvent('die', docker_id=cid))
self.recorder.run()
self.docker_client.remove_container.assert_called_with(cid, v=True)
self.assertEqual("Failed to remove container %s: %s",
mockLogger.warning.call_args[0][0])
self.assertEqual(cid,
mockLogger.warning.call_args[0][1])
class HumanSizeTestCase(unittest.TestCase):
def check(self, human_str, count, exp):
self.assertEqual(count * (1024 ** exp),
cleaner.human_size(human_str))
def test_bytes(self):
self.check('1', 1, 0)
self.check('82', 82, 0)
def test_kibibytes(self):
self.check('2K', 2, 1)
self.check('3k', 3, 1)
def test_mebibytes(self):
self.check('4M', 4, 2)
self.check('5m', 5, 2)
def test_gibibytes(self):
self.check('6G', 6, 3)
self.check('7g', 7, 3)
def test_tebibytes(self):
self.check('8T', 8, 4)
self.check('9t', 9, 4)
class RunTestCase(unittest.TestCase):
def setUp(self):
self.config = cleaner.default_config()
self.config['Quota'] = 1000000
self.docker_client = mock.MagicMock(name='docker_client')
def test_run(self):
test_start_time = int(time.time())
self.docker_client.events.return_value = []
cleaner.run(self.config, self.docker_client)
self.assertEqual(2, self.docker_client.events.call_count)
event_kwargs = [args[1] for args in
self.docker_client.events.call_args_list]
self.assertIn('since', event_kwargs[0])
self.assertIn('until', event_kwargs[0])
self.assertLessEqual(test_start_time, event_kwargs[0]['until'])
self.assertIn('since', event_kwargs[1])
self.assertEqual(event_kwargs[0]['until'], event_kwargs[1]['since'])
@mock.patch('docker.Client', name='docker_client')
@mock.patch('arvados_docker.cleaner.run', name='cleaner_run')
class MainTestCase(unittest.TestCase):
def test_client_api_version(self, run_mock, docker_client):
with tempfile.NamedTemporaryFile(mode='wt') as cf:
cf.write('{"Quota":"1000T"}')
cf.flush()
cleaner.main(['--config', cf.name])
self.assertEqual(1, docker_client.call_count)
# 1.14 is the first version that's well defined, going back to
# Docker 1.2, and still supported up to at least Docker 1.9.
# See
# <https://docs.docker.com/engine/reference/api/docker_remote_api/>.
self.assertEqual('1.14',
docker_client.call_args[1].get('version'))
self.assertEqual(1, run_mock.call_count)
self.assertIs(run_mock.call_args[0][1], docker_client())
class ConfigTestCase(unittest.TestCase):
def test_load_config(self):
with tempfile.NamedTemporaryFile(mode='wt') as cf:
cf.write(
'{"Quota":"1000T", "RemoveStoppedContainers":"always", "Verbose":2}')
cf.flush()
config = cleaner.load_config(['--config', cf.name])
self.assertEqual(1000 << 40, config['Quota'])
self.assertEqual("always", config['RemoveStoppedContainers'])
self.assertEqual(2, config['Verbose'])
def test_args_override_config(self):
with tempfile.NamedTemporaryFile(mode='wt') as cf:
cf.write(
'{"Quota":"1000T", "RemoveStoppedContainers":"always", "Verbose":2}')
cf.flush()
config = cleaner.load_config([
'--config', cf.name,
'--quota', '1G',
'--remove-stopped-containers', 'never',
'--verbose',
])
self.assertEqual(1 << 30, config['Quota'])
self.assertEqual('never', config['RemoveStoppedContainers'])
self.assertEqual(1, config['Verbose'])
class ContainerRemovalTestCase(unittest.TestCase):
LIFECYCLE = ['create', 'attach', 'start', 'resize', 'die', 'destroy']
def setUp(self):
self.config = cleaner.default_config()
self.docker_client = mock.MagicMock(name='docker_client')
self.existingCID = MockDockerId()
self.docker_client.containers.return_value = [{
'Id': self.existingCID,
'Status': 'Exited (0) 6 weeks ago',
}, {
# If docker_client.containers() returns non-exited
# containers for some reason, do not remove them.
'Id': MockDockerId(),
'Status': 'Running',
}]
self.newCID = MockDockerId()
self.docker_client.events.return_value = [
MockEvent(e, docker_id=self.newCID).encoded()
for e in self.LIFECYCLE]
def test_remove_onexit(self):
self.config['RemoveStoppedContainers'] = 'onexit'
cleaner.run(self.config, self.docker_client)
self.docker_client.remove_container.assert_called_once_with(
self.newCID, v=True)
def test_remove_always(self):
self.config['RemoveStoppedContainers'] = 'always'
cleaner.run(self.config, self.docker_client)
self.docker_client.remove_container.assert_any_call(
self.existingCID, v=True)
self.docker_client.remove_container.assert_any_call(
self.newCID, v=True)
self.assertEqual(2, self.docker_client.remove_container.call_count)
def test_remove_never(self):
self.config['RemoveStoppedContainers'] = 'never'
cleaner.run(self.config, self.docker_client)
self.assertEqual(0, self.docker_client.remove_container.call_count)
def test_container_exited_between_subscribe_events_and_check_existing(self):
self.config['RemoveStoppedContainers'] = 'always'
self.docker_client.events.return_value = [
MockEvent(e, docker_id=self.existingCID).encoded()
for e in ['die', 'destroy']]
cleaner.run(self.config, self.docker_client)
# Subscribed to events before getting the list of existing
# exited containers?
self.docker_client.assert_has_calls([
mock.call.events(since=mock.ANY),
mock.call.containers(filters={'status': 'exited'})])
# Asked to delete the container twice?
self.docker_client.remove_container.assert_has_calls(
[mock.call(self.existingCID, v=True)] * 2)
self.assertEqual(2, self.docker_client.remove_container.call_count)
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pickle
import subprocess
from py_files import make_plots
from py_files import data_prep_stack_barplots as prep
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from qiime2.plugins import feature_table
from qiime2 import Artifact
# print statements for user intro to software
print('')
print('WELCOME TO fastQCS3! Before you begin:\n',
'\n',
'1. Please make sure your .fastq.gz files are in a directory.\n',
'2. If your sequences are still multiplexed:',
'make sure your barcodes.fastq.gz file lives in the same directory as your sequences.\n',
      '3. Make sure your metadata file is in the metadata directory, and\n',
'4. Make sure you know your metadata file name.\n')
demux_status = input('Are your fastq.gz sequence file(s) demultiplexed? (y/n):')
if demux_status in ('y', 'n'):
    pass
else:
    raise NameError('Please enter either y or n')
def import_demuxed_data(directory):
"""function to run importing of pre-demultiplexed reads"""
subprocess.run(['bash','-c','bash shell_scripts/auto_import.sh '+directory])
return
def auto_demux(directory, metadata):
"""function to run importing and demultiplexing (demux) of multiplexed reads"""
subprocess.run(['bash','-c','bash shell_scripts/auto_demux.sh '+directory+' '+metadata])
return
def auto_dada2(trimlength, metadata):
"""function to run dada2"""
subprocess.run(['bash','-c','bash shell_scripts/auto_dada2.sh '+trimlength+' '+metadata])
return
def auto_classify_phylo(sample_n_features, metadata):
"""function for classification, phylogenetic analysis, outputs data in appropriate form to work with for plotting"""
subprocess.run(['bash','-c','bash shell_scripts/auto_classify_phylo.sh '+sample_n_features+' '+metadata])
return
# prompt user to input directory path
directory = input('Please enter the name of your directory of .fastq.gz files (note: sequence data must be gzipped):')
# adding error statements
if (' ' in directory):
raise TypeError('Cannot have spaces in directory name')
elif ('.' in directory):
raise TypeError('Please avoid periods in directory name to avoid confusion')
# prompting user to add their metadata file
metadata = input('Please enter your complete (ie. filename.tsv) metadata file name (file must exist in metadata directory):')
# adding error statements
if (' ' in metadata):
raise TypeError('Cannot have spaces in filename')
# calling importing functions based on user input
if demux_status == 'n':
auto_demux(directory, metadata)
elif demux_status == 'y':
import_demuxed_data(directory)
# calling find_dropoff function to print information about sequence quality by position
# so that the user can choose their trimlength logically
make_plots.find_dropoff('data/exported_demux/', 500)
# prompting user to input trim length
trimlength = input('\nPlease choose a sequencing trim length:')
# adding error statements
if trimlength.isdigit():
pass
else:
raise TypeError('trim length input must be a positive integer')
# this second block will run dada2 and the following few commands
print('\n...running dada2...this may take a few minutes...')
auto_dada2(trimlength, metadata)
# calling get_feature_info to get some information on feature counts
# to let the user choose sampling depth
make_plots.get_feature_info('data/features/sample-frequency-detail.csv')
# prompting user to input sampling depth
sample_n_features = input('\nPlease choose a sampling depth:')
# adding error statements
if sample_n_features.isdigit():
pass
else:
raise TypeError('sampling depth input must be a positive integer')
print('\n...building phylogenetic tree and classifying ASVs...this may take a few minutes...')
# calling auto_classify_phylo to run remainder of commands
auto_classify_phylo(sample_n_features, metadata)
print('\n',
'fastQCS3 has finished processing your data! Congrats!')
# prompting user to name their .pkl file
basename = input('\nPlease give your visualization file a name:')
# adding error statements
if (' ' in basename):
raise TypeError('Cannot have spaces in filename')
elif ('.' in basename):
raise TypeError('Please avoid periods in filename to avoid file type confusion')
print('\n...packaging your visualizations...this may take a minute...')
# everything below is for creating the plotting objects
"""read in newly created taxonomy data file to pandas"""
taxonomy = pd.read_csv("data/taxonomy.tsv", sep='\t')
taxonomy[['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']] = taxonomy['Taxon'].str.split(';', expand=True)
taxonomy.set_index('Feature ID', inplace=True)
taxonomy.shape
"""reads in table.qza file from qiime2 into DataFrame"""
unrarefied_table = Artifact.load('outputs/table.qza')
rarefy_result = feature_table.methods.rarefy(table=unrarefied_table, sampling_depth=100)
rarefied_table = rarefy_result.rarefied_table
table = rarefied_table.view(pd.DataFrame)
"""pre process data into dataframes for plotting taxonomy relative abundances in stacked barplots"""
kingdom_df, phylum_df, class_df, order_df, family_df, genus_df, species_df = prep.prepare_data_stacked_barplots(table, taxonomy)
"""create plotly figures"""
king_plot = make_plots.plotly_stacked_barplot(kingdom_df, 'Kingdom Relative Abundances')
phy_plot = make_plots.plotly_stacked_barplot(phylum_df, 'Phylum Relative Abundances')
class_plot = make_plots.plotly_stacked_barplot(class_df, 'Class Relative Abundances')
ord_plot = make_plots.plotly_stacked_barplot(order_df, 'Order Relative Abundances')
fam_plot = make_plots.plotly_stacked_barplot(family_df, 'Family Relative Abundances')
gen_plot = make_plots.plotly_stacked_barplot(genus_df, 'Genus Relative Abundances')
spec_plot = make_plots.plotly_stacked_barplot(species_df, 'Species Relative Abundances')
qual_plot = make_plots.plot_qualities('data/exported_demux/', 500)
# qual_plot = make_plots.plot_qualities(directory, 500)
qual_hist = make_plots.quality_hist()
# Loading all plot files into a pkl file
filename = basename + '.pkl'
with open(filename, 'wb') as f:
pickle.dump([king_plot, phy_plot, class_plot, ord_plot, fam_plot, gen_plot, spec_plot, qual_plot, qual_hist], f)
print('\n',
'Now please run the following command to visualize your data in dash!\n',
'\n',
'python fastQCS3_dashboard.py',
'\n')
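# Hedged sketch of how the pickled figures could be reloaded elsewhere (for
# example by the dashboard script mentioned above). The path argument is
# whatever file name was chosen interactively; this helper is illustrative
# only and is not called here.
def _example_load_visualizations(pkl_path):
    with open(pkl_path, 'rb') as f:
        (king_plot, phy_plot, class_plot, ord_plot, fam_plot,
         gen_plot, spec_plot, qual_plot, qual_hist) = pickle.load(f)
    return king_plot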
|
__all__ = ['DesignerSandbox', ]
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.sandbox import Sandbox, sandbox
class DesignerSandbox(Sandbox):
    '''DesignerSandbox is a subclass of :class:`~kivy.uix.sandbox.Sandbox`
    for use with Kivy Designer. It emits an on_getting_exception event
    when code running in it raises an exception.
'''
__events__ = ('on_getting_exception',)
error_active = False
'''If True, automatically show the error tab on getting an Exception
'''
def __init__(self, **kwargs):
super(DesignerSandbox, self).__init__(**kwargs)
self.exception = None
self.tb = None
self._context['Builder'] = object.__getattribute__(Builder, '_obj')
self._context['Clock'] = object.__getattribute__(Clock, '_obj')
Clock.unschedule(self._clock_sandbox)
Clock.unschedule(self._clock_sandbox_draw)
def __exit__(self, _type, value, tb):
'''Override of __exit__
'''
self._context.pop()
if _type is not None:
return self.on_exception(value, tb=tb)
def on_exception(self, exception, tb=None):
'''Override of on_exception
'''
self.exception = exception
self.tb = tb
self.dispatch('on_getting_exception')
return super(DesignerSandbox, self).on_exception(exception, tb)
def on_getting_exception(self, *args):
'''Default handler for 'on_getting_exception'
'''
pass
@sandbox
def _clock_sandbox(self, *args):
pass
@sandbox
def _clock_sandbox_draw(self, *args):
pass
|
from optimModels.model.kineticModel import load_kinetic_model
from optimModels.simulation.run import kinetic_simulation
from optimModels.optimization.evaluation_functions import build_evaluation_function
from optimModels.optimization.run import kinetic_strain_optim, cbm_strain_optim, gecko_strain_optim
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import os
import xml.etree.ElementTree as ET
from PIL import Image
from collections import namedtuple
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from object_detection.utils import dataset_util
XMLDetail = namedtuple("XMLDetail", ['filename', 'width', 'height', 'label', 'xmin', 'ymin', 'xmax', 'ymax'])
class GeneratePaths(object):
def __init__(self, output_dir):
self.output_dir = output_dir
def create_directories(self, paths):
        for key, value in paths.items():
            if os.path.exists(value):
                print("{} path exists".format(key))
else:
print("Creating {} path".format(key))
os.makedirs(value)
def generate(self):
paths = {}
paths['tar_files'] = os.path.join(self.output_dir, 'tar_files')
paths['extracted_tars'] = os.path.join(self.output_dir, 'extracted_tars')
paths['extract_tars'] = os.path.join(self.output_dir, 'extract_tars')
paths['image_files'] = os.path.join(self.output_dir, 'image_files')
paths['image_dataset'] = os.path.join(self.output_dir, 'image_dataset')
paths['cleaned_xml_dir'] = os.path.join(self.output_dir, 'cleaned_xml_dir')
paths['cleaned_image_dir'] = os.path.join(self.output_dir, 'cleaned_image_dir')
paths['train_test_csv'] = os.path.join(self.output_dir, 'train_test_csv')
paths['tf_records'] = os.path.join(self.output_dir, 'tf_records')
self.create_directories(paths)
return paths
class XML_TO_CSV(object):
def __init__(self, base_dir, xml_dir, image_dir):
self.base_dir = base_dir
self.xml_dir = xml_dir
self.image_dir = image_dir
def xml_to_csv(self, input_path):
xmls = []
xml_files = [os.path.join(input_path, i) for i in os.listdir(input_path)]
        image_list = list(map(lambda _: os.path.join(self.image_dir, _.split('/')[-1].split('.')[0] + '.jpg'), xml_files))
xml_list = list(map(lambda _:ET.parse(_).getroot(), xml_files))
assert len(image_list) == len(xml_list)
for root, image_path in zip(xml_list, image_list):
for member in root.findall('object'):
xmls.append(XMLDetail(image_path.format(root.find('filename').text),
int(root.find('size')[0].text),
int(root.find('size')[1].text),
root.find('folder').text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)))
return pd.DataFrame(xmls)
def convert_format(self):
output_path = os.path.join(self.base_dir, 'train_test_csv')
output_csv = os.path.join(self.base_dir, 'output_csv.csv')
if not os.path.exists(output_path):
os.makedirs(output_path)
print("XML path: {}".format(self.xml_dir))
xml_folders = [os.path.join(self.xml_dir, i) for i in os.listdir(self.xml_dir)]
for xml in xml_folders:
final_df = self.xml_to_csv(xml)
with open(output_csv, 'a') as f:
final_df.to_csv(f, header=False, index=False)
final_df = pd.read_csv(output_csv)
final_df.columns = XMLDetail._fields
final_df.rename(columns={'label': 'class'}, inplace=True)
train, test = train_test_split(final_df, test_size=0.2)
train.to_csv(os.path.join(output_path, "train.csv"), index=False)
test.to_csv(os.path.join(output_path, "test.csv"), index=False)
print("Created train and test XML to CSV convertions.")
class GenerateTFRecords(object):
def __init__(self, csv_path, image_dir, output_dir, label_map):
self.csv_path = csv_path
self.image_dir = image_dir
self.output_dir = output_dir
self.label_map = json.load(open(label_map), encoding='utf-8')
def group_annotations(self, annotations, grouping_parameter):
GroupedData = namedtuple('GroupedData', ['filename', 'object'])
return [GroupedData(filename, annotations[annotations[grouping_parameter] == filename]) \
for filename in annotations[grouping_parameter]]
def create_tf_example(self, group, image_path):
with tf.gfile.GFile(os.path.join(image_path, '{}'.format(group.filename)), 'rb') as fid:
encoded_image = fid.read()
width, height = Image.open(io.BytesIO(encoded_image)).size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins, xmaxs, ymins, ymaxs, classes_text, classes = [], [], [], [], [], []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(self.label_map[row['class']])
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_image),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def generte_tf_records(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
csv_files = [os.path.join(self.csv_path, _) for _ in os.listdir(self.csv_path) \
if _.endswith('.csv')]
print("CSV File: {}".format(csv_files))
for csv in csv_files:
tf_name = csv.split('/')[-1].split('.')[0]
print("Generating {} TF records".format(tf_name))
tf_record = os.path.join(self.output_dir, tf_name + '.record')
writer = tf.python_io.TFRecordWriter(tf_record)
annotations = pd.read_csv(csv)
grouped = self.group_annotations(annotations, 'filename')
for group in grouped:
tf_example = self.create_tf_example(group, self.image_dir)
writer.write(tf_example.SerializeToString())
writer.close()
print('Successfully created the TFRecords: {}'.format(tf_record))
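# Hedged end-to-end sketch of how these helpers might be wired together
# (illustration only, not executed on import). The output directory and the
# label-map JSON path below are assumptions, not paths required by this module.
def _example_pipeline(output_dir='/tmp/dataset', label_map='/tmp/label_map.json'):
    paths = GeneratePaths(output_dir).generate()
    converter = XML_TO_CSV(base_dir=output_dir,
                           xml_dir=paths['cleaned_xml_dir'],
                           image_dir=paths['cleaned_image_dir'])
    converter.convert_format()
    records = GenerateTFRecords(csv_path=paths['train_test_csv'],
                                image_dir=paths['cleaned_image_dir'],
                                output_dir=paths['tf_records'],
                                label_map=label_map)
    records.generte_tf_records()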
|
# dataset distribution is specified by a distribution file containing samples
import bpy
import math
import sys
import os
print(sys.path)
import numpy as np
import random
import struct
from numpy.linalg import inv
from math import *
import mathutils
# Note: this first camPosToQuaternion is superseded by the redefinition below.
def camPosToQuaternion(cx, cy, cz):
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
axis = (-cz, 0, cx)
angle = math.acos(cy)
a = math.sqrt(2) / 2
b = math.sqrt(2) / 2
w1 = axis[0]
w2 = axis[1]
w3 = axis[2]
c = math.cos(angle / 2)
d = math.sin(angle / 2)
q1 = a * c - b * d * w1
q2 = b * c + a * d * w1
q3 = a * d * w2 + b * d * w3
q4 = -b * d * w2 + a * d * w3
return (q1, q2, q3, q4)
def quaternionFromYawPitchRoll(yaw, pitch, roll):
c1 = math.cos(yaw / 2.0)
c2 = math.cos(pitch / 2.0)
c3 = math.cos(roll / 2.0)
s1 = math.sin(yaw / 2.0)
s2 = math.sin(pitch / 2.0)
s3 = math.sin(roll / 2.0)
q1 = c1 * c2 * c3 + s1 * s2 * s3
q2 = c1 * c2 * s3 - s1 * s2 * c3
q3 = c1 * s2 * c3 + s1 * c2 * s3
q4 = s1 * c2 * c3 - c1 * s2 * s3
return (q1, q2, q3, q4)
def camPosToQuaternion(cx, cy, cz):
print(sys.path)
q1a = 0
q1b = 0
q1c = math.sqrt(2) / 2
q1d = math.sqrt(2) / 2
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
t = math.sqrt(cx * cx + cy * cy)
tx = cx / t
ty = cy / t
yaw = math.acos(ty)
if tx > 0:
yaw = 2 * math.pi - yaw
pitch = 0
tmp = min(max(tx*cx + ty*cy, -1),1)
#roll = math.acos(tx * cx + ty * cy)
roll = math.acos(tmp)
if cz < 0:
roll = -roll
print("%f %f %f" % (yaw, pitch, roll))
q2a, q2b, q2c, q2d = quaternionFromYawPitchRoll(yaw, pitch, roll)
q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d
q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d
q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d
return (q1, q2, q3, q4)
def camRotQuaternion(cx, cy, cz, theta):
theta = theta / 180.0 * math.pi
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = -cx / camDist
cy = -cy / camDist
cz = -cz / camDist
q1 = math.cos(theta * 0.5)
q2 = -cx * math.sin(theta * 0.5)
q3 = -cy * math.sin(theta * 0.5)
q4 = -cz * math.sin(theta * 0.5)
return (q1, q2, q3, q4)
def quaternionProduct(qx, qy):
a = qx[0]
b = qx[1]
c = qx[2]
d = qx[3]
e = qy[0]
f = qy[1]
g = qy[2]
h = qy[3]
q1 = a * e - b * f - c * g - d * h
q2 = a * f + b * e + c * h - d * g
q3 = a * g - b * h + c * e + d * f
q4 = a * h + b * g - c * f + d * e
return (q1, q2, q3, q4)
def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
phi = float(elevation_deg) / 180 * math.pi
theta = float(azimuth_deg) / 180 * math.pi
x = (dist * math.cos(theta) * math.cos(phi))
y = (dist * math.sin(theta) * math.cos(phi))
z = (dist * math.sin(phi))
return (x, y, z)
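# Small worked example of the spherical-to-Cartesian conversion above
# (illustration only, not called by the rendering code below): at azimuth 0
# and elevation 0 the camera sits on the +x axis at the given distance.
def _example_camera_position():
    x, y, z = obj_centened_camera_pos(2.0, 0, 0)
    # x == 2.0, y == 0.0, z == 0.0 (up to floating point rounding)
    return (x, y, z)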
def makeMaterial(name):
mat = bpy.data.materials.new(name)
mat.subsurface_scattering.use = True
return mat
def setMaterial(ob, mat):
me = ob.data
me.materials.append(mat)
def importParamBIN(origin_list, lookat_list, upvec_list):
paramRotList = list()
paramTransList = list()
cutList = list()
x0 = -10000
y0 = -10000
x1 = 10000
y1 = 10000
origin = np.array([eval(i) for i in origin_list.split(',')])
lookat = np.array([eval(i) for i in lookat_list.split(',')])
viewUp = np.array([eval(i) for i in upvec_list.split(',')])
viewDir = origin - lookat
viewDir = viewDir / np.linalg.norm(viewDir)
viewRight = np.cross(viewUp, viewDir)
viewRight= viewRight / np.linalg.norm(viewRight)
viewUp = np.cross(viewDir, viewRight)
viewUp = viewUp / np.linalg.norm(viewUp)
R = np.ndarray((3, 3))
R[0, 0] = viewRight[0]
R[1, 0] = viewRight[1]
R[2, 0] = viewRight[2]
R[0, 1] = viewUp[0]
R[1, 1] = viewUp[1]
R[2, 1] = viewUp[2]
R[0, 2] = viewDir[0]
R[1, 2] = viewDir[1]
R[2, 2] = viewDir[2]
    R = inv(R)
paramRotList.append(R)
T = np.ndarray((3, 1))
T[0, 0] = origin[0]
T[1, 0] = origin[1]
T[2, 0] = origin[2]
T = np.dot(-R, T)
paramTransList.append(T)
    cutList.append([x0, y0, x1, y1])
return (paramRotList, paramTransList, cutList)
"""---------- main -----------"""
modelPath = sys.argv[6]
outPath = sys.argv[7]
print(sys.path)
modelId = os.path.basename(modelPath)[:-4]
bpy.ops.import_scene.obj(filepath=modelPath)
bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
#bpy.context.scene.render.use_shadows = False
bpy.context.scene.render.use_raytrace = True
bpy.context.scene.render.resolution_x = 224
bpy.context.scene.render.resolution_y = 224
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.world.light_settings.use_environment_light = True
bpy.context.scene.world.light_settings.environment_energy = 0.2
bpy.context.scene.render.use_freestyle = False
bpy.context.scene.render.line_thickness = 0
bpy.context.scene.render.edge_threshold = 0
bpy.context.scene.render.edge_color = (1, 1, 1)
bpy.context.scene.render.use_edge_enhance = False
#bpy.context.mesh.show_normal_vertex = True;
# YOUR CODE START HERE
# fix mesh
scene = bpy.context.scene
for obj in scene.objects:
if obj.type == 'MESH':
scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
bpy.ops.mesh.reveal()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
# clear default lights
bpy.ops.object.select_by_type(type='LAMP')
bpy.ops.object.delete(use_global=False)
# set area lights
light_azimuth_deg = 0
light_elevation_deg = 90
lightDist = 10
lx, ly, lz = obj_centened_camera_pos(lightDist, light_azimuth_deg, light_elevation_deg)
bpy.ops.object.lamp_add(type='AREA', view_align = False, location=(lx, ly, lz))
data = bpy.data.objects['Area'].data
data.energy = 1
data.distance = 5
#data.shape = 'SQUARE'
#data.shadow_ray_samples_x = 8
light_azimuth_deg = 0
light_elevation_deg = 45
lightDist = 10
lx, ly, lz = obj_centened_camera_pos(lightDist, light_azimuth_deg, light_elevation_deg)
bpy.ops.object.lamp_add(type='AREA', view_align = False, location=(lx, ly, lz))
data = bpy.data.objects['Area.001'].data
data.energy = 1
data.distance = 5
#camObj.rotation_mode = 'XYZ'
#camObj.rotation_euler[0] = 0
#camObj.rotation_euler[1] = 0
#camObj.rotation_euler[2] = 0
outFileView = outPath
bpy.data.objects['Area'].data.energy = 1
bpy.data.objects['Area.001'].data.energy = 1
bpy.context.scene.world.light_settings.environment_energy = 0.2
bpy.data.scenes['Scene'].render.filepath = outFileView
bpy.ops.render.render( write_still=True )
|
from strongr.schedulerdomain.model import VmState
from strongr.schedulerdomain.query import RequestScheduledJobs, RequestFinishedJobs, RequestJobInfo, FindNodeWithAvailableResources, RequestResourcesRequired, RequestVmsByState
from strongr.core.exception import InvalidParameterException
import re
class QueryFactory:
""" This factory instantiates query objects to be sent to a scheduler querybus. """
def newRequestVms(self, states):
""" Generates a new RequestVms query
:returns: A RequestVms query object
:rtype: RequestVmsByState
"""
if not isinstance(states, list):
raise InvalidParameterException("states invalid")
for state in states:
if state not in VmState:
raise InvalidParameterException("{} is not a valid VmState".format(state))
return RequestVmsByState(states)
def newRequestResourcesRequired(self):
""" Generates a new RequestResourcesRequired query
:returns: A RequestResourcesRequired query object
:rtype: RequestResourcesRequired
"""
return RequestResourcesRequired()
def newFindNodeWithAvailableResources(self, cores, ram):
""" Generates a new FindNodeWithAvailableResources query
:param cores: the amount of cores needed to complete the task
:type cores: int
:param ram: the amount of ram needed to complete the task in GiB
:type ram: int
:returns: A FindNodeWithAvailableResources query object
:rtype: FindNodeWithAvailableResources
"""
return FindNodeWithAvailableResources(cores=cores, ram=ram)
def newRequestScheduledJobs(self):
""" Generates a new RequestScheduledJobs query
:returns: A RequestScheduledJobs query object
:rtype: RequestScheduledJobs
"""
return RequestScheduledJobs()
def newRequestFinishedJobs(self, jobs=None):
""" Generates a new RequestFinishedJobs query
:param jobs: a list of job id's
:returns: A RequestFinishedJobs query object
:rtype: RequestFinishedJobs
"""
jobs_sanitized = [re.sub('[^a-zA-Z0-9-]', '', x) for x in jobs] # sanitize inputs
return RequestFinishedJobs(jobs_sanitized)
def newRequestJobInfo(self, jobs=None):
""" Generates a new RequestFinishedJobs query
:param jobs: a list of job id's
:returns: A RequestFinishedJobs query object
:rtype: RequestFinishedJobs
"""
jobs_sanitized = [re.sub('[^a-zA-Z0-9-]', '', x) for x in jobs] # sanitize inputs
return RequestJobInfo(jobs_sanitized)
def newRequestTaskInfo(self, taskid):
""" Generates a new RequestTaskInfo query
:param taskid: the taskid
:type taskid: string
        :returns: A RequestJobInfo query object for the given task id
:rtype: RequestJobInfo
"""
return RequestJobInfo(taskid)
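# Hedged usage sketch (not executed on import): building a couple of queries
# with the factory. The resource numbers and job ids are illustrative only.
def _example_queries():
    factory = QueryFactory()
    node_query = factory.newFindNodeWithAvailableResources(cores=2, ram=4)
    jobs_query = factory.newRequestFinishedJobs(jobs=['job-1', 'job-2'])
    return node_query, jobs_query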
|
import os
from pathlib import Path
os.environ['PATH'] = str((Path(__file__).parent.parent.parent / "lib").resolve()) + os.pathsep + os.environ['PATH']
|
"""
Configure a --skip-agent command line argument for py.test that skips agent-
dependent tests.
"""
import pytest
def pytest_addoption(parser):
parser.addoption(
"--skip-agent", action="store_true", help="run agent integration tests"
)
def pytest_configure(config):
config.addinivalue_line(
"markers", "agent: marks tests which require the blackfynn agent"
)
def pytest_runtest_setup(item):
if "agent" in item.keywords and item.config.getoption("--skip-agent"):
pytest.skip("Skipping agent tests")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BusinessTime import BusinessTime
class ServiceBusinessHours(object):
def __init__(self):
self._business_date = None
self._business_time = None
@property
def business_date(self):
return self._business_date
@business_date.setter
def business_date(self, value):
if isinstance(value, list):
self._business_date = list()
for i in value:
self._business_date.append(i)
@property
def business_time(self):
return self._business_time
@business_time.setter
def business_time(self, value):
if isinstance(value, list):
self._business_time = list()
for i in value:
if isinstance(i, BusinessTime):
self._business_time.append(i)
else:
self._business_time.append(BusinessTime.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.business_date:
if isinstance(self.business_date, list):
for i in range(0, len(self.business_date)):
element = self.business_date[i]
if hasattr(element, 'to_alipay_dict'):
self.business_date[i] = element.to_alipay_dict()
if hasattr(self.business_date, 'to_alipay_dict'):
params['business_date'] = self.business_date.to_alipay_dict()
else:
params['business_date'] = self.business_date
if self.business_time:
if isinstance(self.business_time, list):
for i in range(0, len(self.business_time)):
element = self.business_time[i]
if hasattr(element, 'to_alipay_dict'):
self.business_time[i] = element.to_alipay_dict()
if hasattr(self.business_time, 'to_alipay_dict'):
params['business_time'] = self.business_time.to_alipay_dict()
else:
params['business_time'] = self.business_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ServiceBusinessHours()
if 'business_date' in d:
o.business_date = d['business_date']
if 'business_time' in d:
o.business_time = d['business_time']
return o
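# Hedged usage sketch (illustration only): populating the object and
# serializing it back to a dict. The dates are made up, and constructing
# BusinessTime with no arguments is an assumption based on the usual pattern
# of this SDK's domain objects.
def _example_service_business_hours():
    hours = ServiceBusinessHours()
    hours.business_date = ['2021-01-01', '2021-01-02']
    hours.business_time = [BusinessTime()]
    return hours.to_alipay_dict()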
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import os, sys, argparse
import math
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
from data import *
from utils.augmentations import YOLACTAugmentation, BaseTransform
#from utils.functions import MovingAverage, SavePath
#from utils.logger import Log
from layers.modules import MultiBoxLoss
from yolact import Yolact
# Oof
import eval as eval_script
# global value to record the best val loss and COCO mask AP
best_val_loss = float('inf')
best_AP = 0.0
def set_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def train(args, epoch, model, criterion, device, train_loader, optimizer, summary_writer):
# all the possible loss type in MultiBoxLoss
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']
loss_dict = {}
epoch_loss = 0.0
model.train()
tbar = tqdm(train_loader)
for i, (images, labels) in enumerate(tbar):
# calculate iteration from epoch and steps
iteration = epoch * len(train_loader) + i
# Warm up by linearly interpolating the learning rate from some smaller value
if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
set_lr(optimizer, (cfg.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)
        # Adjust the learning rate at the given iterations, but also if we resume from past that iteration.
        # Check the boundaries from the last one backwards so the latest passed step wins.
        for j in reversed(range(len(cfg.lr_steps))):
            if iteration >= cfg.lr_steps[j]:
                set_lr(optimizer, cfg.lr * (cfg.gamma ** (j+1)))
                break
#while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
#step_index += 1
#set_lr(optimizer, args.lr * (args.gamma ** step_index))
# forward propagation
optimizer.zero_grad()
targets, masks, num_crowds = labels
images = images.to(device)
targets = [target.to(device) for target in targets]
masks = [mask.to(device) for mask in masks]
preds = model(images)
# calculate loss, here losses is a dict of loss tensors
losses = criterion(model, preds, targets, masks, num_crowds)
losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel
loss = sum([losses[k] for k in losses])
# backward propagation
loss.backward() # Do this to free up vram even if loss is not finite
if torch.isfinite(loss).item():
optimizer.step()
# collect loss and accuracy
batch_loss = loss.item()
epoch_loss += batch_loss
# sum up for each loss type
for loss_type, loss_value in losses.items():
if loss_type not in loss_dict:
loss_dict[loss_type] = (loss_value).item()
else:
loss_dict[loss_type] += (loss_value).item()
# prepare loss display labels
loss_labels = sum([[k, v/(i + 1)] for k,v in loss_dict.items()], [])
tbar.set_description((('%s:%.2f |' * len(losses)) + ' Train loss:%.2f')
% tuple(loss_labels + [epoch_loss/(i + 1)]))
# log train loss
summary_writer.add_scalar('train loss', batch_loss, iteration)
# decay learning rate every epoch
#if lr_scheduler:
#lr_scheduler.step()
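# Standalone illustration of the schedule applied in train() above: linear
# warmup up to lr_warmup_until, then a step decay at each lr_steps boundary.
# Pure-Python sketch with illustrative default numbers; it is not used by the
# training loop itself.
def _example_lr_at(iteration, lr=1e-3, lr_warmup_init=1e-4, lr_warmup_until=500,
                   lr_steps=(280000, 600000, 700000, 750000), gamma=0.1):
    if lr_warmup_until > 0 and iteration <= lr_warmup_until:
        return (lr - lr_warmup_init) * (iteration / lr_warmup_until) + lr_warmup_init
    for j in reversed(range(len(lr_steps))):
        if iteration >= lr_steps[j]:
            return lr * (gamma ** (j + 1))
    return lr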
def validate(args, epoch, step, model, criterion, device, val_loader, log_dir, summary_writer):
global best_val_loss
# all the possible loss type in MultiBoxLoss
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S', 'I']
loss_dict = {}
val_loss = 0.0
    # Because the YOLACT model's output differs between 'train' and 'eval'
    # mode, we can't use model.eval() here to get the val loss. The model is
    # kept in 'train' mode but gradients are not collected.
#model.eval()
with torch.no_grad():
tbar = tqdm(val_loader)
for i, (images, labels) in enumerate(tbar):
# forward propagation
targets, masks, num_crowds = labels
images = images.to(device)
targets = [target.to(device) for target in targets]
masks = [mask.to(device) for mask in masks]
preds = model(images)
# calculate loss, here losses is a dict of loss tensors
losses = criterion(model, preds, targets, masks, num_crowds)
losses = { k: (v).mean() for k,v in losses.items() } # Mean here because Dataparallel
loss = sum([losses[k] for k in losses])
# collect val loss
val_loss += loss.item()
# sum up for each loss type
for loss_type, loss_value in losses.items():
if loss_type not in loss_dict:
loss_dict[loss_type] = (loss_value).item()
else:
loss_dict[loss_type] += (loss_value).item()
# prepare loss display labels
loss_labels = sum([[k, v/(i + 1)] for k,v in loss_dict.items()], [])
tbar.set_description((('%s:%.2f |' * len(losses)) + ' Val loss:%.2f')
% tuple(loss_labels + [val_loss/(i + 1)]))
val_loss /= len(val_loader)
print('Validate set: Average loss: {:.4f}'.format(val_loss))
# log validation loss and accuracy
summary_writer.add_scalar('val loss', val_loss, step)
# save checkpoint with best val loss
if val_loss < best_val_loss:
os.makedirs(log_dir, exist_ok=True)
checkpoint_dir = os.path.join(log_dir, 'ep{epoch:03d}-val_loss{val_loss:.3f}.pth'.format(epoch=epoch+1, val_loss=val_loss))
torch.save(model, checkpoint_dir)
print('Epoch {epoch:03d}: val_loss improved from {best_val_loss:.3f} to {val_loss:.3f}, saving model to {checkpoint_dir}'.format(epoch=epoch+1, best_val_loss=best_val_loss, val_loss=val_loss, checkpoint_dir=checkpoint_dir))
best_val_loss = val_loss
else:
print('Epoch {epoch:03d}: val_loss did not improve from {best_val_loss:.3f}'.format(epoch=epoch+1, best_val_loss=best_val_loss))
def evaluate(args, epoch, model, device, dataset, log_dir):
global best_AP
with torch.no_grad():
model.eval()
print("Computing validation mAP (this may take a while)...", flush=True)
eval_info = eval_script.evaluate(model, dataset, device, train_mode=True)
model.train()
# check COCO mask AP to store best checkpoint
eval_AP = eval_info['mask']['all']
# save checkpoint with best mask AP
if eval_AP > best_AP:
os.makedirs(log_dir, exist_ok=True)
checkpoint_dir = os.path.join(log_dir, 'ep{epoch:03d}-eval_AP{eval_AP:.3f}.pth'.format(epoch=epoch+1, eval_AP=eval_AP))
torch.save(model, checkpoint_dir)
print('Epoch {epoch:03d}: eval_AP improved from {best_AP:.3f} to {eval_AP:.3f}, saving model to {checkpoint_dir}'.format(epoch=epoch+1, best_AP=best_AP, eval_AP=eval_AP, checkpoint_dir=checkpoint_dir))
best_AP = eval_AP
else:
print('Epoch {epoch:03d}: eval_AP did not improve from {best_AP:.3f}'.format(epoch=epoch+1, best_AP=best_AP))
class ModelLoss(nn.Module):
"""
A wrapper for running the network and computing the loss
This is so we can more efficiently use DataParallel.
"""
def __init__(self, model:Yolact, criterion:MultiBoxLoss):
super().__init__()
self.model = model
self.criterion = criterion
def forward(self, images, targets, masks, num_crowds):
preds = self.model(images)
losses = self.criterion(self.model, preds, targets, masks, num_crowds)
return losses
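# Sketch of how ModelLoss could be used so the loss is computed on each replica
# under nn.DataParallel (this mirrors the commented-out `train_model` line in
# main(); the multi-GPU wiring below is an assumption, not something this script
# currently does):
#
#   train_model = ModelLoss(model, criterion).to(device)
#   if torch.cuda.device_count() > 1:
#       train_model = nn.DataParallel(train_model)
#   losses = train_model(images, targets, masks, num_crowds)
#   loss = sum(v.mean() for v in losses.values())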
def main():
parser = argparse.ArgumentParser(description='Yolact Training Script')
# Model definition options
parser.add_argument('--config', type=str, required=False, default=None,
help='The config object to use.')
# Data options
parser.add_argument('--dataset', type=str, required=False, default=None,
help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
# Training settings
parser.add_argument('--batch_size', type=int, required=False, default=8,
help = "batch size for train, default=%(default)s")
parser.add_argument('--lr', type=float, required=False, default=None,
help = "Initial learning rate. Leave as None to read this from the config. default=%(default)s")
parser.add_argument('--momentum', type=float, required=False, default=None,
help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', type=float, required=False, default=None,
help='Weight decay for SGD. Leave as None to read this from the config.')
#parser.add_argument('--gamma', type=float, required=False, default=None,
#help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
    parser.add_argument('--num_epoch', type=int, required=False, default=100,
help = "Number of training epochs, default=%(default)s")
parser.add_argument('--validation_size', type=int, required=False, default=5000,
help='The number of images to use for validation.')
parser.add_argument('--validation_epoch', type=int, required=False, default=2,
help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
# Evaluation options
parser.add_argument('--eval_online', default=False, action="store_true",
help='Whether to do evaluation on validation dataset during training')
parser.add_argument('--eval_epoch_interval', type=int, required=False, default=5,
                        help = "Number of epochs between evaluation runs, default=%(default)s")
parser.add_argument('--save_eval_checkpoint', default=False, action="store_true",
help='Whether to save checkpoint with best evaluation result')
args = parser.parse_args()
log_dir = os.path.join('logs', '000')
if args.config is not None:
set_cfg(args.config)
if args.dataset is not None:
set_dataset(args.dataset)
# Update training parameters from the config if necessary
def replace(name):
        if getattr(args, name) is None: setattr(args, name, getattr(cfg, name))
replace('lr')
replace('decay')
#replace('gamma')
replace('momentum')
# create running device
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
torch.manual_seed(1)
# prepare train&val dataset loader
train_dataset = COCOInstanceSegmentation(image_path=cfg.dataset.train_images,
info_file=cfg.dataset.train_info,
transform=YOLACTAugmentation(MEANS))
train_loader = data.DataLoader(train_dataset, args.batch_size,
num_workers=4,
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
if args.validation_epoch > 0:
# setup eval script config
eval_script.parse_args(['--max_images='+str(args.validation_size)])
val_dataset = COCOInstanceSegmentation(image_path=cfg.dataset.valid_images,
info_file=cfg.dataset.valid_info,
transform=BaseTransform(MEANS))
val_loader = data.DataLoader(val_dataset, args.batch_size,
num_workers=4,
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
# get tensorboard summary writer
summary_writer = SummaryWriter(os.path.join(log_dir, 'tensorboard'))
model = Yolact().to(device)
model.train()
model.init_weights(backbone_path='weights/' + cfg.backbone.path)
# optimizer and loss
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.decay)
criterion = MultiBoxLoss(num_classes=cfg.num_classes,
pos_threshold=cfg.positive_iou_threshold,
neg_threshold=cfg.negative_iou_threshold,
negpos_ratio=cfg.ohem_negpos_ratio).to(device)
#train_model = ModelLoss(model, criterion).to(device)
# Initialize everything
if not cfg.freeze_bn: model.freeze_bn() # Freeze bn so we don't kill our means
model(torch.zeros(1, 3, cfg.max_size, cfg.max_size).to(device))
if not cfg.freeze_bn: model.freeze_bn(True)
print(model)
# Train loop
for epoch in range(args.num_epoch):
        print('Epoch %d/%d' % (epoch + 1, args.num_epoch))
train(args, epoch, model, criterion, device, train_loader, optimizer, summary_writer)
        if args.validation_epoch > 0:
            validate(args, epoch, epoch*len(train_loader), model, criterion, device, val_loader, log_dir, summary_writer)
            if args.eval_online and (epoch+1) % args.eval_epoch_interval == 0:
                # Do eval every eval_epoch_interval epochs
                evaluate(args, epoch, model, device, val_dataset, log_dir)
# Finally store model
torch.save(model, os.path.join(log_dir, 'trained_final.pth'))
if __name__ == '__main__':
main()
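# Example invocation (assuming this file is saved as train.py; the config name is
# a placeholder for whatever configs this port defines):
#
#   python train.py --config=yolact_base_config --batch_size=8 --num_epoch=100 --eval_online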
|
import numpy as np


def neighbord_analysis(x_as, column=0):
    """
    Given an array x_as, compute the distances between consecutive elements,
    together with their mean and standard deviation.
    Author: Michele Monti
    Args:
        x_as: the list or data set that you want to analyze
    Kwargs:
        column: the column of the data set that you need to analyze
    Returns:
        diff_neighbor: the differences between first neighbors in the list
        mean_distance: the mean distance between neighbors
        std_dev: the standard deviation of the distances between neighbors
    """
    x_as = np.array(x_as)
    correct_axis = x_as
    if x_as.ndim > 1:
        correct_axis = x_as[:, column]
    diff_neighbor = [itm - correct_axis[idx - 1] for idx, itm in enumerate(correct_axis)][1:]
    mean_distance = np.mean(diff_neighbor)
    std_dev = np.std(diff_neighbor)
    return diff_neighbor, mean_distance, std_dev
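
# Minimal usage sketch (not part of the original module; the sample values are
# illustrative only):
if __name__ == '__main__':
    sample = [0.0, 1.0, 2.5, 4.5]
    diffs, mean_distance, std_dev = neighbord_analysis(sample)
    print('neighbor differences:', diffs)
    print('mean distance: %.3f, std dev: %.3f' % (mean_distance, std_dev))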
|
# -*- coding: utf-8 -*-
from ...register import Registry
from ..aws_resources import AwsResourceSearcher
class AwsResourceSearcherRegistry(Registry):
def get_key(self, obj):
"""
:type obj: AwsResourceSearcher
:rtype: str
"""
return obj.id
aws_res_sr_registry = AwsResourceSearcherRegistry()
reg = aws_res_sr_registry
# --- Register your AWS Resource Searcher here ---
from .ec2_instances import ec2_instances_searcher
from .ec2_securitygroups import ec2_securitygroups_searcher
from .ec2_amis import ec2_amis_searcher
from .ec2_volumes import ec2_volumns_searcher
from .vpc_vpcs import vpc_vpcs_searcher
from .vpc_subnets import vpc_subnets_searcher
from .iam_roles import iam_roles_searcher
from .iam_policies import iam_policies_searcher
from .iam_users import iam_users_searcher
from .glue_databases import glue_databases_searcher
from .glue_tables import glue_tables_searcher
from .glue_jobs import glue_job_searcher
from .glue_awsgluestudio import glue_studiojob_searcher
from .s3_buckets import s3_bucket_searcher
from .lambda_functions import lambda_functions_searcher
from .lambda_layers import lambda_layers_searcher
from .lakeformation_databases import lakeformation_databases_searcher
from .lakeformation_tables import lakeformation_tables_searcher
from .cloudformation_stacks import cloudformation_stacks_searcher
from .dynamodb_tables import dynamodb_tables_searcher
from .dynamodb_items import dynamodb_items_searcher
from .rds_databases import rds_databases_searcher
from .sqs_queues import sqs_queues_searcher
from .kms_awsmanagedkeys import kms_awsmanagedkeys_searcher
from .kms_customermanagedkeys import kms_customermanagedkeys_searcher
from .secretsmanager_secrets import secretmanager_secrets_searcher
from .cloud9_environments import cloud9_environments_searcher
from .cloud9_ide import cloud9_ide_searcher
reg.check_in(ec2_instances_searcher)
reg.check_in(ec2_securitygroups_searcher)
reg.check_in(ec2_amis_searcher)
reg.check_in(ec2_volumns_searcher)
reg.check_in(vpc_vpcs_searcher)
reg.check_in(vpc_subnets_searcher)
reg.check_in(iam_roles_searcher)
reg.check_in(iam_policies_searcher)
reg.check_in(iam_users_searcher)
reg.check_in(glue_databases_searcher)
reg.check_in(glue_tables_searcher)
reg.check_in(glue_job_searcher)
reg.check_in(glue_studiojob_searcher)
reg.check_in(s3_bucket_searcher)
reg.check_in(lambda_functions_searcher)
reg.check_in(lambda_layers_searcher)
reg.check_in(lakeformation_databases_searcher)
reg.check_in(lakeformation_tables_searcher)
reg.check_in(cloudformation_stacks_searcher)
reg.check_in(dynamodb_tables_searcher)
reg.check_in(dynamodb_items_searcher)
reg.check_in(rds_databases_searcher)
reg.check_in(sqs_queues_searcher)
reg.check_in(kms_awsmanagedkeys_searcher)
reg.check_in(kms_customermanagedkeys_searcher)
reg.check_in(secretmanager_secrets_searcher)
reg.check_in(cloud9_environments_searcher)
reg.check_in(cloud9_ide_searcher)
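# Sketch of how one more searcher would be plugged in. The module and object names
# below are placeholders, not real members of this package:
#
#   from .sns_topics import sns_topics_searcher
#   reg.check_in(sns_topics_searcher)
#
# check_in() keys each searcher by its `id` attribute (see get_key above), so a new
# searcher only needs a unique `id` to be registered alongside the ones above.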
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://funmatu.wordpress.com/2017/06/01/pyautogui%EF%BC%9Fpywinauto%EF%BC%9F/
import os
import time
import numpy as np
import cv2
if (os.name == 'nt'):
import win32gui
import win32api
import win32ui
import win32con
from PIL import Image
class win_shot_class:
def __init__(self, ):
#os.mkdir('temp')
#os.mkdir('temp/_work')
#self.workPath = 'temp/_work/'
#self.proc_seq = 0
pass
def screenshot(self, ):
#self.proc_seq += 1
#if (self.proc_seq > 99):
# self.proc_seq = 1
#seq2 = '{:02}'.format(self.proc_seq)
#filename = self.workPath + 'screenshot.' + seq2 + '.bmp'
try:
SM_XVIRTUALSCREEN = 76
SM_YVIRTUALSCREEN = 77
SM_CXVIRTUALSCREEN = 78
SM_CYVIRTUALSCREEN = 79
w = win32api.GetSystemMetrics(SM_CXVIRTUALSCREEN)
h = win32api.GetSystemMetrics(SM_CYVIRTUALSCREEN)
l = win32api.GetSystemMetrics(SM_XVIRTUALSCREEN)
t = win32api.GetSystemMetrics(SM_YVIRTUALSCREEN)
hwnd = win32gui.GetDesktopWindow()
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
saveDC.BitBlt((0, 0), (w, h), mfcDC, (l, t), win32con.SRCCOPY)
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
img = Image.frombuffer('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']), bmpstr, 'raw', 'BGRX', 0, 1)
return img
        except Exception:
            return None
def windowshot(self, ):
try:
SM_CXVIRTUALSCREEN = 78
SM_CYVIRTUALSCREEN = 79
full_w = win32api.GetSystemMetrics(SM_CXVIRTUALSCREEN)
full_h = win32api.GetSystemMetrics(SM_CYVIRTUALSCREEN)
hwnd = win32gui.GetForegroundWindow()
#hwnd = win32gui.WindowFromPoint(win32gui.GetCursorPos())
            left, top, right, bottom = win32gui.GetWindowRect(hwnd)
            w = right - left
            h = bottom - top
if (w == full_w) and (h == full_h):
return None
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
saveDC.BitBlt((0, 0), (w, h), mfcDC, (0, 0), win32con.SRCCOPY)
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
img = Image.frombuffer('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']), bmpstr, 'raw', 'BGRX', 0, 1)
return img
        except Exception:
            return None
def pil2cv(self, pil_image=None):
try:
cv2_image = np.array(pil_image, dtype=np.uint8)
            if (cv2_image.ndim == 2):        # grayscale
                pass
            elif (cv2_image.shape[2] == 3):  # color: convert RGB to BGR
                cv2_image = cv2.cvtColor(cv2_image, cv2.COLOR_RGB2BGR)
            elif (cv2_image.shape[2] == 4):  # with alpha: convert RGBA to BGRA
                cv2_image = cv2.cvtColor(cv2_image, cv2.COLOR_RGBA2BGRA)
return cv2_image
        except Exception:
            pass
return None
if __name__ == '__main__':
win_shot = win_shot_class()
out_width = 640
out_height = 480
chktime = time.time()
while ((time.time() - chktime) <= 3600):
frm = win_shot.screenshot()
#frm = win_shot.windowshot()
        if (frm is not None):
            image_img = win_shot.pil2cv(frm)
            if (image_img is not None):
image_height, image_width = image_img.shape[:2]
proc_width = image_width
proc_height = image_height
if (proc_width > out_width):
proc_width = out_width
proc_height = int(proc_width * image_height / image_width)
if (proc_width != image_width ) \
or (proc_height != image_height):
proc_img = cv2.resize(image_img, (proc_width, proc_height))
else:
proc_img = image_img.copy()
proc_height, proc_width = proc_img.shape[:2]
cv2.imshow("", proc_img)
cv2.waitKey(1)
time.sleep(1.00)
cv2.destroyAllWindows()
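# Note: to capture only the foreground window instead of the whole virtual screen,
# swap the screenshot() call above for windowshot(); windowshot() returns None when
# the foreground window spans the full virtual screen, so the None check above
# already skips that case.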
|