# Basics of Python
# No need to declare a data type
my_name = "Naima"
print(my_name)
# Variable names are case sensitive; use snake_case for variables and PascalCase for classes
My_name = "Not Naima"
print(My_name)
# Can assign a variable with the value of another
name = "Jennifer"
jennifer = name
print(jennifer)
# The * operator repeats a string
person = "John"
print(person * 5)
# Can use single or double quotes for strings
print('How\'s it going?')
print("How's it going?")
# Integers also don't need to be declared
numbers = 22
print(numbers)
# The int() function parses a string into an integer
number_in_string = '22'
print(int(number_in_string))
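# A short sketch (not in the original notes): int() raises ValueError for
# non-numeric text, so parsing untrusted input is usually wrapped in try/except.
raw = "twenty-two"
try:
    print(int(raw))
except ValueError:
    print(raw + " is not a valid integer")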
|
class Priority:
    def __init__(self, higher_priority=None, lower_priority=None, non_colliding=None):
        # Use None defaults: a mutable default list would be shared across
        # every Priority instance and leak state between them.
        self.higher_priority = higher_priority if higher_priority is not None else []
        self.lower_priority = lower_priority if lower_priority is not None else []
        self.non_colliding = non_colliding if non_colliding is not None else []
def get_higher_priority(self):
return self.higher_priority
def get_lower_priority(self):
return self.lower_priority
def get_non_colliding(self):
return self.non_colliding
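# Quick usage sketch (illustrative values): with the None defaults each
# instance owns its own lists.
p1 = Priority(higher_priority=["alpha"])
p2 = Priority()
assert p2.get_higher_priority() == []  # p2 does not share p1's list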
|
# Generated by Django 2.2.13 on 2020-07-17 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_auto_20200717_2055'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='Image',
field=models.ImageField(default='/static/img/default-user.png', upload_to='images/users'),
),
]
|
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse  # django.core.urlresolvers was removed in Django 2.0
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.utils.decorators import method_decorator
from django.contrib.auth.mixins import LoginRequiredMixin
from django.utils import timezone
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic.edit import FormMixin
from .models import Seller, Product
from .forms import UserForm, SellerForm, LoginForm, ProductForm, FilterForm
class IndexView(FormMixin, generic.ListView):
template_name = 'products/index.html'
filter_form = FilterForm
form_class = FilterForm
context_object_name = 'products_list'
def get_queryset(self):
request_elements = [value for key, value in self.request.GET.items()
if len(value)>0
and key != 'items_per_page'
and key != 'page']
query_set = Product.objects
if len(request_elements)>0:
filter_form = self.filter_form(self.request.GET)
if filter_form.is_valid():
cd = filter_form.cleaned_data
                if cd['category']:
                    query_set = query_set.filter(category=cd['category'])
                if cd['min_price']:
                    query_set = query_set.filter(price__gt=cd['min_price'])
                if cd['max_price']:
                    query_set = query_set.filter(price__lt=cd['max_price'])
                if cd['geolocation']:
                    query_set = query_set.filter(seller__address=cd['geolocation'])
return query_set
else:
query_set = query_set.all()
filter_form = self.filter_form()
self.queryset = query_set
return query_set
def get_paginate_by(self, queryset):
try:
self.paginate_by = int(self.request.GET['items_per_page'])
except KeyError:
self.paginate_by = 5
return self.paginate_by
def get_boundform(self):
filter_form = self.filter_form(self.request.GET)
self.form = filter_form
return self.form
def get(self, request):
self.form = self.get_boundform()
self.object_list = self.get_queryset()
context = self.get_context_data(object_list=self.object_list)
context['filter_form'] = self.form
#print(context)
return render(request, self.template_name, context)
def filterProducts(request, productFilter):
    response = "You're looking at products in the %s category."
return HttpResponse(response % productFilter)
class DetailProduct(generic.DetailView):
def get(self, request, product_id):
product = Product.objects.get(pk=product_id)
return render(request, 'products/detail_product.html', {'product': product, 'user': request.user})
def buyProduct(request, product_id):
return HttpResponse("You've purchased product %s" % product_id)
def register(request):
#context = RequestContext(request)
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
seller_form = SellerForm(data=request.POST)
if user_form.is_valid() and seller_form.is_valid():
user = user_form.save()
user.set_password(user.password) #hashes the password with the set_password method
user.save()
            seller = seller_form.save(commit=False)
            seller.user = user
            seller.save()  # persist the seller with its user relation set
            registered = True
#seller = authenticate(username=user.username, password=user.password)
#print(seller, ' for %s and %s' % (user.username, user.password))
#login(request, seller)
if user.first_name:
firstName = user.first_name
else:
firstName = user.username
            return redirect('products:loggedIn')  # the logged-in view takes no URL arguments
else:
user_form = UserForm()
seller_form = SellerForm()
return render(request, 'products/registration.html',
{'user_form': user_form, 'seller_form': seller_form, 'registered': registered}
)
class Login(generic.View):
seller_form = LoginForm
template_name = 'products/login.html'
def get(self, request, *args, **kwargs):
seller_form = self.seller_form()
return render(request, self.template_name, {'seller_form': seller_form})
def post(self, request, *args, **kwargs):
seller_form = self.seller_form(data=request.POST)
if seller_form.is_valid():
cd = seller_form.cleaned_data
username = cd['username']
pwd = cd['password']
seller = authenticate(username=username, password=pwd)
print('seller is ', seller)
if seller is not None:
if seller.is_active:
print('Account is active')
login(request, seller)
print(seller.username)
return redirect('/products/loggedin/')
else:
                    print('Account is inactive')
                    # TODO: replace with a friendlier message for disabled accounts
return HttpResponse("Your seller account has been disabled")
else:
print('Something went wrong with the input data, seller is None')
                print(request.POST['username'], request.POST['password'])
return render(request, 'products/login.html',
{'seller_form': seller_form, 'error_message': "username or password wrong"}
)
else:
print('Something went really wrong with the input data, data is invalid')
return render(request, 'products/login.html',
{'error_message': "Please introduce valid data"}
)
def logout_view(request):
logout(request)
return redirect('/products/')
class LoggedIn(LoginRequiredMixin, generic.View):
login_url = "/products/login/"
permission_denied_message = 'Please provide a valid username and password'
def get(self, request):
seller = Seller.objects.get(user=request.user)
products_list = seller.product_set.all().order_by('-created')
return render(request, 'products/loggedin.html', {'user': request.user,
'products_list': products_list}
)
@method_decorator(login_required(login_url=login_url))
def dispatch(self, *args, **kwargs):
return super(LoggedIn, self).dispatch(*args, **kwargs)
@login_required(login_url='/products/login/')
def loggedIn(request):
    seller = Seller.objects.get(user=request.user)
    products_list = seller.product_set.all().order_by('-created')
    return render(request, 'products/loggedin.html', {'user': request.user, 'products_list': products_list})
@login_required
def filterSellerItems(request, option):
if option == 'sold':
return
elif option == 'unsold':
return
else:
return HttpResponse("In here you'll soon see the items that you've added to the site!")
def detailSellerProduct(request, product_id):
return HttpResponse("You're looking at product %s" % product_id)
def removeProduct(request, product_id):
target_product = Product.objects.get(pk=product_id)
target_product.delete()
return redirect('/products/loggedin/')
class AddProduct(LoginRequiredMixin, generic.View):
login_url = "/products/login/"
product_form = ProductForm
template_name = 'products/add_product.html'
def get(self, request, *args, **kwargs):
product_form = self.product_form()
return render(request, self.template_name, {'product_form': product_form})
def post(self, request, *args, **kwargs):
product_form = self.product_form(data=request.POST)
if product_form.is_valid():
product = product_form.save(commit=False)
product.created = timezone.now()
seller = Seller.objects.get(user=request.user)
product.seller = seller
product.geolocation = product.seller.address
product.save()
return redirect('/products/loggedin/')
        else:
            return render(request,
                          self.template_name,
                          {'product_form': product_form,
                           'error_message': 'Please introduce valid data'}
                          )
class EditProduct(LoginRequiredMixin, generic.View):
login_url = "/products/login/"
product_form = ProductForm
def get(self, request, product_id):
product = Product.objects.get(pk=product_id)
product_form = self.product_form(initial={
'title': product.title,
'category': product.category,
'price': product.price,
'description': product.description
})
return render(request, 'products/edit_product.html', {'product_form': product_form})
def post(self, request, product_id):
product = Product.objects.get(pk=product_id)
product_form = self.product_form(data=request.POST)
        if product_form.is_valid():
            cd = product_form.cleaned_data
            product.title = cd['title']
            product.category = cd['category']
            product.price = cd['price']
            product.description = cd['description']
            product.save()
            return redirect('/products/product/' + str(product_id))
        else:
            # re-render the form with errors instead of falling through and returning None
            return render(request, 'products/edit_product.html', {'product_form': product_form})
|
#!/usr/bin/python3
import argparse
import json
import re
import platform
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--skip-arch', nargs=1, action='append', default=[])
parser.add_argument('--parse-only', action='store_true')
parser.add_argument('--push', action='store_true')
parser.add_argument('cross_file', nargs='+')
args = parser.parse_args()
assert len(args.cross_file) > 0
archs = [
('amd64', 'x86_64'),
('arm64', 'aarch64')
]
for skip in args.skip_arch:
for arch in archs:
if skip[0] in arch:
archs.remove(arch)
break
current_platform = platform.machine()
def parse_options(doc):
# extract options
options = {}
for line in doc.splitlines():
match = re.match(re.compile(r'#:\s*(\S+)\s*=\s*(\S+)\s*?$'), line)
if match:
options[match.group(1)] = match.group(2)
return options
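# For illustration only (this template is hypothetical, not shipped with the
# script): parse_options() picks up "#: KEY = value" lines such as TAG, and
# parse_file() substitutes the __BASEIMAGE_ARCH__/__QEMU_ARCH__ placeholders
# and rewrites __CROSS_COPY lines on non-native builds. A Dockerfile.cross
# might look like:
#
#   #: TAG = example/builder:latest
#   FROM __BASEIMAGE_ARCH__/debian:bullseye
#   __CROSS_COPY qemu-__QEMU_ARCH__-static /usr/bin/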
def parse_file(cross_file, arch):
assert isinstance(arch, tuple)
with open(cross_file, 'r') as f:
template = f.read()
if current_platform == arch[1]:
# native build
template = re.sub(re.compile(r'^.*__CROSS_COPY.*$', re.MULTILINE), '', template)
else:
template = re.sub(re.compile(r'^(.*)__CROSS_COPY(.*)$', re.MULTILINE), r'\1COPY\2', template)
template = re.sub(re.compile(r'__BASEIMAGE_ARCH__', re.MULTILINE), arch[0], template)
template = re.sub(re.compile(r'__QEMU_ARCH__', re.MULTILINE), arch[1], template)
options = parse_options(template)
full_name = os.path.splitext(cross_file)
filename = full_name[0] + "." + arch[0]
options['filename'] = filename
with open(filename, 'w') as f:
f.write(template)
return options
def main():
options = []
for cross in args.cross_file:
ext = os.path.splitext(cross)[1]
if ext == ".cross":
print(f"Parsing .cross file: {cross}")
for arch in archs:
print(f"> parsing {arch[0]}")
options.append(parse_file(cross, arch))
else:
print(f"Adding non-cross file: {cross}")
with open(cross, 'r') as f:
o = parse_options(f.read())
o['filename'] = cross
options.append(o)
print("===============")
if args.parse_only:
sys.exit(0)
print("Building docker images ...")
dir_path = os.path.dirname(os.path.realpath(__file__))
exec_path = os.path.join(dir_path, '..')
for option in options:
print(f"Building {option['filename']}")
ret = os.system(f"docker build {exec_path} -t {option['TAG']} --squash --compress --rm -f {os.path.join(dir_path, option['filename'])}")
if ret != 0:
print(f"Building {option['filename']} failed.")
sys.exit(ret)
if args.push:
for option in options:
print(f"Pushing {option['TAG']}")
ret = os.system(f"docker push {option['TAG']}")
if ret != 0:
print(f"Building {option['filename']} failed.")
sys.exit(ret)
if __name__ == '__main__':
main()
|
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.hashers import make_password
from rest_framework.utils import json
from rest_framework.views import APIView
from rest_framework.response import Response
import requests
from rest_framework_simplejwt.tokens import RefreshToken
from django.contrib.auth.models import User
import os
from rest_framework.permissions import IsAuthenticated
from django.conf import settings
from rest_framework import status
FACEBOOK_DEBUG_TOKEN_URL = "https://graph.facebook.com/debug_token"
FACEBOOK_ACCESS_TOKEN_URL = "https://graph.facebook.com/v8.0/oauth/access_token"
FACEBOOK_URL = "https://graph.facebook.com/"
class SignInView(APIView):
def get(self, request):
        # exchange the code from the Facebook login dialog redirect for the user's access token
# https://graph.facebook.com/v7.0/oauth/access_token?client_id={your-facebook-apps-id}&redirect_uri=http://localhost:8000/login/&client_secret={app_secret}&code={code-generated-from-login-result}
user_access_token_payload = {
"client_id": settings.FACEBOOK_APP_ID,
"redirect_uri": "http://localhost:3000/login",
"client_secret": settings.FACEBOOK_APP_SECRET,
"code": request.query_params.get("code"),
}
user_access_token_request = requests.get(
FACEBOOK_ACCESS_TOKEN_URL, params=user_access_token_payload
)
user_access_token_response = json.loads(user_access_token_request.text)
print(user_access_token_response)
if "error" in user_access_token_response:
user_access_token_error = {
"message": "wrong facebook access token / this facebook access token is already expired."
}
return Response(user_access_token_error, status=status.HTTP_400_BAD_REQUEST)
user_access_token = user_access_token_response["access_token"]
# get developers access token
# https://graph.facebook.com/v7.0/oauth/access_token?client_id={your-app-id}&client_secret={your-app-secret}&grant_type=client_credentials
developers_access_token_payload = {
"client_id": settings.FACEBOOK_APP_ID,
"client_secret": settings.FACEBOOK_APP_SECRET,
"grant_type": "client_credentials",
}
developers_access_token_request = requests.get(
FACEBOOK_ACCESS_TOKEN_URL, params=developers_access_token_payload
)
developers_access_token_response = json.loads(
developers_access_token_request.text
)
if "error" in developers_access_token_response:
developers_access_token_error = {
"message": "Invalid request for access token."
}
return Response(developers_access_token_error, status=status.HTTP_400_BAD_REQUEST)
developers_access_token = developers_access_token_response["access_token"]
        # inspect the user's access token to validate that it is still active
# https://graph.facebook.com/debug_token?input_token={token-to-inspect}&access_token={app-token-or-admin-token}
verify_user_access_token_payload = {
"input_token": user_access_token,
"access_token": developers_access_token,
}
verify_user_access_token_request = requests.get(
FACEBOOK_DEBUG_TOKEN_URL, params=verify_user_access_token_payload
)
verify_user_access_token_response = json.loads(
verify_user_access_token_request.text
)
if "error" in verify_user_access_token_response:
verify_user_access_token_error = {
"message": "Could not verifying user access token."
}
return Response(verify_user_access_token_error)
user_id = verify_user_access_token_response["data"]["user_id"]
# get users email
# https://graph.facebook.com/{your-user-id}?fields=id,name,email&access_token={your-user-access-token}
user_info_url = FACEBOOK_URL + user_id
user_info_payload = {
"fields": "id,name,email",
"access_token": user_access_token,
}
user_info_request = requests.get(
user_info_url, params=user_info_payload)
user_info_response = json.loads(user_info_request.text)
        users_email = user_info_response["email"]
        # create the user if it does not exist yet
        try:
            user = User.objects.get(email=users_email)
        except User.DoesNotExist:
            user = User()
            user.username = users_email
            # assign a random default password; authentication happens via Facebook
            user.password = make_password(
                BaseUserManager().make_random_password())
            user.email = users_email
            user.save()
token = RefreshToken.for_user(
user
) # generate token without username & password
response = {}
response["username"] = user.username
response["access"] = str(token.access_token)
response["refresh"] = str(token)
return Response(response)
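# On success the view responds with JSON of this shape (values illustrative):
#   {"username": "user@example.com", "access": "<JWT access>", "refresh": "<JWT refresh>"}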
|
import os
os.environ['SPARK_HOME'] = "/application/hadoop/app/spark_on_yarn/"
os.environ['JAVA_HOME'] = "/application/hadoop/app/jdk/"
os.environ['HADOOP_CONF_DIR'] = "/application/hadoop/app/hadoop/etc/hadoop"
import findspark
findspark.init()
from pyspark import SparkConf
from pyspark import SparkContext
if __name__ == "__main__":
conf = SparkConf().setAppName("hu1").setMaster("yarn")
sc = SparkContext(conf=conf)
    # sum a small local list by distributing it across the cluster
    data = [1, 2, 3, 4, 5]
    distData = sc.parallelize(data)
    sum_num = distData.reduce(lambda a, b: a + b)
    print(sum_num)
    # total number of characters in README.md (sum of per-line lengths)
    distFile = sc.textFile("README.md")
    sum_num = distFile.map(lambda s: len(s)).reduce(lambda a, b: a + b)
    print(sum_num)
    sc.stop()  # release YARN resources when done
|
from django.contrib.auth.models import User
from products.models import Reviews
from django import forms
class AllProductDetailes(forms.ModelForm):
class Meta:
model = User
fields = '__all__'
class ReviewForm(forms.ModelForm):
class Meta:
model = Reviews
fields = '__all__'
|
# Fibonacci Sequence Code In Python
# Copyright © 2019, Sai K Raja, All Rights Reserved
x = input("What iteration/root in the fibonacci seqeunce do you want?")
print("0") #Starting Fibonacci Sequence with a zero (optional)
def fibonacci_sequence(a): #Creating Fibonacci Sequence Function
if a == 1: #Starting value (after zero) for Fibonacci Sequence
return 1
if a == 2: #Second value (after zero) in Fibonacci Sequence
return 1
    #The first two values in the Fibonacci Sequence are 1 and 1.
    return fibonacci_sequence(a-1) + fibonacci_sequence(a-2)
#Each subsequent value is the sum of the two values before it.
for i in range(1, int(x)):
print(fibonacci_sequence(i))
#This prints the fibonacci sequence for the n-th value inputted by the user.
print("The last number shown is the " + str(x) + "th root of the fibonacci seqeunce.") #Answer message
input("Thank you for using my fibonacci seqeunce program. if you would like to continue, press enter and run the program again.")
#Thanks and exit message
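# The recursive function above recomputes the same subproblems, so its cost
# grows exponentially with the term index. A minimal memoized sketch (not part
# of the original program) keeps it linear via functools.lru_cache:
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_fast(a):
    # same base cases as fibonacci_sequence above
    if a in (1, 2):
        return 1
    return fibonacci_fast(a - 1) + fibonacci_fast(a - 2)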
|
import pandas as pd
from test_grades import test_grades
# author: Kaiwen Liu
'''q4'''
def test_restaurant_grades(df_restaurant, camis_id):
    # Return the test_grades result for one restaurant, identified by its CAMIS id
    df_eachrestaurant = df_restaurant.loc[camis_id]  # .ix was removed in pandas 1.0
    grades = list(df_eachrestaurant['GRADE'])
    return test_grades(grades)
|
def dig_pow(n, p):
    # Sum each digit of n raised to consecutive powers starting at p;
    # return k if that sum equals k * n for some integer k, else -1.
    total = sum(int(a) ** i for i, a in enumerate(str(n), start=p))
    quo, rem = divmod(total, n)
    return quo if rem == 0 else -1
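# Quick checks (standard examples for this kata, shown here as a sketch):
# dig_pow(89, 1) == 1    because 8**1 + 9**2 = 89 = 1 * 89
# dig_pow(92, 1) == -1   because 9**1 + 2**2 = 13 is no multiple of 92
# dig_pow(695, 2) == 2   because 6**2 + 9**3 + 5**4 = 1390 = 2 * 695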
|
import sys
sys.setrecursionlimit(2000)
def simpleSolve(A, st, en, K, sym):
symCount = 0
for c in A[st:en]:
if c == sym:
symCount += 1
if (symCount == 0):
return 0
elif (symCount == K):
return 1
return -1
def flipFromLast(A, st, en, K):
a = list(A)
for i in range(en-K, en):
if i < 0:
continue
        print(i)
if a[i] == '+':
a[i] = '-'
else:
a[i] = '+'
    print(A)
    print(''.join(a))
return ''.join(a)
def f(A, st, en, K):
    print([A, st, en])
N = len(A[st:en])
if N == K:
return simpleSolve(A, st, en, K, '-')
lastCake = A[en-1]
if lastCake == '-':
AA = flipFromLast(A, st, en, K)
subSol = f(AA, st, en-1, K)
if subSol == -1:
return -1
return 1 + subSol
else:
subSol = f(A, st, en-1, K)
if subSol == -1:
return -1
return subSol
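# How the recursion above works: scan from the right. If the last pancake in
# A[st:en] is '-', the only flip that can ever fix it is the K-wide window
# ending at en, so flipFromLast applies that one flip and the problem shrinks
# by one pancake. Once only K pancakes remain, simpleSolve checks whether the
# suffix needs 0 flips (no '-') or exactly 1 flip (all K are '-'); anything
# in between is IMPOSSIBLE.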
fn = 'A-large-practice.in'
out = 'A-large-practice.out'
fp = open(fn, 'r')
fw = open(out, 'w')
T = int(fp.readline())
for i in range(0,T):
line = fp.readline()
args = line.split()
A = args[0]
K = int(args[1])
dic = {}
res = f(A, 0, len(A), K)
if res == -1:
res = 'IMPOSSIBLE'
fw.write('Case #' + str(i+1) + ': ' + str(res) + '\n')
print "=========================================="
|
# Data preparation
# Preprocess the photos: detect the face in each image and normalize it to a 220x220 JPG file.
import cv2
XX="CM"#类型 AF AM CF CM
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
for i in range(1,751):
print(i)
    #Read the image and convert BGR to GRAY
imagePath = "E:\\daxue\\graduation\\SCUT-FBP5500_v2\\Images\\"+XX+str(i)+".jpg"
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(30, 30))
    for (x, y, w, h) in faces:
        new_image = image[y:y+h, x:x+w]
        print(new_image.shape)
        new2_image = cv2.resize(new_image, (220, 220), interpolation=cv2.INTER_CUBIC)
        print(new2_image.shape)
        # write inside the loop so nothing stale is saved when no face is detected
        cv2.imwrite("E:\\daxue\\graduation\\face\\" + XX + str(i) + ".jpg", new2_image)
        break  # keep only the first detected face
|
# Generated by Django 2.2 on 2020-01-29 11:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='image',
field=models.ImageField(blank=True, help_text='图片大小16:9', null=True, upload_to='banner', verbose_name='轮播图'),
),
]
|
import pytest
from flask.testing import FlaskClient
from ajdb.structure import ActSet
def test_act_valid(client: FlaskClient, fake_db: ActSet) -> None:
response = client.get('/act/2020. évi XD. törvény')
response_str = response.data.decode('utf-8')
act = fake_db.act('2020. évi XD. törvény')
assert act.identifier in response_str
assert act.subject in response_str
assert act.preamble in response_str
assert act.article("3").paragraph("1").text in response_str
def test_snippet_valid(client: FlaskClient, fake_db: ActSet) -> None:
response = client.get('/snippet/2020. évi XD. törvény/3_1__')
response_str = response.data.decode('utf-8')
act = fake_db.act('2020. évi XD. törvény')
assert act.article("3").paragraph("1").text in response_str
assert act.article("3").paragraph("2").text not in response_str
response = client.get('/snippet/2020. évi XD. törvény/3_1-2__')
response_str = response.data.decode('utf-8')
assert act.article("3").paragraph("1").text in response_str
assert act.article("3").paragraph("2").text in response_str
response = client.get('/snippet/2020. évi XD. törvény/3___')
response_str = response.data.decode('utf-8')
assert act.article("3").paragraph("1").text in response_str
assert act.article("3").paragraph("2").text in response_str
response = client.get('/snippet/2020. évi XD. törvény/2slashA___')
response_str = response.data.decode('utf-8')
assert "2/A. §" in response_str
assert act.article("2/A").paragraph().text in response_str
response = client.get('/snippet/2020. évi XD. törvény/4__a_')
response_str = response.data.decode('utf-8')
assert "4. §" in response_str
assert act.article("4").paragraph().intro in response_str
assert act.article("4").paragraph().point("a").text in response_str
assert act.article("4").paragraph().point("b").text not in response_str
assert act.article("4").paragraph().wrap_up in response_str
INVALID_CASES = (
('/act/2020. évi XX. törvény', 404),
('/act/Fully invalid', 404), # TODO: Maybe 400?
('/snippet/2020. évi XD. törvény/5___', 404),
('/snippet/2020. évi XD. törvény/5-6___', 404),
('/snippet/2018. évi XD. törvény/3___', 404),
('/snippet/2020. évi XD. törvény/3_4-5__', 404),
('/snippet/2020. évi XD. törvény/___', 400),
('/snippet/2020. évi XD. törvény/3____', 400),
('/snippet/2020. évi XD. törvény/3__', 400),
('/snippet/2020. évi XD. törvény/INVALID', 400),
('/snippet/2020. évi XD. törvény/.____', 400),
('/snippet/2020. évi XD. törvény/1-.____', 400),
('/snippet/2020. évi XD. törvény/3_.__', 404),
('/snippet/2020. évi XD. törvény/3_1_._', 404),
('/snippet/2020. évi XD. törvény/3_5-.__', 404),
('/snippet/2020. évi XD. törvény/3_1_1-._', 404),
# Paragraph doesn't have children
('/snippet/2020. évi XD. törvény/3_1_c_', 404),
('/snippet/2020. évi XD. törvény/3_1_1_', 404),
('/snippet/2020. évi XD. törvény/3_1_c-f_', 404),
('/snippet/2020. évi XD. törvény/3_1_1-2_', 404),
)
@pytest.mark.parametrize("url,expected_code", INVALID_CASES)
def test_act_invalid(client: FlaskClient, fake_db: ActSet, url: str, expected_code: int) -> None:
_ = fake_db
response = client.get(url)
assert response.status_code == expected_code
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.codegen.thrift.apache.python import subsystem
from pants.backend.codegen.thrift.apache.python.additional_fields import ThriftPythonResolveField
from pants.backend.codegen.thrift.apache.python.subsystem import ThriftPythonSubsystem
from pants.backend.codegen.thrift.apache.rules import (
GeneratedThriftSources,
GenerateThriftSourcesRequest,
)
from pants.backend.codegen.thrift.target_types import ThriftDependenciesField, ThriftSourceField
from pants.backend.codegen.utils import find_python_runtime_library_or_raise_error
from pants.backend.python.dependency_inference.module_mapper import ThirdPartyPythonModuleMapping
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import PythonSourceField
from pants.engine.fs import AddPrefix, Digest, Snapshot
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import (
FieldSet,
GeneratedSources,
GenerateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
)
from pants.engine.unions import UnionRule
from pants.source.source_root import SourceRoot, SourceRootRequest
from pants.util.logging import LogLevel
class GeneratePythonFromThriftRequest(GenerateSourcesRequest):
input = ThriftSourceField
output = PythonSourceField
@rule(desc="Generate Python from Thrift", level=LogLevel.DEBUG)
async def generate_python_from_thrift(
request: GeneratePythonFromThriftRequest,
thrift_python: ThriftPythonSubsystem,
) -> GeneratedSources:
result = await Get(
GeneratedThriftSources,
GenerateThriftSourcesRequest(
thrift_source_field=request.protocol_target[ThriftSourceField],
lang_id="py",
lang_options=thrift_python.gen_options,
lang_name="Python",
),
)
# We must add back the source root for Python imports to work properly. Note that the file
# paths will be different depending on whether `namespace py` was used. See the tests for
# examples.
source_root = await Get(
SourceRoot, SourceRootRequest, SourceRootRequest.for_target(request.protocol_target)
)
source_root_restored = (
await Get(Snapshot, AddPrefix(result.snapshot.digest, source_root.path))
if source_root.path != "."
else await Get(Snapshot, Digest, result.snapshot.digest)
)
return GeneratedSources(source_root_restored)
@dataclass(frozen=True)
class ApacheThriftPythonDependenciesInferenceFieldSet(FieldSet):
required_fields = (ThriftDependenciesField, ThriftPythonResolveField)
dependencies: ThriftDependenciesField
python_resolve: ThriftPythonResolveField
class InferApacheThriftPythonDependencies(InferDependenciesRequest):
infer_from = ApacheThriftPythonDependenciesInferenceFieldSet
@rule
async def find_apache_thrift_python_requirement(
request: InferApacheThriftPythonDependencies,
thrift_python: ThriftPythonSubsystem,
python_setup: PythonSetup,
# TODO(#12946): Make this a lazy Get once possible.
module_mapping: ThirdPartyPythonModuleMapping,
) -> InferredDependencies:
if not thrift_python.infer_runtime_dependency:
return InferredDependencies([])
resolve = request.field_set.python_resolve.normalized_value(python_setup)
addr = find_python_runtime_library_or_raise_error(
module_mapping,
request.field_set.address,
"thrift",
resolve=resolve,
resolves_enabled=python_setup.enable_resolves,
recommended_requirement_name="thrift",
recommended_requirement_url="https://pypi.org/project/thrift/",
disable_inference_option=f"[{thrift_python.options_scope}].infer_runtime_dependency",
)
return InferredDependencies([addr])
def rules():
return (
*collect_rules(),
*subsystem.rules(),
UnionRule(GenerateSourcesRequest, GeneratePythonFromThriftRequest),
UnionRule(InferDependenciesRequest, InferApacheThriftPythonDependencies),
)
|
import json
from django.contrib import messages
from django.urls import reverse_lazy  # moved here from django.core.urlresolvers in Django 2.0
from django.views.generic import FormView, ListView, UpdateView
from djofx import models
from djofx.forms import CategoriseTransactionForm, CategoryForm
from djofx.utils import qs_to_monthly_report
from djofx.views.base import PageTitleMixin, UserRequiredMixin
class CategoryTransactionsView(PageTitleMixin, UserRequiredMixin, ListView):
model = models.Transaction
paginate_by = 50
def get_template_names(self):
if not self.request.is_ajax():
return ['djofx/category.html', ]
else:
return ['djofx/_transaction_list.html', ]
def get_context_data(self, **kwargs):
ctx = super(CategoryTransactionsView, self).get_context_data(**kwargs)
category = self.get_category()
ctx['category'] = category
ctx['categorise_form'] = CategoriseTransactionForm()
qs = models.Transaction.objects.filter(
transaction_category=category
)
report = qs_to_monthly_report(qs, category.category_type)
ctx['month_breakdown'] = json.dumps(report)
return ctx
def get_category(self):
return models.TransactionCategory.objects.get(
owner=self.request.user,
pk=self.kwargs['pk']
)
def get_queryset(self):
qs = super(CategoryTransactionsView, self).get_queryset()
qs = qs.filter(
transaction_category=self.get_category()
)
return qs
    def get_page_title(self):
        category = self.get_category()
        return 'Category (%s)' % category.name
class CategoryListView(PageTitleMixin, UserRequiredMixin, ListView):
model = models.TransactionCategory
paginate_by = 50
template_name = 'djofx/categories.html'
page_title = 'Transaction Categories'
def get_queryset(self):
qs = super(CategoryListView, self).get_queryset()
return qs.filter(owner=self.request.user)
class AddCategoryView(PageTitleMixin, UserRequiredMixin, FormView):
form_class = CategoryForm
template_name = "djofx/add_category.html"
page_title = "Add category"
success_url = reverse_lazy('djofx_home')
def form_valid(self, form):
category = form.save(commit=False)
category.owner = self.request.user
category.save()
messages.success(
self.request,
'Payment category saved.'
)
return super(AddCategoryView, self).form_valid(form)
class UpdateCategoryView(PageTitleMixin, UserRequiredMixin, UpdateView):
model = models.TransactionCategory
form_class = CategoryForm
template_name = "djofx/edit_category.html"
page_title = "Edit category"
success_url = reverse_lazy('djofx_categories')
def form_valid(self, form):
messages.success(
self.request,
'Payment category saved.'
)
return super(UpdateCategoryView, self).form_valid(form)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-04 03:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adminapp', '0026_delete_parametrizacion'),
]
operations = [
migrations.CreateModel(
name='Parametrizacion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=200)),
('eslogan', models.CharField(max_length=200)),
('imagen', models.ImageField(upload_to='fotos_param')),
('mision', models.TextField(max_length=500)),
('vision', models.TextField(max_length=500)),
('informacion', models.CharField(max_length=200)),
('estado_directorio', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('estado_articulo', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('estado_localizacion', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('s_hojadevida', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('s_ofertaacademica', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('s_notasemestre', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('s_informacionasignaura', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
('s_listaestudiantes', models.CharField(choices=[('', (('activo', 'Activo'),)), ('', (('inactivo', 'Inactivo'),))], max_length=50)),
],
),
]
|
import heapq
def solution(jobs):
    # Shortest-Job-First disk scheduling: return the average turnaround time
    # (completion time minus arrival time), truncated to an integer.
    answer, time, end, count = 0, 0, -1, 0
    hq = []
    n = len(jobs)
    while count < n:
        # enqueue every job that arrived while the previous job was running
        for job in jobs:
            if end < job[0] <= time:
                heapq.heappush(hq, job[1])
                answer += time - job[0]  # waiting time accumulated before enqueue
        if hq:
            # running the shortest job for hq[0] units delays every queued job
            # (including itself) by hq[0], hence the multiplication
            answer += hq[0] * len(hq)
            end = time
            time += heapq.heappop(hq)
            count += 1
        else:
            time += 1  # idle: no job has arrived yet
    return answer // n
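# Worked example (the canonical test case for this problem): jobs arriving at
# t=0,1,2 with lengths 3,9,6 run in the order 3 -> 6 -> 9, giving turnarounds
# 3, 7 and 17, so the average is (3 + 7 + 17) // 3 == 9.
# solution([[0, 3], [1, 9], [2, 6]]) == 9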
|
import sys
import os
thisdir = os.path.dirname(os.path.abspath(__file__))
benchdir = os.path.join(thisdir, "benchmarks")
sys.path.append(benchdir)
import datetime
import argparse
import time
import benchutil
TIMEOUT = 60 * 60.0 # one hour
def add_benchmarks(jobs, interpreter, args):
for name in ["jittest", "pypy-translate", "pypy-interp", "nx", "bm_sympy", "bm_hypothesis"]:
cmdline = "%s benchmarks/%s.py -n %s" % (interpreter, name, args.inprocess)
jobs.append(benchutil.CmdlineJob(name, "bench", int(args.n), cmdline, timeout=TIMEOUT))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-n", help="number of process iterations", default=3)
parser.add_argument("--inprocess", help="number of in-process iterations", default=10)
parser.add_argument("interpreters", help="interpreters to use", nargs='*', default=[sys.executable])
parser.add_argument("--output", help="where to write the results", default=benchutil.now.strftime("output-%G-%m-%dT%H_%M.json"))
args = parser.parse_args()
    print(args.interpreters)
jobs = []
for interpreter in args.interpreters:
add_benchmarks(jobs, interpreter, args)
sched = benchutil.Scheduler(jobs, args.output)
sched.run()
if __name__ == '__main__':
main()
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms import (widgets, SelectMultipleField, IntegerField,
TextAreaField, SelectField)
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
from app.models import User, Ingredients, Cupboard
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class MultiCheckboxField(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=True)
option_widget = widgets.CheckboxInput()
def ingredient_choices():
    # Query inside a function so it runs per request (within an app context),
    # not once at import time when the class body executes.
    results = Ingredients.query.all()
    return sorted([(x.id, x.ing_name) for x in results], key=lambda x: x[1])
class AddDeleteForm(FlaskForm):
    example = MultiCheckboxField('Label', coerce=int)
    submit1 = SubmitField('Add')
    submit2 = SubmitField('Delete')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.example.choices = ingredient_choices()
class AddRecipe(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
info = TextAreaField('Info', validators=[DataRequired()])
rec_type = SelectField('Type of recipe', choices=[('hearty', 'Hearty'),
('energizing', 'Energizing'),('enduring', 'Enduring',),
('fireproof', 'Fireproof'), ('chilly','Chilly'), ('spicy','Spicy'),
('electro','Electro'),('hasty', 'Hasty'),('sneaky', 'Sneaky'),
('mighty', 'Mighty'),('tough', 'Tough')],
default='hearty')
hearts = IntegerField('Hearts restored', default=0)
sell_price = IntegerField('Sell Price', default=0)
    recipe_needs = MultiCheckboxField('Label', coerce=int)
    submit = SubmitField("Add")
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # same deferred query as AddDeleteForm, avoiding import-time DB access
        self.recipe_needs.choices = ingredient_choices()
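# Minimal usage sketch (route and template names hypothetical): instantiating
# the form inside a view means the ingredient query runs per request.
#
#   @app.route('/cupboard', methods=['GET', 'POST'])
#   def cupboard():
#       form = AddDeleteForm()
#       if form.validate_on_submit():
#           ...
#       return render_template('cupboard.html', form=form)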
|
import re
import PIL.Image
import pytest
import torch
from common_utils import assert_equal
from prototype_common_utils import make_label
from torchvision.prototype import transforms, tv_tensors
from torchvision.transforms.v2._utils import check_type, is_pure_tensor
from torchvision.transforms.v2.functional import clamp_bounding_boxes, InterpolationMode, pil_to_tensor, to_pil_image
from torchvision.tv_tensors import BoundingBoxes, BoundingBoxFormat, Image, Mask, Video
from transforms_v2_legacy_utils import (
DEFAULT_EXTRA_DIMS,
make_bounding_boxes,
make_detection_mask,
make_image,
make_video,
)
BATCH_EXTRA_DIMS = [extra_dims for extra_dims in DEFAULT_EXTRA_DIMS if extra_dims]
def parametrize(transforms_with_inputs):
return pytest.mark.parametrize(
("transform", "input"),
[
pytest.param(
transform,
input,
id=f"{type(transform).__name__}-{type(input).__module__}.{type(input).__name__}-{idx}",
)
for transform, inputs in transforms_with_inputs
for idx, input in enumerate(inputs)
],
)
class TestSimpleCopyPaste:
def create_fake_image(self, mocker, image_type):
if image_type == PIL.Image.Image:
return PIL.Image.new("RGB", (32, 32), 123)
return mocker.MagicMock(spec=image_type)
def test__extract_image_targets_assertion(self, mocker):
transform = transforms.SimpleCopyPaste()
flat_sample = [
# images, batch size = 2
self.create_fake_image(mocker, Image),
# labels, bboxes, masks
mocker.MagicMock(spec=tv_tensors.Label),
mocker.MagicMock(spec=BoundingBoxes),
mocker.MagicMock(spec=Mask),
# labels, bboxes, masks
mocker.MagicMock(spec=BoundingBoxes),
mocker.MagicMock(spec=Mask),
]
with pytest.raises(TypeError, match="requires input sample to contain equal sized list of Images"):
transform._extract_image_targets(flat_sample)
@pytest.mark.parametrize("image_type", [Image, PIL.Image.Image, torch.Tensor])
@pytest.mark.parametrize("label_type", [tv_tensors.Label, tv_tensors.OneHotLabel])
def test__extract_image_targets(self, image_type, label_type, mocker):
transform = transforms.SimpleCopyPaste()
flat_sample = [
# images, batch size = 2
self.create_fake_image(mocker, image_type),
self.create_fake_image(mocker, image_type),
# labels, bboxes, masks
mocker.MagicMock(spec=label_type),
mocker.MagicMock(spec=BoundingBoxes),
mocker.MagicMock(spec=Mask),
# labels, bboxes, masks
mocker.MagicMock(spec=label_type),
mocker.MagicMock(spec=BoundingBoxes),
mocker.MagicMock(spec=Mask),
]
images, targets = transform._extract_image_targets(flat_sample)
assert len(images) == len(targets) == 2
if image_type == PIL.Image.Image:
torch.testing.assert_close(images[0], pil_to_tensor(flat_sample[0]))
torch.testing.assert_close(images[1], pil_to_tensor(flat_sample[1]))
else:
assert images[0] == flat_sample[0]
assert images[1] == flat_sample[1]
for target in targets:
for key, type_ in [
("boxes", BoundingBoxes),
("masks", Mask),
("labels", label_type),
]:
assert key in target
assert isinstance(target[key], type_)
assert target[key] in flat_sample
@pytest.mark.parametrize("label_type", [tv_tensors.Label, tv_tensors.OneHotLabel])
def test__copy_paste(self, label_type):
image = 2 * torch.ones(3, 32, 32)
masks = torch.zeros(2, 32, 32)
masks[0, 3:9, 2:8] = 1
masks[1, 20:30, 20:30] = 1
labels = torch.tensor([1, 2])
blending = True
resize_interpolation = InterpolationMode.BILINEAR
antialias = None
if label_type == tv_tensors.OneHotLabel:
labels = torch.nn.functional.one_hot(labels, num_classes=5)
target = {
"boxes": BoundingBoxes(
torch.tensor([[2.0, 3.0, 8.0, 9.0], [20.0, 20.0, 30.0, 30.0]]), format="XYXY", canvas_size=(32, 32)
),
"masks": Mask(masks),
"labels": label_type(labels),
}
paste_image = 10 * torch.ones(3, 32, 32)
paste_masks = torch.zeros(2, 32, 32)
paste_masks[0, 13:19, 12:18] = 1
paste_masks[1, 15:19, 1:8] = 1
paste_labels = torch.tensor([3, 4])
if label_type == tv_tensors.OneHotLabel:
paste_labels = torch.nn.functional.one_hot(paste_labels, num_classes=5)
paste_target = {
"boxes": BoundingBoxes(
torch.tensor([[12.0, 13.0, 19.0, 18.0], [1.0, 15.0, 8.0, 19.0]]), format="XYXY", canvas_size=(32, 32)
),
"masks": Mask(paste_masks),
"labels": label_type(paste_labels),
}
transform = transforms.SimpleCopyPaste()
random_selection = torch.tensor([0, 1])
output_image, output_target = transform._copy_paste(
image, target, paste_image, paste_target, random_selection, blending, resize_interpolation, antialias
)
assert output_image.unique().tolist() == [2, 10]
assert output_target["boxes"].shape == (4, 4)
torch.testing.assert_close(output_target["boxes"][:2, :], target["boxes"])
torch.testing.assert_close(output_target["boxes"][2:, :], paste_target["boxes"])
expected_labels = torch.tensor([1, 2, 3, 4])
if label_type == tv_tensors.OneHotLabel:
expected_labels = torch.nn.functional.one_hot(expected_labels, num_classes=5)
torch.testing.assert_close(output_target["labels"], label_type(expected_labels))
assert output_target["masks"].shape == (4, 32, 32)
torch.testing.assert_close(output_target["masks"][:2, :], target["masks"])
torch.testing.assert_close(output_target["masks"][2:, :], paste_target["masks"])
class TestFixedSizeCrop:
def test__get_params(self, mocker):
crop_size = (7, 7)
batch_shape = (10,)
canvas_size = (11, 5)
transform = transforms.FixedSizeCrop(size=crop_size)
flat_inputs = [
make_image(size=canvas_size, color_space="RGB"),
make_bounding_boxes(format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=batch_shape),
]
params = transform._get_params(flat_inputs)
assert params["needs_crop"]
assert params["height"] <= crop_size[0]
assert params["width"] <= crop_size[1]
assert (
isinstance(params["is_valid"], torch.Tensor)
and params["is_valid"].dtype is torch.bool
and params["is_valid"].shape == batch_shape
)
assert params["needs_pad"]
assert any(pad > 0 for pad in params["padding"])
def test__transform_culling(self, mocker):
batch_size = 10
canvas_size = (10, 10)
is_valid = torch.randint(0, 2, (batch_size,), dtype=torch.bool)
mocker.patch(
"torchvision.prototype.transforms._geometry.FixedSizeCrop._get_params",
return_value=dict(
needs_crop=True,
top=0,
left=0,
height=canvas_size[0],
width=canvas_size[1],
is_valid=is_valid,
needs_pad=False,
),
)
bounding_boxes = make_bounding_boxes(
format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(batch_size,)
)
masks = make_detection_mask(size=canvas_size, batch_dims=(batch_size,))
labels = make_label(extra_dims=(batch_size,))
transform = transforms.FixedSizeCrop((-1, -1))
mocker.patch("torchvision.prototype.transforms._geometry.has_any", return_value=True)
output = transform(
dict(
bounding_boxes=bounding_boxes,
masks=masks,
labels=labels,
)
)
assert_equal(output["bounding_boxes"], bounding_boxes[is_valid])
assert_equal(output["masks"], masks[is_valid])
assert_equal(output["labels"], labels[is_valid])
def test__transform_bounding_boxes_clamping(self, mocker):
batch_size = 3
canvas_size = (10, 10)
mocker.patch(
"torchvision.prototype.transforms._geometry.FixedSizeCrop._get_params",
return_value=dict(
needs_crop=True,
top=0,
left=0,
height=canvas_size[0],
width=canvas_size[1],
is_valid=torch.full((batch_size,), fill_value=True),
needs_pad=False,
),
)
bounding_boxes = make_bounding_boxes(
format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(batch_size,)
)
mock = mocker.patch(
"torchvision.prototype.transforms._geometry.F.clamp_bounding_boxes", wraps=clamp_bounding_boxes
)
transform = transforms.FixedSizeCrop((-1, -1))
mocker.patch("torchvision.prototype.transforms._geometry.has_any", return_value=True)
transform(bounding_boxes)
mock.assert_called_once()
class TestLabelToOneHot:
def test__transform(self):
categories = ["apple", "pear", "pineapple"]
labels = tv_tensors.Label(torch.tensor([0, 1, 2, 1]), categories=categories)
transform = transforms.LabelToOneHot()
ohe_labels = transform(labels)
assert isinstance(ohe_labels, tv_tensors.OneHotLabel)
assert ohe_labels.shape == (4, 3)
assert ohe_labels.categories == labels.categories == categories
class TestPermuteDimensions:
@pytest.mark.parametrize(
("dims", "inverse_dims"),
[
(
{Image: (2, 1, 0), Video: None},
{Image: (2, 1, 0), Video: None},
),
(
{Image: (2, 1, 0), Video: (1, 2, 3, 0)},
{Image: (2, 1, 0), Video: (3, 0, 1, 2)},
),
],
)
def test_call(self, dims, inverse_dims):
sample = dict(
image=make_image(),
bounding_boxes=make_bounding_boxes(format=BoundingBoxFormat.XYXY),
video=make_video(),
str="str",
int=0,
)
transform = transforms.PermuteDimensions(dims)
transformed_sample = transform(sample)
for key, value in sample.items():
value_type = type(value)
transformed_value = transformed_sample[key]
if check_type(value, (Image, is_pure_tensor, Video)):
if transform.dims.get(value_type) is not None:
assert transformed_value.permute(inverse_dims[value_type]).equal(value)
assert type(transformed_value) == torch.Tensor
else:
assert transformed_value is value
@pytest.mark.filterwarnings("error")
def test_plain_tensor_call(self):
tensor = torch.empty((2, 3, 4))
transform = transforms.PermuteDimensions(dims=(1, 2, 0))
assert transform(tensor).shape == (3, 4, 2)
@pytest.mark.parametrize("other_type", [Image, Video])
def test_plain_tensor_warning(self, other_type):
with pytest.warns(UserWarning, match=re.escape("`torch.Tensor` will *not* be transformed")):
transforms.PermuteDimensions(dims={torch.Tensor: (0, 1), other_type: (1, 0)})
class TestTransposeDimensions:
@pytest.mark.parametrize(
"dims",
[
(-1, -2),
{Image: (1, 2), Video: None},
],
)
def test_call(self, dims):
sample = dict(
image=make_image(),
bounding_boxes=make_bounding_boxes(format=BoundingBoxFormat.XYXY),
video=make_video(),
str="str",
int=0,
)
transform = transforms.TransposeDimensions(dims)
transformed_sample = transform(sample)
for key, value in sample.items():
value_type = type(value)
transformed_value = transformed_sample[key]
transposed_dims = transform.dims.get(value_type)
if check_type(value, (Image, is_pure_tensor, Video)):
if transposed_dims is not None:
assert transformed_value.transpose(*transposed_dims).equal(value)
assert type(transformed_value) == torch.Tensor
else:
assert transformed_value is value
@pytest.mark.filterwarnings("error")
def test_plain_tensor_call(self):
tensor = torch.empty((2, 3, 4))
transform = transforms.TransposeDimensions(dims=(0, 2))
assert transform(tensor).shape == (4, 3, 2)
@pytest.mark.parametrize("other_type", [Image, Video])
def test_plain_tensor_warning(self, other_type):
with pytest.warns(UserWarning, match=re.escape("`torch.Tensor` will *not* be transformed")):
transforms.TransposeDimensions(dims={torch.Tensor: (0, 1), other_type: (1, 0)})
import importlib.machinery
import importlib.util
from pathlib import Path
def import_transforms_from_references(reference):
HERE = Path(__file__).parent
PROJECT_ROOT = HERE.parent
loader = importlib.machinery.SourceFileLoader(
"transforms", str(PROJECT_ROOT / "references" / reference / "transforms.py")
)
spec = importlib.util.spec_from_loader("transforms", loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
return module
det_transforms = import_transforms_from_references("detection")
def test_fixed_sized_crop_against_detection_reference():
def make_tv_tensors():
size = (600, 800)
num_objects = 22
pil_image = to_pil_image(make_image(size=size, color_space="RGB"))
target = {
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
"masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
yield (pil_image, target)
tensor_image = torch.Tensor(make_image(size=size, color_space="RGB"))
target = {
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
"masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
yield (tensor_image, target)
tv_tensor_image = make_image(size=size, color_space="RGB")
target = {
"boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
"labels": make_label(extra_dims=(num_objects,), categories=80),
"masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
}
yield (tv_tensor_image, target)
t = transforms.FixedSizeCrop((1024, 1024), fill=0)
t_ref = det_transforms.FixedSizeCrop((1024, 1024), fill=0)
for dp in make_tv_tensors():
# We should use prototype transform first as reference transform performs inplace target update
torch.manual_seed(12)
output = t(dp)
torch.manual_seed(12)
expected_output = t_ref(*dp)
assert_equal(expected_output, output)
|
# Generated by Django 3.0.1 on 2019-12-22 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('teams', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(verbose_name='match date')),
('winning_stat', models.CharField(default='', max_length=64, verbose_name='Winning stats')),
('team1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team1_metch', to='teams.Team')),
('team2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='team2_match', to='teams.Team')),
('winner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='winner_team', to='teams.Team')),
],
options={
'verbose_name': 'Match',
                'verbose_name_plural': 'Matches',
'ordering': ('-date',),
},
),
]
|
# --------------------------------------------------------
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import _init_paths
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms, ToTensor
from networks.ResNet50_HICO_torch import HICO_HOI
from ult.timer import Timer
import numpy as np
import argparse
import pickle
import ipdb
from ult.config import cfg
from ult.ult import obtain_data, get_zero_shot_type, get_augment_type, generator2
import torch
import random
# seed = 10
# torch.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# np.random.seed(seed)
# random.seed(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
# torch.cuda.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
def _init_fn(worker_id):
# np.random.seed(int(seed))
pass
class HicoDataset(Dataset):
def __init__(self, Pos_augment=15, Neg_select=60, augment_type=0, with_pose=False, zero_shot_type=0,
large_neg_for_ho=False, isalign=False, epoch=0, transform=None):
Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
self.transform = transform
if with_pose:
pattern_channel = 3
else:
pattern_channel = 2
from functools import partial
self.generator = generator2(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
augment_type, with_pose, zero_shot_type, isalign, epoch)
    def __len__(self):
        # nominal length: samples actually come from an endless generator,
        # so report a large fixed number instead of a true dataset size
        return 800000
def __getitem__(self, idx):
im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern = next(self.generator)
# im_orig = im_orig.transpose([0, 3, 1, 2])
# Pattern = Pattern.transpose([0, 3, 1, 2]).astype(np.float32)
# Human_augmented = Human_augmented.astype(np.float32)
# Object_augmented = Object_augmented.astype(np.float32)
# Human_augmented = Human_augmented.astype(np.float32)
# print(im_orig.dtype, Pattern.dtype)
# print(im_orig)
# print(im_orig.shape, im_orig)
if self.transform:
im_orig = self.transform(im_orig[0])
# print('after', im_orig)
return im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern
def parse_args():
    parser = argparse.ArgumentParser(description='Train VCL on HICO')
parser.add_argument('--num_iteration', dest='max_iters',
help='Number of iterations to perform',
default=200000, type=int)
parser.add_argument('--model', dest='model',
help='Select model',
default='VCL_humans_aug5_3_x5new_res101_1', type=str)
parser.add_argument('--Pos_augment', dest='Pos_augment',
help='Number of augmented detection for each one. (By jittering the object detections)',
default=15, type=int)
parser.add_argument('--Neg_select', dest='Neg_select',
help='Number of Negative example selected for each image',
default=60, type=int)
parser.add_argument('--Restore_flag', dest='Restore_flag',
help='How many ResNet blocks are there?',
default=5, type=int)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(args)
args.model = args.model.strip()
Trainval_GT = None
Trainval_N = None
tb_dir = cfg.ROOT_DIR + '/logs/' + args.model + '/'
# output directory where the models are saved
output_dir = cfg.LOCAL_DATA + '/Weights/' + args.model + '/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if args.Restore_flag == 5:
if os.path.exists(output_dir + 'checkpoint'):
args.Restore_flag = -1
elif args.model.__contains__('unique_weights'):
args.Restore_flag = 6
augment_type = get_augment_type(args.model)
model = HICO_HOI(args.model)
with_pose = False
# if args.model.__contains__('pose'):
# with_pose = True
coco = False
zero_shot_type = get_zero_shot_type(args.model)
large_neg_for_ho = False
dataset = HicoDataset(Pos_augment=args.Pos_augment,
Neg_select=args.Neg_select,
augment_type=augment_type,
with_pose=with_pose,
zero_shot_type=zero_shot_type,
transform=transforms.Compose([ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])]),
)
dataloader_train = DataLoader(dataset, 1,
shuffle=False, num_workers=1,
worker_init_fn=_init_fn) # num_workers=batch_size
trainables = []
not_trainables = []
for name, p in model.named_parameters():
if name.__contains__('base_model.0') or name.__contains__('base_model.1') \
or name.__contains__('base_model.4') or name.__contains__('bn')\
or name.__contains__('HOI_MLP.1') or name.__contains__('sp_MLP.1')\
or name.__contains__('HOI_MLP.5') or name.__contains__('sp_MLP.5')\
or name.__contains__('downsample.1'):
#BN
p.requires_grad = False
not_trainables.append(p)
print('not train', name, p.mean(), p.std())
else:
print('train', name, p.mean(), p.std())
            p.requires_grad = True
trainables.append(p)
def set_bn_eval(m):
classname = m.__class__.__name__
# print(m)
if classname.find('BatchNorm') != -1:
m.eval()
# print(m, '======')
model.apply(set_bn_eval)
# exit()
print(model)
import torch.optim as optim
optimizer = optim.SGD(params=trainables, lr=cfg.TRAIN.LEARNING_RATE * 10,
momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
# lambda1 = lambda epoch: 1.0 if epoch < 10 else (10 if epoch < 28 else 1)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, cfg.TRAIN.GAMMA)
device = torch.device("cuda")
model.to(device)
timer = Timer()
# (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
i = 0
last_update_value = {}
for item in dataloader_train:
# for item in dataset:
im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern = item
if len(Human_augmented[0]) <= 1 or num_pos[0] <= 1:
continue
timer.tic()
step_size = int(cfg.TRAIN.STEPSIZE * 5)
if (i+1) % step_size == 0:
scheduler.step()
im_orig = im_orig.to(device)
num_pos = num_pos.to(device)
Human_augmented = Human_augmented.to(device)
Object_augmented = Object_augmented.to(device)
action_HO = action_HO.to(device)
Pattern = Pattern.to(device)
optimizer.zero_grad()
# print(im_orig.shape, Human_augmented.shape)
# print(im_orig[0].mean(), im_orig[0].std(), image_id, Human_augmented[0], len(Object_augmented[0]), len(action_HO[0]), len(Pattern[0]))
model(im_orig, image_id[0], num_pos[0], Human_augmented[0], Object_augmented[0], action_HO[0], Pattern[0],
True)
num_stop = model.get_num_stop(num_pos[0], Human_augmented[0])
model.add_loss(action_HO[0], num_stop, device)
model.losses['total_loss'].backward()
for p in model.parameters():
torch.nn.utils.clip_grad_norm_(p, 1.)
optimizer.step()
i += 1
# for name, p in model.named_parameters():
# print(name, p.mean())
# import ipdb;ipdb.set_trace()
# if i == 10 or i == 1000:
#
# for k in model.state_dict().keys():
# tmp = model.state_dict()[k].type(torch.float32).mean().detach().cpu().numpy()
# if k in last_update_value:
#
# if abs(last_update_value[k] - tmp) > 0:
# print(k, last_update_value[k], tmp, last_update_value[k] - tmp)
#
# last_update_value[k] = tmp
# # print(k, model.state_dict()[k].type(torch.float32).mean())
# print(im_orig.mean(), im_orig.std())
# print('-'*80)
# exit()
# print(model.state_dict().keys())
# print(model.state_dict());exit()
timer.toc()
if i % (cfg.TRAIN.SNAPSHOT_ITERS * 5) == 0 or i == 10 or i == 1000:
torch.save({
'iteration': i,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict()}, output_dir +'{}_checkpoint.pth.tar'.format(i))
if i % 500 == 0 or i < 10:
print('\rstep {} img id: {} sp: {} hoi: {} total: {} lr: {} speed: {:.3f} s/iter \r'.format(i, image_id[0], model.losses['sp_cross_entropy'].item(),
model.losses['hoi_cross_entropy'].item(),
model.losses['total_loss'].item(),
scheduler.get_lr(), timer.average_time))
torch.cuda.empty_cache()
|
# Generated by Django 3.1.7 on 2021-04-01 00:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Persona',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=50, verbose_name='nombres')),
('pais', models.CharField(max_length=30, verbose_name='Pais')),
('pasaporte', models.CharField(max_length=50, verbose_name='Pasaporte')),
('edad', models.PositiveIntegerField()),
('apelativo', models.CharField(max_length=10, verbose_name='Apelativo')),
],
options={
'verbose_name': 'Persona',
'verbose_name_plural': 'Personas',
'db_table': 'persona',
},
),
]
|
import dice
import gspread
import pygsheets
import pandas as pd
from tabulate import tabulate
from oauth2client.service_account import ServiceAccountCredentials
#########
# Open the worksheet in gspread
#########
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive'
]
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'cred.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open("DSA_Discord - Aventurien")
sheet = wks.sheet1
pyggc = pygsheets.authorize(service_file='cred.json')
wks_pyggc = pyggc.open("DSA_Discord - Aventurien")
pyg_sheet = wks_pyggc.sheet1
pyg_karten_sheet = wks_pyggc.worksheet('title', 'Karten')
# Sheet for plant regions and plants
pflanzen_gebiete_sheet = wks_pyggc.worksheet('title', 'Gebiete_Pflanzen')
#######
# Functions to load the plants dataframe
#######
def loadPflanzendf():
pflanzen_df = pflanzen_gebiete_sheet.get_as_df(has_header=True, start='A1', end='Q750')
return pflanzen_df
#########
# All functions around combat
#########
# Load gsheets data into dataframe
def loadDF():
data = pyg_sheet.get_as_df(has_header=True, start='A1', end='AE30')
return data
# Make data pretty
def tabulateDF():
inidf = pyg_sheet.get_as_df(has_header=True, start='A1', end='F30')
inidf = inidf[::2]
print(inidf)
return inidf
def ermittleInitative():
inidf = None
inidf = pyg_sheet.get_as_df(has_header=True, start='A1', end='AE61')
# copy inidf in new dataframe
kampfdf = inidf
kampfdf = kampfdf[inidf.Active == 'x']
# get each third row
inidf = inidf[::3]
#print(inidf)
# get only characters which are active
inidf = inidf[inidf.Active == 'x']
inidf['INI'] = 0
# Calculate final Initiative
inidf['INI'] = inidf.apply(lambda x: int(x['INIBasis']) + int(dice.roll('1d6')), axis=1)
    # Determine the turn order (Reihenfolge)
reihenfolge = inidf[['ID', 'Char', 'INIBasis', 'INI']]
reihe = reihenfolge.sort_values(by=['INI', 'INIBasis', ], ascending = False)
reihe['Reihenfolge'] = range(1,len(reihe.index)+1)
return reihe, inidf
def ermittleKampfdaten():
kampfdf = None
return None
def getKarten(kat):
kartendf = None
kartendf = pyg_karten_sheet.get_as_df(has_header=True, start='A1', end='E28')
kartendf = kartendf[kartendf.Kategorie == kat]
kartendf = kartendf[kartendf.Active == 'x']
    try:
        pfaddf = kartendf.sample(n=1)
    except ValueError:
        print('No active card')
        return None
    return pfaddf.Pfad.item()
def attacke(ATID, VTID, Mod, Zone = 0):
kampfwerte = pd.read_csv(filepath_or_buffer='kampfdf.csv')
    # Reach difference between the attacker's and defender's weapons
ATWaffenlänge = kampfwerte.loc[kampfwerte['ID']==int(ATID), 'Reichweite'].item()
VTWaffenlänge = kampfwerte.loc[kampfwerte['ID']==int(VTID), 'Reichweite'].item()
reichweite = {'kurz' : -2, 'mittel' : 0, 'lang' : 2}
if ATWaffenlänge != 'FK' and VTWaffenlänge != 'FK':
reichweite_mod =reichweite[ATWaffenlänge] - reichweite[VTWaffenlänge]
else:
reichweite_mod = 0
print('Reichweite')
print(reichweite_mod)
    # Which hit zone
print('Zone (0, K, T, A, B)')
print(Zone)
    if str(Zone) == str(0):
Zonemod = 0
else:
if kampfwerte.loc[kampfwerte['ID']==int(ATID), 'Gez. Schuss / Angriff'].item() == 'x':
divisor = 2
else: divisor = 1
if Zone == 'K':
Zonemod = 10 / divisor
elif Zone == 'T':
Zonemod = 4 / divisor
elif Zone == 'A':
Zonemod = 8 / divisor
elif Zone == 'B':
Zonemod = 8 / divisor
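    # Effective attack value: base AT + situational modifier - zone penalty
    # - 3 per attack already made this round + reach modifier.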
ATWert = int(kampfwerte.loc[kampfwerte['ID']==int(ATID), 'Eff. AT'].item()) + int(Mod) - int(Zonemod) - int(kampfwerte.loc[kampfwerte['ID']==int(ATID), 'A AT'].item()) * 3 + reichweite_mod
print('Attackebasis & Modifikation - Zonemod')
print(int(kampfwerte.loc[kampfwerte['ID']==int(ATID), 'Eff. AT'].item()))
print(int(Mod) - int(Zonemod))
ATAnzahl = kampfwerte.loc[kampfwerte['ID']==int(ATID), 'A AT'].item() + 1
print('AttackeAnzahl')
print(ATAnzahl)
#print('Dataframe')
#print(kampfwerte)
kampfwerte.at[int(ATID)-1,'A AT'] = ATAnzahl
print('Wert A AT im DF')
print(kampfwerte.at[int(ATID)-1,'A AT'])
kampfwerte.to_csv(path_or_buf='kampfdf.csv', sep=',', header=True, index=False)
wurf = sum(dice.roll('1d20'))
print('Attackewurf')
print(wurf)
#wurf = 1
if wurf == 1:
wurf_bestätigung = sum(dice.roll('1d20'))
print('AT: Wurf Bestätigung')
print(wurf_bestätigung)
        if wurf_bestätigung <= ATWert:
            # Hit, roll, defense halved, crit confirmed (= double damage)
            return True, wurf, True, True
        else:
            # Hit, roll, defense halved, crit not confirmed
            return True, wurf, True, False
elif wurf == 20:
wurf_bestätigung = sum(dice.roll('1d20'))
print('AT: Wurf Bestätigung')
print(wurf_bestätigung)
        if wurf_bestätigung >= ATWert:
            # Miss, roll, irrelevant, fumble confirmed
            return False, wurf, False, True
        else:
            # Miss, roll, irrelevant, fumble not confirmed
            return False, wurf, False, False
elif wurf <= ATWert:
        # Attack successful, roll result, not critical
return True, wurf, False, False
else:
        # Attack failed, roll result, no fumble
return False, wurf, False, False
def parade(ATID, VTID, Mod, kritisch = 1):
kampfwerte = pd.read_csv(filepath_or_buffer='kampfdf.csv')
print('Parade Divisor w/ Kritisch')
print(kritisch)
    # Branch for defending against ranged (FK) attacks: dodge (Ausweichen) - 4 is used instead of parry
if kampfwerte.loc[kampfwerte['ID']==int(ATID), 'Reichweite'].item() == 'FK':
PAWert = round((int(kampfwerte.loc[kampfwerte['ID']==int(VTID), 'Ausweichen'].item()) - int(kampfwerte.loc[kampfwerte['ID']==int(VTID), 'A PA'].item()) * 3 - 4) / int(kritisch),0)
else:
PAWert = round((int(kampfwerte.loc[kampfwerte['ID']==int(VTID), 'Eff. PA'].item()) - int(kampfwerte.loc[kampfwerte['ID']==int(VTID), 'A PA'].item()) * 3) / int(kritisch),0)
print('PAWert')
print(PAWert)
    VTAnzahl = kampfwerte.loc[kampfwerte['ID']==int(VTID), 'A PA'].item() + 1
    kampfwerte.at[int(VTID)-1,'A PA'] = VTAnzahl
#print('Dataframe')
#print(kampfwerte)
kampfwerte.to_csv(path_or_buf='kampfdf.csv', sep=',', header=True, index=False)
wurf = sum(dice.roll('1d20'))
print('Verteidigungswert')
print(int(kampfwerte.loc[kampfwerte['ID']==int(VTID), 'Eff. PA'].item()))
print('Verteidigungwurf')
print(wurf)
if wurf == 1:
wurf_bestätigung = sum(dice.roll('1d20'))
print('PA: Wurf Bestätigung')
print(wurf_bestätigung)
        if wurf_bestätigung <= PAWert:
            # Defended, roll, crit confirmed (= counterattack), no fumble
            print('PA: Kritisch bestätigt, Passierschlag')
            return True, wurf, True, False
        else:
            # Defended, roll, crit not confirmed, no fumble
            print('PA: Kritisch nicht bestätigt')
            return True, wurf, False, False
elif wurf == 20:
wurf_bestätigung = sum(dice.roll('1d20'))
print('PA: Wurf Bestätigung')
print(wurf_bestätigung)
        if wurf_bestätigung >= PAWert:
            print('PA: Bestätigter Patzer')
            # Not defended, roll, no confirmed crit, fumble confirmed
            return False, wurf, False, True
        else:
            # Not defended, roll, no confirmed crit, fumble not confirmed
            print('PA: Nicht bestätigter Patzer')
            return False, wurf, False, False
elif wurf <= PAWert:
print('PA: Verteidigt, kein kritisch')
        # Defended, roll result, not critical, no fumble
return True, wurf, False, False
else:
print('PA: Nicht verteidigt, kein Patzer')
        # Not defended, roll result, no fumble
return False, wurf, False, False
def attacke_schaden(ATID, VTID, Zone = 0):
# Read CSV
kampfwerte = pd.read_csv(filepath_or_buffer='kampfdf.csv')
# Get Weapon damage
TP = kampfwerte.loc[kampfwerte['ID']==int(ATID), 'TP'].item()
    # Number of dice (assumes a single-digit count, e.g. the "1" in "1d6+4")
AnzahlWürfe = int(TP[0])
print('Anzahl Würfe')
print(AnzahlWürfe)
# How many sides (d1-d9)
AnzahlSeiten = int(TP[2])
print('Anzahl Seiten')
print(AnzahlSeiten)
    # Flat damage modifier parsed from index 4 on (e.g. the "4" in "1d6+4")
Modifikator = int(TP[4:])
print('TP Modifikation')
print(Modifikator)
string = str(AnzahlWürfe) + 'd' + str(AnzahlSeiten)
print(str(string))
schaden = int(dice.roll(string)) + Modifikator
print('Schaden')
print(schaden)
print('Zone')
print(Zone)
print(type(Zone))
    if str(Zone) == str(0):
wurf = sum(dice.roll('1d20'))
print('Wurf Auswahl Zone')
print(wurf)
größe = kampfwerte.loc[kampfwerte['ID']==int(VTID), 'Größe'].item()
print('Größe gem. Sheet')
print(größe)
größe = größe_translation(größe)
print('Größe gem. Dict')
print(größe)
Zone = trefferzone_select(wurf)[größe]
print('Gew. Zone')
print(Zone)
if Zone == 'K':
trefferzone = 'Kopf'
elif Zone == 'T':
trefferzone = 'Torso'
elif Zone == 'A':
trefferzone = 'Arme'
elif Zone == 'B':
trefferzone = 'Beine'
else:
if Zone == 'K':
trefferzone = 'Kopf'
elif Zone == 'T':
trefferzone = 'Torso'
elif Zone == 'A':
trefferzone = 'Arme'
elif Zone == 'B':
trefferzone = 'Beine'
    # Test against the wound threshold (half the defender's KO)
    schlimme_verletzung = None
    if schaden >= round(int(kampfwerte.loc[kampfwerte['ID']==int(VTID), 'KO'].item())/2,0):
        # Self-control check; on failure, roll for a serious injury
        if probe_selbstbeherrschung(VTID) == False:
            wert = int(dice.roll('1d6'))
            schlimme_verletzung = schlimme_verletzung_select(trefferzone, wert)
    return schaden, trefferzone, schlimme_verletzung
def größe_translation(größe):
if größe == 'Humanoid klein':
return 'Hk'
elif größe == 'Humanoid mittel':
return 'Hm'
elif größe == 'Humanoid groß':
return 'Hg'
elif größe == 'Vierbeinig klein':
return 'Vk'
elif größe == 'Vierbeinig mittel':
return 'Vm'
elif größe == 'Vierbeinig groß':
return 'Vg'
elif größe == 'sechs Gliedmaßen groß':
return 'Sg'
elif größe == 'sechs Gliedmaßen riesig':
return 'Sr'
elif größe == 'Fangarme':
return 'F'
elif größe == 'sonstige':
return 'S'
def trefferzone_select(erg):
trefferzone = {
1: {
'Hk': 'K',
'Hm': 'K',
'Hg': 'K',
'Vk': 'K',
'Vm': 'K',
'Vg': 'K',
'Sg': 'K',
'Sr': 'K',
'F': 'K',
'S': 'S'
},
2: {
'Hk': 'K',
'Hm': 'K',
'Hg': 'K',
'Vk': 'K',
'Vm': 'K',
'Vg': 'K',
'Sg': 'K',
'Sr': 'K',
'F': 'K',
'S': 'S'
},
3: {
'Hk': 'K',
'Hm': 'T',
'Hg': 'T',
'Vk': 'K',
'Vm': 'K',
'Vg': 'K',
'Sg': 'K',
'Sr': 'T',
'F': 'K',
'S': 'S'
},
4: {
'Hk': 'K',
'Hm': 'T',
'Hg': 'T',
'Vk': 'K',
'Vm': 'K',
'Vg': 'K',
'Sg': 'K',
'Sr': 'T',
'F': 'K',
'S': 'S'
},
5: {
'Hk': 'K',
'Hm': 'T',
'Hg': 'T',
'Vk': 'T',
'Vm': 'T',
'Vg': 'K',
'Sg': 'K',
'Sr': 'T',
'F': 'T',
'S': 'S'
},
6: {
'Hk': 'K',
'Hm': 'T',
'Hg': 'T',
'Vk': 'T',
'Vm': 'T',
'Vg': 'T',
'Sg': 'K',
'Sr': 'T',
'F': 'T',
'S': 'S'
},
7: {
'Hk': 'T',
'Hm': 'T',
'Hg': 'A',
'Vk': 'T',
'Vm': 'T',
'Vg': 'T',
'Sg': 'K',
'Sr': 'T',
'F': 'A',
'S': 'S'
},
8: {
'Hk': 'T',
'Hm': 'T',
'Hg': 'A',
'Vk': 'T',
'Vm': 'T',
'Vg': 'T',
'Sg': 'T',
'Sr': 'T',
'F': 'A',
'S': 'S'
},
9: {
'Hk': 'T',
'Hm': 'T',
'Hg': 'A',
'Vk': 'T',
'Vm': 'T',
'Vg': 'T',
'Sg': 'T',
'Sr': 'T',
'F': 'A',
'S': 'S'
},
10: {
'Hk': 'T',
'Hm': 'T',
'Hg': 'A',
'Vk': 'T',
'Vm': 'T',
'Vg': 'T',
'Sg': 'T',
'Sr': 'T',
'F': 'A',
'S': 'S'
},
11: {
'Hk': 'A',
'Hm': 'T',
'Hg': 'A',
'Vk': 'T',
'Vm': 'A',
'Vg': 'T',
'Sg': 'T',
'Sr': 'A',
'F': 'A',
'S': 'S'
},
12: {
'Hk': 'A',
'Hm': 'T',
'Hg': 'A',
'Vk': 'T',
'Vm': 'A',
'Vg': 'A',
'Sg': 'T',
'Sr': 'A',
'F': 'A',
'S': 'S'
},
13: {
'Hk': 'A',
'Hm': 'A',
'Hg': 'A',
'Vk': 'A',
'Vm': 'A',
'Vg': 'A',
'Sg': 'A',
'Sr': 'A',
'F': 'A',
'S': 'S'
},
14: {
'Hk': 'A',
'Hm': 'A',
'Hg': 'A',
'Vk': 'A',
'Vm': 'A',
'Vg': 'A',
'Sg': 'A',
'Sr': 'A',
'F': 'B',
'S': 'S'
},
15: {
'Hk': 'A',
'Hm': 'A',
'Hg': 'A',
'Vk': 'A',
'Vm': 'A',
'Vg': 'A',
'Sg': 'A',
'Sr': 'A',
'F': 'B',
'S': 'S'
},
16: {
'Hk': 'A',
'Hm': 'A',
'Hg': 'A',
'Vk': 'A',
'Vm': 'A',
'Vg': 'A',
'Sg': 'A',
'Sr': 'A',
'F': 'B',
'S': 'S'
},
17: {
'Hk': 'A',
'Hm': 'B',
'Hg': 'B',
'Vk': 'B',
'Vm': 'B',
'Vg': 'B',
'Sg': 'A',
'Sr': 'A',
'F': 'B',
'S': 'S'
},
18: {
'Hk': 'A',
'Hm': 'B',
'Hg': 'B',
'Vk': 'B',
'Vm': 'B',
'Vg': 'B',
'Sg': 'A',
'Sr': 'A',
'F': 'B',
'S': 'S'
},
19: {
'Hk': 'B',
'Hm': 'B',
'Hg': 'B',
'Vk': 'B',
'Vm': 'B',
'Vg': 'B',
'Sg': 'B',
'Sr': 'B',
'F': 'B',
'S': 'S'
},
20: {
'Hk': 'B',
'Hm': 'B',
'Hg': 'B',
'Vk': 'B',
'Vm': 'B',
'Vg': 'B',
'Sg': 'B',
'Sr': 'B',
'F': 'B',
'S': 'S'
},
}
return trefferzone.get(erg, 'Kein gültiger Wurf')
# def schlimme_verletzung_select(trefferzone, wert):
# effekt = {
# 1: {
# 'Kopf':
# 'Nase', 'Der Held hat einen Treffer gegen die Nase abbekommen. Er ist etwas desorientiert und erleidet 1 SP.'
# 'Torso':
# 'Rippe', 'Ein Treffer gegen die Rippe raubt dem Helden die Luft und er erleidet 1W3 SP zusätzlich.'
# 'Arm':
# 'Oberarm', 'Ein Treffer gegen den Oberarm sorgt dafür, dass der Arm leicht gelähmt ist. 2 SP.'
# }
# }
|
from X import B
b1 = B()
b1.c()
|
from django import forms
from .models import CashWithrawal, RefCreditTransfer, C2BTransaction, CashTransfer,Checkout
from paypal.pro.forms import PaymentForm
class CashWithrawalForm(forms.ModelForm):
class Meta:
model = CashWithrawal
fields = (
"user",
"amount",
)
class ReferTranferForm(forms.ModelForm):
class Meta:
model = RefCreditTransfer
fields = (
"user",
"amount",
)
class C2BTransactionForm(forms.ModelForm):
class Meta:
model = C2BTransaction
fields = (
"phone_number",
"amount",
)
class CashTransferForm(forms.ModelForm):
class Meta:
model = CashTransfer
fields = (
"sender",
"recipient",
"amount",
)
class CheckoutForm(forms.ModelForm):
class Meta:
model = Checkout
fields = (
"user",
"email",
"amount",
)
# class PayPalmentForm(PaymentForm):
# class Meta:
# # model = Checkout
# fields = (
# "acct",
# "cvv2",
# )
|
from time import sleep
num = int(input('Digite um número: '))
print('Analisando o valor...')
sleep(2)
print('Antecessor: {}'.format(num-1))
print('Sucessor: {}'.format(num+1))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 16:30:18 2014
@author: ayush488
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 08:25:21 2014
@author: ayush488
"""
import math
import urllib #needed for web calls
import operator #needed for sorting dictionary
N=5000000000 #assuming the number of web pages to be this
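# Normalized Google Distance (NGD):
#   NGD(x, y) = (max(log f(x), log f(y)) - log f(x, y)) / (log N - min(log f(x), log f(y)))
# where f(.) are search-engine hit counts and N is the assumed index size.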
def Distance(x,y,xy):# for computing relatedness score. This funciton is given to you
if x==0.0:
x=0.01
if y==0.0:
y=0.01
if xy==0.0:
xy=0.01
fx=math.log(x)
fy=math.log(y)
fxy=math.log(xy)
M=math.log(N)
NGD=(max(fx,fy)-fxy)/(M-min(fx,fy))
return NGD
def web_count(word):
tmp1=word+"websearch.html"
Query='http://www.bing.com/search?q='+word+'&go=&qs=n&form=QBLH&pq='+word+'&sc=8-4&sp=-1&sk=&cvid=6a7d298283ba46c0ba04272487cd60e2'
urllib.urlretrieve(Query,tmp1)
file2=open(tmp1,'r')
s1=str(file2.read())
start='<span class="sb_count" id="count">'
end='results</span>'
ind1=s1.find(start)
ind2=s1.find(end)
leng=len(start)
sub_str=s1[ind1+leng:ind2]
b=sub_str.strip().split(',')
new=""
for x in b:
new+=x
a=float(new)
return a
#create a temporary file name, can be an html file (.html or .txt)
#prepare a query using the search engine url and the input word
#use the urllib function to call the web and save the web page
#read the file and search for the hits for the input word (you may have to use some string operations for this)
#the function should return the count(hits) for the word
def web_data(key,cand_words):
keywordcount=web_count(key[0])
dic={}
scores={}
top=[0,0]
bob=[0,0]
for x in cand_words:
dic[x] = web_count(x)
dic[key[0]+" "+x] = web_count(key[0]+" "+x)
scores[key[0]+" "+x] = Distance(keywordcount,dic[x],dic[key[0]+" "+x])
#top[0]=scores["forest rain Score"]
#top[1]=top[0]
print scores
    for x in scores:
        if scores[x]>top[0]:
            # demote the previous best into second place before replacing it
            top[1]=top[0]
            bob[1]=bob[0]
            top[0]=scores[x]
            bob[0]=x
        elif scores[x]>top[1]:
            top[1]=scores[x]
            bob[1]=x
print bob
#get count(x) for the keyword
# iterate over each word in candidate words and get hit count for each word (y)
# while iterating you can also get the hits for the combination of the word with the keyword(xy)
#call the Distance function (given) with (x, y, xy) to get the relatedness score; store the scores in a
#dictionary keyed by candidate word so the 2 most related words can be found in the next step
#print the two most related words to the keyword from the list of candidate words
def extractor(l):
temp=l.strip().split('***')
temp2=temp[1].strip().split(',')
web_data(temp,temp2)
#takes the line as input
#split the line to get the keyword and the list of candidate words
#candidate words have to be stored in list
# make a call to the web_data function with the keyword and the candidate words list
def read_file():
file=open("test_file.txt",'r')
line=file.readlines()
for l in line:
extractor(l)
#read the test_file.txt
#read lines from the file
#iterate over each line and call the extractor function with each line
#call the read_file() function
read_file()
|
import numpy as np
from blob_mask import blob_mask, blob_mask_dim
from constants import feature_size, anchor_size, real_image_width, real_image_height
statuses = {
"normal": 0,
"hat": 1,
"ghost": 2,
}
s = 6
y_offset = 3 # The blob picture is not centered vertically around its position
def get_localization_data(picture_data):
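    # Build a per-cell class-label grid plus a bounding-box regression target
    # (center x, center y, width, height) on a feature_size x feature_size grid.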
target = np.zeros((feature_size, feature_size), dtype=np.int)
bounding_box_target = np.zeros((feature_size, feature_size, 4))
all_blobs = picture_data["army"] + picture_data["enemy"]
blob_ids = [j for j, b in enumerate(all_blobs) if (b["alive"] == True)]
for blob_id in blob_ids:
blob = all_blobs[blob_id]
x = int(blob["x"] * feature_size)
y = int(blob["y"] * feature_size)
label = 1 + statuses[blob["status"]]
if (blob_id >= 3):
label += 3
for a in np.arange(y - s - y_offset, y + s - y_offset):
for b in np.arange(x - s, x + s):
if (a >= 0) & (a < feature_size) & (b >= 0) & (b < feature_size):
target[a][b] = label
bounding_box_target[a][b][0] = x
bounding_box_target[a][b][1] = y - y_offset
bounding_box_target[a][b][2] = 2 * s
bounding_box_target[a][b][3] = 2 * s
return target, bounding_box_target
def get_true_mask(data):
all_blobs = data["army"] + data["enemy"]
mask = np.zeros((real_image_height, real_image_width), dtype=np.int)
for (blob_id, blob) in enumerate(all_blobs):
if (blob["alive"]):
x_init = int(blob["x"] * 742) - 26
y_init = int(blob["y"] * 594) - 31
for i in range(blob_mask_dim[0]):
for j in range(blob_mask_dim[1]):
y = i + y_init
x = j + x_init
if (x >= 0) & (y >= 0) & (y < real_image_height) & (x < real_image_width):
if (blob_mask[i][j] == 1):
mask[y][x] = blob_id + 1
return mask
|
import RPi.GPIO as GPIO
import time
import matplotlib.pyplot as plt
comparator_value = 4
troyka = 17
dac = [26, 19, 13, 6, 5, 11, 9, 10]
leds = [21, 20, 16, 12, 7, 8, 25, 24]
bits = len(dac)
levels = 2**bits
maxvoltage = 3.3
listofnums =[]
temp=0
def decimal2binary(dec):
return[int(bin) for bin in bin(dec)[2:].zfill(bits)]
def dec2dac(dec):
for i in range(bits):
GPIO.output(dac[i], dec[i])
GPIO.setmode(GPIO.BCM)
GPIO.setup(dac, GPIO.OUT, initial = GPIO.LOW)
GPIO.setup(leds, GPIO.OUT, initial = GPIO.LOW)
GPIO.setup(comparator_value, GPIO.IN)
GPIO.setup(troyka, GPIO.OUT, initial = GPIO.HIGH)
comp = GPIO.input(comparator_value)
def adc():
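    # Successive-approximation ADC: try each DAC bit from MSB to LSB; after the
    # comparator settles, keep the bit if the comparator reads 1, otherwise clear
    # it, then move on. After all 8 bits, light the LED bar from the result.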
znach = [1, 0, 0, 0, 0, 0, 0, 0, 0]
for sch in range(9):
dec2dac(znach)
        time.sleep(0.007) # a pause is needed between driving the comparator and reading its output
comp = GPIO.input(comparator_value)
if sch == 8:
val = 0
for i in range(8):
val += (2 ** (7 - i)) * znach[i]
n = int(val / 31)
for i in range (8):
if i <= n-1:
GPIO.output(leds[i], 1)
else:
GPIO.output(leds[i], 0)
print(val, znach, n)
elif comp == 0:
znach[sch] = 0
znach[sch + 1] = 1
elif comp == 1:
znach[sch + 1] = 1
return val
try:
vremya = time.time()
while(temp<250):
temp=adc()
listofnums.append(temp)
GPIO.output(troyka,0)
while(temp>2):
temp=adc()
listofnums.append(temp)
vremya2 = time.time()
valuesstr = [str(item) for item in listofnums]
with open("data.txt","w") as f:
f.write("\n".join(valuesstr))
with open("settings.txt","w") as z:
z.write("period:8ms values on V=0,0012 mV")
finally:
print(len(listofnums))
    print(vremya2-vremya,vremya2,vremya)
GPIO.cleanup()
plt.plot(listofnums)
plt.show()
|
import numpy as np
import pandas as pd
import tensorflow as tf
import pickle
import copy
from scipy.stats import wasserstein_distance
from spyro.builders import build_mlp_regressor, build_distributional_dqn
from spyro.core import BaseAgent
from spyro.losses import quantile_huber_loss
from spyro.utils import progress
from spyro.value_estimation.core import (
BaseParallelValueEstimator
)
class TabularValueEstimator(BaseParallelValueEstimator):
"""Class that gathers experiences from all states using parallel workers and stores its
performanc characteristics in a table."""
def __init__(self, name="TabularEstimator", *args, **kwargs):
super().__init__(*args, **kwargs)
self.table = {}
def process_performed_task(self, task):
"""Store results of a task in a table."""
state = tuple(task.pop("state"))
self.table[state] = task
def save_table(self, path="../results/state_value_table.pkl"):
pickle.dump(self.table, open(path, "wb"))
progress("Table saved at {}".format(path))
def load_table(self, path):
self.table = pickle.load(open(path, "rb"))
class DataSetCreator(BaseParallelValueEstimator):
"""Class that gathers experiences from parallel workers and creates a dataset with
states and responses (and targets) from those states.
This class is intended to create test (and validation) datasets to evaluate trained
estimators on.
Parameters
----------
name: str, default="DataSetCreator"
Name of the object.
*args, **kwargs: any
Parameters passed to the Base class.
"""
def __init__(self, name="DataSetCreator", *args, **kwargs):
super().__init__(name=name, *args, **kwargs)
# use the same method when performing tasks as when obtaining random experiences
self.process_random_experience = self._process_experience
self.process_performed_task = self._process_experience
def create_data(self, env_cls, size=1000000, permutations=False, save=True,
env_params=None, save_path="../results/dataset.csv", *args, **kwargs):
"""Generate a dataset.
Parameters
----------
env_cls: Python class
The environment to obtain experiences from.
size: int, default=1000000
The number of samples to simulate.
permutations: bool, default=False
Whether to force every state to be visited (True) or simulate according to patterns
in the simulation.
save: bool, default=True
Whether to save the resulting dataset.
env_params: dict, default=None
Dictionary with key-value pairs to pass to the env_cls upon initialization.
save_path: str, default="../results/dataset.csv"
Where to save the resulting dataset.
*args, **kwargs: any
Arguments passed to the fit method of the BaseParallelValueEstimator.
"""
self.data_state = np.empty((size,) + self.state_shape)
self.data_response = np.empty(size)
self.data_target = np.empty(size)
self.index = 0
progress("Creating dataset of {} observations.".format(size))
self.fit(env_cls, permutations=permutations, total_steps=size, env_params=env_params, *args, **kwargs)
progress("Dataset created.")
if save:
self.save_data(path=save_path)
def _process_experience(self, experience):
"""Store results of a task in a table."""
try:
self.data_state[self.index, :] = experience["state"]
self.data_response[self.index] = experience["response"]
self.data_target[self.index] = experience["target"]
self.index += 1
except IndexError:
# if data template is full, skip remaining experiences.
pass
def save_data(self, path="../results/dataset.csv"):
"""Save the created data as a csv file."""
df = pd.DataFrame(
np.concatenate([self.data_state, self.data_response.reshape(-1, 1), self.data_target.reshape(-1, 1)], axis=1),
columns=["state_{}".format(j) for j in range(self.data_state.shape[1])] + ["response", "target"]
)
df.to_csv(path, index=False)
progress("Dataset saved at {}".format(path))
class NeuralValueEstimator(BaseParallelValueEstimator, BaseAgent):
"""Class that gathers experiences (values) from states using parallel workers and trains a
neural network to predict them.
Parameters
----------
n_neurons: int, default=1024
The number of neurons in each layer of the neural network.
n_layers: int, default=4
The number of hidden layers in the neural network.
quantiles: bool, default=False
Whether to use Quantile Regression instead of regular mean estimation.
num_workers: int, default=-1
The number of worker processes to use. If -1, uses one per available per CPU core.
"""
def __init__(self, memory, n_neurons=1024, n_layers=4, quantiles=False, num_atoms=51,
activation="relu", optimization="adam", learning_rate=1e-4, name="NeuralEstimator",
gradient_clip=None, train_frequency=4, warmup_steps=50000, batch_size=64,
kappa=1, log=True, logdir="./log/value_estimation", *args, **kwargs):
self.memory = memory
self.n_neurons = n_neurons
self.n_layers = n_layers
self.quantiles = quantiles
self.num_atoms = num_atoms
self.activation = activation
self.optimization = optimization
self.learning_rate = learning_rate
self.gradient_clip = gradient_clip
self.batch_size = batch_size
self.train_frequency = train_frequency
self.warmup_steps = warmup_steps
self.kappa = kappa
BaseParallelValueEstimator.__init__(self, name=name, *args, **kwargs)
BaseAgent.__init__(self, None, learning_rate=learning_rate, logdir=logdir, log=log,
log_prefix=self.name + "_run")
self._init_graph()
def _init_graph(self):
"""Initialize the Tensorflow graph based on initialization arguments."""
self.session = tf.Session()
with tf.variable_scope(self.name):
self.states_ph = tf.placeholder(tf.float64, shape=(None,) + self.state_shape, name="states_ph")
self.values_ph = tf.placeholder(tf.float64, shape=(None, 1), name="rewards_ph")
# Quantile Regression
if self.quantiles:
self.value_prediction = build_distributional_dqn(
self.states_ph, 1, self.num_atoms,
n_layers=self.n_layers, n_neurons=self.n_neurons,
activation=self.activation
)
self.targets = tf.reshape(tf.tile(self.values_ph, [1, self.num_atoms]), (-1, self.num_atoms))
self.quantile_predictions = tf.reshape(self.value_prediction, (-1, self.num_atoms))
self.errors = tf.subtract(tf.stop_gradient(self.targets), self.quantile_predictions)
self.loss = quantile_huber_loss(self.errors, kappa=self.kappa, three_dims=False)
# Regular Regression
else:
self.value_prediction = build_mlp_regressor(
self.states_ph, self.n_layers, self.n_neurons,
activation=self.activation, output_dim=1
)
self.loss = 0.5 * tf.reduce_mean(
tf.squeeze(tf.square(self.value_prediction - tf.stop_gradient(self.values_ph)))
)
# Minimize the loss using gradient descent (possibly clip gradients before applying)
self.optimizer = self.tf_optimizers[self.optimization](learning_rate=self.learning_rate)
self.weights = tf.trainable_variables(scope=self.name)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss, self.weights)
if self.gradient_clip is not None:
a_min, a_max = self.gradient_clip
self.grads_and_vars = [(tf.clip_by_value(grad, a_min, a_max), var) for grad, var in self.grads_and_vars]
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars)
if self.log:
self.summary_op = tf.summary.scalar("loss", self.loss)
self.val_loss = tf.placeholder(tf.float64, shape=())
self.epoch_summary_op = tf.summary.scalar("validation_loss", self.val_loss)
self.summary_writer = tf.summary.FileWriter(self.logdir, self.session.graph)
self.session.run(tf.global_variables_initializer())
def process_random_experience(self, experience):
"""Process an experience by storing it in memory and (at training time) sampling a
batch from that memory and train on it.
Parameters
----------
experience: dict
Contains a description of the experience: keys must be ['state', 'response', 'target'],
where state is array-like and response and target are scalars (or NaN).
"""
# store experience in memory
self.memory.store(copy.copy(experience["state"]), experience["response"], experience["target"])
# train at training time
if (self.global_counter % self.train_frequency == 0) and (self.global_counter >= self.warmup_steps):
# sample batch
states, responses, _ = self.memory.sample(self.batch_size)
# train and log
if self.log:
_, summary = self.session.run(
[self.train_op, self.summary_op],
feed_dict={
self.states_ph: states,
self.values_ph: responses
}
)
self.summary_writer.add_summary(summary, self.global_counter)
# or just train
else:
self.session.run(
self.train_op, feed_dict={
self.states_ph: states,
self.values_ph: responses
}
)
def predict_quantiles(self, X, batch_size=10000):
"""Predict quantiles of the response time distribution of a set of states.
Parameters
----------
X: array-like, 2D
The input data to predict.
batch_size: int, default=10000
The batch size to use when predicting. Influences memory and time costs.
Returns
-------
Y_hat: np.array
The predicted quantiles of the response time with shape [n_samples, n_quantiles].
"""
assert self.quantiles, ("predict_quantiles can only be done for Quantile Regression"
"networks (initialize with quantiles=True).")
if isinstance(X, pd.DataFrame):
X = X.values
if batch_size is None:
return self.session.run(self.quantile_predictions, feed_dict={self.states_ph: X})
else:
outputs = [
self.session.run(
self.quantile_predictions,
feed_dict={self.states_ph: X[(i * batch_size):((i + 1)*batch_size), :]}
)
for i in range(int(np.ceil(len(X) / batch_size)))
]
return np.concatenate(outputs, axis=0)
def predict(self, X, batch_size=10000):
"""Predict the expected value / response time of set of states.
Parameters
----------
X: array-like, 2D
The input data to predict.
batch_size: int, default=10000
The batch size to use when predicting. Influences memory and time costs.
Returns
-------
Y_hat: np.array
The predicted values / responses.
"""
if isinstance(X, pd.DataFrame):
X = X.values
if self.quantiles:
Y_hat = self.predict_quantiles(X, batch_size=batch_size)
return Y_hat.mean(axis=1).reshape(-1, 1)
elif batch_size is None:
return self.session.run(self.value_prediction, feed_dict={self.states_ph: X})
else:
outputs = [
self.session.run(
self.value_prediction,
feed_dict={self.states_ph: X[(i * batch_size):((i + 1)*batch_size), :]}
)
for i in range(int(np.ceil(len(X) / batch_size)))
]
return np.concatenate(outputs, axis=0)
def create_predicted_table(self):
"""Create a table like the TabularValueEstimator from predicted quantiles or means."""
tasks = self.define_tasks()
# create array of states to predict
X = np.array([list(task["state"]) for task in tasks])
if self.quantiles:
Y_hat = self.predict_quantiles(X)
else:
Y_hat = self.predict(X)
return {tuple(task["state"]): Y_hat[i, :] for i, task in enumerate(tasks)}
def fit(self, env_cls, epochs=100, steps_per_epoch=100000, warmup_steps=50000,
validation_freq=1, val_batch_size=10000, validation_data=None, permutations=False,
env_params=None, metric="mae", eval_quants=False, verbose=True, save_freq=0, *args, **kwargs):
"""Fit the estimator on the environment.
Parameters
----------
env_cls: Python class
The environment to train on.
epochs: int, default=100
The number of epochs to train.
steps_per_epoch: int, default=100,000
The number of steps to count as one epoch.
validation_freq: int, default=1
After how many epochs to evaluate on validation data. Set to 0 if you don't want
to validate.
val_batch_size: int, default=10,000
The batch size to use in validation.
validation_data: tuple(array-like, array-like)
The data to use for validation.
permutations: bool, default=False
Whether to sample all state-permutations for training (True) or just sample
according to distributions in the simulation (False).
env_params: dict
Parameters passed to env_cls upon initialization.
metric: str, default='mae'
        Metric to use for evaluation. One of ['mae', 'mse', 'rmse', 'wasserstein'].
eval_quants: bool, default=False
Whether to evaluate on quantile values directly (True) or on expectation (False).
Only relevant when self.quantiles=True.
verbose: bool, default=True
Whether to print progress updates.
save_freq: int, default=0
After how many epochs to save the model weights to the log directory.
If save_freq=0 or self.log=False, does not save.
*args, **kwargs: any
Parameters passed to perform_tasks or gather_random_experiences.
"""
def is_time(bool_, freq):
if bool_ and (freq > 0):
if epoch % freq == 0:
return True
return False
if warmup_steps is not None:
self.warmup_steps = warmup_steps
self.verbose = verbose
if (validation_data is not None) and (validation_freq > 0):
val_x, val_y = validation_data
validate = True
else:
validate = False
for epoch in range(epochs):
# train
            self.gather_random_experiences(env_cls, env_params=env_params, total_steps=steps_per_epoch,
                                           start_step=epoch*steps_per_epoch, *args, **kwargs)
# evaluate
if is_time(validate, validation_freq):
loss = self.evaluate(val_x, val_y, metric=metric, raw_quantiles=eval_quants,
batch_size=val_batch_size, verbose=False)
self._log_validation_loss(loss, epoch + 1)
progress("Epoch {}/{}. Val score: {}".format(epoch + 1, epochs, loss), verbose=verbose)
# save weights
if is_time(self.log, save_freq):
self.save_weights()
progress("Completed {} epochs of training.".format(epochs), verbose=verbose)
        if validate and ((epochs - 1) % validation_freq != 0):  # if not validated after the last epoch
loss = self.evaluate(val_x, val_y, metric=metric, raw_quantiles=eval_quants,
batch_size=val_batch_size, verbose=False)
self._log_validation_loss(loss, epoch + 1)
progress("Final validation score: {}", verbose=verbose)
def _log_validation_loss(self, loss, epoch):
if self.log:
val_summary = self.session.run(self.epoch_summary_op, feed_dict={self.val_loss: loss})
self.summary_writer.add_summary(val_summary, epoch)
def evaluate(self, X, Y, metric="mae", raw_quantiles=False, batch_size=10000, verbose=True):
"""Evaluate on provided data after training.
Parameters
----------
X, Y: array-like, 2D
The input data and corresponding labels to evaluate on.
batch_size: int, default=10000
The batch size to use when predicting. Influences memory and time costs.
"""
if isinstance(X, pd.DataFrame):
X = X.values
if isinstance(Y, pd.DataFrame):
Y = Y.values
if len(Y.shape) == 1:
Y = Y.reshape(-1, 1)
if raw_quantiles:
Y_hat = self.predict_quantiles(X, batch_size=batch_size)
else:
Y_hat = self.predict(X, batch_size=batch_size)
if metric == "mae":
loss = np.abs(Y - Y_hat).mean()
elif metric == "mse":
loss = np.square(Y - Y_hat).mean()
elif metric == "rmse":
loss = np.sqrt(np.square(Y - Y_hat).mean())
elif metric == "wasserstein":
assert raw_quantiles, "Wasserstein distance is only relevant when raw_quantiles=True"
loss = np.mean([wasserstein_distance(Y[i, :], Y_hat[i, :]) for i in range(len(Y))])
progress("Evaluation score ({}): {}".format(metric, loss), verbose=self.verbose)
return loss
def evaluate_on_quantiles(self, quantile_table):
"""Evaluate the trained quantile estimator on simulated quantiles rather than
individual observations.
Parameters
----------
quantile_table: dict
Must have states as keys and arrays of quantile values as values. These quantile
values must represent the same quantiles as the estimator has learned and must
thus be of the same length.
Returns
-------
wasserstein_distance: float
The Wasserstein distance between the learned and provided distributions.
"""
predicted_quantiles = self.create_predicted_table()
# assure quantiles are in the same order
Y_hat = np.array([predicted_quantiles[state] for state in quantile_table.keys()])
Y = np.array(list(quantile_table.values()))
# calculate Wasserstein distance
loss = np.mean([wasserstein_distance(Y[i, :], Y_hat[i, :]) for i in range(len(Y))])
return loss
def get_config(self):
"""Get the configuration of the estimator as a dictionary. Useful for reconstructing a
trained estimator."""
return {
"memory": self.memory.get_config(),
"n_neurons": self.n_neurons,
"n_layers": self.n_layers,
"quantiles": self.quantiles,
"num_atoms": self.num_atoms,
"activation": self.activation,
"optimization": self.optimization,
"learning_rate": self.learning_rate,
"name": self.name,
"gradient_clip": self.gradient_clip,
"train_frequency": self.train_frequency,
"warmup_steps": self.warmup_steps,
"batch_size": self.batch_size,
"log": self.log,
"logdir": self.logdir,
"strategy": self.strategy
}
|
from pylab import *
import numpy
ncores = numpy.arange(1,128,1)
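# Amdahl's law: speedup S(n) = 1 / (s + (1 - s)/n) with serial fraction s = 0.1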
plot(ncores, 1.0/(0.1+0.9/ncores))
show()
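# Parallel efficiency: speedup per core, S(n)/n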
plot(ncores, (1.0/(0.1+0.9/ncores))/ncores)
show()
|
f = open("./dangan:ot-1228.csv")
count = 0
for line in f:
k = line.split(",")
for i in k:
count += 1
print count
|
#This program uses a nested for loop to identify which meals do and don't contain spam.
#It lists each ingredient of the spam-free meals and calculates a spam score for the rest.
menu = [
["egg", "bacon"],
["egg", "sausage", "bacon"],
["egg", "spam"],
["egg", "bacon", "spam"],
["egg", "bacon", "sausage", "spam"],
["spam", "bacon", "sausage", "spam"],
["spam", "sausage", "spam", "bacon", "spam", "tomato", "spam"],
["spam", "egg", "spam", "spam", "bacon", "spam"],
]
for meal in menu:
if "spam" not in meal:
print(meal)
for item in meal:
print(item)
else:
print("{0} has a spam score of {1}"
.format(meal, meal.count("spam")))
|
total = 0
final_total = 0
final_depth_level = 10
current_depth = 1
print "Gimme a number less than 10 "
#x = int(raw_input('> '))
x=8
print "Gimme another number less than 10 "
#y = int(raw_input('> '))
y=9
def recursion_depth(depth):
    global total, final_total, current_depth
    if (current_depth == 1):
        print "at depth of "+str(depth)
        total = x + y
        previous_total = y
        final_total = total
        current_depth += 1
    elif (current_depth >= final_depth_level):
        print "end depth " + str(depth)
        exit()
    else:
        print "Exiting"
    print "here"
def add_ten_times(g, h):
print g
print h
result1 = g + h
total = result1
print result1
result2 = h + result1
total += result2
print result2
result3 = result1 + result2
total += result3
print result3
result4 = result3 + result2
total += result4
print result4
result5 = result4 + result3
total += result5
print result5
result6 = result5 + result4
total += result6
print result6
result7 = result6 + result5
total += result7
print result7
result8 = result7 + result6
total += result8
print result8
result9 = result8 + result7
total += result9
print result9
result10 = result9 + result8
total += result10
print result10
print "total via addition " + str(total)
total_7th_item_times_11 = int(result7)*11
print "total via 7th item times 11 " + str(total_7th_item_times_11)
#for number_of_additions in range(1, 10):
add_ten_times(x,y)
recursion_depth(current_depth)
|
import random
import math
def generatelots(n):
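    # Generate n random candidate codes, each a list of 16 digits (0-9).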
l = []
for x in xrange(1,n+1):
k = []
for y in xrange(0,16):
k.extend([random.randrange(0,10)])
l.append(k)
return l
def strangedistance(n,m):
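    # Squared difference between the candidate's positional matches with guess m[0]
    # and that guess's reported match count m[1] (Mastermind-style feedback).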
count = 0
for i in xrange(0,len(m[0])):
if n[i] == m[0][i]:
count += 1
return (count-m[1])**2
def reproduction(mom,dad):
sex = []
l = len(mom)
baby = []
for x in xrange(1, l+1):
sex.extend([random.randrange(0,2)])
for x in xrange(0, l):
if sex[x] == 0:
baby.extend([mom[x]])
elif sex[x] == 1:
baby.extend([dad[x]])
return baby
def findBest(n, l):
elite = []
for x in xrange(0,n):
elite.extend([l[x]])
elite.sort()
# print l
# print elite
for x in xrange(n,len(l)):
if l[x][0] < elite[n-1][0]:
for y in xrange(0,n):
if l[x][0] < elite[y][0]:
elite.insert(y, l[x])
elite = elite[:n]
break
# print elite
return elite
# print findBest(3, [[5,[2,3,4,5]], [1,[3,4,5,6]], [1, [4,5,6,2]], [2, [3,3,3,3]], [1, [1,1,2,3]]])
m = [[[5,6,1,6,1,8,5,6,5,0,5,1,8,2,9,3],2], [[3,8,4,7,4,3,9,6,4,7,2,9,3,0,4,7],1],[[5,8,5,5,4,6,2,9,4,0,8,1,0,5,8,7],3],[[9,7,4,2,8,5,5,5,0,7,0,6,8,3,5,3],3],[[4,2,9,6,8,4,9,6,4,3,6,0,7,5,4,3], 3],[[3,1,7,4,2,4,8,4,3,9,4,6,5,8,5,8],1],[[4,5,1,3,5,5,9,0,9,4,1,4,6,1,1,7],2],[[7,8,9,0,9,7,1,5,4,8,9,0,8,0,6,7] ,3],[[8,1,5,7,3,5,6,3,4,4,1,1,8,4,8,3],1],[[2,6,1,5,2,5,0,7,4,4,3,8,6,8,9,9] ,2],[[8,6,9,0,0,9,5,8,5,1,5,2,6,2,5,4],3],[[6,3,7,5,7,1,1,9,1,5,0,7,7,0,5,0],1],[[6,9,1,3,8,5,9,1,7,3,1,2,1,3,6,0],1],[[6,4,4,2,8,8,9,0,5,5,0,4,2,7,6,8],2],[[2,3,2,1,3,8,6,1,0,4,3,0,3,8,4,5] ,0],[[2,3,2,6,5,0,9,4,7,1,2,7,1,4,4,8] ,2],[[5,2,5,1,5,8,3,3,7,9,6,4,4,3,2,2] ,2],[[1,7,4,8,2,7,0,4,7,6,7,5,8,2,7,6] ,3],[[4,8,9,5,7,2,2,6,5,2,1,9,0,3,0,6] ,1],[[3,0,4,1,6,3,1,1,1,7,2,2,4,6,3,5] ,3],[[1,8,4,1,2,3,6,4,5,4,3,2,4,5,8,9] ,3],[[2,6,5,9,8,6,2,6,3,7,3,1,6,8,6,7] ,2]]
def guess(l):
for x in xrange(1,100):
score = []
n = len(l)
for i in xrange(0,n):
            score.append([sum(strangedistance(l[i],m[j]) for j in xrange(0, len(m))) ** (0.5), l[i]])
elite = findBest(int(math.floor(n**(0.5))), score)
# print elite
l=[]
        for i in xrange(0,len(elite)):
            for j in xrange(0,len(elite)):
                l.append(reproduction(elite[i][1], elite[j][1]))
return [l[0], score[0][0]]
shit = generatelots(100)
print shit[0]
print guess(shit)
# shit = generatelots(100)
# # print shit
# # print guess(shit)
# # print guess(guess(shit))
# # shit = 2
# for x in xrange(1,100):
# shit = guess(shit)
# print shit
|
from django.shortcuts import render
from visitations import models
from django.urls import reverse_lazy
from django.views.generic.list import ListView
from patients.models import Patient
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView, CreateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def VisitationsView(request, patient):
object_list = models.Visitation.objects.all().filter(patient=patient)
context_dict = {'object_list' : object_list, 'patient' : patient}
return render(request, 'visitations/visitation_list.html', context=context_dict)
class VisitationCreateView(LoginRequiredMixin, CreateView):
login_url = 'login'
redirect_field_name = 'redirect_to'
model = models.Visitation
template_name = 'visitations/visitation_form.html'
fields = ['appointment_date', 'appointment_time', 'patient', 'note']
def get_initial(self):
initial = super().get_initial()
initial['patient'] = Patient.objects.get(pk=self.kwargs['patient'])
return initial
class VisitationDetailView(LoginRequiredMixin, DetailView):
login_url = 'login'
redirect_field_name = 'redirect_to'
model = models.Visitation
template_name = 'visitations/visitation.html'
class VisitationUpdateView(LoginRequiredMixin, UpdateView):
login_url = 'login'
redirect_field_name = 'redirect_to'
model = models.Visitation
template_name = 'visitations/visitation_form.html'
fields = ['appointment_date', 'appointment_time', 'patient', 'note']
class VisitationDeleteView(LoginRequiredMixin, DeleteView):
login_url = 'login'
redirect_field_name = 'redirect_to'
model = models.Visitation
template_name = 'visitations/visitation_confirm_delete.html'
def get_success_url(self):
visitation = models.Visitation.objects.get(pk=self.kwargs['pk'])
return reverse_lazy("visitations:visitation_list", kwargs={'patient':visitation.patient.pk})
|
#!/usr/bin/python
class Solution(object):
def canWinNim(self, n):
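        # With 1-3 stones removed per turn, the first player loses
        # exactly when n is a multiple of 4.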
if n%4 == 0:
return False
return True
|
import numpy as np
import sys
import math
import time
def partition(lista,inicio,fim):
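    # Partition around the first element as pivot; returns the pivot's final index.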
pivo = lista[inicio]
i = inicio + 1
j = fim
while (i <= j):
if(lista[i] <= pivo):
i += 1
elif(lista[j] > pivo):
j -= 1
elif(i <= j):
lista[i],lista[j] = lista[j],lista[i]
i+= 1
j-=1
lista[inicio],lista[j] = lista[j],lista[inicio]
return j
def quickSort(lista,l,r):
if(l < r):
q = partition(lista,l,r)
quickSort(lista,l,q-1)
quickSort(lista,q+1,r)
def lerArquivo():
arquivo = 'instancias-num/' + sys.argv[1]
f = open(arquivo,'r')
conteudo = f.readlines()
entrada = []
for i in range(len(conteudo)): entrada.append(int(conteudo[i]))
return entrada
def escreveResultado(saida):
arquivo = 'resposta-quickSort-' + sys.argv[1]
f = open(arquivo, 'w')
res = []
for i in range(len(saida)): res.append(str(saida[i])+'\n')
f.writelines(res)
if __name__ == '__main__':
print("Lendo arquivo...")
entrada = lerArquivo()
print("Arquivo Lido!!")
print("\nProcessando...")
inicio,fim = 0, len(entrada)-1
start = time.time()
#print(entrada)
quickSort(entrada,inicio,fim)
finish = time.time()
print("\nProcessado em: ",(finish - start), "s")
print("Escrevendo Arquivo...")
#print(entrada)
escreveResultado(entrada)
print("Concluído!")
|
import serial
import time
# can be easily replaced with a file name for testing without servos
#ser = serial.Serial('/dev/ttyACM0', 9600)
ser = serial.Serial('COM4', 9600)
#ser = open("servo_output.txt", "w")
# tested and functional
def add_zeros_to_int(int_val):
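    # Zero-pad the value to three characters (equivalent to str(int_val).zfill(3)).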
if(len(str(int_val)) == 1):
return "00" + str(int_val)
elif(len(str(int_val)) == 2):
return "0" + str(int_val)
else:
return str(int_val)
def send(motor_hash, ser):
item_count = 0
for motor, pos in last_sent.iteritems():
if motor in motor_hash:
ser.write("%s:%s"
%(
servo_table[motor],
add_zeros_to_int(motor_hash[motor])
)
)
else:
ser.write("%s:%s"
%(
servo_table[motor],
add_zeros_to_int(pos)
)
)
if(item_count < 2):
ser.write(",")
item_count += 1
ser.write("\n")
happy_test_vals = [{"finger":20}, {"finger":3, "thumb":4}, {"finger":180, "under":0}, {"finger":0, "under":180, "thumb":90}, {"finger":900, "under":900, "thumb":900}]
error_test_vals = [{"finger":180, "under":180}, {"finger":180, "under":180, "thumb":90}, {"finger":0, "under":180, "thumb":90, "dicks":20}]
last_sent = {"finger":0, "thumb":0, "under":180}
servo_table = {"finger":0, "thumb":1, "under":2}
for test_val in happy_test_vals:
time.sleep(3)
send(test_val, ser)
|
num = 0 # initial value
while num <= 3: # loop condition
    print("num = %d"%num)
    num += 1 # update expression (increment or decrement)
print("""어제 호텔 델루나 봤어?
꼭 봐라 두 번 봐라!!""")
count = 2
while count: # a nonzero number is truthy; 0 is falsy
print("재방송을 시작합니다.")
count -= 1
print("두 번 다봤어")
print("열 번 찍어 안넘어가는 나무 없다.")
hit = 0
while hit < 10:
hit += 1
print("나무를 %d번 찍었습니다."%hit)
print("나무가 넘어갔습니다.")
|
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch, os, re, requests
from .tasks import sh, get, project, Elidable, Secret, TaskError
def next_page(response):
if "Link" in response.headers:
links = requests.utils.parse_header_links(response.headers["Link"])
for link in links:
if link['rel'] == 'next':
return link['url']
return None
def inject_token(url, token):
if not token: return url
parts = url.split("://", 1)
if len(parts) == 2:
return Elidable("%s://" % parts[0], Secret(token), "@%s" % parts[1])
else:
return Elidable(Secret(token), "@%s" % parts[0])
class Github(object):
def __init__(self, token):
self.token = token
self._headers = {'Authorization': 'token %s' % self.token} if self.token else None
def get(self, api):
return get("https://api.github.com/%s" % api, headers=self._headers)
def paginate(self, api):
response = self.get(api)
yield response
if response.ok:
next_url = next_page(response)
while next_url:
response = get(next_url, headers=self._headers)
next_url = next_page(response)
yield response
def pull(self, url, directory):
if not os.path.exists(directory):
os.makedirs(directory)
sh("git", "init", cwd=directory)
sh("git", "pull", inject_token(url, self.token), cwd=directory)
def list(self, organization, filter="*"):
repos = []
for response in self.paginate("orgs/%s/repos" % organization):
repos.extend(response.json())
filtered = [r for r in repos if fnmatch.fnmatch(r["full_name"], filter)]
real_repos = project(lambda x: self.get(x).json(), ["repos/%s" % r["full_name"] for r in filtered])
urls = [(r["full_name"], r["clone_url"]) for r in real_repos if "id" in r]
return urls
def exists(self, url):
result = sh("git", "-c", "core.askpass=true", "ls-remote", inject_token(url, self.token), "HEAD",
expected=xrange(256))
if result.code == 0:
return True
elif re.search(r"(fatal: repository '.*' not found|ERROR: Repository not found)", result.output):
return False
else:
raise TaskError(result)
def remote(self, directory):
result = sh("git", "remote", "get-url", "origin", cwd=directory, expected=xrange(256))
if result.code == 0:
return result.output.strip()
else:
if "Not a git repository" in result.output:
return None
else:
raise TaskError(str(result))
def clone(self, url, directory):
sh("git", "-c", "core.askpass=true", "clone", "--depth=1", inject_token(url, self.token), directory)
|
import os
import json
import urllib
import urllib2
import cPickle
import codecs
import time
import datetime
import sys
reload(sys)
sys.setdefaultencoding("latin-1")
from xml2dict import XML2Dict
from unicodedata import normalize
from sets import Set
import pdb
from django.utils.timesince import timeuntil
class Ponto:
def __init__(self,numero, dados):
self.numero = numero
self.latitude = u"%s"%str(dados[5])
self.longitude = u"%s"%str(dados[4])
self.ordem = 0
def __repr__(self):
return str(self.numero)
class Previsao:
def __init__(self, xml):
if not xml.previsao.has_key('ponto'):
self.estimativas = []
return None
if xml.previsao.ponto.has_key('estimativa'):
self.estimativas = xml.previsao.ponto.estimativa
if not isinstance(self.estimativas, list):
self.estimativas = [self.estimativas,]
self.estimativas.sort(key=lambda e: e.viagem.horario)
else:
self.estimativas = []
class PontoVitoria:
pontos = []
def __init__(self,url_base="http://rast.vitoria.es.gov.br/pontovitoria/"):
self.url_base = url_base
self.pasta_dados = os.path.join(os.path.dirname(__file__),'../dados/')
self.referer = "http://rast.vitoria.es.gov.br/pontovitoria/"
self.key = None
    def get_key(self):
        """
        Extracts the API key from the site's previsao.js script (cached after the first call).
        """
        if self.key:
            return self.key
        req = urllib2.Request("%sjs/principal/previsao.js"% self.url_base)
        req.add_header('Referer',self.referer)
        pg = urllib2.urlopen(req)
        texto = pg.read()
        self.key= texto.split('key')[1].split('|')[2]
        return self.key
        # return "311378"  # old hard-coded fallback (unreachable)
def getPontos(self):
"""
        Returns the list of stops from the Ponto Vitoria site.
        Example:
>>> pv= PontoVitoria()
>>> p = pv.getPontos()
>>> len(p)
998
"""
if PontoVitoria.pontos:
return PontoVitoria.pontos
if not os.path.exists(self.pasta_dados+"/pontos/lista_de_pontos.json") :
req = urllib2.Request("%sutilidades/retornaPontos" % self.url_base)
req.add_header('Referer',self.referer)
pg = urllib2.urlopen(req)
texto = pg.read()
with open(self.pasta_dados+'/pontos/lista_de_pontos.json','w') as fp:
fp.write(texto)
with open(self.pasta_dados+"/pontos/lista_de_pontos.json",'r') as fp:
pontos = json.loads(fp.read(),'utf-8')
PontoVitoria.pontos = []
for k in pontos.keys():
k = k.strip()
if k[0].isdigit():
p=Ponto(int(k),pontos[k])
PontoVitoria.pontos.append(p)
return PontoVitoria.pontos
def linhasQuePassamNoPonto(self,ponto):
"""
        Detects which bus lines pass through the stop given as a parameter.
        Usage examples:
        >>> pv = PontoVitoria()
        >>> pv.linhasQuePassamNoPonto(6043) # stop near my house
        [u'112', u'122', u'163', u'212', u'214', u'303']
        >>> pv.linhasQuePassamNoPonto(6166) # stop near UFES
[u'121', u'122', u'123', u'160', u'161', u'163', u'214', u'241']
"""
linhas_file= self.pasta_dados+"/pontos/%s.json" % ponto
if not os.path.exists(linhas_file) :
parametros = {'ponto_oid':ponto }
req = urllib2.Request("%sutilidades/listaLinhaPassamNoPonto" % self.url_base,urllib.urlencode(parametros))
req.add_header('Referer',self.referer)
pg = urllib2.urlopen(req)
texto = pg.read()
with open(linhas_file,'w') as fp:
fp.write(texto)
with open(linhas_file,'r') as fp:
linhas = json.loads(fp.read(),'utf-8')
passam = []
for k in linhas['data']:
oid=k['linha'].split(" -")[0]
passam.append(oid)
return passam
def linhasQueFazemPercurso(self, ponto_inicial, ponto_destino):
"""
        Returns the list of lines that cover a given route.
        Usage examples:
        >>> pv = PontoVitoria()
        >>> pv.linhasQueFazemPercurso(6043,6166) # home -> UFES
        [u'122', u'163', u'214']
        >>> pv.linhasQueFazemPercurso(6043,5059) # home -> Shopping Vitoria
[u'212', u'214']
"""
linhas_inicio = self.linhasQuePassamNoPonto(ponto_inicial)
linhas_destino = self.linhasQuePassamNoPonto(ponto_destino)
linhas_do_percurso = list(set(linhas_inicio).intersection(set(linhas_destino)))
linhas_do_percurso.sort()
return linhas_do_percurso
def _getPrevisao(self,linha="163",ponto="6039"):
url = "%sprevisao?ponto=%s&linha=%s&key=%s" % (self.url_base,ponto,linha,self.get_key())
print url
req = urllib2.Request(url)
req.add_header('Referer',self.referer)
pg = urllib2.urlopen(req)
return pg.read()
def getPrevisao(self,linha="163",ponto="6039",cache = True):
"""
>>> pv = PontoVitoria()
>>> pv.getPrevisao(cache=False)
"""
if cache:
prev_file= self.pasta_dados+"/previsoes/%s/%s.json" % (linha,ponto)
if not os.path.exists(prev_file):
if not os.path.exists(os.path.dirname(prev_file)):
os.mkdir(os.path.dirname(prev_file))
texto = self._getPrevisao(linha,ponto)
with open(prev_file,'w') as fp:
fp.write(texto)
else:
with open(prev_file,"r") as f:
texto = f.read()
else:
texto = self._getPrevisao(linha,ponto)
xml = XML2Dict()
r = xml.fromstring(texto)
previsao = Previsao(r)
return previsao
def getHorarios(self, ponto):
estimativas = []
for l in self.linhasQuePassamNoPonto(ponto):
prev = self.getPrevisao(l,ponto,False)
estimativas +=prev.estimativas
horarios ={}
for e in estimativas:
ee = formata_horario(e)
o={}
if ee == {}:
continue
if not horarios.has_key(ee['linha']):
horarios[ee['linha']] = []
o['data_estimada']="%s (%s)" % (ee['data_estimada'].strftime("%H:%M"),timeuntil(ee['data_estimada']))
o['data_horario']=ee['data_horario'].strftime("%H:%M")
o['data_pacote']=ee['data_pacote'].strftime("%H:%M")
horarios[ee['linha']].append(o)
return horarios
def getPontosDaLinha(self,linha):
"""
        Returns the stops that belong to a given line.
        Usage examples:
>>> pv = PontoVitoria()
>>> pontos = pv.getPontosDaLinha('163')
>>> len(pontos)
89
>>> pontos = pv.getPontosDaLinha('121')
>>> len(pontos)
141
"""
pv = PontoVitoria()
pontos = pv.getPontos()
pontos_linha = []
for p in pontos:
linhas = pv.linhasQuePassamNoPonto(p.numero)
if linha in linhas:
pontos_linha.append(p)
return pontos_linha
if __name__== "__main__":
import doctest
doctest.testmod()
pv = PontoVitoria()
p = pv.getPontosDaLinha(163)
len(p)
import pytz
tz_brasil = pytz.timezone("America/Sao_Paulo")
def formata_horario(estimativa):
dados = {}
try:
if estimativa.has_key("linha"):
dados['linha']=estimativa.linha.identificador
dados['veiculo']=estimativa.veiculo.rotulo
data = estimativa.horarioPacote
dados['data_pacote']=datetime.datetime.fromtimestamp(int(data[:-3]),tz_brasil)
data = estimativa.horarioEstimado
dados['data_estimada']=datetime.datetime.fromtimestamp(int(data[:-3]),tz_brasil)
data = estimativa.viagem.horario
dados['data_horario']=datetime.datetime.fromtimestamp(int(data[:-3]),tz_brasil)
dados['viagem']=estimativa.viagem.oid
except:
pass
return dados
|
def odder(listValues):
'''
    Filters the list down to its odd values.
'''
return [o for o in listValues if o%2!=0]
|
class Node:
def __init__(self,data):
self.data = data
self.left = None
self.right = None
class Tree:
def __init__(self):
self.head=None
def insert(self,data):
new_node=Node(data)
if self.head is None:
self.head=new_node
return
temp=self.head
while temp:
if new_node.data<temp.data:
prev=temp
temp=temp.left
else:
prev=temp
temp=temp.right
if new_node.data<prev.data:
prev.left=new_node
else:
prev.right=new_node
def inOrder(self,root):
if root:
self.inOrder(root.left)
print(root.data,end=' ')
self.inOrder(root.right)
def preOrder(self,root):
if root:
print(root.data,end=' ')
self.preOrder(root.left)
self.preOrder(root.right)
def postOrder(self,root):
if root:
self.postOrder(root.left)
self.postOrder(root.right)
print(root.data,end=' ')
# preorder Root left right
# Inorder Left Root right
# Postorder Left Right Root
tree=Tree()
tree.insert(4)
tree.insert(2)
tree.insert(3)
tree.insert(1)
tree.insert(5)
tree.insert(6)
print('Inorder traversal is')
tree.inOrder(tree.head)
print('\nPreorder traversal is')
tree.preOrder(tree.head)
print('\nPostorder traversal is')
tree.postOrder(tree.head)
|
'''
Stack: last in, first out (LIFO)
Push: add an element
Pop: remove an element
'''
'''
Queue: first in, first out (FIFO)
Enqueue: add an element
Dequeue: remove an element
'''
import collections
queue = collections.deque()
# enqueue
queue.append("A")
queue.append("B")
queue.append("C")
# dequeue
data1 = queue.popleft()
print(data1)
data2 = queue.popleft()
print(data2)
data3 = queue.popleft()
print(data3)
print(queue)
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'my_bundle',
'type': 'shared_library',
'mac_bundle': 1,
'sources': [ 'bundle.c' ],
'mac_bundle_resources': [
'English.lproj/InfoPlist.strings',
],
'xcode_settings': {
'INFOPLIST_FILE': 'Info.plist',
}
},
{
'target_name': 'dependent_on_bundle',
'type': 'executable',
'sources': [ 'executable.c' ],
'dependencies': [
'my_bundle',
],
},
],
}
|
import dash_bootstrap_components as dbc
carousel = dbc.Carousel(
items=[
{
"key": "1",
"src": "/static/images/slide1.svg",
"header": "With header ",
"caption": "and caption",
},
{
"key": "2",
"src": "/static/images/slide2.svg",
"header": "With header only",
"caption": "",
},
{
"key": "3",
"src": "/static/images/slide3.svg",
"header": "",
"caption": "This slide has a caption only",
},
],
variant="dark",
)
|
from typing import Optional
from pyswagger import App
from datetime import datetime
from email.utils import mktime_tz, parsedate_tz
cached_api: Optional[App] = None
def header_to_datetime(header) -> datetime:
return datetime.fromtimestamp(mktime_tz(parsedate_tz(header)))
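# Example (hypothetical RFC 2822 header value):
#   header_to_datetime("Wed, 21 Oct 2015 07:28:00 GMT")
#   -> the corresponding datetime in local time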
def get_api() -> App:
global cached_api
if cached_api is None:
cached_api = App.create('static/swagger.json')
return cached_api
|
import random
from enum import Enum
import mysql.connector
import math
playableRaces = []
selectableSubraces = []
abilitiesList = []
classesList = []
selectableClassSpecs = []
skillsList = []
backgroundsList = []
armoursList = []
weaponsList = []
numProf = 18
numStat = 6
selectedRace = {}
selectedSubrace = ""
selectedClass = {}
selectedClassSpec = {}
selectedBackground = {}
class Character():
def __init__(self, race, dndClass, background, level):
self.race = race[0]['race']
self.subrace = race[1]
self.dndClass = dndClass[0]['class']
self.dndClassSpec = dndClass[1]['class']
self.subclass = self.selectSubclass(dndClass)
#subclass
self.background = background['background']
self.level = level
self.prioDict = {}
self.stats = self.placeStats()
self.racialTraits()
self.modifiers = self.calcModifiers()
self.skills = self.placeSkills()
self.savingThrows = self.placeSavingThrows()
self.proficiencyBonus = self.calcProficiencyBonus()
self.selectArmour("Unarmoured")
self.initiative = self.modifiers['Dexterity']
self.walkSpeed = race[0]['walk_speed']
self.passivePerception = self.calcPassivePerception()
self.hitDice = dndClass[0]['hit_dice']
self.hp = self.calcHP()
self.numHitDice = self.level
self.backgroundTraits(background)
self.classTraits(dndClass)
self.weapons = []
def selectSubclass(self, dndClass):
pass
#Generates an array of ability scores and places them according to common ability priorities based on the character class
def placeStats(self):
statArray = generateStatArray()
#priorityArray = generatePriorityArray(self.class)
#todo: generatePriorityDict function
self.prioDict = {'Strength' : 5, 'Dexterity': 0, 'Constitution': 2, 'Intelligence': 3, 'Wisdom': 1, 'Charisma': 4}
prioDict = self.prioDict.copy()
stats = {}
for _ in range(numStat):
#Get next highest priority ability
currAbility = min(prioDict, key=prioDict.get)
prioDict.pop(currAbility)
#Get next highest scores
currScore = max(statArray)
statArray.remove(currScore)
#Assign score to ability
stats[currAbility] = currScore
return stats
def calcModifiers(self):
modifiers = {}
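        # 5e ability modifier: (score - 10) // 2, written below as score // 2 - 5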
for stat in self.stats:
modifiers[stat] = ((self.stats[stat] // 2) - 5)
return modifiers
def placeSkills(self):
skills = {}
for skill in skillsList:
skills[skill['skill']] = [0, self.modifiers[getAbility(skill['abilityID'])]]
return skills
def placeSavingThrows(self):
savingThrows = {}
for savingThrow in abilitiesList:
savingThrows[savingThrow['ability']] = [0, self.modifiers[savingThrow['ability']]]
return savingThrows
def calcProficiencyBonus(self):
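        # 5e proficiency bonus: +2 at level 1, rising by 1 every four levels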
return math.ceil(1 + (1/4) * self.level)
    def calcPassivePerception(self):
        self.passivePerception = 10 + self.skills['Perception'][1]
        return self.passivePerception
def calcHP(self):
#hp = hit dice value + constitution
hp = self.hitDice + self.modifiers['Constitution']
return hp
def addHP(self, hp):
self.hp += hp
def levelUp(self):
#Add any ASI's taken
lvlHp = random.randint(1, self.hitDice) + self.modifiers['Constitution']
self.addHP(lvlHp)
#todo
def generatePriorityArray():
priority = []
for i in range(numStat):
priority.append(i)
return priority
def racialTraits(self):
if (self.race == "Aarakocra"):
self.stats['Dexterity'] += 2
self.stats['Wisdom'] += 1
#Flight
#Talons
#Languages
elif (self.race == "Aasimar"):
self.stats['Charisma'] += 2
#Darkvision
#Celestial Resistance
#Healing Hands
#Light Bearer
#Languages
if (self.subrace == "Protector Aasimar"):
self.stats['Wisdom'] += 1
#Radiant soul
elif (self.subrace == "Scourge Aasimar"):
self.stats['Constitution'] += 1
#Radiant consumption
elif (self.subrace == "Fallen Aasimar"):
self.stats['Strength'] += 1
#Necrotic shroud
elif (self.race == "Bugbear"):
self.stats['Strength'] += 2
self.stats['Dexterity'] += 1
#Darkvision
#Long limbed
#Powerful build
#Sneaky
#Surprise attack
#Languages
elif (self.race == "Dragonborn"):
self.stats['Strength'] += 2
self.stats['Charisma'] += 1
#Draconic ancestry
#Breath weapon
#Damage resistance
#Languages
elif (self.race == "Dwarf"):
self.stats['Constitution'] += 2
#Darkvision
#Dwarven resilience
#Dwarven combat training
            #Tool proficiency
#Stonecunning
#Languages
if (self.subrace == "Hill Dwarf"):
self.stats['Wisdom'] += 1
#Dwarven toughness
elif (self.subrace == "Mountain Dwarf"):
self.stats['Strength'] += 2
#Dwarven armor training
elif (self.race == "Elf"):
self.stats['Dexterity'] += 2
#Darkvision
#Keen senses
#Fey ancestry
#Trance
#Languages
if (self.subrace == "High Elf"):
self.stats['Intelligence'] += 1
#Elf weapon training
#Cantrip
#Extra language
elif (self.subrace == "Wood Elf"):
self.stats['Wisdom'] += 1
#Elf weapon training
#Fleet of foot
#Mask of the wild
elif (self.race == "Firbolg"):
self.stats['Wisdom'] += 2
self.stats['Strength'] += 1
#Firbolg magic
#Hidden step
#Powerful build
#Speech of beast and leaf
#Languages
elif (self.race == "Genasi"):
self.stats['Constitution'] += 2
#Languages
if (self.subrace == "Air Genasi"):
self.stats['Dexterity'] += 1
#Unending breath
#Mingle with the wind
elif (self.subrace == "Earth Genasi"):
self.stats['Strength'] += 1
#Earth walk
#Merge with stone
elif (self.subrace == "Fire Genasi"):
self.stats['Intelligence'] += 1
#Darkvision
#Fire resistance
#Reach to the blaze
elif (self.subrace == "Water Genasi"):
self.stats['Wisdom'] += 1
#Acid Resistance
#Amphibious
#Swim
#Call to the wave
elif (self.race == "Gnome"):
self.stats['Intelligence'] += 2
#Darkvision
#Gnome cunning
#Languages
if (self.subrace == "Forest Gnome"):
self.stats['Dexterity'] += 1
#Natural Illusionist
#Speak with small beasts
elif (self.subrace == "Rock Gnome"):
self.stats['Constitution'] += 1
#Artificers lore
#Tinker
elif (self.subrace == "Deep Gnome"):
                self.stats['Dexterity'] += 1
#Superior darkvision
#Stone camouflage
elif (self.race == "Goblin"):
self.stats['Dexterity'] += 2
self.stats['Constitution'] += 1
#Darkvision
#Fury of the small
#Nimble escape
#Languages
elif (self.race == "Goliath"):
self.stats['Strength'] += 2
self.stats['Constitution'] += 1
#Natural athlete
#Stone's endurance
#Powerful build
#Mountain born
elif (self.race == "Half-Elf"):
self.stats['Charisma'] += 2
#Choose based on abilityPriority
prioDict = self.prioDict.copy()
prioDict.pop("Charisma")
sortedPrioDict = dict(sorted(prioDict.items(), key=lambda item: item[1]))
            focus1 = list(sortedPrioDict)[0]
            focus2 = list(sortedPrioDict)[1]
self.stats[focus1] += 1
self.stats[focus2] += 1
elif (self.race == "Half-Orc"):
self.stats['Strength'] += 2
self.stats['Constitution'] += 1
#Darkvision
#Menacing
#Relentless endurance
#Savage attacks
#Languages
elif (self.race == "Halfling"):
self.stats['Dexterity'] += 2
if (self.subrace == "Lightfoot Halfling"):
self.stats['Charisma'] += 1
#Naturally stealthy
elif (self.subrace == "Stout Halfling"):
self.stats['Constitution'] += 1
#Stout resilience
elif (self.race == "Hobgoblin"):
self.stats['Constitution'] += 2
self.stats['Intelligence'] += 1
#Darkvision
#Martial Training
#Saving face
#Languages
def classTraits(self, dndClass):
#Saving throw proficiencies
savingThrow1 = getAbility(dndClass[0]['saving_throw_1'])
savingThrow2 = getAbility(dndClass[0]['saving_throw_2'])
self.addSavingThrowProficiency(savingThrow1)
self.addSavingThrowProficiency(savingThrow2)
def backgroundTraits(self, background):
#Skill proficiencies
skill1 = getSkill(background['skillProficiency1ID'])
skill2 = getSkill(background['skillProficiency2ID'])
self.addSkillProficiency(skill1)
self.addSkillProficiency(skill2)
#Adds proficiency in a skill. Pass an optional parameter of 2 for expertise rather than regular proficiency.
def addSkillProficiency(self, skill, level=1):
self.skills[skill][0] = level
self.skills[skill][1] += self.proficiencyBonus * level
if (skill == "Perception"):
self.calcPassivePerception()
def addSavingThrowProficiency(self, savingThrow):
self.savingThrows[savingThrow][0] = 1
self.savingThrows[savingThrow][1] += self.proficiencyBonus
def addLanguage(self, language):
pass
def addEquipmentProficiency(self, equipment, proficiency):
pass
def selectArmour(self, armour):
selectedArmour = getArmour(armour)
ac = selectedArmour['AC_base']
mod1 = selectedArmour['AC_mod1']
mod2 = selectedArmour['AC_mod2']
ac += self.modifiers[getAbility(mod1)] if mod1 is not None else 0
ac += self.modifiers[getAbility(mod2)] if mod2 is not None else 0
self.ac = ac
self.armour = selectedArmour
return selectedArmour
def selectWeapon(self, weapon):
selectedWeapon = getWeapon(weapon)
self.weapons.append(selectedWeapon)
def print(self):
print(self.race)
print(self.subrace)
print(self.dndClassSpec + ",", self.level)
print(self.background)
print(self.initiative)
print(self.walkSpeed)
print(self.ac)
print(self.proficiencyBonus)
print(self.stats)
print(self.modifiers)
print(self.skills)
print(self.savingThrows)
print(self.armour)
print(self.passivePerception)
print(self.hitDice)
print(self.hp)
def rollStat():
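    # Standard "4d6, drop the lowest" ability-score roll.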
stats = []
for _ in range(4):
stats.append(random.randint(1, 6))
stats.remove(min(stats))
return sum(stats)
def generateStatArray():
stats = []
for _ in range(numStat):
stats.append(rollStat())
return stats
def fetchRaces():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
mycursor.execute("SELECT * FROM RACES")
for race in mycursor:
playableRaces.append(race)
#Should be called after selectRace()
def fetchSubraces():
db = sqlConnect()
selectedRaceID = selectedRace['rid']
mycursor = db.cursor(dictionary=True)
query = "SELECT subrace FROM subraces WHERE rid =" + str(selectedRaceID)
mycursor.execute(query)
for subrace in mycursor:
selectableSubraces.append(subrace)
def fetchAbilities():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
query = "SELECT * FROM abilities"
mycursor.execute(query)
for ability in mycursor:
abilitiesList.append(ability)
def fetchSkills():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
query = "SELECT * FROM skills"
mycursor.execute(query)
for skill in mycursor:
skillsList.append(skill)
def fetchClasses():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
query = "SELECT * FROM classes"
mycursor.execute(query)
for dndClass in mycursor:
classesList.append(dndClass)
#Should be called after selectClass()
def fetchClassSpecs():
db = sqlConnect()
selectedClassID = selectedClass['cid']
mycursor = db.cursor(dictionary=True)
query = "SELECT class, primaryAbilityID, secondaryAbilityID FROM classspecs WHERE cid =" + str(selectedClassID)
mycursor.execute(query)
for classSpec in mycursor:
selectableClassSpecs.append(classSpec)
def fetchBackgrounds():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
query = "SELECT background, skillProficiency1ID, skillProficiency2ID FROM backgrounds"
mycursor.execute(query)
for background in mycursor:
backgroundsList.append(background)
def fetchArmours():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
query = "SELECT name, AC_base, AC_mod1, AC_mod2, classification FROM armour"
mycursor.execute(query)
for armour in mycursor:
armoursList.append(armour)
def fetchWeapons():
db = sqlConnect()
mycursor = db.cursor(dictionary=True)
query = "SELECT name, rolls, damage, damage_type, weapon_type, weapon_range, properties FROM weapons"
mycursor.execute(query)
for weapon in mycursor:
weaponsList.append(weapon)
def sqlConnect():
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="jDAN0921",
auth_plugin='mysql_native_password',
database='npc_mancer_db'
)
return db
#Should be called after fetch method for data
def selectRace(race):
global selectedRace
selectedRace = getRace(race)
#Should be called after fetch method for data
def selectSubrace(subrace):
global selectedSubrace
selectedSubrace = getSubrace(subrace)
#Should be called after fetch method for data
def selectClass(dndClass):
global selectedClass
selectedClass = getClass(dndClass)
#Should be called after fetch method for data
def selectClassSpec(dndClassSpec):
global selectedClassSpec
selectedClassSpec = getClassSpec(dndClassSpec)
def selectBackground(background):
global selectedBackground
selectedBackground = getBackground(background)
def getRace(name):
return list(filter(lambda race : race['race'] == name, playableRaces))[0]
def getSubrace(name):
return list(filter(lambda subrace : subrace['subrace'] == name, selectableSubraces))[0]['subrace']
def getAbility(id):
return list(filter(lambda ability : ability['aid'] == id, abilitiesList))[0]['ability']
def getSkill(id):
return list(filter(lambda skill : skill['sid'] == id, skillsList))[0]['skill']
def getClass(name):
return list(filter(lambda dndClass : dndClass['class'] == name, classesList))[0]
def getClassSpec(name):
return list(filter(lambda dndClassSpec : dndClassSpec['class'] == name, selectableClassSpecs))[0]
def getBackground(name):
return list(filter(lambda background : background['background'] == name, backgroundsList))[0]
def getArmour(name):
return list(filter(lambda armour : armour['name'] == name, armoursList))[0]
def getWeapon(name):
return list(filter(lambda weapon : weapon['name'] == name, weaponsList))[0]
fetchAbilities()
fetchSkills()
fetchRaces()
fetchClasses()
fetchBackgrounds()
fetchArmours()
fetchWeapons()
selectRace("Half-Elf")
fetchSubraces()
#selectSubrace("Wood Elf")
selectClass("Ranger")
fetchClassSpecs()
selectClassSpec("Ranger (Dex)")
selectBackground("Urchin")
steve = Character([selectedRace, selectedSubrace], [selectedClass, selectedClassSpec], selectedBackground, 9)
steve.addSkillProficiency("Perception")
steve.print()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from ...models import AccessToken, Grant, RefreshToken
class Command(BaseCommand):
    help = 'Cleans up expired oauth2 rows'
def handle(self, *args, **options):
self._do_clean('refresh tokens', RefreshToken.objects.filter(expired=True))
self._do_clean('grants', Grant.objects.filter(expires__lt=now()))
self._do_clean('access tokens', AccessToken.objects.filter(expires__lt=now()))
def _do_clean(self, name, queryset):
self.stdout.write("Finding expired {}...".format(name), ending='')
count = queryset.count()
self.stdout.write("Removing {:d} expired {}...".format(count, name), ending='')
queryset.delete()
self.stdout.write("Removed")
|
# import random
# print(dir(random))
#
# x = random.randrange(1, 100)
# print(x)
import turtle
scr = turtle.Screen()
scr.screensize(720, 720)
trt = turtle.Turtle()
trt.seth(0)
trt.color("red")
trt.begin_fill()
trt.circle(100)
trt.end_fill()
trt.back(100)
trt.color("blue")
trt.begin_fill()
trt.circle(200)
trt.end_fill()
scr.mainloop()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 10:02:31 2020
@author: Kaja Amalie
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
tf.executing_eagerly()
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Input, Dropout
from tensorflow.keras.models import Model
X = np.load('train_images.npy', allow_pickle=True)
y = np.load('train_labels.npy', allow_pickle=True)
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# show image
data_idx = 1
plt.imshow(X[data_idx,:,:]/255, cmap='binary')
class_number = y[data_idx]
class_text = class_names[class_number]
print(f'This is a {class_text}')
# data prep
X = X/255
X = X.reshape(-1, 28,28)
y = y.reshape(-1, 1)
y = y.astype('float64')
#Hot Encoder for y
from sklearn.preprocessing import OneHotEncoder
clothing_ohe = OneHotEncoder(sparse=False)
clothing_ohe.fit(y)
y = clothing_ohe.transform(y)
#Split the data:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 420)
# create a model and train it:
# prep validation data
X_val = np.load('val_images.npy', allow_pickle=True)
X_val = X_val/255
X_val = X_val.reshape(-1, 28, 28)
#############################################################################################################
#%% training the model
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, LSTM, Embedding
from tensorflow.keras.models import Model
tf.keras.layers.BatchNormalization
#1 test: 85%
input_layer = Input(shape=(28,28))
lstm_layer_1 = LSTM(100, return_sequences=True)(input_layer)
lstm_layer_2 = LSTM(80, return_sequences=True)(lstm_layer_1)
flatten_layer = tf.keras.layers.Flatten()(lstm_layer_2)
first_hidden_layer = Dense (15, activation='relu')(flatten_layer)
output_layer = Dense(10, activation='softmax')(first_hidden_layer)
model_pic = Model(inputs = input_layer, outputs=output_layer)
model_pic.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history_pic = model_pic.fit(X_train, y_train, batch_size=150, epochs=10, validation_data = (X_test, y_test))
#2 test: train: 0.8945, train 0.8917
input_layer = Input(shape=(28,28))
lstm_layer_1 = LSTM(100, return_sequences=True)(input_layer)
lstm_layer_2 = LSTM(80, return_sequences=True)(lstm_layer_1)
flatten_layer = tf.keras.layers.Flatten()(lstm_layer_2)
first_hidden_layer = Dense (100, activation='relu')(flatten_layer)
output_layer = Dense(10, activation='softmax')(first_hidden_layer)
model_pic = Model(inputs = input_layer, outputs=output_layer)
model_pic.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
history_pic = model_pic.fit(X_train, y_train, batch_size=100, epochs=9, validation_data = (X_test, y_test))
#3 test
from sklearn.metrics import accuracy_score
y_train_pred = model_pic.predict(X_train) #92,9
accuracy_score(y_train, y_train_pred>0.5)
y_test_pred = model_pic.predict(X_test)
accuracy_score(y_test, y_test_pred>0.5)
y_val_pred = model_pic.predict(X_val)
y_val_pred = y_val_pred.astype('float64')
pred1 = np.argmax(y_val_pred, axis=1)
#%%
import matplotlib.pyplot as plt
# make each plot separately
plt.plot(history_pic.history['loss'], label='train loss')
plt.plot(history_pic.history['val_loss'], label='test loss')
plt.legend(loc='upper right')
plt.show()
y_val_pred = model_pic.predict(X_val)
y_val_pred_argmax = np.argmax(y_val_pred, axis=1)
# predic validation data
my_prediction = y_val_pred_argmax
# save predictions
my_name = 'Kaja'
np.save(f'{my_name}_predictions_RNN2.npy', my_prediction)
|
# Configuration of the database to use.
import settings
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
db = declarative_base()
# Database tables to define.
# User.
class User(db):
__tablename__ = 'user'
fullname = Column(String(50), nullable = False)
username = Column(String(16), primary_key = True)
password = Column(String(16), nullable = False)
email = Column(String(30), unique = True)
iddpt = Column(Integer, ForeignKey('dpt.iddpt'), nullable = False)
idrole = Column(Integer, ForeignKey('role.idrole'), unique = True)
def __init__(self,fullname, username, password, email, iddpt, idrole):
self.fullname = fullname
self.username = username
self.password = password
self.email = email
self.iddpt = iddpt
self.idrole = idrole
# Department.
class Dpt(db):
__tablename__ = 'dpt'
iddpt = Column(Integer, primary_key = True)
namedpt = Column(String(50), unique = True)
users = relationship('User', backref = 'dpt', cascade="all, delete, delete-orphan")
def __init__(self, iddpt, namedpt):
self.iddpt = iddpt
self.namedpt = namedpt
# Role.
class Role(db):
__tablename__ = 'role'
idrole = Column(Integer, primary_key = True)
namerole = Column(String(50), unique = True)
users = relationship('User', backref = 'role', cascade="all, delete, delete-orphan")
def __init__(self, idrole, namerole):
self.idrole = idrole
self.namerole = namerole
# Create the engine that will store the data in the local directory.
engine = create_engine(URL(**settings.DATABASE))
# Drop the previously defined tables.
db.metadata.drop_all(engine)
# Create all the tables defined on the engine built above.
db.metadata.create_all(engine)
|
from django.contrib import auth
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import user_passes_test
from decorators import anonymous_required
""" @todo, should probably use the authentication form here. """
@anonymous_required(viewname='home.views.index')
def login(request):
""" returns custom madduck2.login page. """
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user is not None and user.is_active:
# Correct password, and the user is marked "active"
auth.login(request, user)
# Redirect to a success page.
return HttpResponseRedirect(reverse("home.views.index"))
return render_to_response("auth/login.html", {
}, context_instance=RequestContext(request))
def logout(request):
""" returns custom madduck2.logout page. """
auth.logout(request)
return render_to_response("auth/logout.html", {
}, context_instance=RequestContext(request))
""" @todo, add support for connecting via facebook. """
@anonymous_required(viewname='home.views.index')
def signup(request):
""" returns custom madduck2.signup page. """
user_form = UserCreationForm()
if request.method == 'POST':
user_form = UserCreationForm(request.POST)
if user_form.is_valid():
""" new user account is created here"""
""" @fixme: this is a buggy peice of code; cannot do commit=False; because a M-M relation cannot be attached to a non-existing object. """
new_user = user_form.save()
""" @fixme: group is added after the account is created/commited to the DB; this is kinda bad; required two DB calls."""
# new_user.groups.add(Group.objects.get(name='student'))
return HttpResponseRedirect(reverse("home.views.index"))
return render_to_response("auth/signup.html", {
'form' : user_form
}, context_instance=RequestContext(request))
|
def duplicate_count(text):
output = 0
seen = []
for x in text.lower():
if x not in seen and text.lower().count(x) > 1:
seen.append(x)
output += 1
return output
'''
Count the number of Duplicates
Write a function that will return the count of distinct case-insensitive
alphabetic characters and numeric digits that occur more than once in the
input string. The input string can be assumed to contain only alphabets
(both uppercase and lowercase) and numeric digits.
Example:
"abcde" -> 0 # no characters repeats more than once
"aabbcde" -> 2 # 'a' and 'b'
"aabBcde" -> 2 # 'a' occurs twice and 'b' twice (bandB)
"indivisibility" -> 1 # 'i' occurs six times
"Indivisibilities" -> 2 # 'i' occurs seven times and 's' occurs twice
"aA11" -> 2 # 'a' and '1'
"ABBA" -> 2 # 'A' and 'B' each occur twice
'''
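# An equivalent one-pass sketch of the same count using collections.Counter;
# duplicate_count_counter is an illustrative name, not part of the original.
from collections import Counter
def duplicate_count_counter(text):
    # tally every lowercased character, then count those seen more than once
    return sum(1 for c in Counter(text.lower()).values() if c > 1)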
|
def func(n):
if n == 1:
return 1
if n == 2:
return 2
    if arr[n] != -1:
        return arr[n]
    else:
        arr[n] = func(n-1) + func(n-2)  # memoize so each value is computed only once
        return arr[n]
n = int(input())
arr = [-1]*1000
print(func(n))
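# The same memoization is available from the standard library; a sketch
# using functools.lru_cache in place of the manual arr table (func_cached
# is an illustrative name, not part of the original):
from functools import lru_cache
@lru_cache(maxsize=None)
def func_cached(n):
    # same base cases as func above: func(1) == 1, func(2) == 2
    if n <= 2:
        return n
    return func_cached(n-1) + func_cached(n-2)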
def fibonacci(n):
    # Taking the first two Fibonacci numbers as 0 and 1
f = [0, 1]
for i in range(2, n+1):
f.append(f[i-1] + f[i-2])
return f[n]
n = int(input())
print(fibonacci(n))
|
# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2014, Regents of the University of California
#
# GPL 3.0 license, see the COPYING.md file for more information
from waflib import Logs, Configure
def options(opt):
opt.add_option('--debug', '--with-debug', action='store_true', default=False, dest='debug',
help='''Compile in debugging mode without all optimizations (-O0)''')
opt.add_option('--with-c++11', action='store_true', default=False, dest='use_cxx11',
help='''Enable C++11 mode (experimental, may not work)''')
def configure(conf):
areCustomCxxflagsPresent = (len(conf.env.CXXFLAGS) > 0)
defaultFlags = []
defaultFlags += ['-std=c++0x', '-std=c++11']
defaultFlags += ['-pedantic', '-Wall', '-Wno-long-long', '-Wno-unneeded-internal-declaration']
if conf.options.debug:
conf.define('_DEBUG', 1)
defaultFlags += ['-O0',
'-Og', # gcc >= 4.8
'-g3',
'-fcolor-diagnostics', # clang
'-fdiagnostics-color', # gcc >= 4.9
'-Werror',
'-Wno-error=maybe-uninitialized', # Bug #1560
]
if areCustomCxxflagsPresent:
missingFlags = [x for x in defaultFlags if x not in conf.env.CXXFLAGS]
if len(missingFlags) > 0:
Logs.warn("Selected debug mode, but CXXFLAGS is set to a custom value '%s'"
% " ".join(conf.env.CXXFLAGS))
Logs.warn("Default flags '%s' are not activated" % " ".join(missingFlags))
else:
conf.add_supported_cxxflags(defaultFlags)
else:
defaultFlags += ['-O2', '-g']
if not areCustomCxxflagsPresent:
conf.add_supported_cxxflags(defaultFlags)
@Configure.conf
def add_supported_cxxflags(self, cxxflags):
"""
    Check which cxxflags are supported by the compiler and add them to the env.CXXFLAGS variable
"""
self.start_msg('Checking allowed flags for c++ compiler')
supportedFlags = []
for flag in cxxflags:
if self.check_cxx(cxxflags=['-Werror', flag], mandatory=False):
supportedFlags += [flag]
self.end_msg(' '.join(supportedFlags))
self.env.CXXFLAGS = supportedFlags + self.env.CXXFLAGS
|
print('\033[7;31;40mHello world!\033[m')
print('\033[7;30mHello World!\033[m')
a = 5
b = 8
print('The values are \033[1;36;40m{}\033[m and \033[1;31;45m{}\033[m!!!'.format(a, b))
nome = 'Danilo'
print('Nice to meet you, {}{}{}'.format('\033[4;32m', nome, '\033[m'))
# \033[(style: 0, 1, 4, 7);(foreground: 30 to 37);(background: 40 to 47)m
# \033[(style;text;background)m
|
# Read n records, then keep those whose cutoff (the integer formed by the
# last three characters of the second field) falls within [low, high].
n = int(input())
alist = []
blist = []
for i in range(n):
    alist.append(list(map(str, input().split())))
low = int(input())
high = int(input())
for i in alist:
    cut = int(i[1][-3:])
    if low <= cut <= high:
        blist.append(tuple(i))
print(blist)
|
from flask import Flask, render_template, make_response, url_for, redirect, abort
app = Flask(__name__)
@app.route('/')
def index():
resp = make_response(render_template('index.html'))
resp.set_cookie('username', 'the username')
return resp
@app.route('/redirect')
def redirect1():
return redirect(url_for('redirect_test'))
@app.route('/redirect_test')
def redirect_test():
    return 'Redirected'
@app.route('/error_test')
def error_test():
abort(404)
@app.errorhandler(404)
def page_not_found(error):
    return 'This is a 404 error'
if __name__ == '__main__':
app.run()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from textwrap import dedent
import pytest
from pants.backend.go.util_rules.coverage import GoCoverMode
from pants.backend.go.util_rules.coverage_profile import (
GoCoverageProfile,
GoCoverageProfileBlock,
parse_go_coverage_profiles,
)
#
# This is a transcription of the Go coverage support library at
# https://cs.opensource.google/go/x/tools/+/master:cover/profile_test.go.
#
# Original copyright:
# // Copyright 2019 The Go Authors. All rights reserved.
# // Use of this source code is governed by a BSD-style
# // license that can be found in the LICENSE file.
#
@dataclass(frozen=True)
class ProfileTestCase:
name: str
input: str
profiles: tuple[GoCoverageProfile, ...] = ()
expect_exception: bool = False
_TEST_CASES = [
ProfileTestCase(
name="parsing an empty file produces empty output",
input="mode: set",
profiles=(),
),
ProfileTestCase(
name="simple valid file produces expected output",
input=dedent(
"""\
mode: set
some/fancy/path:42.69,44.16 2 1
"""
),
profiles=(
GoCoverageProfile(
filename="some/fancy/path",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=42,
start_col=69,
end_line=44,
end_col=16,
num_stmt=2,
count=1,
),
),
),
),
),
ProfileTestCase(
name="file with syntax characters in path produces expected output",
input=dedent(
"""\
mode: set
some fancy:path/some,file.go:42.69,44.16 2 1
"""
),
profiles=(
GoCoverageProfile(
filename="some fancy:path/some,file.go",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=42,
start_col=69,
end_line=44,
end_col=16,
num_stmt=2,
count=1,
),
),
),
),
),
ProfileTestCase(
name="file with multiple blocks in one file produces expected output",
input=dedent(
"""\
mode: set
some/fancy/path:42.69,44.16 2 1
some/fancy/path:44.16,46.3 1 0
"""
),
profiles=(
GoCoverageProfile(
filename="some/fancy/path",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=42,
start_col=69,
end_line=44,
end_col=16,
num_stmt=2,
count=1,
),
GoCoverageProfileBlock(
start_line=44,
start_col=16,
end_line=46,
end_col=3,
num_stmt=1,
count=0,
),
),
),
),
),
ProfileTestCase(
name="file with multiple files produces expected output",
input=dedent(
"""\
mode: set
another/fancy/path:44.16,46.3 1 0
some/fancy/path:42.69,44.16 2 1
"""
),
profiles=(
GoCoverageProfile(
filename="another/fancy/path",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=44,
start_col=16,
end_line=46,
end_col=3,
num_stmt=1,
count=0,
),
),
),
GoCoverageProfile(
filename="some/fancy/path",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=42,
start_col=69,
end_line=44,
end_col=16,
num_stmt=2,
count=1,
),
),
),
),
),
ProfileTestCase(
name="intertwined files are merged correctly",
input=dedent(
"""\
mode: set
some/fancy/path:42.69,44.16 2 1
another/fancy/path:47.2,47.13 1 1
some/fancy/path:44.16,46.3 1 0
"""
),
profiles=(
GoCoverageProfile(
filename="another/fancy/path",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=47,
start_col=2,
end_line=47,
end_col=13,
num_stmt=1,
count=1,
),
),
),
GoCoverageProfile(
filename="some/fancy/path",
cover_mode=GoCoverMode.SET,
blocks=(
GoCoverageProfileBlock(
start_line=42,
start_col=69,
end_line=44,
end_col=16,
num_stmt=2,
count=1,
),
GoCoverageProfileBlock(
start_line=44,
start_col=16,
end_line=46,
end_col=3,
num_stmt=1,
count=0,
),
),
),
),
),
ProfileTestCase(
name="duplicate blocks are merged correctly",
input=dedent(
"""\
mode: count
some/fancy/path:42.69,44.16 2 4
some/fancy/path:42.69,44.16 2 3
"""
),
profiles=(
GoCoverageProfile(
filename="some/fancy/path",
cover_mode=GoCoverMode.COUNT,
blocks=(
GoCoverageProfileBlock(
start_line=42,
start_col=69,
end_line=44,
end_col=16,
num_stmt=2,
count=7,
),
),
),
),
),
ProfileTestCase(
name="an invalid mode line is an error",
input="mode:count",
expect_exception=True,
),
ProfileTestCase(
name="a missing field is an error",
input=dedent(
"""\
mode: count
some/fancy/path:42.69,44.16 2
"""
),
expect_exception=True,
),
ProfileTestCase(
name="a missing path field is an error",
input=dedent(
"""\
mode: count
42.69,44.16 2 3
"""
),
expect_exception=True,
),
ProfileTestCase(
name="a non-numeric count is an error",
input=dedent(
"""\
mode: count
42.69,44.16 2 nope
"""
),
expect_exception=True,
),
ProfileTestCase(
name="an empty path is an error",
input=dedent(
"""\
mode: count
:42.69,44.16 2 3
"""
),
expect_exception=True,
),
ProfileTestCase(
name="a negative count is an error",
input=dedent(
"""\
mode: count
some/fancy/path:42.69,44.16 2 -1
"""
),
expect_exception=True,
),
]
@pytest.mark.parametrize("case", _TEST_CASES, ids=lambda c: c.name) # type: ignore[no-any-return]
def test_parse_go_coverage_profiles(case: ProfileTestCase) -> None:
    if case.expect_exception:
        with pytest.raises(Exception):
            parse_go_coverage_profiles(case.input.encode(), description_of_origin="test")
    else:
        profiles = parse_go_coverage_profiles(case.input.encode(), description_of_origin="test")
        assert profiles == case.profiles
|
import ais.stream
import csv
import os
import sys
from tqdm import tqdm
def data_src():
data_dir = "../data"
filename = "CCG_AIS_Log_2018-05-01.csv"
path = os.path.join(data_dir, filename)
return path
def verify_datasrc(path: str):
with open(path) as f:
try:
            for msg in tqdm(ais.stream.decode(f)):
                pass
        except Exception as e:
            print(e)
def data_fields(path: str):
fields = set()
with open(path) as f:
for msg in tqdm(ais.stream.decode(f)):
for key in msg:
fields.add(key)
print(fields)
if __name__ == "__main__":
path = data_src()
data_fields(path)
|
import boto3
import json
# Resource access manager(ram)
def get_ram_info():
"""
A function that gives ram resource shares info and resource associations
"""
conn = boto3.client('ec2')
regions = [region['RegionName'] for region in conn.describe_regions()['Regions']]
shares_info = []
share_principles = []
share_resources = []
for region in regions:
        # RAM is not available in these regions
        if region in ('ap-east-1', 'eu-north-1', 'sa-east-1'):
            continue
conn = boto3.client('ram', region_name=region)
# get resource shares info
response = conn.get_resource_shares(
resourceOwner='SELF'
)['resourceShares']
for res in response:
req_info = []
req_info.append(res)
shares_info.append(req_info)
# get resource associations that are principal
response = conn.get_resource_share_associations(
associationType='PRINCIPAL'
)['resourceShareAssociations']
for res in response:
req_info = []
req_info.append(res)
share_principles.append(req_info)
# get resource associations that are resource
response = conn.get_resource_share_associations(
associationType='RESOURCE'
)['resourceShareAssociations']
for res in response:
req_info = []
req_info.append(res)
share_resources.append(req_info)
# convert all shares list into dictionaries
shares_dict = {"Shares": shares_info}
share_resource_dict = {"Share Resources": share_resources}
share_principle_dict = {"Share Principles": share_principles}
    # convert dictionaries into json
shares_json = json.dumps(shares_dict, indent=4, default=str)
share_resource_json = json.dumps(share_resource_dict, indent=4, default=str)
share_principle_json = json.dumps(share_principle_dict, indent=4, default=str)
print(shares_json)
print(share_resource_json)
print(share_principle_json)
get_ram_info()
|
from _base.downloadAndInstall import DownloadAndInstall
from _feature_objects.feature_popup import *
from _feature_objects.feature_screen import *
from _feature_objects.feature_left_menu import *
from _test_suites._variables.variables import Variables
class MainPage(BaseActions):
def check_main_page_loaded(self):
cond = self._is_element_present(BaseElements.RIBBON_BAR)
# cond2 = self._is_element_present(BaseElements.LEFT_MENU_HOME)
msg_true = "Main page is loaded\n"
msg_false = "Main page is NOT loaded\n"
self._set_log_for_true_or_false(cond, msg_true, msg_false)
return True if cond else False
def delete_device_from_the_console(self, device_name):
devices_page = DevicesScreen(self.driver)
left_menu_devices = LeftMenuDevices(self.driver)
left_menu_devices.open_menu_devices()
left_menu_devices.click_global_site_view_label()
devices_page.delete_single_device_in_devices_page_table(device_name)
cond = devices_page.check_device_is_presented(device_name)
        return cond is not True
def upprove_vrep(self, device_name):
vreps_screen = VRepsScreen(self.driver)
left_menu_administration = LeftMenuAdministration(self.driver)
left_menu_administration.open_menu_administration()
left_menu_administration.click_vreps_label()
vreps_screen.upprove_single_vrep_in_vreps_page_table(device_name)
cond = vreps_screen.check_vrep_ready_for_work(device_name)
return True if cond else False
def setup_for_help_tests(self):
device_name = Variables.vrep
# devices_screen = DevicesScreen(self.driver)
# delete_vrep = self.delete_device_from_the_console(device_name)
# self.logger.info("Device is not presented in the console: " + str(device_name) + " - " + str(delete_vrep))
# desktop = DownloadAndInstall(self.driver)
# desktop.clean_up_device()
# desktop.download_agent()
# desktop.install_agent()
# devices_page.click_icon_refresh()
# install_vrep = devices_screen.check_device_is_presented(device_name)
# self.logger.info("vRep " + str(device_name) + " is installed - " + str(install_vrep))
upprove_vrep = self.upprove_vrep(device_name)
if upprove_vrep:
self.logger.info("Setup is finished successfully: " + str(upprove_vrep) + "\n")
return True
else:
self.logger.info("Setup is finished successfully: " + str(upprove_vrep) + "\n")
return False
def run_discovery_task(self):
pass
def run_discovery_task_if_not_exists(self, name):
ribbon_bar = RibbonBar(self.driver)
tasks_screen = TasksScreen(self.driver)
left_menu_tasks = LeftMenuTasks(self.driver)
discover_devices_popup = DiscoverDevicesPopup(self.driver)
left_menu_tasks.open_menu_tasks()
left_menu_tasks.expand_scheduled_tasks_list()
left_menu_tasks.click_discover_label()
cond = tasks_screen.search_task(name)
if cond is not True:
ribbon_bar.click_button_create()
discover_devices_popup.click_button_select_none()
discover_devices_popup.select_site_in_list()
discover_devices_popup.click_button_next()
discover_devices_popup.click_button_next()
cond = discover_devices_popup.check_none_patches_selected()
if cond is not True:
print "None patches is NOT selected"
discover_devices_popup.click_button_next()
cond = discover_devices_popup.check_start_now_selected()
if cond is not True:
print "Start Now is NOT selected"
discover_devices_popup.click_button_next()
discover_devices_popup.click_button_next()
discover_devices_popup.clear_text_name_text_field()
discover_devices_popup.enter_text_into_task_name_field(name)
discover_devices_popup.click_button_finish()
return True
else:
print "Task is presented"
postcond = tasks_screen.search_task(name)
return True if postcond else False
def run_patch_scan_task_on_single_device_if_not_exists(self, task_name, device_name, site_name):
ribbon_bar = RibbonBar(self.driver)
tasks_screen = TasksScreen(self.driver)
left_menu_tasks = LeftMenuTasks(self.driver)
select_targets_popup = SelectTargetsPopup(self.driver)
patch_manager_scanning_popup = PatchManagerScanningPopup(self.driver)
left_menu_tasks.open_menu_tasks()
left_menu_tasks.expand_scheduled_tasks_list()
left_menu_tasks.click_patch_manager_label()
cond = tasks_screen.search_task(task_name)
if cond is not True:
ribbon_bar.click_button_scan()
patch_manager_scanning_popup.open_select_targets_popup()
select_targets_popup.select_site_in_list(site_name)
select_targets_popup.select_device_in_list(device_name)
select_targets_popup.click_button_ok()
patch_manager_scanning_popup.click_button_next()
patch_manager_scanning_popup.select_radio_button_all()
patch_manager_scanning_popup.click_button_next()
patch_manager_scanning_popup.select_radio_button_start_now()
patch_manager_scanning_popup.click_button_next()
patch_manager_scanning_popup.clear_text_name_text_field()
patch_manager_scanning_popup.enter_text_into_task_name_field(task_name)
patch_manager_scanning_popup.click_button_finish()
postcond = tasks_screen.search_task(task_name)
return True if postcond else False
def run_software_deployment_task(self):
pass
def run_scan_updates_task(self):
pass
def create_and_config_site(self):
ribbon_bar = RibbonBar(self.driver)
left_menu_devices = LeftMenuDevices(self.driver)
devices_screen = DevicesScreen(self.driver)
configuration_popup = ConfigurationPopup(self.driver)
left_menu_devices.open_menu_devices()
left_menu_devices.click_global_site_view_label()
left_menu_devices.expand_global_site_view_list()
left_menu_devices.create_site_if_not_exists()
left_menu_devices.click_site_in_global_site_view_list()
ribbon_bar.click_button_config()
configuration_popup.click_ip_address_ranges_tab()
configuration_popup.delete_all_ip_address_ranges()
configuration_popup.add_single_ip_address_range()
configuration_popup.click_button_apply_changes()
configuration_popup.click_button_ok_on_success_popup()
configuration_popup.click_vreps_tab()
configuration_popup.apply_vrep_to_the_site()
configuration_popup.click_button_close()
cond = devices_screen.check_device_is_presented()
return True if cond else False
# def create_new_dashboard_if_not_exists(self, name=Variables.help_test):
# ribbon_bar = RibbonBar(self.driver)
# left_menu = LeftMenuReporting(self.driver)
# create_new_dashboard_popup = CreateNewDashboardPopup(self.driver)
# left_menu.click_label_my_dashboards()
# ribbon_bar.click_button_new()
# create_new_dashboard_popup.enter_text_into_name_text_field(name)
# create_new_dashboard_popup.click_button_next()
# create_new_dashboard_popup.click_button_finish()
# left_menu.expand_list_my_dashboards()
# cond = self._is_element_present()
|
import os
import sys
import shutil
def remove_file(file):
""" Remove file path is local from working dir """
try:
os.remove(file)
except Exception:
pass
def before_tag(context, tag):
if tag.startswith('before.clean') or tag.startswith('clean'):
remove_file('site.conf')
shutil.rmtree('build', ignore_errors=True)
shutil.rmtree('sources', ignore_errors=True)
def after_tag(context, tag):
if tag.startswith('after.clean') or tag.startswith('clean'):
remove_file('site.conf')
shutil.rmtree('build', ignore_errors=True)
shutil.rmtree('sources', ignore_errors=True)
|
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import pdb
import torch.nn.functional as F
from .layers import *
from torch.nn.init import kaiming_normal
__all__= ['ae_pointnet']
class STN3d(nn.Module):
def __init__(self, num_points = 2500):
super(STN3d, self).__init__()
self.num_points = num_points
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                kaiming_normal(m.weight.data) # initialize weights with a Kaiming normal distribution
if m.bias is not None:
m.bias.data.zero_() # initialize bias as zero
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# self.att = nn.Sequential(nn.Linear(1024,1024//16),
# nn.ReLU(inplace=True),
# nn.Linear(1024 // 16,1024 // 16),
# nn.BatchNorm1d(1024 // 16),
# nn.ReLU(inplace=True),
# nn.Linear(1024 // 16,1024),
# nn.Sigmoid(),
#
# )
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.mp1(x)
x = x.view(-1, 1024)
# x = self.att(x)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class Encoder(nn.Module):
def __init__(self, num_points = 2500, global_feat = True):
super(Encoder, self).__init__()
self.stn = STN3d(num_points = num_points)
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
#self.conv2p1 = torch.nn.Conv1d(128, 128, 1)
#self.conv2p2 = torch.nn.Conv1d(128, 256, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
# self.bn2p1 = nn.BatchNorm1d(128)
# self.bn2p2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(1024)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.num_points = num_points
self.global_feat = global_feat
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                kaiming_normal(m.weight.data)  # initialize weights with Kaiming (He) normal distribution
if m.bias is not None:
m.bias.data.zero_() # initialize bias as zero
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
batchsize = x.size()[0]
trans = self.stn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans)
x = x.transpose(2,1)
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
# x = F.relu(self.bn2p1(self.conv2p1(x)))
# x = F.relu(self.bn2p2(self.conv2p2(x)))
x = self.bn3(self.conv3(x))
x = self.mp1(x)
x = x.view(-1, 1024)
if self.global_feat:
return x, trans
else:
x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
return torch.cat([x, pointfeat], 1), trans
class EncoderS(nn.Module):
def __init__(self, num_points=2500, global_feat=True):
super(EncoderS, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 256, 1)
self.conv4 = torch.nn.Conv1d(256, 128, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(256)
self.bn4 = nn.BatchNorm1d(128)
self.mp1 = torch.nn.MaxPool1d(num_points)
self.num_points = num_points
self.global_feat = global_feat
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv1d) or isinstance(
m, nn.Linear):
                kaiming_normal(m.weight.data)  # initialize weights with Kaiming (He) normal distribution
if m.bias is not None:
m.bias.data.zero_() # initialize bias as zero
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.bn4(self.conv4(x))
x = self.mp1(x)
x = x.view(-1, 128)
        if self.global_feat:
            return x, None
        else:
            x = x.view(-1, 128, 1).repeat(1, 1, self.num_points)  # feature dimension is 128 here, not 1024
            return torch.cat([x, pointfeat], 1), None  # EncoderS has no spatial transform to return
class DecoderLinear(nn.Module):
def __init__(self, opt):
super(DecoderLinear, self).__init__()
self.opt = opt
self.feature_num = opt.feature_num
self.output_point_number = opt.output_fc_pc_num
self.linear1 = MyLinear(self.feature_num, self.output_point_number*2, activation=self.opt.activation, normalization=self.opt.normalization)
self.linear2 = MyLinear(self.output_point_number*2, self.output_point_number*3, activation=self.opt.activation, normalization=self.opt.normalization)
self.linear3 = MyLinear(self.output_point_number*3, self.output_point_number*4, activation=self.opt.activation, normalization=self.opt.normalization)
self.linear_out = MyLinear(self.output_point_number*4, self.output_point_number*3, activation=None, normalization=None)
# special initialization for linear_out, to get uniform distribution over the space
self.linear_out.linear.bias.data.uniform_(-1, 1)
def forward(self, x):
        # expand the feature vector (N x C) through the FC layers, then reshape to an N x 3 x P point cloud
x = self.linear1(x)
x = self.linear2(x)
x = self.linear3(x)
x = self.linear_out(x)
return x.view(-1, 3, self.output_point_number)
class ConvToPC(nn.Module):
def __init__(self, in_channels, opt):
super(ConvToPC, self).__init__()
self.in_channels = in_channels
self.opt = opt
self.conv1 = MyConv2d(self.in_channels, int(self.in_channels), kernel_size=1, stride=1, padding=0, bias=True, activation=opt.activation, normalization=opt.normalization)
self.conv2 = MyConv2d(int(self.in_channels), 3, kernel_size=1, stride=1, padding=0, bias=True, activation=None, normalization=None)
# special initialization for conv2, to get uniform distribution over the space
# self.conv2.conv.bias.data.normal_(0, 0.3)
self.conv2.conv.bias.data.uniform_(-1, 1)
# self.conv2.conv.weight.data.normal_(0, 0.01)
# self.conv2.conv.bias.data.uniform_(-3, 3)
def forward(self, x):
x = self.conv1(x)
return self.conv2(x)
class DecoderConv(nn.Module):
def __init__(self, opt):
super(DecoderConv, self).__init__()
self.opt = opt
self.feature_num = opt.feature_num
self.output_point_num = opt.output_conv_pc_num
# __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, output_padding=0, bias=True, activation=None, normalization=None)
# 1x1 -> 2x2
self.deconv1 = UpConv(self.feature_num, int(self.feature_num), activation=self.opt.activation, normalization=self.opt.normalization)
# 2x2 -> 4x4
self.deconv2 = UpConv(int(self.feature_num), int(self.feature_num/2), activation=self.opt.activation, normalization=self.opt.normalization)
# 4x4 -> 8x8
self.deconv3 = UpConv(int(self.feature_num/2), int(self.feature_num/4), activation=self.opt.activation, normalization=self.opt.normalization)
# 8x8 -> 16x16
self.deconv4 = UpConv(int(self.feature_num/4), int(self.feature_num/8), activation=self.opt.activation, normalization=self.opt.normalization)
self.conv2pc4 = ConvToPC(int(self.feature_num/8), opt)
# 16x16 -> 32x32
self.deconv5 = UpConv(int(self.feature_num/8), int(self.feature_num/8), activation=self.opt.activation, normalization=self.opt.normalization)
self.conv2pc5 = ConvToPC(int(self.feature_num/8), opt)
# 32x32 -> 64x64
self.deconv6 = UpConv(int(self.feature_num/8), int(self.feature_num/8), activation=self.opt.activation, normalization=self.opt.normalization)
self.conv2pc6 = ConvToPC(int(self.feature_num/8), opt)
def forward(self, x):
# reshape from feature vector NxC, to NxCx1x1
x = x.view(-1, self.feature_num, 1, 1)
x = self.deconv1(x)
x = self.deconv2(x)
x = self.deconv3(x)
x = self.deconv4(x)
self.pc4 = self.conv2pc4(x)
x = self.deconv5(x)
self.pc5 = self.conv2pc5(x)
x = self.deconv6(x)
self.pc6 = self.conv2pc6(x)
return self.pc6
class Decoder(nn.Module):
def __init__(self, opt):
super(Decoder, self).__init__()
self.opt = opt
if self.opt.output_fc_pc_num > 0:
self.fc_decoder = DecoderLinear(opt)
self.conv_decoder = DecoderConv(opt)
def forward(self, x):
if self.opt.output_fc_pc_num > 0:
self.linear_pc = self.fc_decoder(x)
if self.opt.output_conv_pc_num > 0:
self.conv_pc6 = self.conv_decoder(x).view(-1, 3, 4096)
self.conv_pc4 = self.conv_decoder.pc4.view(-1, 3, 256)
self.conv_pc5 = self.conv_decoder.pc5.view(-1, 3, 1024)
if self.opt.output_fc_pc_num == 0:
if self.opt.output_conv_pc_num == 4096:
return self.conv_pc6
elif self.opt.output_conv_pc_num == 1024:
return self.conv_pc5
else:
if self.opt.output_conv_pc_num == 4096:
return torch.cat([self.linear_pc, self.conv_pc6], 2), self.conv_pc5, self.conv_pc4
elif self.opt.output_conv_pc_num == 1024:
                return torch.cat([self.linear_pc, self.conv_pc5], 2), self.conv_pc4
else:
return self.linear_pc
class DecoderS(nn.Module):
def __init__(self, opt):
super(DecoderS, self).__init__()
self.opt = opt
self.feature_num = opt.feature_num
self.output_point_number = opt.output_fc_pc_num
self.linear1 = MyLinear(128, 256, activation=self.opt.activation, normalization=self.opt.normalization)
self.linear2 = MyLinear(256, 256, activation=self.opt.activation, normalization=self.opt.normalization)
self.linear_out = MyLinear(256, 6144, activation=None, normalization=None)
# special initialization for linear_out, to get uniform distribution over the space
self.linear_out.linear.bias.data.uniform_(-1, 1)
def forward(self, x):
        # expand the 128-d feature vector through the FC layers, then reshape to an N x 3 x 2048 point cloud
x = self.linear1(x)
x = self.linear2(x)
x = self.linear_out(x)
return x.view(-1, 3, 2048)
class AE_pointnet(nn.Module):
def __init__(self,args,num_points=2048,global_feat= True):
super(AE_pointnet, self).__init__()
self.encoder = EncoderS(num_points = num_points, global_feat = global_feat)
self.decoder = DecoderS(args)
def forward(self, x):
x = torch.squeeze(x,dim=1)
x = torch.transpose(x,1,2)
        feature, _ = self.encoder(x)
        reconstruction = self.decoder(feature)
        return reconstruction
def ae_pointnet(args,num_points = 2048,global_feat = True,data=None):
model= AE_pointnet(args,num_points,global_feat)
if data is not None:
#model.load_state_dict(data['state_dict'])
model.encoder.load_state_dict(data['state_dict_encoder'])
model.decoder.load_state_dict(data['state_dict_decoder'])
return model
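# Illustrative usage sketch (not part of the original training pipeline): the
# option object only needs the attributes the decoder reads. The accepted
# values for `activation`/`normalization` depend on the MyLinear/MyConv2d
# implementations in .layers and are assumptions here.
def _ae_pointnet_demo():
    from argparse import Namespace
    opt = Namespace(feature_num=128, output_fc_pc_num=2048,
                    activation='relu', normalization='batch')  # assumed option values
    model = ae_pointnet(opt, num_points=2048)
    points = torch.rand(4, 1, 2048, 3)  # (batch, 1, num_points, xyz)
    reconstruction = model(points)      # -> (4, 3, 2048)
    return reconstruction.shape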
|
import paho.mqtt.client as mqtt
import time
from threading import Thread
def on_message(client, userdata, message):
    print('Recver>> Received message: ' + message.payload.decode())
def sender(host, port, payload):
print('Sender>> Starting...')
client = mqtt.Client('measure_sender')
    client.connect(host, port)
    client.loop_start()  # run the network loop so queued publishes and keep-alives are handled
    print('Sender>> Connected')
while True:
time.sleep(1)
client.publish('ping_t', payload)
print('Sender>> Published ' + payload)
def recver(host, port):
print('Recver>> Starting...')
client = mqtt.Client('measure_recver')
client.on_message = on_message
client.connect(host, port)
print('Recver>> Connected')
client.subscribe('ping_t')
print('Recver>> Subscribed')
client.loop_start()
time.sleep(100000)
client.loop_stop()
if __name__ == '__main__':
    host = 'localhost'
    port = 1883
    payload = 'test'
    # Use distinct names for the threads so the sender/recver functions are not shadowed.
    sender_thread = Thread(target=sender, args=(host, port, payload), daemon=True)
    recver_thread = Thread(target=recver, args=(host, port), daemon=True)
    sender_thread.start()
    recver_thread.start()
    # Thread objects have no stop() method; join the workers instead of busy-waiting.
    sender_thread.join()
    recver_thread.join()
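# To try this locally (illustrative): run an MQTT broker such as mosquitto on
# localhost:1883, then execute this script. The sender publishes the payload to
# the 'ping_t' topic once per second and the receiver prints each message.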
|
from __future__ import annotations
import enum
import textwrap
from typing import (
Iterable,
Sequence,
Union,
)
import uuid
from ai.backend.client.auth import AuthToken, AuthTokenTypes
from ai.backend.client.request import Request
from ai.backend.client.session import api_session
from ai.backend.client.output.fields import user_fields
from ai.backend.client.output.types import FieldSpec, PaginatedResult
from ai.backend.client.pagination import generate_paginated_results
from .base import api_function, BaseFunction
__all__ = (
'User',
'UserStatus',
'UserRole',
)
_default_list_fields = (
user_fields['uuid'],
user_fields['role'],
user_fields['username'],
user_fields['email'],
user_fields['is_active'],
user_fields['created_at'],
user_fields['domain_name'],
user_fields['groups'],
)
_default_detail_fields = (
user_fields['uuid'],
user_fields['username'],
user_fields['email'],
user_fields['need_password_change'],
user_fields['status'],
user_fields['status_info'],
user_fields['created_at'],
user_fields['domain_name'],
user_fields['role'],
user_fields['groups'],
)
class UserRole(str, enum.Enum):
"""
The role (privilege level) of users.
"""
SUPERADMIN = 'superadmin'
ADMIN = 'admin'
USER = 'user'
MONITOR = 'monitor'
class UserStatus(enum.Enum):
"""
The detailed status of users to represent the signup process and account lifecycles.
"""
ACTIVE = 'active'
INACTIVE = 'inactive'
DELETED = 'deleted'
BEFORE_VERIFICATION = 'before-verification'
class User(BaseFunction):
"""
Provides interactions with users.
"""
@api_function
@classmethod
async def authorize(cls, username: str, password: str, *,
token_type: AuthTokenTypes = AuthTokenTypes.KEYPAIR) -> AuthToken:
"""
Authorize the given credentials and get the API authentication token.
This function can be invoked anonymously; i.e., it does not require
access/secret keys in the session config as its purpose is to "get" them.
Its functionality will be expanded in the future to support multiple types
of authentication methods.
"""
rqst = Request('POST', '/auth/authorize')
rqst.set_json({
'type': token_type.value,
'domain': api_session.get().config.domain,
'username': username,
'password': password,
})
async with rqst.fetch() as resp:
data = await resp.json()
return AuthToken(
type=token_type,
content=data['data'],
)
@api_function
@classmethod
async def list(
cls,
status: str = None,
group: str = None,
fields: Sequence[FieldSpec] = _default_list_fields,
) -> Sequence[dict]:
"""
        Fetches the list of users. Domain admins can only list users in their own domain.
:param status: Fetches users in a specific status
(active, inactive, deleted, before-verification).
:param group: Fetch users in a specific group.
:param fields: Additional per-user query fields to fetch.
"""
query = textwrap.dedent("""\
query($status: String, $group: UUID) {
users(status: $status, group_id: $group) {$fields}
}
""")
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {
'status': status,
'group': group,
}
data = await api_session.get().Admin._query(query, variables)
return data['users']
@api_function
@classmethod
async def paginated_list(
cls,
status: str = None,
group: str = None,
*,
fields: Sequence[FieldSpec] = _default_list_fields,
page_offset: int = 0,
page_size: int = 20,
filter: str = None,
order: str = None,
) -> PaginatedResult[dict]:
"""
        Fetches the list of users. Domain admins can only list users in their own domain.
:param status: Fetches users in a specific status
(active, inactive, deleted, before-verification).
:param group: Fetch users in a specific group.
:param fields: Additional per-user query fields to fetch.
"""
return await generate_paginated_results(
'user_list',
{
'status': (status, 'String'),
'group_id': (group, 'UUID'),
'filter': (filter, 'String'),
'order': (order, 'String'),
},
fields,
page_offset=page_offset,
page_size=page_size,
)
@api_function
@classmethod
async def detail(
cls,
email: str = None,
fields: Sequence[FieldSpec] = _default_detail_fields,
) -> Sequence[dict]:
"""
        Fetch information about a user. If ``email`` is not specified,
        the requester's own information is returned.
:param email: Email of the user to fetch.
:param fields: Additional per-user query fields to fetch.
"""
if email is None:
query = textwrap.dedent("""\
query {
user {$fields}
}
""")
else:
query = textwrap.dedent("""\
query($email: String) {
user(email: $email) {$fields}
}
""")
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {'email': email}
data = await api_session.get().Admin._query(query, variables if email is not None else None)
return data['user']
@api_function
@classmethod
async def detail_by_uuid(
cls,
user_uuid: Union[str, uuid.UUID] = None,
fields: Sequence[FieldSpec] = _default_detail_fields,
) -> Sequence[dict]:
"""
        Fetch information about a user by UUID. If ``user_uuid`` is not specified,
        the requester's own information is returned.
:param user_uuid: UUID of the user to fetch.
:param fields: Additional per-user query fields to fetch.
"""
if user_uuid is None:
query = textwrap.dedent("""\
query {
user {$fields}
}
""")
else:
query = textwrap.dedent("""\
query($user_id: ID) {
user_from_uuid(user_id: $user_id) {$fields}
}
""")
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {'user_id': str(user_uuid)}
data = await api_session.get().Admin._query(query, variables if user_uuid is not None else None)
return data['user_from_uuid']
@api_function
@classmethod
async def create(
cls,
domain_name: str,
email: str,
password: str,
username: str = None,
full_name: str = None,
role: UserRole | str = UserRole.USER,
status: UserStatus | str = UserStatus.ACTIVE,
need_password_change: bool = False,
description: str = '',
group_ids: Iterable[str] = None,
fields: Iterable[str] = None,
) -> dict:
"""
Creates a new user with the given options.
You need an admin privilege for this operation.
"""
if fields is None:
fields = ('domain_name', 'email', 'username', 'uuid')
query = textwrap.dedent("""\
mutation($email: String!, $input: UserInput!) {
create_user(email: $email, props: $input) {
ok msg user {$fields}
}
}
""")
query = query.replace('$fields', ' '.join(fields))
variables = {
'email': email,
'input': {
'password': password,
'username': username,
'full_name': full_name,
'role': role.value if isinstance(role, UserRole) else role,
'status': status.value if isinstance(status, UserStatus) else status,
'need_password_change': need_password_change,
'description': description,
'domain_name': domain_name,
'group_ids': group_ids,
},
}
data = await api_session.get().Admin._query(query, variables)
return data['create_user']
@api_function
@classmethod
async def update(
cls,
email: str,
password: str = None, username: str = None,
full_name: str = None,
domain_name: str = None,
role: UserRole | str = UserRole.USER,
status: UserStatus | str = UserStatus.ACTIVE,
need_password_change: bool = None,
description: str = None,
group_ids: Iterable[str] = None,
fields: Iterable[str] = None,
) -> dict:
"""
Update existing user.
You need an admin privilege for this operation.
"""
query = textwrap.dedent("""\
mutation($email: String!, $input: ModifyUserInput!) {
modify_user(email: $email, props: $input) {
ok msg
}
}
""")
variables = {
'email': email,
'input': {
'password': password,
'username': username,
'full_name': full_name,
'domain_name': domain_name,
'role': role.value if isinstance(role, UserRole) else role,
'status': status.value if isinstance(status, UserStatus) else status,
'need_password_change': need_password_change,
'description': description,
'group_ids': group_ids,
},
}
data = await api_session.get().Admin._query(query, variables)
return data['modify_user']
@api_function
@classmethod
async def delete(cls, email: str):
"""
        Deactivates an existing user.
"""
query = textwrap.dedent("""\
mutation($email: String!) {
delete_user(email: $email) {
ok msg
}
}
""")
variables = {'email': email}
data = await api_session.get().Admin._query(query, variables)
return data['delete_user']
@api_function
@classmethod
async def purge(cls, email: str, purge_shared_vfolders=False):
"""
Deletes an existing user.
        The user's virtual folders are also deleted, except those shared with other users.
        Ownership of shared virtual folders is transferred to the requesting admin.
To delete shared folders as well, set ``purge_shared_vfolders`` to ``True``.
"""
query = textwrap.dedent("""\
mutation($email: String!, $input: PurgeUserInput!) {
purge_user(email: $email, props: $input) {
ok msg
}
}
""")
variables = {
'email': email,
'input': {
'purge_shared_vfolders': purge_shared_vfolders,
},
}
data = await api_session.get().Admin._query(query, variables)
return data['purge_user']
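# Illustrative usage sketch (assumes a configured Backend.AI client session;
# the exact Session/API setup is not shown in this module, so treat the
# details below as assumptions following the client's usual conventions):
#
#     from ai.backend.client.session import Session
#
#     with Session() as session:
#         active_users = session.User.list(status='active')
#         me = session.User.detail()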
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.docker.goals.tailor import rules as tailor_rules
from pants.backend.docker.rules import rules as docker_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.target_types import rules as target_types_rules
def rules():
return (
*docker_rules(),
*tailor_rules(),
*target_types_rules(),
)
def target_types():
return (DockerImageTarget,)
|
"""-----------------------------------------------------------------
The user is asked to enter one or more keywords.
Input: none
Output: a list containing all the keywords that the user has entered (where each keyword is a string)
-----------------------------------------------------------------"""
def user_keywords():
keyword_list = [] # stores the keywords that user has typed in (keywords will all be lowercase)
finished = False # the user has not finished searching keywords
while not finished:
        user_input = input('Enter a keyword: ').lower().strip()
        while user_input == '':  # re-prompt if the user enters an empty string
            user_input = input('Enter a keyword: ').lower().strip()
        if user_input in keyword_list:
            print("You have already entered that keyword.")
        else:  # the user has not entered this keyword yet
            keyword_list.append(user_input)  # whitespace already stripped above, so checks and storage stay consistent
ask_again = input('Would you like to enter another keyword (y/n): ').lower()
while ask_again not in ('y','n'): # user has not entered a valid input
ask_again = input('Invalid input. Please enter a valid input (y/n): ').lower()
if ask_again == 'n': # the user is done searching for keywords
finished = True
return keyword_list
"""-----------------------------------------------------------------
Returns the pid of the question post (the unique pid) selected by the user
Input: None
Output: returns the unique question id of the post that the user has selected
-----------------------------------------------------------------"""
def searchQuestions(posts):
keyword_list = user_keywords()
q_options = {} # stores the possible options for the user to select from
q_num = 1 # will be used to display the results (eg. Question 1, Question 2, etc)
posts_list = [] # keeps track of the posts and prevents the same posts from being displayed more than once
limit_num = 5
post_count = 0
selected = False
while not selected:
for keyword in keyword_list: # for each keyword entered by the user
# checks if the keyword exists in either the body, title, or tag (if the length is less than 3)
if len(keyword) < 3:
k_posts = posts.find({'PostTypeId': '1', '$or': [
{'Body': {'$regex': keyword, '$options': 'i'}}, # 'i' is an option for case-insensitive
{'Title': {'$regex': keyword, '$options': 'i'}},
{'Tags':{'$regex': keyword, '$options': 'i'}}
]}).limit(limit_num)
else:
k_posts = posts.find({'PostTypeId': '1', 'terms': {'$in': keyword.split()}}).limit(limit_num)
# used to search the terms array
# checks if the Terms field contains the keyword
# for each post, some information about the post is displayed
for post in k_posts:
if post['Id'] not in posts_list:
posts_list.append(post['Id'])
post_count +=1
print('------------------------------')
print('Question ' + str(q_num) + ': ')
print('Title: ', post['Title'])
print('Creation Date: ', post['CreationDate'])
print('Score: ', post['Score'])
print('AnswerCount: ', post['AnswerCount'])
print('------------------------------')
# added to the possible options for the user to select from
# eg. {'1': '123'} means that if the user enters '1', they have selected the question post with a pid of '123'
q_options[str(q_num)] = post['Id']
q_num +=1
if len(posts_list) == 0: # no posts have been found
print('The keyword(s) you have provided did not return any results. Please try using other keywords.')
return # will return None if no posts were displayed to the user
# the user chooses from the results displayed above
user_select = input("Select the question by typing in the number, or type in 's' to see more: ")
while user_select not in (tuple(q_options.keys()) + tuple('s')): # all the possible options in a tuple ('1','2',...'s')
user_select = input('Please enter a valid input: ')
if user_select == 's':
limit_num += 5
continue
else: # the user entered one of the question numbers
print('You have chosen question ' + user_select + '!')
break
# the following displays all the fields of the question post selected by the user
print('\n==================================================================')
print('POST INFO: \n')
selected_post = posts.find_one({'Id': q_options[user_select]}, {'_id': 0}) # Does not display the '_id' or ObjectId
# increases the view count of the question post by 1 (before displaying all the fields)
posts.update_one({'Id': q_options[user_select]}, {'$set': {'ViewCount': selected_post['ViewCount'] + 1}})
#used to make sure our viewCount is our new and updated one
selected_post = posts.find_one({'Id': q_options[user_select]}, {'_id': 0}) # Does not display the '_id' or ObjectId
for key,value in selected_post.items(): # prints out all the fields of a question post
print(key + ':', value)
print('==================================================================')
print('\nYou have selected question ' + user_select + '!')
return q_options[user_select] # returns the question id
|
import os
import sys
import unittest
try:
sys.path.append(os.environ.get('PY_DEV_HOME'))
from webTest_pro.common.mysqlKit import sqlOperating
from webTest_pro.common.logger import logger,T_INFO
from webTest_pro.common.initData import init
except ImportError as e:
    print(e)
host = init.db_conf['host']
active_code, service = init.active_code, 'interact'
streaming_media = init.streaming_media
hostadd = init.db_conf['hostadd']
mediaAddr = streaming_media['serverIps']
# set log info
# LOG_INIT(logFile)
# logger = LOG_MODULE_DEFINE('Platform')
# SET_LOG_LEVEL(logger, 'info')
sqlpara = [{'col_name': 'live_server_url', 'col_value': 'rtmp://' + hostadd + ':1935/live/'},
{'col_name': 'web_server_resource', 'col_value': 'http://' + hostadd},
{'col_name': 'file_server_url', 'col_value': mediaAddr + '/filesrv'},
{'col_name': 'mcu_center_host', 'col_value': hostadd},
{'col_name': 'file_server_url_visit', 'col_value': mediaAddr + ':11194'},
{'col_name': 'message_center_host', 'col_value': hostadd},
{'col_name': 'file_server_ftp_host', 'col_value': mediaAddr},
{'col_name': 'web_server_client', 'col_value': 'http://' + hostadd + '/middleclient/index.do'},
{'col_name': 'centerfile_host', 'col_value': hostadd}
]
class dbMgr(unittest.TestCase):
def test_updateDB(self):
T_INFO( logger, 'start case dbMgr')
for s in sqlpara:
conn = sqlOperating(init.db_conf['host'],
init.db_conf['user'],
init.db_conf['passwd'],
init.db_conf['db'])
# c = sqlOperating()
# print s['col_name'], s['col_value']
conn.updaeDb("UPDATE base_sys_config set CONFIG_VALUE = '%s' where CONFIG_KEY = '%s'" % (s['col_value'], s['col_name']))
T_INFO(logger, 'exec: test_updateDB end.')
if __name__ == '__main__':
unittest.main()
|
from datetime import datetime
import random
from pie_logger import get_logger
log = get_logger()
# socket.emit('callback', {action: 'oven', unique_pie_id: pie.unique_pie_id, heat_time: game.time.now})
# socket.emit('callback', {action: 'bake', baketype: 'apple'});
# socket.emit('callback', {action: 'bake', baketype: 'cherry'});
# socket.emit('callback', {action: 'bake', baketype: 'raseberry'});
# socket.emit('callback', {action: 'restock'});
class MockApp:
def __init__(self, belt):
self.logger = log
self.belt = belt
def get_random_pie():
return random.choice(["apple", "cherry", "raseberry"])
def simulate(belt, count, delay=5):
callback_app = MockApp(belt)
for callback in belt.callbacks.get("restock", []):
callback(callback_app, {})
for n in range(count):
log.debug("testing bake callback")
for callback in belt.callbacks.get('bake', []):
bake_out = callback(callback_app, dict(baketype=get_random_pie()))
if bake_out:
for callback in belt.callbacks.get('oven', []):
callback(callback_app, dict(
unique_pie_id=bake_out.get('unique_pie_id'),
heat_time=4))
log.debug("\n"+belt.get_totals())
|
import sys
sys.path.append('../')
from BDA import stats
stats.binom_test(x = 10, n = 30, p = 0.5, alternative = "greater")
stats.binom_test(x = [10, 20], p = 0.5, alternative = "greater")
#binom.test(x = 10, n = 30, p = 0.5, alternative = "greater") ## R
stats.binom_test(x = 10, n = 30, p = 0.5, alternative = "less")
stats.binom_test(x = [10, 20], p = 0.5, alternative = "less")
#binom.test(x = 10, n = 30, p = 0.5, alternative = "less") ## R
stats.binom_test(x = 10, n = 30, p = 0.5, alternative = "two-sided")
stats.binom_test(x = [10, 20], p = 0.5, alternative = "two-sided")
#binom.test(x = 10, n = 30, p = 0.5, alternative = "two.sided") ## R
|
def oddOrEven(arr):
if sum(arr) % 2 == 0:
return "even"
else:
return "odd"
|
# to run this script type
# python3 hello_world.py
def hello_world():
"""
This is a function which
returns the greeting 'hello world'
"""
greeting = "hello world!"
print(greeting)
return greeting
def whatever():
return (20 + 10)
if __name__ == '__main__':
hello_world()
|
# Web-based face scoring server
# -*-coding:utf-8-*-
from flask import Flask, render_template, request
import os
import base64
import cv2
from keras.models import load_model
import numpy as np
import time
def sc(imagePath, current):
global model
# imagePath=q.get()
frame = cv2.imread(imagePath)
sh = frame.shape
print(sh)
    if sh[0] > 1079:  # downscale the image if it is too large
frame = cv2.resize(frame, (int(sh[1] * 850 / sh[0]), 850), interpolation=cv2.INTER_AREA)
print(frame.shape)
print("change size 1")
elif sh[1] > 1920:
frame = cv2.resize(frame, (1500, int(sh[0] * 1500 / sh[1])), interpolation=cv2.INTER_AREA)
print(frame.shape)
print("change size 2")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Detect faces in the image
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
print("Found {0} faces!".format(len(faces)))
print(frame.shape)
print(faces)
for (x, y, w, h) in faces:
new_image = frame[y:y + h, x:x + w]
new_image = cv2.resize(new_image, (220, 220), interpolation=cv2.INTER_CUBIC)
new_image = np.array([new_image]) # (1,220,220,3)
print(new_image.shape)
# k=Modle(model,new_image)
        # NOTE: must divide by 25 to normalize the input scale, consistent with how the network was trained
k = model.predict((new_image / 25), batch_size=None, verbose=0, steps=None)
print(k)
print("!!!!!")
# j = model.predict((new_image / 25), batch_size=None, verbose=0, steps=None)
# print (j)
text = str(round(k[0][0], 3))
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 1.5, (0, 0, 255), 1)
cv2.imwrite('static/images2/' + current + '.png', frame)
# cv2.imshow('frame', frame)
# cv2.waitKey(0)
cv2.destroyAllWindows()
print("end!!")
app = Flask(__name__)
basepath = os.path.dirname(__file__)
html = '''<img src="data:image/png;base64,{}" style="width:100%;height:100%;"/>'''
@app.route('/upload', methods=['GET', 'POST'])  # accept and store the uploaded file
def up_file():
if request.method == "POST":
current = str(round(time.time()))
        # save the uploaded image
# request.files['file'].save(os.path.join(basepath, 'static/images', current + ".png"))
upload_path = os.path.join(basepath, 'static/images', current + ".png")
request.files['file'].save(upload_path)
        # run face detection and scoring on the image
sc(upload_path, current)
        # return the annotated image embedded as base64 HTML
return html.format(base64.b64encode(open("static/images2/" + current + ".png", 'rb').read()).decode())
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('upload.html')
if __name__ == "__main__":
    model = load_model('DenseNet121_2_model.h5')
    print("Model loaded; running a warm-up prediction...")
test = np.zeros((1, 220, 220, 3))
k = model.predict(test, batch_size=None, verbose=0, steps=None)
print(k)
app.run(host='localhost', port=5000, debug=False)
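# Example request (illustrative): with the server running, upload an image to
# the scoring endpoint using curl; the response embeds the annotated image.
#
#     curl -F "file=@face.jpg" http://localhost:5000/upload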
|
from pprint import pformat
from contextlib import contextmanager
import numpy as np
import signal
import time
import re
import os
import traceback
import pdb
from collections.abc import MutableMapping
import subprocess
import copy
import datetime
import psutil
import resource
import sys
import shutil
import errno
import tempfile
import dill
from functools import wraps
import inspect
import hashlib
from zipfile import ZipFile
import importlib
import json
import gc
import matplotlib.pyplot as plt
from matplotlib import animation
import imageio
from skimage.transform import resize
import clify
import dps
def resize_image(img, shape, mode=None, preserve_range=True, anti_aliasing=None):
if anti_aliasing is None:
anti_aliasing = any(ns < s for ns, s in zip(shape, img.shape))
if mode is None:
mode = 'reflect' if anti_aliasing else 'edge'
return resize(
img, shape, mode=mode, preserve_range=preserve_range,
anti_aliasing=anti_aliasing)
def video_stack(video):
""" Take an ndarray with shape (*batch_shape, n_frames, H, W, D) representing a video
and stack the frames together as different channel dimensions, resulting
in an ndarray with shape (*batch_shape, H, W, D*n_frames) """
video = np.array(video)
*batch_shape, n_frames, H, W, D = video.shape
perm = tuple(range(len(batch_shape))) + tuple(np.array([1, 2, 0, 3]) + len(batch_shape))
return np.transpose(video, perm).reshape(*batch_shape, H, W, n_frames*D)
def video_unstack(stacked_video, n_frames):
""" Inverse of the function `video_stack`. """
stacked_video = np.array(stacked_video)
*batch_shape, H, W, _D = stacked_video.shape
D = int(_D / n_frames)
assert D * n_frames == _D
video = stacked_video.reshape(*batch_shape, H, W, n_frames, D)
perm = tuple(range(len(batch_shape))) + tuple(np.array([2, 0, 1, 3]) + len(batch_shape))
return np.transpose(video, perm)
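# Illustrative round-trip check (not in the original module): stacking then
# unstacking recovers the input video exactly.
def _video_stack_demo():
    video = np.random.rand(2, 4, 8, 8, 3)  # (batch, n_frames, H, W, D)
    stacked = video_stack(video)           # -> (2, 8, 8, 12)
    assert stacked.shape == (2, 8, 8, 12)
    assert np.allclose(video_unstack(stacked, n_frames=4), video)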
def liang_barsky(bottom, top, left, right, y0, x0, y1, x1):
""" Compute the intersection between a rectangle and a line segment.
rect is sepcified by (bottom, top, left, right)
line segment specified by (y0, x0), (y1, x1)
If no intersection, returns None.
Otherwise, returns (r, s) where (y0, x0) + r * (y1 - y0, x1 - x0) is the location of the "ingoing"
intersection, and (y0, x0) + s * (y1 - y0, x1 - x0) is the location of the "outgoing" intersection.
It will always hold that 0 <= r <= s <= 1. If the line segment starts inside the rectangle then r = 0;
and if it stops inside the rectangle then s = 1.
"""
assert bottom < top
assert left < right
dx = x1 - x0
dy = y1 - y0
checks = ((-dx, -(left - x0)),
(dx, right - x0),
(-dy, -(bottom - y0)),
(dy, top - y0))
out_in = [0]
in_out = [1]
for p, q in checks:
if p == 0 and q < 0:
return None
if p != 0:
target_list = out_in if p < 0 else in_out
target_list.append(q / p)
_out_in = max(out_in)
_in_out = min(in_out)
if _out_in < _in_out:
return _out_in, _in_out
else:
return None
# NoAnswer = object()
# def _test_liang_barsky(*args, ref_answer=NoAnswer):
# answer = liang_barsky(*args)
# print("{}: {}".format(args, answer))
#
# if ref_answer is not NoAnswer:
# assert answer == ref_answer
# if __name__ == "__main__":
# _test_liang_barsky(1, 2, 1, 2, 1.5, 0.5, 1.5, 2.5, ref_answer=(1/4, 3/4))
# _test_liang_barsky(1, 2, 1, 2, 1.5, 0.5, 1.5, .99, ref_answer=None)
# _test_liang_barsky(1, 2, 1, 2, 1.5, 0.5, 1.5, 1, ref_answer=None)
# _test_liang_barsky(1, 2, 1, 2, 1.5, 0.5, 1.5, 1.01, ref_answer=(0.5 / 0.51, 1))
# _test_liang_barsky(1, 2, 1, 2, 1.5, 0.5, -1.5, -2.5, ref_answer=None)
# _test_liang_barsky(1, 2, 1, 2, 2.5, 0.5, 2.5, 2.5, ref_answer=None)
# _test_liang_barsky(1, 2, 1, 2, 0.5, 2.5, 2.5, 2.5, ref_answer=None)
# _test_liang_barsky(1, 2, 1, 2, 0, 0, 2, 2, ref_answer=(0.5, 1))
# _test_liang_barsky(1, 2, 1, 2, 0, .99, 2, 2.99, ref_answer=(0.5, 0.505))
# _test_liang_barsky(1, 2, 1, 2, 1.5, 1.5, 3, 3, ref_answer=(0, 1/3))
def create_maze(shape):
# Random Maze Generator using Depth-first Search
# http://en.wikipedia.org/wiki/Maze_generation_algorithm
# FB - 20121214
my, mx = shape
maze = np.zeros(shape)
dirs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
# start the maze from a random cell
stack = [(np.random.randint(0, mx), np.random.randint(0, my))]
while len(stack) > 0:
(cy, cx) = stack[-1]
maze[cy, cx] = 1
# find a new cell to add
nlst = [] # list of available neighbors
for i, (dy, dx) in enumerate(dirs):
ny = cy + dy
nx = cx + dx
if ny >= 0 and ny < my and nx >= 0 and nx < mx:
if maze[ny, nx] == 0:
                    # the number of occupied neighbors must be exactly 1
ctr = 0
for _dy, _dx in dirs:
ex = nx + _dx
ey = ny + _dy
if ex >= 0 and ex < mx and ey >= 0 and ey < my:
if maze[ey, ex] == 1:
ctr += 1
if ctr == 1:
nlst.append(i)
# if 1 or more neighbors available then randomly select one and move
if len(nlst) > 0:
ir = np.random.choice(nlst)
dy, dx = dirs[ir]
cy += dy
cx += dx
stack.append((cy, cx))
else:
stack.pop()
return maze
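# Quick illustrative rendering of a generated maze using image_to_string
# (defined later in this module): carved passages (1's) render as light
# characters and walls (0's) as dark ones.
def _create_maze_demo():
    maze = create_maze((15, 15))
    print(image_to_string(maze))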
def header(message, n, char, nl=True):
assert isinstance(char, str)
banner = char * n
newline = "\n" if nl else ""
return "{}{} {} {}{}".format(newline, banner, message.strip(), banner, newline)
def print_header(message, n, char, nl=True):
print(header(message, n, char, nl))
def exactly_2d(x, return_leading_shape=False):
leading_shape = x.shape[:-1]
if return_leading_shape:
return leading_shape, x.reshape(-1, x.shape[-1])
else:
return x.reshape(-1, x.shape[-1])
def generate_perlin_noise_2d(shape, res, normalize=False):
""" each dim of shape must be divisible by corresponding dim of res
from https://pvigier.github.io/2018/06/13/perlin-noise-numpy.html
"""
def f(t):
return 6*t**5 - 15*t**4 + 10*t**3
delta = (res[0] / shape[0], res[1] / shape[1])
d = (shape[0] // res[0], shape[1] // res[1])
grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]].transpose(1, 2, 0) % 1
# Gradients
angles = 2*np.pi*np.random.rand(res[0]+1, res[1]+1)
gradients = np.dstack((np.cos(angles), np.sin(angles)))
g00 = gradients[0:-1, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
g10 = gradients[1:, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
g01 = gradients[0:-1, 1:].repeat(d[0], 0).repeat(d[1], 1)
g11 = gradients[1:, 1:].repeat(d[0], 0).repeat(d[1], 1)
# Ramps
n00 = np.sum(grid * g00, 2)
n10 = np.sum(np.dstack((grid[:, :, 0]-1, grid[:, :, 1])) * g10, 2)
n01 = np.sum(np.dstack((grid[:, :, 0], grid[:, :, 1]-1)) * g01, 2)
n11 = np.sum(np.dstack((grid[:, :, 0]-1, grid[:, :, 1]-1)) * g11, 2)
# Interpolation
t = f(grid)
n0 = n00*(1-t[:, :, 0]) + t[:, :, 0]*n10
n1 = n01*(1-t[:, :, 0]) + t[:, :, 0]*n11
result = np.sqrt(2)*((1-t[:, :, 1])*n0 + t[:, :, 1]*n1)
if normalize:
result -= result.min()
mx = result.max()
if mx >= 1e-6:
result /= mx
return result
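# Minimal usage sketch: each dimension of `shape` must be divisible by the
# corresponding entry of `res`; with normalize=True the result lies in [0, 1].
def _perlin_noise_demo():
    noise = generate_perlin_noise_2d((64, 64), (4, 4), normalize=True)
    assert noise.shape == (64, 64)
    assert 0.0 <= noise.min() and noise.max() <= 1.0
    return noise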
def prime_factors(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
def animate(
images, *other_images, labels=None, interval=500,
path=None, square_grid=True, **kwargs):
""" Assumes `images` has shape (batch_size, n_frames, H, W, D) """
all_images = [images, *other_images]
n_image_sets = len(all_images)
B, T = images.shape[:2]
    if square_grid:
        fig, _axes = square_subplots(B, n_repeats=n_image_sets, repeat_horizontal=True)
    else:
        fig, _axes = plt.subplots(B, n_image_sets)
    axes = np.array(_axes).reshape(-1, n_image_sets)
    plots = np.zeros((B, n_image_sets), dtype=object)  # holds one AxesImage handle per subplot
for i in range(B):
if labels is not None:
axes[i, 0].set_title(str(labels[i]))
for j in range(n_image_sets):
ax = axes[i, j]
ax.set_axis_off()
plots[i, j] = ax.imshow(np.squeeze(all_images[j][i, 0]))
plt.subplots_adjust(top=0.95, bottom=0, left=0, right=1, wspace=0.1, hspace=0.1)
def func(t):
for i in range(B):
for j in range(n_image_sets):
plots[i, j].set_array(np.squeeze(all_images[j][i, t]))
anim = animation.FuncAnimation(fig, func, frames=T, interval=interval)
if path is not None:
anim.save(path, writer='imagemagick')
return fig, _axes, anim
def square_subplots(N, n_repeats=1, repeat_horizontal=True, **kwargs):
sqrt_N = int(np.ceil(np.sqrt(N)))
m = int(np.ceil(N / sqrt_N))
import matplotlib.pyplot as plt
if repeat_horizontal:
fig, axes = plt.subplots(m, sqrt_N*n_repeats, **kwargs)
else:
fig, axes = plt.subplots(m*n_repeats, sqrt_N, **kwargs)
return fig, axes
def nvidia_smi(robust=True):
try:
p = subprocess.run("nvidia-smi".split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.decode()
except Exception as e:
if robust:
return "Exception while calling nvidia-smi: {}".format(e)
else:
raise
_nvidia_smi_processes_header = "| GPU PID Type Process name Usage |"
_nvidia_smi_table_end = "+-----------------------------------------------------------------------------+"
def _nvidia_smi_parse_processes(s):
lines = s.split('\n')
header_idx = None
table_end_idx = None
for i, line in enumerate(lines):
if line == _nvidia_smi_processes_header:
header_idx = i
elif header_idx is not None and line == _nvidia_smi_table_end:
table_end_idx = i
assert header_idx is not None, "Malformed nvidia-smi string:\n{}".format(s)
assert table_end_idx is not None, "Malformed nvidia-smi string:\n{}".format(s)
if lines[header_idx+2].startswith('| No running processes found'):
return []
processes = []
for line in lines[header_idx+2:table_end_idx]:
tokens = line.split()
gpu_idx = int(tokens[1])
pid = int(tokens[2])
type = tokens[3]
process_name = tokens[4]
memory_usage = tokens[5]
memory_usage_mb = int(memory_usage[:-3])
processes.append((gpu_idx, pid, type, process_name, memory_usage_mb))
return processes
def gpu_memory_usage():
""" return gpu memory usage for current process in MB """
try:
s = nvidia_smi(robust=False)
except Exception:
return 0
gpu_processes = _nvidia_smi_parse_processes(s)
my_pid = os.getpid()
my_memory_usage_mb = 0
for gpu_idx, pid, type, process_name, memory_usage_mb in gpu_processes:
if pid == my_pid:
my_memory_usage_mb += memory_usage_mb
return my_memory_usage_mb
def view_readme_cl():
return view_readme(".", 2)
def view_readme(path, max_depth):
""" View readme files in a directory of experiments, sorted by the time at
which the experiment began execution.
"""
import iso8601
command = "find {} -maxdepth {} -name README.md".format(path, max_depth).split()
p = subprocess.run(command, stdout=subprocess.PIPE)
readme_paths = [r for r in p.stdout.decode().split('\n') if r]
dates_paths = []
for r in readme_paths:
d = os.path.split(r)[0]
try:
with open(os.path.join(d, 'stdout'), 'r') as f:
line = ''
try:
while not line.startswith("Starting training run"):
line = next(f)
except StopIteration:
line = None
if line is not None:
tokens = line.split()
assert len(tokens) == 13
dt = iso8601.parse_date(tokens[5] + " " + tokens[6][:-1])
dates_paths.append((dt, r))
else:
raise Exception()
except Exception:
print("Omitting {} which has no valid `stdout` file.".format(r))
_sorted = sorted(dates_paths)
for d, r in _sorted:
print("\n" + "-" * 80 + "\n\n" + "====> {} <====".format(r))
print("Experiment started on {}\n".format(d))
with open(r, 'r') as f:
print(f.read())
def confidence_interval(data, coverage):
from scipy import stats
return stats.t.interval(
coverage, len(data)-1, loc=np.mean(data), scale=stats.sem(data))
def standard_error(data):
from scipy import stats
return stats.sem(data)
def zip_root(zipfile):
""" Get the name of the root directory inside a zip file, if it has one. """
if not isinstance(zipfile, ZipFile):
zipfile = ZipFile(zipfile, 'r')
zip_root = min(
(z.filename for z in zipfile.infolist()),
key=lambda s: len(s))
if zip_root.endswith('/'):
zip_root = zip_root[:-1]
return zip_root
def get_param_hash(d, name_params=None):
if not name_params:
name_params = d.keys()
param_str = []
for name in sorted(name_params):
value = d[name]
if callable(value):
value = inspect.getsource(value)
param_str.append("{}={}".format(name, value))
param_str = "_".join(param_str)
param_hash = hashlib.sha1(param_str.encode()).hexdigest()
return param_hash
CLEAR_CACHE = False
def set_clear_cache(value):
""" If called with True, then whenever `sha_cache` function is instantiated, it will ignore
any cache saved to disk, and instead just call the function as normal, saving the results
as the new cache value. """
global CLEAR_CACHE
CLEAR_CACHE = value
def sha_cache(directory, recurse=False, verbose=False):
os.makedirs(directory, exist_ok=True)
def _print(s, verbose=verbose):
if verbose:
print("sha_cache: {}" .format(s))
def decorator(func):
sig = inspect.signature(func)
def new_f(*args, **kwargs):
bound_args = sig.bind(*args, **kwargs)
param_hash = get_param_hash(bound_args.arguments)
filename = os.path.join(directory, "{}_{}.cache".format(func.__name__, param_hash))
loaded = False
try:
if not CLEAR_CACHE:
_print("Attempting to load...")
with open(filename, 'rb') as f:
value = dill.load(f)
loaded = True
_print("Loaded successfully.")
            except FileNotFoundError:
                _print("File not found.")
finally:
if not loaded:
_print("Calling function...")
value = func(**bound_args.arguments)
_print("Saving results...")
with open(filename, 'wb') as f:
dill.dump(value, f, protocol=dill.HIGHEST_PROTOCOL, recurse=recurse)
return value
return new_f
return decorator
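# Illustrative sketch of sha_cache: the decorated function's bound arguments
# are hashed, and results are persisted to `<directory>/<func>_<sha>.cache`
# with dill. `cache_dir` below is any writable directory (hypothetical).
def _sha_cache_demo(cache_dir):
    @sha_cache(cache_dir, verbose=True)
    def slow_square(x):
        return x ** 2
    first = slow_square(3)   # computed, then saved to disk
    second = slow_square(3)  # loaded from the cache file on the second call
    assert first == second == 9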
def _run_cmd(cmd):
if isinstance(cmd, str):
cmd = cmd.split()
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode()
def find_git_directories():
all_packages = pip_freeze()
all_packages = all_packages.split('\n')
git_packages = [p.split('=')[-1] for p in all_packages if p.startswith('-e git+')]
version_controlled_dirs = set()
for p in git_packages:
package = importlib.import_module(p)
directory = os.path.dirname(package.__file__)
# Check whether any ancestor directory contains a .git directory
while directory:
git_dir = os.path.join(directory, '.git')
if os.path.isdir(git_dir):
version_controlled_dirs.add(directory)
break
directory = os.path.dirname(directory)
return sorted(version_controlled_dirs)
def summarize_git_repo(directory, n_logs=10, diff=False):
s = []
with cd(directory):
s.append("*" * 40)
s.append("git summary for directory {}\n".format(directory))
s.append("log:\n")
log = _run_cmd('git log -n {}'.format(n_logs))
s.append(log)
s.append("\nstatus:\n")
status = _run_cmd('git status --porcelain')
s.append(status)
s.append("\ndiff:\n")
if diff:
diff = _run_cmd('git diff HEAD')
s.append(diff)
else:
s.append("<ommitted>")
s.append("\nEnd of git summary for directory {}".format(directory))
s.append("*" * 40 + "\n")
return '\n'.join(s)
def summarize_git_repos(**summary_kwargs):
s = []
git_dirs = find_git_directories()
for git_dir in git_dirs:
git_summary = summarize_git_repo(git_dir, **summary_kwargs)
s.append(git_summary)
return '\n'.join(s)
def pip_freeze(**kwargs):
return _run_cmd('pip freeze')
def one_hot(indices, depth):
array = np.zeros(indices.shape + (depth,))
batch_indices = np.unravel_index(range(indices.size), indices.shape)
array[batch_indices + (indices.flatten(),)] = 1.0
return array
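# Small worked example (illustrative): one-hot encoding a 2x2 index array with
# depth 3 yields shape (2, 2, 3), with a single 1.0 per index.
def _one_hot_demo():
    indices = np.array([[0, 2], [1, 0]])
    encoded = one_hot(indices, depth=3)
    assert encoded.shape == (2, 2, 3)
    assert encoded[0, 1, 2] == 1.0 and encoded[1, 0, 1] == 1.0
    return encoded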
@contextmanager
def remove(filenames):
try:
yield
finally:
if isinstance(filenames, str):
filenames = filenames.split()
for fn in filenames:
try:
shutil.rmtree(fn)
except NotADirectoryError:
os.remove(fn)
except FileNotFoundError:
pass
@contextmanager
def modify_env(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place.
The ``os.environ`` dictionary is updated in-place so that the modification
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
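# Illustrative: temporarily add a variable to the environment; it is removed
# again when the block exits (DPS_DEMO_FLAG is hypothetical and assumed to be
# unset beforehand).
def _modify_env_demo():
    with modify_env(DPS_DEMO_FLAG='1'):
        assert os.environ['DPS_DEMO_FLAG'] == '1'
    assert 'DPS_DEMO_FLAG' not in os.environ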
def make_symlink(target, name):
""" NB: ``target`` is just used as a simple string when creating
the link. That is, ``target`` is the location of the file we want
to point to, relative to the location that the link resides.
It is not the case that the target file is identified, and then
some kind of smart process occurs to have the link point to that file.
"""
try:
os.remove(name)
except OSError:
pass
os.symlink(target, name)
class ExperimentStore(object):
""" Stores a collection of experiments. Each new experiment is assigned a fresh sub-path. """
def __init__(self, path, prefix='exp', max_experiments=None, delete_old=False):
self.path = os.path.abspath(str(path))
assert prefix, "prefix cannot be empty"
self.prefix = prefix
self.max_experiments = max_experiments
self.delete_old = delete_old
os.makedirs(os.path.realpath(self.path), exist_ok=True)
def new_experiment(self, name, seed, data=None, add_date=False, force_fresh=True, update_latest=True):
""" Create a directory for a new experiment. """
assert seed is not None and seed >= 0 and seed < np.iinfo(np.int32).max and isinstance(seed, int)
if self.max_experiments is not None:
experiments = os.listdir(self.path)
n_experiments = len(experiments)
if n_experiments >= self.max_experiments:
if self.delete_old:
paths = [
os.path.join(self.path, p) for p in experiments
if p.startswith(self.prefix)]
sorted_by_modtime = sorted(
paths, key=lambda x: os.stat(x).st_mtime, reverse=True)
for p in sorted_by_modtime[self.max_experiments-1:]:
print("Deleting old experiment directory {}.".format(p))
try:
shutil.rmtree(p)
except NotADirectoryError:
os.remove(p)
else:
raise Exception(
"Too many experiments (greater than {}) in "
"directory {}.".format(self.max_experiments, self.path))
data = data or {}
config_dict = data.copy()
config_dict['seed'] = str(seed)
filename = make_filename(
self.prefix + '_' + name, add_date=add_date, config_dict=config_dict)
if update_latest:
make_symlink(filename, os.path.join(self.path, 'latest'))
return ExperimentDirectory(os.path.join(self.path, filename), force_fresh=force_fresh)
def __str__(self):
return "ExperimentStore({})".format(self.path)
def __repr__(self):
return str(self)
def experiment_finished(self, exp_dir, success):
dest_name = 'complete' if success else 'incomplete'
dest_path = os.path.join(self.path, dest_name)
os.makedirs(dest_path, exist_ok=True)
shutil.move(exp_dir.path, dest_path)
exp_dir.path = os.path.join(dest_path, os.path.basename(exp_dir.path))
def _checked_makedirs(directory, force_fresh):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST or force_fresh:
raise
except FileExistsError:
if force_fresh:
raise
class ExperimentDirectory(object):
""" Wraps a directory storing data related to an experiment. """
def __init__(self, path, force_fresh=False):
self.path = path
_checked_makedirs(path, force_fresh)
def path_for(self, *path, is_dir=False):
""" Get path for a file, creating necessary subdirs. """
path = os.path.join(*path)
if is_dir:
filename = ""
else:
path, filename = os.path.split(path)
full_path = self.make_directory(path)
return os.path.join(full_path, filename)
def make_directory(self, path, exist_ok=True):
full_path = os.path.join(self.path, path)
os.makedirs(full_path, exist_ok=exist_ok)
return full_path
def record_environment(self, config=None, dill_recurse=False, git_diff=True):
with open(self.path_for('context/git_summary.txt'), 'w') as f:
f.write(summarize_git_repos(diff=git_diff))
uname_path = self.path_for("context/uname.txt")
subprocess.run("uname -a > {}".format(uname_path), shell=True)
lscpu_path = self.path_for("context/lscpu.txt")
subprocess.run("lscpu > {}".format(lscpu_path), shell=True)
environ = {k.decode(): v.decode() for k, v in os.environ._data.items()}
with open(self.path_for('context/os_environ.txt'), 'w') as f:
f.write(pformat(environ))
pip = pip_freeze()
with open(self.path_for('context/pip_freeze.txt'), 'w') as f:
f.write(pip)
if config is not None:
with open(self.path_for('config.pkl'), 'wb') as f:
dill.dump(config, f, protocol=dill.HIGHEST_PROTOCOL, recurse=dill_recurse)
with open(self.path_for('config.json'), 'w') as f:
json.dump(config.freeze(), f, default=str, indent=4, sort_keys=True)
@property
def host(self):
try:
with open(self.path_for('context/uname.txt'), 'r') as f:
return f.read().split()[1]
except FileNotFoundError:
with open(self.path_for('uname.txt'), 'r') as f:
return f.read().split()[1]
def edit_text(prefix=None, editor="vim", initial_text=None):
if editor != "vim":
raise Exception("NotImplemented")
with tempfile.NamedTemporaryFile(mode='w',
prefix='',
suffix='.md',
delete=False) as temp_file:
pass
try:
if initial_text:
with open(temp_file.name, 'w') as f:
f.write(initial_text)
subprocess.call(['vim', '+', str(temp_file.name)])
with open(temp_file.name, 'r') as f:
text = f.read()
finally:
try:
os.remove(temp_file.name)
except FileNotFoundError:
pass
return text
class Tee(object):
""" A stream that outputs to multiple streams.
Does not close its streams; leaves responsibility for that with the caller.
"""
def __init__(self, *streams):
self.streams = streams
def write(self, data):
for s in self.streams:
s.write(data)
def flush(self):
for s in self.streams:
s.flush()
def fileno(self):
for s in self.streams:
if hasattr(s, "fileno"):
return s.fileno()
@contextmanager
def redirect_stream(stream_name, filename, mode='w', tee=False, **kwargs):
assert stream_name in ['stdout', 'stderr']
with open(str(filename), mode=mode, **kwargs) as f:
old = getattr(sys, stream_name)
new = f
if tee:
new = Tee(f, old)
setattr(sys, stream_name, new)
try:
yield
except BaseException:
exc = traceback.format_exc()
f.write(exc)
raise
finally:
setattr(sys, stream_name, old)
def make_filename(main_title, directory='', config_dict=None, add_date=True,
sep='_', kvsep='=', extension='', omit=[]):
""" Create a filename.
Parameters
----------
main_title: string
The main title for the file.
directory: string
The directory to write the file to.
config_dict: dict
Keys and values that will be added to the filename. Key/value
pairs are put into the filename by the alphabetical order of the keys.
add_date: boolean
Whether to append the current date/time to the filename.
sep: string
Separates items in the config dict in the returned filename.
kvsep: string
Separates keys from values in the returned filename.
extension: string
Appears at end of filename.
"""
if config_dict is None:
config_dict = {}
if directory and directory[-1] != '/':
directory += '/'
labels = [directory + main_title]
key_vals = list(config_dict.items())
key_vals.sort(key=lambda x: x[0])
for key, value in key_vals:
if not isinstance(key, str):
raise ValueError("keys in config_dict must be strings.")
if not isinstance(value, str):
raise ValueError("values in config_dict must be strings.")
if not str(key) in omit:
labels.append(kvsep.join([key, value]))
if add_date:
date_time_string = str(datetime.datetime.now()).split('.')[0]
for c in ": -":
date_time_string = date_time_string.replace(c, '_')
labels.append(date_time_string)
file_name = sep.join(labels)
if extension:
if extension[0] != '.':
extension = '.' + extension
file_name += extension
return file_name
def parse_timedelta(d, fmt='%a %b %d %H:%M:%S %Z %Y'):
date = parse_date(d, fmt)
return date - datetime.datetime.now()
def parse_date(d, fmt='%a %b %d %H:%M:%S %Z %Y'):
# default value for `fmt` is default format used by GNU `date`
with open(os.devnull, 'w') as devnull:
# A quick hack since just using the first option was causing weird things to happen, fix later.
if " " in d:
dstr = subprocess.check_output(["date", "-d", d], stderr=devnull)
else:
dstr = subprocess.check_output("date -d {}".format(d).split(), stderr=devnull)
dstr = dstr.decode().strip()
return datetime.datetime.strptime(dstr, fmt)
@contextmanager
def cd(path):
""" A context manager that changes into given directory on __enter__,
change back to original_file directory on exit. Exception safe.
"""
path = str(path)
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_dir)
@contextmanager
def memory_limit(mb):
    """ Limit the memory available to the process (via RLIMIT_DATA). Exception safe. """
    rsrc = resource.RLIMIT_DATA
    prev_soft_limit, hard = resource.getrlimit(rsrc)
    resource.setrlimit(rsrc, (int(mb) * 1024**2, hard))
    try:
        yield
    finally:
        resource.setrlimit(rsrc, (prev_soft_limit, hard))
def memory_usage(physical=False):
""" return memory usage for current process in MB """
process = psutil.Process(os.getpid())
info = process.memory_info()
if physical:
return info.rss / float(2 ** 20)
else:
return info.vms / float(2 ** 20)
# Characters used for ascii art, sorted in order of increasing sparsity (darkest to lightest)
ascii_art_chars = \
"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/|()1{}[]?-_+~<>i!lI;:,\"^`'. "
def char_map(value):
""" Maps a relative "sparsity" or "lightness" value in [0, 1) to a character. """
if value >= 1:
value = 1 - 1e-6
n_bins = len(ascii_art_chars)
bin_id = int(value * n_bins)
return ascii_art_chars[bin_id]
def plt_to_img():
with tempfile.TemporaryFile() as fp:
plt.savefig(fp, format='png', bbox_inches='tight')
fp.seek(0)
img = imageio.imread(fp)
plt.close('all')
gc.collect()
return img
def image_to_string(array):
""" Convert an image stored as an array to an ascii art string """
if array.ndim == 3:
array = array.mean(-1)
if array.ndim == 1:
array = array.reshape(-1, int(np.sqrt(array.shape[0])))
if not np.isclose(array.max(), 0.0):
array = array / array.max()
image = [char_map(value) for value in array.flatten()]
image = np.reshape(image, array.shape)
return '\n'.join(''.join(c for c in row) for row in image)
def shift_fill(a, n, axis=0, fill=0.0, reverse=False):
    """ Shift `n` places along `axis` (toward higher indices for positive `n`),
    filling vacated entries with `fill`; `reverse=True` flips the direction. """
    n = -n if reverse else n
    shifted = np.roll(a, n, axis=axis)
    index = [slice(None)] * shifted.ndim
    index[axis] = slice(0, n) if n >= 0 else slice(n, None)
    shifted[tuple(index)] = fill
    return shifted
def gen_seed():
return np.random.randint(np.iinfo(np.int32).max)
class DataContainer(object):
def __init__(self, X, Y):
assert len(X) == len(Y)
self.X, self.Y = X, Y
def get_random(self):
idx = np.random.randint(len(self.X))
return self.X[idx], self.Y[idx]
def get_random_with_label(self, label):
valid = self.Y == label
X = self.X[valid.flatten(), :]
Y = self.Y[valid]
idx = np.random.randint(len(X))
return X[idx], Y[idx]
def get_random_without_label(self, label):
valid = self.Y != label
X = self.X[valid.flatten(), :]
Y = self.Y[valid]
idx = np.random.randint(len(X))
return X[idx], Y[idx]
def digits_to_numbers(digits, base=10, axis=-1, keepdims=False):
""" Convert array of digits to number, assumes little-endian (least-significant first). """
mult = base ** np.arange(digits.shape[axis])
shape = [1] * digits.ndim
shape[axis] = mult.shape[axis]
mult = mult.reshape(shape)
return (digits * mult).sum(axis=axis, keepdims=keepdims)
def numbers_to_digits(numbers, n_digits, base=10):
""" Convert number to array of digits, assumed little-endian. """
numbers = numbers.copy()
digits = []
for i in range(n_digits):
digits.append(numbers % base)
numbers //= base
return np.stack(digits, -1)
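# Worked example (illustrative): 123 in little-endian base-10 digits is
# [3, 2, 1, 0] with n_digits=4, and the two helpers are inverses.
def _digits_demo():
    numbers = np.array([123, 4096])
    digits = numbers_to_digits(numbers, n_digits=4)  # [[3, 2, 1, 0], [6, 9, 0, 4]]
    assert (digits_to_numbers(digits) == numbers).all()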
NotSupplied = object()
class Param(object):
def __init__(self, default=NotSupplied, aliases=None, help="", type=None):
""" aliases are different ways to fill the value (i.e. from config or kwargs),
but should not be used to access the value as a class attribute. """
self.default = default
if isinstance(aliases, str):
aliases = aliases.split()
self.aliases = aliases or []
self.help = help
self.type = type
class Parameterized(object):
""" An object that can have `Param` class attributes. These class attributes will be
turned into instance attributes at instance creation time. To set a value for the instance
attributes, we perform the following checks (the first value that is found is used):
1. check the kwargs passed into class constructor for a value with key "<param-name>"
2. check dps.cfg for values with keys of the form "<class-name>:<param-name>". `class-name`
can be the class of the object or any base class thereof. More derived/specific classes
will be checked first (specifically, we iterate through classes in order of the MRO).
3. check dps.cfg for values with name "<param-name>"
4. fallback on the param's default value, if one was supplied.
If no value is found by this point, an AttributeError is raised.
A note on deep copies: at instance creation time, we store values for all the parameters.
Deep copies are created by creating a new object using those creation-time parameter values.
"""
_resolved = False
def __new__(cls, *args, **kwargs):
obj = super(Parameterized, cls).__new__(cls)
obj._resolve_params(**kwargs)
        # Stored for copying purposes, to get parameters as they are before __init__ is called.
obj._params_at_creation_time = obj.param_values()
return obj
def __init__(self, *args, **kwargs):
pass
def __str__(self):
return "{}(\n{}\n)".format(self.__class__.__name__, pformat(self.param_values()))
@classmethod
def _get_param_value(cls, name, param, kwargs):
aliases = list([name] + param.aliases)
# Check kwargs
for alias in aliases:
value = kwargs.get(alias, NotSupplied)
if value is not NotSupplied:
return value
# Check cfg with class name label
for _cls in cls.__mro__:
for alias in aliases:
key = _cls.__name__ + ":" + alias
value = getattr(dps.cfg, key, NotSupplied)
if value is not NotSupplied:
return value
# Check cfg
for alias in aliases:
value = getattr(dps.cfg, alias, NotSupplied)
if value is not NotSupplied:
return value
        # Fall back on the param's default value
        if param.default is not NotSupplied:
            return param.default
        raise AttributeError(
            "Could not find value for parameter `{}` for class `{}` "
            "in either kwargs or config, and no default was provided.".format(
                name, cls.__name__))
def _resolve_params(self, **kwargs):
if not self._resolved:
for k, v in self._capture_param_values(**kwargs).items():
setattr(self, k, v)
self._resolved = True
@classmethod
def _capture_param_values(cls, **kwargs):
""" Return the params that would be created if an object of the
current class were constructed in the current context with the given kwargs. """
param_values = dict()
for name in cls.param_names():
param = getattr(cls, name)
value = cls._get_param_value(name, param, kwargs)
if param.type is not None:
value = param.type(value)
param_values[name] = value
return param_values
@classmethod
def param_names(cls):
params = []
for p in dir(cls):
try:
if p != 'params' and isinstance(getattr(cls, p), Param):
params.append(p)
except Exception:
pass
return params
def param_values(self):
if not self._resolved:
raise Exception("Parameters have not yet been resolved.")
return {n: getattr(self, n) for n in self.param_names()}
def __deepcopy__(self, memo):
cls = self.__class__
kwargs = self._params_at_creation_time
result = cls.__new__(cls, **kwargs)
result.__init__(**kwargs)
memo[id(self)] = result
return result
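# A hypothetical usage sketch for Param/Parameterized (the class and values here
# are made up for illustration): kwargs take precedence, then the config stack
# described in the docstring above, then Param defaults.
class _DemoOptimizer(Parameterized):
    lr = Param(default=0.1, help="learning rate")
    momentum = Param(default=0.9, aliases="mom")

def _demo_parameterized():
    opt = _DemoOptimizer(lr=0.01)
    assert opt.lr == 0.01        # 1. value supplied via kwargs
    assert opt.momentum == 0.9   # 4. falls back on the Param default
    opt2 = _DemoOptimizer(mom=0.5)
    assert opt2.momentum == 0.5  # aliases also fill the value from kwargs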
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du', '-sh', str(path)]).split()[0].decode('utf-8')
class pdb_postmortem:
def __init__(self, do_it=True):
self.do_it = do_it
def __enter__(self):
pass
def __exit__(self, type_, value, tb):
if self.do_it and type_:
traceback.print_exc()
pdb.post_mortem(tb)
return True
def camel_to_snake(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
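# e.g. camel_to_snake("HTTPResponseCode") == "http_response_code" (illustrative)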
def process_path(path, real_path=False):
path = os.path.expandvars(os.path.expanduser(str(path)))
if real_path:
path = os.path.realpath(path)
return path
def path_stem(path):
no_ext = os.path.splitext(path)[0]
return os.path.basename(no_ext)
@contextmanager
def catch(exception_types, action=None):
""" A try-except block as a context manager. """
try:
yield
except exception_types as e:
if isinstance(action, str):
print(action)
elif action:
action(e)
class Alarm(BaseException):
pass
def raise_alarm(*args, **kwargs):
raise Alarm("Raised by `raise_alarm`.")
class time_limit(object):
""" Example use:
with time_limit(seconds=5) as tl:
while True:
pass
if tl.ran_out:
print("Time ran out.")
"""
_stack = []
def __init__(self, seconds, verbose=False, timeout_callback=None):
self.seconds = seconds
self.verbose = verbose
self.ran_out = False
self.timeout_callback = timeout_callback
def __str__(self):
return (
"time_limit(seconds={}, verbose={}, ran_out={}, "
"timeout_callback={})".format(
self.seconds, self.verbose, self.ran_out, self.timeout_callback))
def __enter__(self):
if time_limit._stack:
raise Exception(
"Only one instance of `time_limit` may be active at once. "
"Another time_limit instance {} was already active.".format(
time_limit._stack[0]))
self.old_handler = signal.signal(signal.SIGALRM, raise_alarm)
if self.seconds <= 0:
raise_alarm("Didn't get started.")
if not np.isinf(self.seconds):
signal.alarm(int(np.floor(self.seconds)))
self.then = time.time()
time_limit._stack.append(self)
return self
def __exit__(self, exc_type, exc, exc_tb):
self.elapsed_time = time.time() - self.then
signal.signal(signal.SIGALRM, self.old_handler)
time_limit._stack.pop()
if exc_type is Alarm:
self.ran_out = True
if self.verbose:
print("Block ran for {} seconds (limit was {}).".format(
self.elapsed_time, self.seconds))
if self.timeout_callback:
self.timeout_callback(self)
return True
else:
signal.alarm(0) # Cancel the alarm.
return False
def timed_func(func):
@wraps(func)
def f(*args, **kwargs):
with timed_block(func.__name__):
return func(*args, **kwargs)
return f
@contextmanager
def timed_block(name=None):
if name is None:
frame = inspect.stack()[1]
name = "{}:{}".format(frame.filename, frame.lineno)
start_time = time.time()
yield
print("Call to block <{}> took {} seconds.".format(name, time.time() - start_time))
# From py.test
class KeywordMapping(object):
""" Provides a local mapping for keywords.
Can be used to implement user-friendly name selection
using boolean expressions.
names=[orange], pattern = "ora and e" -> True
names=[orange], pattern = "orang" -> True
names=[orange], pattern = "orane" -> False
names=[orange], pattern = "ora and z" -> False
names=[orange], pattern = "ora or z" -> True
Given a list of names, map any string that is a substring
of one of those names to True.
``names`` are the things we are trying to select, ``pattern``
is the thing we are using to select them. Note that supplying
multiple names does not mean "apply the pattern to each one
separately". Rather, we are selecting the list as a whole,
which doesn't seem that useful. The different names should be
thought of as different names for a single object.
"""
def __init__(self, names):
self._names = names
def __getitem__(self, subname):
if subname == "_":
return True
for name in self._names:
if subname in name:
return True
return False
def eval(self, pattern):
return eval(pattern, {}, self)
@staticmethod
def batch(batch, pattern):
""" Apply a single pattern to a batch of names. """
return [KeywordMapping([b]).eval(pattern) for b in batch]
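# A short illustration of KeywordMapping's boolean selection (mirrors the
# examples in the docstring above):
def _demo_keyword_mapping():
    km = KeywordMapping(["orange"])
    assert km.eval("ora and e")
    assert not km.eval("orane")
    assert km.eval("ora or z")
    assert KeywordMapping.batch(["orange", "apple"], "an") == [True, False]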
class SigTerm(Exception):
pass
class NumpySeed(object):
def __init__(self, seed):
if seed < 0:
seed = None
self.seed = seed
self.state = None
def __enter__(self):
self.state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.state)
class _bool(object):
def __new__(cls, val):
if val in ("0", "False", "F", "false", "f"):
return False
return bool(val)
def popleft(l, default=None):
if default is not None:
try:
return l.popleft()
except IndexError:
return default
else:
return l.popleft()
def nested_update(d, other):
if not isinstance(d, dict) or not isinstance(other, dict):
return
for k, v in other.items():
if k in d and isinstance(d[k], dict) and isinstance(v, dict):
nested_update(d[k], v)
else:
d[k] = v
class Config(dict, MutableMapping):
""" Note: multi-level setting will succeed more often with __setitem__ than __setattr__.
This doesn't work:
c = Config()
c.a.b = 1
But this does:
c = Config()
c["a:b"] = 1
"""
_reserved_keys = None
def __init__(self, _d=None, **kwargs):
if _d:
self.update(_d)
self.update(kwargs)
def flatten(self):
return {k: self[k] for k in self._keys()}
def _keys(self, sep=":"):
stack = [iter(dict.items(self))]
key_prefix = ''
while stack:
new = next(stack[-1], None)
if new is None:
stack.pop()
key_prefix = key_prefix.rpartition(sep)[0]
continue
key, value = new
nested_key = key_prefix + sep + key
if isinstance(value, dict) and value:
stack.append(iter(value.items()))
key_prefix = nested_key
else:
yield nested_key[1:]
def __iter__(self):
return self._keys()
def keys(self):
return MutableMapping.keys(self)
def values(self):
return MutableMapping.values(self)
def items(self):
return MutableMapping.items(self)
def __str__(self):
items = {k: v for k, v in dict.items(self)}
s = "{}(\n{}\n)".format(self.__class__.__name__, pformat(items))
return s
def __repr__(self):
return str(self)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def __getitem__(self, key):
assert isinstance(key, str), "`Config` keys must be strings."
if ':' in key:
keys = key.split(':')
value = self
for k in keys:
try:
value = value[k]
except Exception:
try:
value = value[int(k)]
except Exception:
raise KeyError(
"Calling __getitem__ with key {} failed at component {}.".format(key, k))
return value
else:
return super(Config, self).__getitem__(key)
def __setitem__(self, key, value):
assert isinstance(key, str), "`Config` keys must be strings."
if ':' in key:
keys = key.split(':')
to_set = self
for k in keys[:-1]:
nxt = None
try:
nxt = to_set[k]
except KeyError:
try:
nxt = to_set[int(k)]
except Exception:
pass
if not isinstance(nxt, (list, dict)):
nxt = self.__class__()
to_set[k] = nxt
to_set = nxt
try:
to_set[keys[-1]] = value
except Exception:
to_set[int(keys[-1])] = value
else:
self._validate_key(key)
return super(Config, self).__setitem__(key, value)
    def __delitem__(self, key):
        assert isinstance(key, str), "`Config` keys must be strings."
        if ':' in key:
            keys = key.split(':')
            to_del = self
            for k in keys[:-1]:
                try:
                    to_del = to_del[k]
                except Exception:
                    try:
                        to_del = to_del[int(k)]
                    except Exception:
                        raise KeyError("Calling __delitem__ with key {} failed at component {}.".format(key, k))
            try:
                del to_del[keys[-1]]
            except Exception:
                try:
                    del to_del[int(keys[-1])]
                except Exception:
                    raise KeyError("Calling __delitem__ with key {} failed at component {}.".format(key, keys[-1]))
        else:
            return super(Config, self).__delitem__(key)
else:
return super(Config, self).__delitem__(key)
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("Could not find attribute called `{}`.".format(key))
def __setattr__(self, key, value):
if key == '_reserved_keys':
super(Config, self).__setattr__(key, value)
else:
self[key] = value
def __enter__(self):
ConfigStack._stack.append(self)
def __exit__(self, exc_type, exc, exc_tb):
popped = ConfigStack._stack.pop()
assert popped == self, "Something is wrong with the config stack."
return False
def _validate_key(self, key):
msg = "Bad key for config: `{}`.".format(key)
assert isinstance(key, str), msg
assert not key.startswith('_'), msg
assert key not in self._reserved_keys, msg
def copy(self, _d=None, **kwargs):
""" Copy and update at the same time. """
new = copy.deepcopy(self)
if _d:
new.update(_d)
new.update(**kwargs)
return new
def update(self, _d=None, **kwargs):
nested_update(self, _d)
nested_update(self, kwargs)
def update_from_command_line(self, strict=True):
cl_args = clify.wrap_object(self, strict=strict).parse()
self.update(cl_args)
def freeze(self, remove_callable=False):
_config = Config()
for key in self.keys():
value = self[key]
if remove_callable and callable(value):
value = str(value)
_config[key] = value
return _config
Config._reserved_keys = dir(Config)
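# Hypothetical demo of Config's colon-separated nested keys (see the class
# docstring above): setting "a:b" creates the nested levels automatically.
def _demo_config_nesting():
    c = Config()
    c["a:b"] = 1
    assert c.a.b == 1
    assert "a:b" in c
    c.update(dict(a=dict(d=2)))  # nested_update merges rather than replaces
    assert c["a:b"] == 1 and c["a:d"] == 2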
def update_scratch_dir(config, new_scratch_dir):
def fixup_dir(name):
attr_name = name + "_dir"
dir_name = os.path.join(new_scratch_dir, name)
dir_name = process_path(dir_name)
setattr(config, attr_name, dir_name)
os.makedirs(dir_name, exist_ok=True)
fixup_dir("data")
fixup_dir("model")
fixup_dir("local_experiments")
fixup_dir("parallel_experiments_build")
fixup_dir("parallel_experiments_run")
config_template = """
config = dict(
start_tensorboard=True,
tbport=6006,
reload_interval=10,
show_plots=False,
verbose=False,
use_gpu=False,
per_process_gpu_memory_fraction=0,
gpu_allow_growth=True,
parallel_exe="$HOME/.local/bin/parallel",
scratch_dir="{scratch_dir}",
slurm_preamble='''
export OMP_NUM_THREADS=1
module purge
module load python/3.6.3
module load scipy-stack
source "$VIRTUALENVWRAPPER_BIN"/virtualenvwrapper.sh
workon her_curriculum''',
ssh_hosts=(
["ecrawf6@lab1-{{}}.cs.mcgill.ca".format(i+1) for i in range(16)]
+ ["ecrawf6@lab2-{{}}.cs.mcgill.ca".format(i+1) for i in range(51)]
+ ["ecrawf6@cs-{{}}.cs.mcgill.ca".format(i+1) for i in range(32)]
),
ssh_options=(
"-oPasswordAuthentication=no "
"-oStrictHostKeyChecking=no "
"-oConnectTimeout=5 "
"-oServerAliveInterval=2"
),
)
"""
def _load_system_config(key=None):
home = os.getenv("HOME")
config_dir = os.path.join(home, ".config")
config_loc = os.path.join(config_dir, "dps_config.py")
if not os.path.exists(config_loc):
print("Creating config at {}...".format(config_loc))
default_scratch_dir = os.path.join(home, "dps_data")
scratch_dir = input("Enter a location to create a scratch directory for dps "
"(for saving experiment results, cached datasets, etc.). "
"Leave blank to accept the default of '{}'.\n".format(default_scratch_dir))
scratch_dir = process_path(scratch_dir) or default_scratch_dir
config = config_template.format(scratch_dir=scratch_dir)
with open(config_loc, "w") as f:
f.write(config)
config_module_spec = importlib.util.spec_from_file_location("dps_config", config_loc)
config_module = config_module_spec.loader.load_module()
config = Config(**config_module.config)
def fixup_dir(name):
attr_name = name + "_dir"
dir_name = getattr(config, attr_name, None)
if dir_name is None:
dir_name = os.path.join(config.scratch_dir, name)
dir_name = process_path(dir_name)
setattr(config, attr_name, dir_name)
os.makedirs(dir_name, exist_ok=True)
fixup_dir("data")
fixup_dir("model")
fixup_dir("local_experiments")
fixup_dir("parallel_experiments_build")
fixup_dir("parallel_experiments_run")
return config
SYSTEM_CONFIG = _load_system_config()
class ClearConfig(Config):
def __init__(self, _d=None, **kwargs):
config = _load_system_config()
if _d:
config.update(_d)
config.update(kwargs)
super().__init__(**config)
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class ConfigStack(dict, metaclass=Singleton):
_stack = []
@property
def config_sequence(self):
""" Get all configs up the the first occurence of an instance of ClearConfig """
stack = ConfigStack._stack[::-1]
for i, config in enumerate(stack):
if isinstance(config, ClearConfig):
return stack[:i+1]
return stack
def clear_stack(self, default=NotSupplied):
self._stack.clear()
if default is not None:
if default is NotSupplied:
self._stack.append(SYSTEM_CONFIG.copy())
else:
self._stack.append(default)
def __str__(self):
return self.to_string(hidden=True)
def __repr__(self):
return str(self)
def to_string(self, hidden=False):
s = []
seen_keys = set()
reverse_stack = self._stack[::-1]
visible_keys = [set() for config in reverse_stack]
cleared = False
for vk, config in zip(visible_keys, reverse_stack):
if not cleared:
for key in config.keys():
if key not in seen_keys:
vk.add(key)
seen_keys.add(key)
if isinstance(config, ClearConfig):
cleared = True
for i, (vk, config) in enumerate(zip(visible_keys[::-1], reverse_stack[::-1])):
visible_items = {k: v for k, v in config.items() if k in vk}
if hidden:
hidden_items = {k: v for k, v in config.items() if k not in vk}
_s = "# {}: <{} -\nVISIBLE:\n{}\nHIDDEN:\n{}\n>".format(
i, config.__class__.__name__,
pformat(visible_items), pformat(hidden_items))
else:
_s = "# {}: <{} -\n{}\n>".format(i, config.__class__.__name__, pformat(visible_items))
s.append(_s)
s = '\n'.join(s)
return "<{} -\n{}\n>".format(self.__class__.__name__, s)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __iter__(self):
return iter(self._keys())
def _keys(self):
keys = set()
for config in self.config_sequence:
keys |= config.keys()
return list(keys)
def keys(self):
return MutableMapping.keys(self)
def values(self):
return MutableMapping.values(self)
def items(self):
return MutableMapping.items(self)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def __getitem__(self, key):
for config in self.config_sequence:
if key in config:
return config[key]
raise KeyError("Cannot find a value for key `{}`".format(key))
def __setitem__(self, key, value):
self._stack[-1][key] = value
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("No attribute named `{}`.".format(key))
def __setattr__(self, key, value):
setattr(self._stack[-1], key, value)
def update(self, *args, **kwargs):
self._stack[-1].update(*args, **kwargs)
def freeze(self, remove_callable=False):
_config = Config()
for key in self.keys():
value = self[key]
if remove_callable and callable(value):
value = str(value)
_config[key] = value
return _config
def update_from_command_line(self, strict=True):
cl_args = clify.wrap_object(self, strict=strict).parse()
self.update(cl_args)
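# Sketch of the config stack in action (hypothetical keys): entering a Config
# pushes it onto the stack, and lookups search from the top of the stack down.
def _demo_config_stack():
    stack = ConfigStack()
    with Config(x=1):
        with Config(x=2, y=3):
            assert stack["x"] == 2 and stack["y"] == 3
        assert stack["x"] == 1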
def restart_tensorboard(logdir, port=6006, reload_interval=120):
sp = subprocess
print("Killing old tensorboard process...")
    try:
        command = "fuser {}/tcp -k".format(port)
        sp.run(command.split(), stdout=sp.DEVNULL, stderr=sp.DEVNULL, check=True)
except sp.CalledProcessError as e:
print("Killing tensorboard failed:")
print(e.output)
print("Restarting tensorboard process...")
command = "tensorboard --logdir={} --port={} --reload_interval={}".format(logdir, port, reload_interval)
print(command)
sp.Popen(command.split(), stdout=sp.DEVNULL, stderr=sp.DEVNULL)
print("Done restarting tensorboard.\n")
def map_structure(func, *args, is_leaf):
if is_leaf(args[0]):
return func(*args)
else:
if isinstance(args[0], dict):
arg_keys = [a.keys() for a in args]
assert all(keys == arg_keys[0] for keys in arg_keys), (
"Arguments do not have same structure: {}.".format(arg_keys))
new_dict = {
k: map_structure(func, *[a[k] for a in args], is_leaf=is_leaf)
for k in args[0]}
return type(args[0])(new_dict)
else:
arg_lens = [len(a) for a in args]
assert all(np.array(arg_lens) == arg_lens[0]), (
"Arguments do not have same structure: {} ".format(arg_lens))
new_list = [map_structure(func, *[a[i] for a in args], is_leaf=is_leaf)
for i in range(arg_lens[0])]
return type(args[0])(new_list)
def test_map_structure():
a = dict(a=[1, 2], b=3)
b = dict(a=[3, 4], b=10)
result = map_structure(lambda x, y: x + y, a, b, is_leaf=lambda x: isinstance(x, int))
assert tuple(result["a"]) == (4, 6)
assert result["b"] == 13
result = map_structure(lambda *x: None, a, is_leaf=lambda x: isinstance(x, int))
assert tuple(result["a"]) == (None, None)
assert result["b"] is None
result = map_structure(lambda *x: None, a, b, is_leaf=lambda x: isinstance(x, int))
assert tuple(result["a"]) == (None, None)
assert result["b"] is None
|
print("enter correct user name and password combo to continue")
count=0
username=bhavanagadde
password=bhavana#1995
while password!="bhavana#1995" and username!="bhavanagadde" and count<3:
username=input('enter username=') and password=input('enter password=')
if username==bhavanagadde and password==bhavana#1995:
print("access granted")
elif:
print("access denied.try again")
count-=1
else:
print("after 3 attempts denied")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import shutil
import sys
from Bio import SeqIO
# Import the functions defined in the other scripts
import Find_domains
import funtion_blast
import make_tres
# Information for the program help
if len(sys.argv)<2:
print("""\n --------------Welcome to the program help-------------
The aim of this program is doing a blastp, a muscle aligment and
find the domains of the protein sequence.
For doing the blast will pass from the genbak format to a fasta format
for doing the blastp. Having one file for each query_ID. Then the
result will be filtered with the value introduced by the user. If the
user dont introduced any value, the program has definied the values.
The muscle alignment is done with the querys and their subjects. With
this output will can create the phylogenetic tree.
The domains where found using the file 'prosite.dat' which should
be located in the folder. This output wil be used to find the pattern.
All the results will be copy in a folder with the name that the user
has choose.
To run the program you hace to introduced in this order:
1. The name of folder where the the files of the genbanks
are save.
2. Query file
3. Value of coverage (not compolsary)
4. Value of identity (not compolsary
--------------Thank ok for using the help--------------
""")
sys.exit()
else:
pass
#Folder to save the results
print("Introduced name for the folder to save the results")
usr_ip=input()
try:
folder_result="Resultado_" + str(usr_ip)
os.mkdir(folder_result)
except OSError:
print("Error creating the folder. Please try again")
exit()
else:
print("Succesful creating the directory")
# Assignment of values
folder=sys.argv[1]
file_query=sys.argv[2]
#Call to the function Parser
funtion_blast.Parser(folder)
shutil.copy('file_parseado', folder_result)
#Call to the function blast
funtion_blast.BlastP(file_query, 'file_parseado')
#Exception when the values are not correct
funtion_blast.Values(file_query)
#Call the function for alignment
make_tres.Pre_Alignment(file_query)
make_tres.Alignment(file_query, folder_result)
make_tres.Tree(file_query)
#Call to the function to find domains
Find_domains.Parsear_prosite(folder_result)
Find_domains.find_domains(file_query,folder_result, file_pros='prosite_parser.tsv')
#Move the file to the correct folder
shutil.move('prosite_parser.tsv', folder_result)
for record in SeqIO.parse(file_query, "fasta"):
shutil.move(record.id + "_result.tsv", folder_result)
shutil.move(record.id + "_filter.tsv", folder_result)
shutil.move(record.id + "_muscle.faa", folder_result)
shutil.move(record.id + "_alignment.faa", folder_result)
shutil.move(record.id + "_tree.nw", folder_result)
shutil.move(record.id+"_domains.txt", folder_result)
os.remove('file_parseado')
|
from django.apps import AppConfig
class CourseManagementAppConfig(AppConfig):
name = 'course_management_app'
|
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from nonlinear import NonLinearSolver
# The original @given() supplied no strategy; lists of floats are assumed here,
# based on the commented-out @example([]) hint below.
@given(st.lists(st.floats(allow_nan=False)))
# @example([])
def test_NonLinearSolver(s):
    pass
|
import struct
import sys

padding = b'A' * 76
return_address = struct.pack('I', 0xb7ec60c0)
win_address = struct.pack('I', 0x080483f4)
# Write raw bytes so the payload is not mangled by text encoding
sys.stdout.buffer.write(padding + win_address + return_address)
|
from enum import Enum
from pygame.locals import K_UP, K_DOWN, K_LEFT, K_RIGHT
Color = {
"RED": (255, 0, 0),
"BLUE": (0, 0, 255),
"GREEN": (0, 255, 0),
"YELLOW": (255, 255, 0),
"CYAN": (0, 255, 255),
"WHITE": (255, 255, 255),
"GRAY": (50, 50, 50)
}
RED = Color["RED"]
BLUE = Color["BLUE"]
GREEN = Color["GREEN"]
YELLOW = Color["YELLOW"]
CYAN = Color["CYAN"]
WHITE = Color["WHITE"]
GRAY = Color["GRAY"]
CLICKED_COLOR = {0: (255, 100, 100), 1: (100, 100, 255), 2: (100, 255, 100), 3: (100, 255, 100)}
COLORS = [Color["RED"], Color["BLUE"], Color["GREEN"], Color["YELLOW"], Color["CYAN"], Color["WHITE"]]
COLORS_STR = ["RED", "BLUE", "GREEN", "YELLOW", "CYAN", "WHITE"]
FRAME_LOCATION = "Demonstrations"
# might require just for DQN
class Action(Enum):
MOVE_UP = 'MOVE_UP'
MOVE_DOWN = 'MOVE_DOWN'
MOVE_LEFT = 'MOVE_LEFT'
MOVE_RIGHT = 'MOVE_RIGHT'
FINISHED = 'FINISHED'
DROP = 'DROP'
PICK = 'PICK'
ind_to_action = {0: "MOVE", 1: "RIGHT", 2: "LEFT", 3: "UP", 4: "DOWN", 5: "DROP", 6: "PICK0", 7: "PICK1", 8: "PICK2",
9: "PICK3", 10: "PICK4", 11: "FINISHED"}
action_to_ind = {"MOVE": 0, "RIGHT": 1, "LEFT": 2, "UP": 3, "DOWN": 4, "DROP": 5, "PICK0": 6, "PICK1": 7, "PICK2": 8,
"PICK3": 9, "PICK4": 10, "FINISHED": 11}
key_to_action = {
K_UP: Action.MOVE_UP,
K_DOWN: Action.MOVE_DOWN,
K_LEFT: Action.MOVE_LEFT,
K_RIGHT: Action.MOVE_RIGHT
}
step_size = 50
move_action_to_deviation = {
Action.MOVE_UP: (0, -step_size),
Action.MOVE_DOWN: (0, step_size),
Action.MOVE_LEFT: (-step_size, 0),
Action.MOVE_RIGHT: (step_size, 0),
Action.DROP: (0, 0),
Action.FINISHED: (0, 0),
Action.PICK: (0, 0)
}
|
from PIL import Image
import base64
import numpy as np
import requests
import json
import cv2
import dlib
import sys
from random import randint
from random import uniform
from random import choice
import time
from PyQt5 import QtGui
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
from face_recognition_system import face_detector
from Service.FaceRecognitionCore import FaceRecognition
from Utils.thaitext import drawText
class VideoThread(QThread):
change_pixmap_signal = pyqtSignal(np.ndarray)
def __init__(self):
super().__init__()
self._run_flag = True
self.face_detector = face_detector()
self.FaceRecognition = FaceRecognition()
def run(self):
# capture from web cam
cap = cv2.VideoCapture(0)
while self._run_flag:
ret, cv_img = cap.read()
if ret:
try:
x = self.face_detector.draw_bbox(cv_img)
for cv_img, faces in x:
# print(len(faces))
for i, face in enumerate(faces, 1):
# print(face[1])
encode_face_data = self.FaceRecognition.encode_face_data(face[0])
result_idx = self.FaceRecognition.match(encode_face_data, 1)
result = self.FaceRecognition.getface(result_idx,theshold=7.5)
name = result['name']
student_id = result['student_id']
nickname = result['nickname']
classroom = result['class']
image_path = result['image_path']
added_time = result['added_time']
print(f"face: {i} name: {name}, student_id: {student_id} nickname: {nickname}")
cv_img = drawText(cv_img, f"face_id :{i} {name} {nickname}", pos=(face[1][0], face[1][1]-70, face[1][2], face[1][3]), fontSize=18, color=(255, 255, 255))
                except Exception:
                    # Recognition can fail on frames with no detectable face; skip them.
                    pass
self.change_pixmap_signal.emit(cv_img)
# shut down capture system
cap.release()
def stop(self):
"""Sets run flag to False and waits for thread to finish"""
self._run_flag = False
self.wait()
class App(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("SWS_FACE_RECOGNITION")
        self.display_width = 1280
self.display_height = 720
# create the label that holds the image
self.image_label = QLabel(self)
        self.image_label.resize(self.display_width, self.display_height)
# create a text label
self.textLabel = QLabel('Webcam')
# create a vertical box layout and add the two labels
vbox = QVBoxLayout()
vbox.addWidget(self.image_label)
vbox.addWidget(self.textLabel)
        # set the vbox layout as the widget's layout
self.setLayout(vbox)
# create the video capture thread
self.thread = VideoThread()
# connect its signal to the update_image slot
self.thread.change_pixmap_signal.connect(self.update_image)
# start the thread
self.thread.start()
def closeEvent(self, event):
self.thread.stop()
event.accept()
@pyqtSlot(np.ndarray)
def update_image(self, cv_img):
"""Updates the image_label with a new opencv image"""
qt_img = self.convert_cv_qt(cv_img)
self.image_label.setPixmap(qt_img)
def convert_cv_qt(self, cv_img):
"""Convert from an opencv image to QPixmap"""
rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
h, w, ch = rgb_image.shape
bytes_per_line = ch * w
convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)
return QPixmap.fromImage(p)
if __name__=="__main__":
app = QApplication(sys.argv)
a = App()
a.show()
sys.exit(app.exec_())
|
import json
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncWebsocketConsumer
import messaging.models
from messaging.utils import Firebase
class MessageConsumer(AsyncWebsocketConsumer):
async def websocket_connect(self, event):
self.user = self.retrieve_user()
        if self.user.is_anonymous:
            await self.close()
            return
self.event = self.scope['url_route']['kwargs']['pk']
self.event_room = f'event_{self.event}'
await self.channel_layer.group_add(
self.event_room,
self.channel_name
)
await self.accept()
async def websocket_receive(self, event):
message_text = event.get('text', None)
data = json.loads(message_text)
if data['message']:
message = await self.save_message(data['message'])
send_data = dict(
message=data['message'],
sender=message.sender.pk,
pk=message.pk,
sender_name=message.sender.get_full_name(),
sent_at=str(message.sent_at),
)
await self.firebase_notification(message)
await self.channel_layer.group_send(
self.event_room,
{
'type': 'new_message',
'data': json.dumps(send_data),
}
)
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
self.event_room,
self.channel_name
)
async def new_message(self, event):
await self.send(text_data=event['data'])
async def firebase_notification(self, message):
firebase_data = dict(
message=message.message,
event_id=message.event.pk,
full_name=self.user.get_full_name(),
avatar=self.user.avatar.url if self.user.avatar else '',
)
users = message.event.users.all()
        user = users[1].device_id if users[0].pk == self.user.pk else users[0].device_id
registration_ids = [user]
await Firebase().send_message(firebase_data, registration_ids, 1)
def retrieve_user(self):
return self.scope['user']
@database_sync_to_async
def save_message(self, message):
event = messaging.models.Event.objects.get(pk=self.event, resolved=False)
return messaging.models.Message.objects.create(
event=event,
sender=self.user,
message=message,
)
|
from django.contrib import admin
from architect.inventory.models import Inventory, Resource
@admin.register(Inventory)
class InventoryAdmin(admin.ModelAdmin):
list_display = ('name', 'engine', 'status')
list_filter = ('status', 'engine')
@admin.register(Resource)
class ResourceAdmin(admin.ModelAdmin):
list_display = ('name', 'uid', 'inventory', 'kind', 'status')
list_filter = ('inventory', 'status', 'kind')
search_fields = ['uid', 'name']
|
# http://www.practicepython.org/exercise/2014/03/05/05-list-overlap.html
from random import randint
def estaDentroDe(num, lista):
    for i in lista:
        if (i == num):
            return True
    return False
a = []
b = []
res = []
for i in range(20):  # generate two random lists
numa = randint(0,20)
numb = randint(0,20)
a.append(numa)
b.append(numb)
#
a.sort()
b.sort()
for i in range(20):  # walk both lists looking for repeated values
repe = False
for j in range(20):
#repe = False
if(a[i] == b[j]):
if (not estaDentroDe(a[i],res)):
res.append(a[i])
#
res.sort()
print (a)
print (b)
print (res)
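# The same overlap could be computed more idiomatically with sets
# (equivalent result, illustrative only):
# res = sorted(set(a) & set(b))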
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from SAAS_UI_TEST.framework.readconfig import config,LOG_PATH
import logging
import os.path
from SAAS_UI_TEST.framework.browser_engine import BrowserEngine
from SAAS_UI_TEST.framework.logger import Logger
'''
level = logging.DEBUG
logger = logging.getLogger('browserEngine')
logger.setLevel(level)
logfilename = './logs/log.log'
fh = logging.FileHandler(logfilename)
fh.setLevel(level)
sh = logging.StreamHandler(stream=None)
sh.setLevel(level)
fmt = "%(asctime)-15s %(levelname)s %(filename)s %(lineno)d %(process)d %(message)s"
datefmt = "%a %d %b %Y %H:%M:%S"
formatter = logging.Formatter(fmt, datefmt)
sh.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(sh)
logger.info('hhhhhhhhhhhhhhhhhhhhhhhhhhhhh')
peth1 = os.path.abspath(__file__)
peth2 = os.path.dirname(__file__)
peth3 = os.path.dirname(os.path.abspath('.'))
print(peth1)
print(peth2)
print(peth3)
'''
browser = BrowserEngine().open_browser()
logger = Logger(logger='BrowserEngine').getlog()
logger.info('testssssssssssssss')
|
import unittest
from katas.kyu_7.return_the_closest_number_multiple_of_10 import \
closest_multiple_10
class ClosestMultipleOf10TestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(closest_multiple_10(54), 50)
def test_equal_2(self):
self.assertEqual(closest_multiple_10(55), 60)
|
import sys
import random
from PyQt5.QtGui import QPainter, QColor
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow, QTableWidgetItem
import sqlite3
class MyWidget(QMainWindow):
def __init__(self):
super().__init__()
uic.loadUi("addEditCoffeeForm.ui", self)
self.pushButton_2.clicked.connect(self.hhh)
self.pushButton.clicked.connect(self.hhh1)
def hhh(self):
e1 = self.lineEdit.text()
e2 = self.lineEdit_2.text()
e3 = self.lineEdit_3.text()
e4 = self.lineEdit_4.text()
e5 = self.lineEdit_5.text()
e6 = self.lineEdit_6.text()
e7 = self.lineEdit_7.text()
if e1 != '' and e2 != '' and e3 != '' and e4 != '' and e5 != '' and e6 != '' and e7 != '':
            con = sqlite3.connect('coffe.sqlite')
            cur = con.cursor()
            print(e1, e2, e3, e4, e5, e6, e7)
            cur.execute("""INSERT INTO Coffe("id", "name of the variety", "degree of roasting", "Type", "description of taste", "price", "volume of packaging") VALUES (?, ?, ?, ?, ?, ?, ?)""", [e1, e2, e3, e7, e6, e5, e4])
            con.commit()
else:
self.pushButton_2.setText(("Добавить (Не заполнено)"))
def hhh1(self):
e = self.lineEdit_15.text()
e1 = self.lineEdit_8.text()
e2 = self.lineEdit_9.text()
e3 = self.lineEdit_10.text()
e4 = self.lineEdit_11.text()
e5 = self.lineEdit_12.text()
e6 = self.lineEdit_13.text()
e7 = self.lineEdit_14.text()
if e1 != '' and e2 != '' and e3 != '' and e4 != '' and e5 != '' and e6 != '' and e7 != '':
            con = sqlite3.connect('coffe.sqlite')
            cur = con.cursor()
            print(e1, e2, e3, e4, e5, e6, e7)
            cur.execute("""UPDATE Coffe SET "id" = ?, "name of the variety" = ?, "degree of roasting" = ?, "Type" = ?, "description of taste" = ?, "price" = ?, "volume of packaging" = ? WHERE "id" = ?""", [e1, e2, e3, e7, e6, e5, e4, e])
            con.commit()
else:
self.pushButton.setText(("Изменить (Не заполнено)"))
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec())
|
import tweepy as tw
from config import TWITTER_CONFIG
class TwitterConsumer:
def __init__(self):
auth = tw.OAuthHandler(TWITTER_CONFIG["consumer_key"], TWITTER_CONFIG["consumer_secret"])
auth.set_access_token(TWITTER_CONFIG["key"], TWITTER_CONFIG["secret_key"])
self.__api = tw.API(auth, wait_on_rate_limit=True)
def count_citations(self, term):
        tweets = self.__api.search(q=term,
                                   tweet_mode="extended")
return len(tweets)
|
import json
import boto3
from pytube import YouTube
import botocore.vendored.requests.packages.urllib3 as urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def lambda_handler(event, context):
ACCESS_KEY_ID = 'AKIAIP7E6DYJUA6FZXEQ'
ACCESS_SECRET_KEY = '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'
BUCKET_NAME = 'seektube'
yt_id=event["queryStringParameters"]['id']
yt_url = "https://www.youtube.com/watch?v=" + yt_id
yt = YouTube(yt_url)
stream = yt.streams.filter(progressive=True, file_extension="mp4").all()[-1]
key = yt_id + '.mp4'
s3 = boto3.client("s3",aws_access_key_id=ACCESS_KEY_ID,aws_secret_access_key=ACCESS_SECRET_KEY)
http = urllib3.PoolManager()
s3.upload_fileobj(http.request("GET", stream.url, preload_content = False), BUCKET_NAME, key)
message = {
"id": yt_id,
"title": yt.title,
"resolution": stream.resolution,
"mime_type": stream.mime_type
}
return {
"statusCode": 200,
"body": json.dumps(message),
"headers": {
"Access-Control-Allow-Origin": "*",
"Content-Type": "application/json"
}
}
|
from crontab import CronTab
"""
Here the object can take two parameters one for setting
the user cron jobs, it defaults to the current user
executing the script if ommited. The fake_tab parameter
sets a testing variable. So you can print what could be
written to the file onscreen instead or writting directly
into the crontab file.
"""
tab = CronTab('root')
cmd = 'echo +1 > cron_output'
# You can even set a comment for this command
cron_job = tab.new(cmd, comment='This is the main command')
cron_job.minute.every(1)
#writes content to crontab
tab.write()
print(tab.render())
|
from nastran.analysis import Subcase
from typing import Dict
import numpy as np
from pyNastran.bdf.bdf import BDF
from nastran.aero.superpanels import SuperAeroPanel5, SuperAeroPanel1
from nastran.aero.analysis.flutter import FlutterSubcase, FlutterAnalysisModel
class PanelFlutterSubcase(FlutterSubcase):
def __init__(self, id, spc=None, fmethod=None, method=None,
plate_stiffness=None, vref=None, **args):
super().__init__(id, spc=spc, fmethod=fmethod, method=method, **args)
self.plate_stiffness = plate_stiffness
self.vref = vref
class PanelFlutterAnalysisModel(FlutterAnalysisModel):
"""
Class to model a panel flutter configuration in Nastran.
"""
    def __init__(self, model: BDF = None, global_case=None,
                 subcases: Dict[int, Subcase] = None,
                 params=None, diags=None, interface=None, superpanels=None):
        # Avoid a shared mutable default for `subcases`.
        super().__init__(model=model, global_case=global_case,
                         subcases=subcases if subcases is not None else {},
                         params=params, diags=diags,
                         interface=interface)
self.superpanels = superpanels if superpanels is not None else []
def add_superpanel(self, superpanel):
self.superpanels.append(superpanel)
def write_cards(self):
super().write_cards()
for spanel in self.superpanels:
self._write_superpanel_cards(spanel)
# Validate
self.model.validate()
print('Aerodynamic Flutter solution created!')
def _write_splines2_for_superpanel(self, superpanel, caeros, cords=None):
# SET and SPLINE cards
for i in range(superpanel.nchord):
# TODO: Make optional use of set2 or set1
# grid set (nodes) to the spline interpolation
# struct = spanel.aeropanels[i].structural_ids
# if type(struct) == int:
# grid_group = model.sets[struct]
# elif len(list(struct)) > 0:
# grid_group = model.add_set1(idutil.get_next_set_id(), list(struct))
# else:
# raise Exception('Structural grid set for Splines could not be created.')
grid_group = self.model.add_set2(self.idutil.get_next_set_id(), caeros[i].eid, -0.01, 1.01, -0.01, 1.01)
# Linear Spline (SPLINE2) element
self.model.add_spline2(self.idutil.get_next_spline_id(),
caero=caeros[i].eid,
# Coordinate system of the CAERO5 element
# (Y-Axis must be colinear with "Elastic Axis")
cid=0 if cords is None else cords[i].cid,
id1=caeros[i].eid,
id2=caeros[i].eid + superpanel.nspan - 1,
setg=grid_group.sid,
# Detached bending and torsion (-1 -> infinity flexibility), only Z displacement
# allowed to comply with the rigid chord necessity of the Piston Theory
# and still model the plate bending (with N chord-wise elements).
dthx=-1.,
dthy=-1.,
dz=0.)
def _write_spline1_for_superpanel(self, elements):
grid_group = self.model.add_set2(self.idutil.get_next_set_id(), elements['main'].eid, -0.01, 1.01, -0.01, 1.01)
self.model.add_spline1(self.idutil.get_next_spline_id(),
caero=elements['main'].eid,
box1=elements['main'].eid,
box2=elements['main'].eid + elements['main'].nspan * elements['main'].nchord - 1,
setg=grid_group.sid)
def _write_superpanel_cards(self, **args):
pass
class PanelFlutterPistonAnalysisModel(PanelFlutterAnalysisModel):
"""
Class to model a panel flutter configuration with Piston Theory in Nastran.
"""
def _write_superpanel_cards(self, superpanel: SuperAeroPanel5):
# AEFACT cards
thickness_integrals = self.model.add_aefact(self.idutil.get_next_aefact_id(),
superpanel.thick_int)
machs_n_alphas = self._write_machs_and_alphas(self.global_case.machs, self.global_case.alphas)
# PAERO5 card
paero = self.model.add_paero5(self.idutil.get_next_paero_id(),
caoci=superpanel.ctrl_surf,
nalpha=1,
lalpha=machs_n_alphas.sid)
caeros, cords = self._write_caero5_as_panel(superpanel, paero, thickness_integrals)
self._write_splines2_for_superpanel(superpanel, caeros, cords)
def _write_caero5_as_panel(self, superpanel: SuperAeroPanel5, paero, thickness_integrals):
# CORD2R and CAERO5 cards
# wind_x_vector = np.array([1., 0., 0.])
caeros = []
cords = []
id_increment = self.idutil.get_last_element_id()
for _, panel in superpanel.aeropanels.items():
# set origin to element mid chord (linear spline requires the Y axis to be colinear with the
# "elastic axis" of the structure, since it is a plate chord-wise divided,
# the elastic axis should be at mid chord)
origin = panel.p1 + panel.d12 / 2
# point in the XZ plane to define the coordinate system
# this hardcodes the Y axis of the local aerodynamic coordinate system
# to be colinear with the element Y axis (i.e. the vector of p1 to p4)
pxz_i = origin + panel.d12
# local aerodynamic coordinate system
cords.append(
self.model.add_cord2r(self.idutil.get_next_coord_id(),
origin,
origin + panel.normal,
pxz_i))
# CAERO5 element
caeros.append(
self.model.add_caero5(self.idutil.get_next_caero_id() + id_increment,
pid=paero.pid,
cp=0,
nspan=panel.nspan,
lspan=None,
nthick=thickness_integrals.sid,
p1=panel.p1,
x12=panel.l12,
p4=panel.p4,
x43=panel.l12,
ntheory=panel.theory)
)
id_increment = panel.nspan - 1
return caeros, cords
class PanelFlutterPistonZAEROAnalysisModel(PanelFlutterAnalysisModel):
def _write_superpanel_cards(self, superpanel: SuperAeroPanel1):
paero = self.model.add_paero1(self.idutil.get_next_paero_id())
elements = {}
last_id = self.idutil.get_last_element_id()
pot = int(np.ceil(np.log10(last_id))) + 1
# TODO: improve eid handle
main = superpanel.aeropanels['main']
left = superpanel.aeropanels['left']
right = superpanel.aeropanels['right']
elements['main'] = self.model.add_caero1(int(10 ** pot + 1),
pid=paero.pid,
nspan=main.nspan,
nchord=main.nchord,
igroup=1,
p1=main.p1,
p4=main.p4,
x12=main.l12,
x43=main.l43)
elements['left'] = self.model.add_caero1(self.idutil.get_next_caero_id() + main.nspan * main.nchord,
pid=paero.pid,
nspan=left.nspan,
nchord=left.nchord,
igroup=1,
p1=left.p1,
p4=left.p4,
x12=left.l12,
x43=left.l43)
elements['right'] = self.model.add_caero1(self.idutil.get_next_caero_id() + left.nspan * left.nchord,
pid=paero.pid,
nspan=right.nspan,
nchord=right.nchord,
igroup=1,
p1=right.p1,
p4=right.p4,
x12=right.l12,
x43=right.l43)
        # self._write_spline1_for_superpanel(elements)
        self._write_splines2_for_superpanel(superpanel, elements['main'])
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from wadebug.config import Config
class Result:
def to_dict(self):
ret = {
"class": self.action.__name__,
"user_facing_name": self.action.user_facing_name,
"result": self.result,
"short_description": self.action.short_description,
"message": self.message,
"details": self.details,
"remediation": self.remediation,
}
if Config().development_mode and hasattr(self, "traceback"):
ret["traceback"] = self.traceback
return ret
@property
def result(self):
"""String value of the result."""
return self.__class__.__name__.lower()
class OK(Result):
def to_dict(self):
return {
"class": self.action.__name__,
"user_facing_name": self.action.user_facing_name,
"result": self.__class__.__name__,
}
def __init__(self, action):
self.action = action
class _NotOK(Result):
"""Intermediate class to create cases where something is wrong."""
def __init__(self, action, message, details, remediation, traceback=None):
self.action = action
self.message = message
if isinstance(details, Exception):
self.details = str(details)
self.traceback = traceback
else:
self.details = details
self.remediation = remediation
class Warning(_NotOK):
pass
class Skipped(_NotOK):
pass
class Problem(_NotOK):
pass
class WADebugError(_NotOK):
@property
def result(self):
return "wadebug_error"
|
from copy import deepcopy
from unittest.mock import patch
from mybib.graphql.access_layer import EntityAlreadyExistsError
@patch("mybib.web.api.papers.insert_paper", autospec=True)
def test_post_inserts(
insert_paper_mock, authenticated_post, bibtex_json_multiple_authors
):
bibtex_multiple_authors, json_multiple_authors = bibtex_json_multiple_authors
insert_paper_mock.return_value = json_multiple_authors
entries = deepcopy(json_multiple_authors)
inserted_paper = entries[0]
inserted_paper["_bibtex"] = bibtex_multiple_authors
response = authenticated_post("/api/papers", data=bibtex_multiple_authors)
insert_paper_mock.assert_called_once_with(inserted_paper)
assert response.status_code == 201
@patch("mybib.web.api.papers.insert_paper", autospec=True)
def test_post_inserts_already_exist(
insert_paper_mock, authenticated_post, bibtex_json_multiple_authors
):
bibtex_multiple_authors, json_multiple_authors = bibtex_json_multiple_authors
insert_paper_mock.side_effect = EntityAlreadyExistsError()
entries = deepcopy(json_multiple_authors)
inserted_paper = entries[0]
inserted_paper["_bibtex"] = bibtex_multiple_authors
response = authenticated_post("/api/papers", data=bibtex_multiple_authors)
insert_paper_mock.assert_called_once_with(inserted_paper)
assert response.status_code == 409
|