import sys
import traceback
import battlecode as bc
#magic numbers
unmapped = 60000
impassable = 65000
def pathPlanetMap(currentMap):
mapHeight = currentMap.height
mapWidth = currentMap.width
#game map grows up and right, array grows down and right
#so all my maps are upsidedown
map = [[unmapped]*mapHeight for i in range(mapWidth)]
mL = bc.MapLocation(currentMap.planet,1,1)
for i in range(0,mapHeight):
for j in range(0,mapWidth):
if currentMap.is_passable_terrain_at(bc.MapLocation(currentMap.planet,i,j)):
map[j][i] = unmapped
else:
map[j][i] = impassable
return map
#takes one of the above processed maps for terrain,
#and the original map it was generated from which has a few more details
#like the locations of resources
def miningMap(processedMap, generatedMap):
miningMap = 0
#TODO: generate resources map
return miningMap
#takes a martian map and offers some good landing zones
def landingZone(marsMap):
coOrds = []
mapSize = len(marsMap)
for x in range(0,mapSize):
for y in range(0,mapSize):
if marsMap[x][y] == unmapped:
marsMap[x][y] = 0
coOrds.append([mapSize-x,mapSize-y])#don't forget to put the co-ords back into game format
marsMap = mapFill(marsMap,x,y)
coOrds += landingZone(marsMap)
return coOrds
def pathMap(map, x, y):
#rearrange the co-ords given to match our internal structure
sizeOf = len(map)-1
x,y = sizeOf-y,sizeOf-x
return mapFill(map,x,y)
def mapFill(map,x,y):
#define our target destination
map[x][y] = 0
#make a list of locations we have rated, to be the source of further ratings
openlocs = []
openlocs.append([x,y])
#so we know not to go off the edges
edge = len(map)
#at each location we have rated, we inspect all around it,
#any that are empty are 1 step further than us
for loc in openlocs:
i,j = loc
#orthogonal
#x-1,y
if i-1 >=0:
if map[i-1][j] == unmapped and map[i-1][j] != impassable:
map[i-1][j] = map[i][j] +1
openlocs.append([i-1,j])
#x,y+1
if j+1 < edge:
if map[i][j+1] == unmapped and map[i][j+1] != impassable:
map[i][j+1] = map[i][j] +1
openlocs.append([i,j+1])
#x+1,y
if i+1 < edge:
if map[i+1][j] == unmapped and map[i+1][j] != impassable:
map[i+1][j] = map[i][j] +1
openlocs.append([i+1,j])
#x,y-1
if j-1 >= 0:
if map[i][j-1] == unmapped and map[i][j-1] != impassable:
map[i][j-1] = map[i][j] +1
openlocs.append([i,j-1])
#diagonal
#x-1,y+1
if i-1 >=0 and j+1 < edge:
if map[i-1][j+1] == unmapped and map[i-1][j+1] != impassable:
map[i-1][j+1] = map[i][j] +1
openlocs.append([i-1,j+1])
#x+1,y+1
if i+1 < edge and j+1 < edge:
if map[i+1][j+1] == unmapped and map[i+1][j+1] != impassable:
map[i+1][j+1] = map[i][j] +1
openlocs.append([i+1,j+1])
#x+1,y-1
if i+1 < edge and j-1 >=0:
if map[i+1][j-1] == unmapped and map[i+1][j-1] != impassable:
map[i+1][j-1] = map[i][j] +1
openlocs.append([i+1,j-1])
#x,y-1
if j-1 >= 0:
if map[i][j-1] == unmapped and map[i][j-1] != impassable:
map[i][j-1] = map[i][j] +1
openlocs.append([i,j-1])
#x-1,y-1
if j-1 >= 0 and i-1 >= 0:
if map[i-1][j-1] == unmapped and map[i-1][j-1] != impassable:
map[i-1][j-1] = map[i][j] +1
openlocs.append([i-1,j-1])
#print(map)
return map
#given a map (which should already have pathfinding values) and my current location, which direction do I move?
#return list of directions, starting with best
def whereShouldIGo(map,x,y):
furthest = len(map)
sortedLocations = moveMaxDistance(map,x,y,furthest)
return sortedLocations
def moveMaxDistance(map,x,y,maxDist):
sizeOf = len(map)-1
x,y = sizeOf-y,sizeOf-x
#print(x)
#print(y)
#print(sizeOf-x)
#print(sizeOf-y)
nearbyLocations = []
edge = len(map)
for i in range(-1,2):
for j in range(-1,2):
if x+i>=0 and y+j>=0 and x+i<edge and y+j<edge and map[x+i][y+j]<65000 and map[x+i][y+j]<maxDist:
nearbyLocations.append([i,j,map[x+i][y+j]])
for i in range(1,len(nearbyLocations)):
for j in range(1,len(nearbyLocations)):
if nearbyLocations[j][2] < nearbyLocations[j-1][2]:
nearbyLocations[j], nearbyLocations[j-1] = nearbyLocations[j-1], nearbyLocations[j]
#since the map is inverted, +i is south, -i is north
#+j is west, -j is east (see the Direction mapping below)
sortedLocations = []
for location in nearbyLocations:
if location[0] == 1:#south?
if location[1] == 0:
sortedLocations.append(bc.Direction.South)
if location[1] == 1:
sortedLocations.append(bc.Direction.Southwest)
if location[1] == -1:
sortedLocations.append(bc.Direction.Southeast)
if location[0] == 0:
if location[1] == 1:
sortedLocations.append(bc.Direction.West)
if location[1] == -1:
sortedLocations.append(bc.Direction.East)
if location[0] == -1:
if location[1] == 0:
sortedLocations.append(bc.Direction.North)
if location[1] == 1:
sortedLocations.append(bc.Direction.Northwest)
if location[1] == -1:
sortedLocations.append(bc.Direction.Northeast)
#print(nearbyLocations[0])
#print(sortedLocations)
return sortedLocations
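#Minimal sketch (not part of the original bot): mapFill only needs a square grid of ints,
#so the flood fill can be tried without a running game (the battlecode package must still
#be importable). The 3x3 grid below is made up for illustration; whereShouldIGo still needs
#the engine for bc.Direction, so only the fill is shown here.
if __name__ == '__main__':
    demoGrid = [[unmapped, unmapped, impassable],
                [unmapped, impassable, unmapped],
                [unmapped, unmapped, unmapped]]
    filled = mapFill(demoGrid, 0, 0)
    #each passable cell now holds its step distance from (0,0); impassable cells stay at 65000
    for row in filled:
        print(row)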
|
# coding: utf-8
import os, errno
import shutil
import re
import time
from enum import Enum
import pickle
import string
from systemtools.location import isFile, getDir, isDir, sortedGlob, decomposePath, tmpDir
from systemtools.basics import getRandomStr
class TIMESPENT_UNIT(Enum):
DAYS = 1
HOURS = 2
MINUTES = 3
SECONDS = 4
def getLastModifiedTimeSpent(path, timeSpentUnit=TIMESPENT_UNIT.HOURS):
diff = time.time() - os.path.getmtime(path)
if timeSpentUnit == TIMESPENT_UNIT.SECONDS:
return diff
diff = diff / 60.0
if timeSpentUnit == TIMESPENT_UNIT.MINUTES:
return diff
diff = diff / 60.0
if timeSpentUnit == TIMESPENT_UNIT.HOURS:
return diff
diff = diff / 24.0
if timeSpentUnit == TIMESPENT_UNIT.DAYS:
return diff
def purgeOldFiles(pattern, maxTimeSpent, timeSpentUnit=TIMESPENT_UNIT.SECONDS):
allPlugins = sortedGlob(pattern)
for current in allPlugins:
timeSpent = getLastModifiedTimeSpent(current, timeSpentUnit)
if timeSpent > maxTimeSpent:
removeFile(current)
def strToFileName(*args, **kwargs):
return strToFilename(*args, **kwargs)
def strToFilename(text):
"""
https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename
"""
text = text.replace(" ", "_")
valid_chars = "-_.()%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in text if c in valid_chars)
def serialize(obj, path):
with open(path, 'wb') as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def deserialize(path):
with open(path, 'rb') as handle:
return pickle.load(handle)
def getAllNumbers(text):
"""
This function is a copy of systemtools.basics.getAllNumbers
"""
if text is None:
return None
allNumbers = []
if len(text) > 0:
# Remove space between digits :
spaceNumberExists = True
while spaceNumberExists:
text = re.sub('(([^.,0-9]|^)[0-9]+) ([0-9])', '\\1\\3', text, flags=re.UNICODE)
if re.search('([^.,0-9]|^)[0-9]+ [0-9]', text) is None:
spaceNumberExists = False
numberRegex = '[-+]?[0-9]+[.,][0-9]+|[0-9]+'
allMatchIter = re.finditer(numberRegex, text)
if allMatchIter is not None:
for current in allMatchIter:
currentFloat = current.group()
currentFloat = re.sub("\s", "", currentFloat)
currentFloat = re.sub(",", ".", currentFloat)
currentFloat = float(currentFloat)
if currentFloat.is_integer():
allNumbers.append(int(currentFloat))
else:
allNumbers.append(currentFloat)
return allNumbers
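# Example (illustrative): getAllNumbers("12 apples cost 3,5 euros") returns [12, 3.5];
# commas between digits are read as decimal points, and whole numbers come back as ints.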
def mkdir(path):
mkdirIfNotExists(path)
def mkdirIfNotExists(path):
"""
This function makes dirs recursively, like mkdir -p in bash
"""
os.makedirs(path, exist_ok=True)
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
def replaceInFile(path, listSrc, listRep):
with open(path, 'r') as f :
filedata = f.read()
for i in range(len(listSrc)):
src = listSrc[i]
rep = listRep[i]
filedata = filedata.replace(src, rep)
with open(path, 'w') as f:
f.write(filedata)
def fileExists(filePath):
return os.path.exists(filePath)
def globRemove(globPattern):
filesPaths = sortedGlob(globPattern)
removeFiles(filesPaths)
def removeFile(path):
if not isinstance(path, list):
path = [path]
for currentPath in path:
try:
os.remove(currentPath)
except OSError:
pass
def removeFiles(path):
removeFile(path)
def removeAll(path):
removeFile(path)
def fileToStr(path, split=False):
if split:
return fileToStrList(path)
else:
with open(path, 'r') as myfile:
data = myfile.read()
return data
def fileToStrList_old(path, strip=True):
data = fileToStr(path)
if strip:
data = data.strip()
return data.splitlines()
def fileToStrList(*args, removeDuplicates=False, **kwargs):
result = fileToStrListYielder(*args, **kwargs)
if removeDuplicates:
return list(set(list(result)))
else:
return list(result)
def basicLog(text, logger, verbose):
if verbose:
if text is not None and text != "":
if logger is None:
print(text)
else:
logger.info(text)
def fileToStrListYielder(path,
strip=True,
skipBlank=True,
commentStart="###",
logger=None,
verbose=True):
if path is not None and isFile(path):
commentCount = 0
with open(path) as f:
for line in f.readlines():
isComment = False
if strip:
line = line.strip()
if commentStart is not None and len(commentStart) > 0 and line.startswith(commentStart):
commentCount += 1
isComment = True
if not isComment:
if skipBlank and len(line) == 0:
pass
else:
yield line
if verbose and commentCount > 0:
basicLog("We found " + str(commentCount) + " comments in " + path, logger, verbose)
else:
if verbose:
basicLog(str(path) + " file not found.", logger, verbose)
def removeIfExists(path):
try:
os.remove(path)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occurred
def removeIfExistsSecure(path, slashCount=5):
if path.count('/') >= slashCount:
removeIfExists(path)
def removeTreeIfExists(path):
shutil.rmtree(path, True)
def removeTreeIfExistsSecure(path, slashCount=5):
if path.count('/') >= slashCount:
removeTreeIfExists(path)
def strListToTmpFile(theList, *args, **kwargs):
text = ""
for current in theList:
text += current + "\n"
return strToTmpFile(text, *args, **kwargs)
def strToTmpFile(text, name=None, ext="", addRandomStr=False, *args, **kwargs):
if text is None:
text = ""
if ext is None:
ext = ""
if ext != "":
if not ext.startswith("."):
ext = "." + ext
if name is None:
name = getRandomStr()
elif addRandomStr:
name += "-" + getRandomStr()
path = tmpDir(*args, **kwargs) + "/" + name + ext
strToFile(text, path)
return path
def strToFile(text, path):
# if not isDir(getDir(path)) and isDir(getDir(text)):
# path, text = text, path
if isinstance(text, list):
text = "\n".join(text)
textFile = open(path, "w")
textFile.write(text)
textFile.close()
def normalizeNumericalFilePaths(globRegex):
"""
This function takes a glob pattern and renames file1.json file2.json ... file20.json
to file01.json file02.json ... file20.json so the folder sorts correctly by file name
"""
# We get all paths:
allPaths = sortedGlob(globRegex)
allNumbers = []
# We get all ints:
for path in allPaths:
# Get the filename without extension:
(dir, filename, ext, filenameExt) = decomposePath(path)
# Get all numbers:
currentNumbers = getAllNumbers(filename)
# Check if we have a int first:
if currentNumbers is None or len(currentNumbers) == 0:
print("A filename has no number.")
return False
firstNumber = currentNumbers[0]
if not isinstance(firstNumber, int):
print("A filename has no float as first number.")
return False
# Add it in the list:
allNumbers.append(firstNumber)
# Get the max int:
maxInt = max(allNumbers)
# Calculate the number of digits:
digitCountHasToBe = len(str(maxInt))
# Replace all :
i = 0
for i in range(len(allNumbers)):
currentPath = allPaths[i]
(dir, filename, ext, filenameExt) = decomposePath(currentPath)
currentInt = allNumbers[i]
currentRegex = "0*" + str(currentInt)
zerosCountToAdd = digitCountHasToBe - len(str(currentInt))
zerosStr = "0" * zerosCountToAdd
newFilename = re.sub(currentRegex, zerosStr + str(currentInt), filename, count=1)
newFilename = dir + newFilename + "." + ext
if currentPath != newFilename:
os.rename(currentPath, newFilename)
print(newFilename + " done.")
i += 1
return True
if __name__ == '__main__':
# normalizeNumericalFilePaths("/home/hayj/test/test1/*.txt")
# normalizeNumericalFilePaths("/users/modhel/hayj/NoSave/Data/TwitterArchiveOrg/Converted/*.bz2")
strToTmpFile("hoho", subDir="test", ext="txt")
strToFile("haha", tmpDir(subDir="test") + "/test.txt")
|
# -*- coding: utf-8 -*-
from dataviva import db, lm
from dataviva.apps.general.views import get_locale
from dataviva.apps.user.models import User
from dataviva.utils.encode import sha512
from dataviva.utils.send_mail import send_mail
from datetime import datetime
from dataviva.translations.dictionary import dictionary
from flask import Blueprint, render_template, g, session, redirect, jsonify, abort, Response, flash, request, url_for
from flask.ext.login import login_user, login_required
from forms import (SignupForm, ChangePasswordForm, ForgotPasswordForm, ProfileForm)
from hashlib import md5
from dataviva.apps.admin.views import required_roles
mod = Blueprint('user', __name__,
template_folder='templates',
url_prefix='/<lang_code>/user',
static_folder='static')
@mod.before_request
def before_request():
g.page_type = mod.name
@mod.url_value_preprocessor
def pull_lang_code(endpoint, values):
g.locale = values.pop('lang_code')
@mod.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', get_locale())
def _gen_confirmation_code(email):
return md5("%s-%s" % (email, datetime.now())).hexdigest()
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@mod.route('/new', methods=["POST", "GET"])
def create():
form = SignupForm()
if request.method == "POST":
if form.validate() is False:
if 'fullname' in form.errors:
return Response(form.errors['fullname'], status=400, mimetype='application/json')
if 'email' in form.errors:
return Response(form.errors['email'], status=400, mimetype='application/json')
if 'password' in form.errors:
return Response(form.errors['password'], status=400, mimetype='application/json')
return Response('Error in Form.', status=400, mimetype='application/json')
else:
if (User.query.filter_by(email=form.email.data).count() > 0):
return Response(dictionary()["email_already_exists"], status=400, mimetype='application/json')
try:
confirmation_code = _gen_confirmation_code(form.email.data)
user = User(
nickname=form.email.data.split('@')[0],
fullname=form.fullname.data,
email=form.email.data,
password=sha512(form.password.data),
confirmation_code=confirmation_code,
agree_mailer=form.agree_mailer.data
)
db.session.add(user)
db.session.commit()
except:
return Response(dictionary()["500"], status=500, mimetype='application/json')
send_confirmation(user)
message = dictionary()["check_your_inbox"] + ' ' + user.email
return Response(message, status=200, mimetype='application/json')
return render_template('user/new.html', form=form)
@mod.route('/edit', methods=["GET"])
@login_required
def edit():
form = ProfileForm()
form.profile.data = g.user.profile
form.fullname.data = g.user.fullname
form.email.data = g.user.email
form.birthday.data = g.user.birthday
form.country.data = g.user.country
form.state_province_region.data = g.user.state_province_region
form.city.data = g.user.city
form.occupation.data = g.user.occupation
form.institution.data = g.user.institution
form.agree_mailer.data = g.user.agree_mailer
return render_template("user/edit.html", form=form)
@mod.route('/edit', methods=["POST"])
@login_required
def change_profile():
form = ProfileForm()
if form.validate():
try:
user = g.user
user.profile = form.profile.data
user.fullname = form.fullname.data
user.email = form.email.data
user.birthday = form.birthday.data
user.country = form.country.data
user.state_province_region = form.state_province_region.data
user.city = form.city.data
user.occupation = form.occupation.data
user.institution = form.institution.data
user.agree_mailer = form.agree_mailer.data
db.session.commit()
flash(dictionary()["updated_profile"], "success")
except:
flash(dictionary()["500"], "danger")
return render_template("user/edit.html", form=form)
def send_confirmation(user):
confirmation_url = "%s%s/user/confirm/%s" % (request.url_root, g.locale, user.confirmation_code)
confirmation_tpl = render_template('user/mail/confirmation.html',
confirmation_url=confirmation_url)
send_mail("Account confirmation", [user.email], confirmation_tpl)
@mod.route('/confirm_pending/<user_email>', methods=["GET"])
def confirm_pending(user_email):
''' Used to inform the user that their account confirmation is pending
'''
try:
user = User.query.filter_by(email=user_email)[-1]
except IndexError:
abort(404, 'User not found')
if user.confirmed:
return redirect(url_for('general.home'))
return render_template('user/confirm_pending.html', user=user.serialize())
@mod.route('/confirm/<code>', methods=["GET"])
def confirm(code):
try:
user = User.query.filter_by(confirmation_code=code)[-1]
user.confirmed = True
db.session.commit()
login_user(user, remember=True)
flash(dictionary()["complete_profile"], "info")
except IndexError:
abort(404, 'User not found')
return redirect(url_for('user.edit'))
@mod.route('/resend_confirmation/<user_email>', methods=["GET"])
def resend_confirmation(user_email):
'''Used to regenerate the confirmation_code and resend the confirmation email to the user
'''
try:
user = User.query.filter_by(email=user_email, confirmed=False)[-1]
except IndexError:
abort(404, 'Entry not found')
user.confirmation_code = _gen_confirmation_code(user.email)
db.session.commit()
send_confirmation(user)
flash(dictionary()["check_your_inbox"] + ' ' + user_email, 'success')
return redirect(url_for('user.confirm_pending', user_email=user.email))
@mod.route('/change_password', methods=["GET"])
@login_required
def change_password():
form = ChangePasswordForm()
return render_template("user/change_password.html", form=form)
@mod.route('/change_password', methods=["POST"])
@login_required
def change():
form = ChangePasswordForm()
user = load_user(session["user_id"])
if form.validate():
if user.password == sha512(form.current_password.data):
user.password = sha512(form.new_password.data)
db.session.commit()
flash(dictionary()["updated_password"], "success")
else:
flash(dictionary()["invalid_password"], "danger")
return render_template("user/change_password.html", form=form)
@mod.route('/forgot_password', methods=["GET"])
def forgot_password():
form = ForgotPasswordForm()
return render_template("user/forgot_password.html", form=form)
@mod.route('/forgot_password', methods=["POST"])
def reset_password():
form = ForgotPasswordForm()
try:
user = User.query.filter_by(email=form.email.data)[-1]
pwd = md5(str(datetime.now()) + form.email.data).hexdigest()[0:5]
user.password = sha512(pwd)
db.session.commit()
email_tp = render_template('user/mail/forgot.html',
user=user.serialize(),
new_pwd=pwd)
send_mail("Forgot Password", [user.email], email_tp)
flash(dictionary()["new_password_sent"], "success")
except:
flash(dictionary()["couldnt_find_user"], "danger")
return render_template("user/forgot_password.html", form=form)
return redirect(url_for('user.reset_password'))
@mod.route('/admin', methods=['GET'])
@login_required
@required_roles(1)
def admin():
user = User.query.all()
return render_template('user/admin.html', user=user)
@mod.route('/all/', methods=['GET'])
def all():
result = User.query.all()
users = []
for row in result:
users += [(row.id, row.fullname, row.email, row.role)]
return jsonify(users=users)
@mod.route('/admin/users/<status>/<status_value>', methods=['POST'])
@login_required
@required_roles(1)
def admin_activate(status, status_value):
for id in request.form.getlist('ids[]'):
users = User.query.filter_by(id=id).first_or_404()
if status_value == 'true':
users.role = 1
else:
users.role = 0
db.session.commit()
message = u"Usuário(s) alterado(s) com sucesso!"
return message, 200
|
#!/usr/bin/python
################################################################################
# retrieveKEGG
# Access the KEGG API and retrieves all data available for each protein-coding
# gene of the "n" organisms specified. Creates a file for each successful query.
# Ivan Domenzain. Last edited: 2018-04-10
################################################################################
#INPUTS:
#1) Organism KEGG codes (as many as you want). Full list at:
# http://rest.kegg.jp/list/organism
organism_codes = ['sce',...,...,...]
#2) Path for storing all generated files:
output_path = '.../GECKO/databases/KEGG'
#3) Last organism processed (if the program was interrupted)
# Starting from scratch? Leave empty:
last_organism = ''
#4) Last gene entry processed (if the program was interrupted),
# Starting from scratch? Leave empty:
last_entry = ''
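#Example configuration (hypothetical values, shown for illustration only):
# organism_codes = ['sce', 'eco']
# output_path = '/home/user/GECKO/databases/KEGG'
# last_organism = ''
# last_entry = ''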
################################################################################
#retrieve_org_genesData: Function that extracts all data available
#in KEGG database for the organism in turn.
def retrieve_org_genesData(organism, last_entry):
#URL that returns the entire genes list for the organism
url = 'http://rest.kegg.jp/list/' + organism
genes_list = []
#Try/except so a query timeout does not abort execution
try:
#Stores the queried genes list as a string
data_str = urllib2.urlopen(url, timeout=20).read()
#String division into substrings for each gene. Just the entry names are
#saved on a list. Previously queried genes, if any, are removed from the list.
separator = organism + ':'
substrings = data_str.split(separator)
for i in substrings:
if i[0:i.find('\t')] not in (' ', '\0', ''):
genes_list.append(i[0:i.find('\t')])
if last_entry!='':
genes_list=genes_list[genes_list.index(last_entry):]
#Retrieves gene data; if successfully queried and a UniProt code is found
#then a .txt file is created, otherwise a warning is displayed
for gene in genes_list:
gene_query, gene_string = extract_geneEntry_data(organism, gene)
if gene_query.find('UniProt:')!=-1:
if gene_query!='':
fid = open(gene + '.txt','w')
fid.write(gene_query.decode('ascii','ignore'))
fid.close()
print 'Successfully constructed ' + gene_string + '.txt'
else:
print 'Unsuccessful query for gene ' + gene_string
else:
print 'No UniProt code for ' + gene_string
except:
print organism + ' not found or timeout exceeded'
################################################################################
#extract_geneEntry_data: Function that retrieves specific
#gene entries from KEGG
def extract_geneEntry_data(organism, gene):
#URL that returns available data of the gene entry on KEGG
gene_string = organism+ ':' + gene
url = 'http://rest.kegg.jp/get/' + gene_string
#Try/except so a query timeout does not abort execution
try:
gene_query = urllib2.urlopen(url, timeout=20).read()
except:
gene_query=''
return(gene_query, gene_string)
################################################################################
#Main script
#Get current path:
import os
prev_path = os.getcwd()
#Remove organisms already queried from the list
if last_organism!='':
organism_codes=organism_codes[organism_codes.index(last_organism):]
#extensible library for opening URLs
import urllib2
#Main loop: retrieves all genes found for every organism
for organism in organism_codes:
#Creates (if not present) a subfolder for the organism inside the
#specified output path
org_path = output_path + '/' + organism
if not os.path.exists(org_path):
os.makedirs(org_path)
#access to the created organism subfolder
os.chdir(org_path)
#gets and creates files for all the gene entries found for the organism
organism_genes=retrieve_org_genesData(organism, last_entry)
os.chdir(prev_path)
################################################################################
|
from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
image = models.ImageField(upload_to='images/', default='images/default.jpg')
bio = models.TextField(blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Image(models.Model):
image = models.ImageField(upload_to='images/')
caption = models.CharField(max_length=500)
user = models.ForeignKey(User, on_delete=models.CASCADE)
class ImageLikes(models.Model):
image = models.ForeignKey(Image, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
class ImageComments(models.Model):
image = models.ForeignKey(Image, on_delete=models.CASCADE)
image_comment = models.CharField(max_length=500)
user = models.ForeignKey(User, on_delete=models.CASCADE)
|
import re
input = open('d15.in').read()
lines = filter(None, input.split('\n'))
regex = r'^(\w+): capacity ([-\d]+), durability ([-\d]+), flavor ([-\d]+), texture ([-\d]+), calories ([-\d]+)$'
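# Example of a line this regex is written for (day 15 sample format; values illustrative):
# Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8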
ingredients = []
capacities = []
durabilities = []
flavors = []
textures = []
calorieses = []
for i, line in enumerate(lines):
ingredient, capacity, durability, flavor, texture, calories = re.findall(regex, line)[0]
ingredients.append(ingredient)
capacities.append(int(capacity))
durabilities.append(int(durability))
flavors.append(int(flavor))
textures.append(int(texture))
calorieses.append(int(calories))
score = 0
p1 = 0
p2 = 0
for i in range(0,100):
for j in range(0,100-i):
for k in range(0,100-i-j):
l = 100-i-j-k
capacity = capacities[0]*i+capacities[1]*j+capacities[2]*k+capacities[3]*l
durability = durabilities[0]*i+durabilities[1]*j+durabilities[2]*k+durabilities[3]*l
flavor = flavors[0]*i+flavors[1]*j+flavors[2]*k+flavors[3]*l
texture = textures[0]*i+textures[1]*j+textures[2]*k+textures[3]*l
calories = calorieses[0]*i+calorieses[1]*j+calorieses[2]*k+calorieses[3]*l
if capacity <= 0 or durability <= 0 or flavor <= 0 or texture <= 0:
score = 0
continue
score = capacity*durability*flavor*texture
if score > p1:
p1 = score
if score > p2 and calories == 500:
p2 = score
print("P1:", p1)
print("P2:", p2)
|
import uuid
from django.db import models
from django_countries import fields
# Annex F - Company Data Service
# SQL data model
class Company(models.Model):
company_id = models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, )
name = models.TextField(db_index=True, default=None, unique=True, editable=False, )
industry = models.TextField(db_index=True, default=None, editable=False, )
description = models.TextField(default=None, editable=False, )
exchange = models.TextField(db_index=True, default=None, editable=False, )
country = fields.CountryField(db_index=True, default=None, editable=False, )
class Meta:
db_table = 'app_companies'
|
from django.shortcuts import reverse
from rest_framework.test import APITestCase
from model_bakery import baker
from glitchtip import test_utils # pylint: disable=unused-import
class OrgTeamTestCase(APITestCase):
""" Tests nested under /organizations/ """
def setUp(self):
self.user = baker.make("users.user")
self.organization = baker.make("organizations_ext.Organization")
self.organization.add_user(self.user)
self.client.force_login(self.user)
self.url = reverse("organization-teams-list", args=[self.organization.slug])
def test_list(self):
team = baker.make("teams.Team", organization=self.organization)
other_team = baker.make("teams.Team")
res = self.client.get(self.url)
self.assertContains(res, team.slug)
self.assertNotContains(res, other_team.slug)
def test_create(self):
data = {"slug": "team"}
res = self.client.post(self.url, data)
self.assertContains(res, data["slug"], status_code=201)
def test_unauthorized_create(self):
""" Only admins can create teams for that org """
data = {"slug": "team"}
organization = baker.make("organizations_ext.Organization")
url = reverse("organization-teams-list", args=[organization.slug])
res = self.client.post(url, data)
# Not even in this org
self.assertEqual(res.status_code, 400)
admin_user = baker.make("users.user")
organization.add_user(admin_user) # First user is always admin
organization.add_user(self.user)
res = self.client.post(url, data)
# Not an admin
self.assertEqual(res.status_code, 400)
def test_invalid_create(self):
url = reverse("organization-teams-list", args=["haha"])
data = {"slug": "team"}
res = self.client.post(url, data)
self.assertEqual(res.status_code, 400)
class TeamTestCase(APITestCase):
def setUp(self):
self.user = baker.make("users.user")
self.organization = baker.make("organizations_ext.Organization")
self.organization.add_user(self.user)
self.client.force_login(self.user)
self.url = reverse("team-list")
def test_list(self):
team = baker.make("teams.Team", organization=self.organization)
other_team = baker.make("teams.Team")
res = self.client.get(self.url)
self.assertContains(res, team.slug)
self.assertNotContains(res, other_team.slug)
def test_retrieve(self):
team = baker.make("teams.Team", organization=self.organization)
url = reverse(
"team-detail", kwargs={"pk": f"{self.organization.slug}/{team.slug}",},
)
res = self.client.get(url)
self.assertContains(res, team.slug)
def test_invalid_retrieve(self):
team = baker.make("teams.Team")
url = reverse(
"team-detail", kwargs={"pk": f"{self.organization.slug}/{team.slug}",},
)
res = self.client.get(url)
self.assertEqual(res.status_code, 404)
|
# Copyright (C) 2020-2021 by TeamSpeedo@Github, < https://github.com/TeamSpeedo >.
#
# This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE >
#
# All rights reserved.
import os
import aiohttp
from main_start.core.decorators import speedo_on_cmd
from main_start.helper_func.basic_helpers import edit_or_reply, get_text
@speedo_on_cmd(
["paste"],
cmd_help={
"help": "Pastes The File Text In Nekobin!",
"example": "{ch}paste (reply to file)",
},
)
async def paste(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
tex_t = get_text(message)
message_s = tex_t
if not tex_t:
if not message.reply_to_message:
await pablo.edit(engine.get_string("NEEDS_REPLY").format("File / Text"))
return
if not message.reply_to_message.text:
file = await message.reply_to_message.download()
m_list = open(file, "r").read()
message_s = m_list
os.remove(file)
else:
message_s = message.reply_to_message.text
url = "https://hastebin.com/documents"
if not message_s:
await pablo.edit(engine.get_string("NEEDS_REPLY").format("File / Text"))
return
async with aiohttp.ClientSession() as session:
req = await session.post(url, data=message_s.encode('utf-8'), timeout=3)
resp = await req.json()
key = resp.get("key")
url = f"https://hastebin.com/{key}"
raw = f"https://hastebin.com/raw/{key}"
reply_text = engine.get_string("PASTED").format(url, raw)
await pablo.edit(reply_text)
|
ENGLISH_HELLO_PREFIX = "Hello"
def hello(name: str = None) -> str:
"""Return a personalized greeting.
Defaults to `Hello, World` if no name is passed.
"""
if not name:
name = "World"
return f"{ENGLISH_HELLO_PREFIX}, {name}"
print(hello("world"))
|
from webdiff import argparser
import tempfile
import os
from nose.tools import *
_, file1 = tempfile.mkstemp()
_, file2 = tempfile.mkstemp()
dir1 = tempfile.mkdtemp()
dir2 = tempfile.mkdtemp()
def test_file_dir_pairs():
eq_({'files': (file1, file2)}, argparser.parse([file1, file2]))
eq_({'dirs': (dir1, dir2)}, argparser.parse([dir1, dir2]))
with assert_raises(argparser.UsageError):
argparser.parse([file1, dir1])
with assert_raises(argparser.UsageError):
argparser.parse([dir2, file2])
def test_port():
eq_({'files': (file1, file2), 'port': 12345},
argparser.parse(['--port', '12345', file1, file2]))
def test_github_pull_request():
eq_({'github': {'owner': 'danvk', 'repo': 'dygraphs', 'num': 292}},
argparser.parse(['https://github.com/danvk/dygraphs/pull/292']))
eq_({'github': {'owner': 'danvk', 'repo': 'dygraphs', 'num': 292}},
argparser.parse(['https://github.com/danvk/dygraphs/pull/292/']))
eq_({'github': {'owner': 'danvk', 'repo': 'dygraphs', 'num': 292}},
argparser.parse(['https://github.com/danvk/dygraphs/pull/292/files']))
eq_({'github': {'owner': 'danvk', 'repo': 'dygraphs', 'num': 292}},
argparser.parse(['https://github.com/danvk/dygraphs/pull/292/commits']))
|
import open3d as o3d
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler
import os
import h5py
import random
def o3d_to_numpy(o3d_cloud):
"""
Converts open3d pointcloud to numpy array
"""
np_cloud_points = np.asarray(o3d_cloud.points)
np_cloud_colors = np.asarray(o3d_cloud.colors)
np_cloud_normals = np.asarray(o3d_cloud.normals)
return np_cloud_points, np_cloud_colors, np_cloud_normals
def numpy_to_o3d(np_cloud_points, np_cloud_colors=None, np_cloud_normals=None):
#create o3d pointcloud and assign it
o3d_cloud = o3d.geometry.PointCloud()
o3d_cloud.points = o3d.utility.Vector3dVector(np_cloud_points)
if np_cloud_colors is not None:
o3d_cloud.colors = o3d.utility.Vector3dVector(np_cloud_colors)
if np_cloud_normals is not None:
o3d_cloud.normals = o3d.utility.Vector3dVector(np_cloud_normals)
# o3d.visualization.draw_geometries([o3d_cloud])  #optional: visualize the rebuilt cloud
return o3d_cloud
#FEATURES
COLOR = 0
NORMAL = 1
#SAMPLING_METHODS
RANDOM_SAMPLE = 2
VOXEL_SAMPLE = 3
#NORMALIZATION_METHODS
MINMAX = 4
STANDARD = 5
MAXABS = 6
ROBUST = 7
#NONE
NONE = None
def load_dataset(file_list=[]):
"""
Loads a list of files and returns a list of o3d_clouds
"""
#return pointcloud_list
o3d_list = []
for file in file_list:
o3d_cloud = o3d.io.read_point_cloud(file)
# #just points
# if not COLOR in feature_list and not NORMAL in feature_list:
# points, _, _ = o3d_to_numpy(o3d_cloud)
# o3d_cloud = numpy_to_o3d(points)
# #just normals
# elif NORMAL in feature_list and COLOR not in feature_list:
# if o3d_cloud.has_normals():
# points, _, normals = o3d_to_numpy(o3d_cloud)
# o3d_cloud = numpy_to_o3d(points, np_cloud_normals=normals)
# #just colours
# elif COLOR in feature_list and NORMAL not in feature_list:
# points, colors, _ = o3d_to_numpy(o3d_cloud)
# o3d_cloud = numpy_to_o3d(points, np_cloud_normals=colors)
o3d_list.append(o3d_cloud)
return o3d_list
def get_labels(file_list, labels):
"""
File list only contains full file path
Returns a label for each file based on the filename. (If label exists in filename)
ex: animal_head_20201201.ply would return tuple(1, head) if head was the first label (1 index)
If a object is loaded and the label does not exist in the list (for example, animal_background_20201201.ply)
the labels will return (0, "unclassified")
NOTE: You do not need to explicitly define unclassified as a label.
"""
label_list = []
# print(labels)
for file in file_list:
filename = os.path.splitext(file)
# print("Filename", filename)
for index, item in enumerate(labels):
#checks if label exists in filename
if item.lower() in filename[0].lower():
label_list.append((index+1, item))
break
# #If not, label it as unclassified
# else:
# print("UNKNOWN ITEM", item, filename[0].lower())
# label_list.append((0, "unclassified"))
return label_list
def get_labels_auto(file_list, seperator = '_'):
"""
If files are labelled in this format:
Name_Yead_{LABEL}.ply where {LABEL} is the label or Folder_Name/{LABEL}.ply
This function will return the {LABEL} from the filename
"""
label_list = []
for file in file_list:
print(file)
last_occurence = file.rfind(seperator)+1
folder = file.rfind('/')+1
extension = file.rfind('.')
if folder > last_occurence:
label = file[folder:extension]
else:
label = file[last_occurence:extension]
print(label)
label_list.append(label)
return label_list
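#Example (illustrative paths): with the default '_' separator, "scans/animal_head.ply" yields "head",
#and "scans/head.ply" (no separator after the folder name) also yields "head".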
def combine_ply_from_folder(file_list, labels):
"""
File_list is a list of files and their paths
Labels: a list of strings you want to extract based on labels
Returns:
cloud_list = [o3d_cloud, o3d_cloud, ..., o3d_cloud] : List of o3d_Clouds
labels_list = [[0,1,2, ... , N], ..., [0,1,2, ... , N]] : List of integer lists containing IDs
This function iterates through all files in file list. It groups files by their folders and that folder creates a merged pointcloud.
In the process, it takes the file name and grabs the labels from that, and creates a label_list containing a label for each point in the pointcloud
"""
cloud_list = []
label_list = []
#Because we remove files from file_list as we fit them into groups, we want to iterate the folders until there are none left
while file_list:
for file in file_list:
group = set()
#Grabs the folder name
folder = os.path.basename(os.path.dirname(file))
#Searches entire list for files which are also in that folder
for index, search_file in enumerate(file_list):
search_folder = os.path.basename(os.path.dirname(search_file))
if search_folder == folder:
#creates a group which contains all files in the one folder
group.add(search_file)
#We now have every file in the group from the folder
print("GROUP", group)
#remove items in group from file_list
for item in group:
file_list.remove(item)
#labels of all pointclouds in group will be merged into merge_labels.
merged_labels = []
merged_cloud = o3d.geometry.PointCloud()
for index, cloud in enumerate(group):
#Load and join pointclouds
pcd_cloud = o3d.io.read_point_cloud(cloud)
merged_cloud = merged_cloud + pcd_cloud
# Grab labels from the group like (0,"Head") from Head.ply
group_labels = get_labels([cloud], labels)
# print(group_labels)
# as labels exist one-per-file, we extend the label to match points.
merged_labels.extend([group_labels[0][0]]*get_num_points(pcd_cloud))
# Create lists of merged clouds lists of labels according to the cloud
cloud_list.append(merged_cloud)
label_list.append(merged_labels)
print(label_list)
return cloud_list, label_list
def downsample_random(cloud, labels, num_points=500, print_down_labels=False, seed=None):
    """
    Downsample using the random sample method.
    num_points is the number of points to downsample to.
    Points, colors, normals and labels are sampled with one shared set of indices so they stay aligned.
    A new random seed is used on each call unless one is specified (set seed).
    It is suggested to pass an explicit num_points.
    """
    if seed is None:
        random.seed()
    else:
        random.seed(seed)
    points, colors, normals = o3d_to_numpy(cloud)
    #sample one set of indices so points, colors, normals and labels stay aligned
    indices = random.sample(range(len(points)), num_points)
    sampled_points = points[indices]
    sampled_colors = colors[indices]
    sampled_normals = normals[indices]
    sampled_labels = [labels[i] for i in indices]
    sparse_pcd = numpy_to_o3d(sampled_points, sampled_colors, sampled_normals)
    print("Before Downsample: ", cloud, end=" | ")
    print("After Downsample: Pointcloud with ", len(sparse_pcd.points), "points." )
    return sparse_pcd, sampled_labels
def downsample_voxel(cloud, labels, method=VOXEL_SAMPLE, voxel_size=0.5, print_down_labels = False):
"""
Downsamples points based on a voxel grid (3D space divided into a grid).
Inside of each grid, each point is evaluated, and finds the mean (average) x,y,z location.
The label of the mean (averaged) point inside each grid is selected by the maximum number of label occurrences (Numpy bincount and argmax).
Returns the downsampled pointcloud and a list of respective labels: o3d_cloud, list
sparse_pcd, sampled_labels
"""
sampled_labels = []
# Downsample points
min_bound = cloud.get_min_bound() - voxel_size * 0.5
max_bound = cloud.get_max_bound() + voxel_size * 0.5
#Old version
# sparse_pcd, cubics_ids = cloud.voxel_down_sample_and_trace(voxel_size, min_bound, max_bound, False)
print("Before Downsample: ", cloud, end=" | ")
sparse_pcd = cloud.voxel_down_sample_and_trace(voxel_size, min_bound, max_bound, False)
print("After Downsample: Pointcloud with ", len(sparse_pcd[0].points), "points." )
cubics_ids = sparse_pcd[1]
sparse_pcd = sparse_pcd[0]
# Downsample labels
# Solution from https://github.com/intel-isl/Open3D-PointNet2-Semantic3D/blob/master/downsample.py and modified.
for cubic_id in cubics_ids:
cubic_id = cubic_id[cubic_id != -1]
cubic_labels = []
for label in cubic_id:
cubic_labels.append(labels[label])
#Label is the maximum count of labels in voxel
sampled_labels.append(np.bincount(cubic_labels).argmax())
if print_down_labels:
print("Cubic Labels", cubic_labels, end=" -> ")
print(sampled_labels[-1])
return sparse_pcd, sampled_labels
def downsample(cloud, labels, method=VOXEL_SAMPLE, value=0.5, print_down_labels = False):
"""
Downsamples the pointcloud by the specified method. Value is the downsample parameter:
Voxel: Evenly samples a pointcloud based on spatial binning
Random: Randomly samples pointcloud to a specified number of points
"""
if method == RANDOM_SAMPLE:
sparse_pcd, sampled_labels = downsample_random(cloud, labels, num_points=value)
elif method == VOXEL_SAMPLE:
sparse_pcd, sampled_labels = downsample_voxel(cloud, labels, voxel_size=value, print_down_labels = print_down_labels)
if print_down_labels:
print("Sampled Labels", sampled_labels)
return sparse_pcd, sampled_labels
def get_bounding_box(cloud):
pass
def normalize(cloud, method=MINMAX):
"""
MINMAX: scales based on min and max between zero and one. (scaling compresses all inliers) (Bad if outliers not removed/noisy data)
STANDARD: scaling to unit variance (bad for non-normally distributed data; not recommended here)
MAXABS: Scales and translates based on max-absolute values. It does not shift/center the data, and thus does not destroy any sparsity. Identical to MINMAX on positive data. (Bad if outliers not removed/noisy data)
ROBUST: Centering and scaling based on percentiles, not influenced by a small number of very large marginal outliers. (Good if no statistical outlier cleaning was done)
Why scale? Different camera libraries measure at different scales. Kinect is mm while Realsense is in m.
"""
# for index, cloud in enumerate(pointcloud_list):
if method == MINMAX:
#normalize the points only
scaler = MinMaxScaler()
elif method == STANDARD:
scaler = StandardScaler()
elif method == MAXABS:
scaler = MaxAbsScaler()
elif method == ROBUST:
scaler = RobustScaler()
#Extract the points into numpy
points, colors, normals = o3d_to_numpy(cloud)
scaler.fit(points)
points = scaler.transform(points)
#put the normalize pointcloud back into open3d, and into the list
cloud = numpy_to_o3d(points, colors, normals)
return cloud
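#Illustrative pipeline (file names, labels and the voxel size below are made up):
#merge per-folder scans, voxel-downsample with aligned labels, then scale the coordinates:
# clouds, cloud_labels = combine_ply_from_folder(["scan1/head.ply", "scan1/body.ply"], ["head", "body"])
# sparse, sparse_labels = downsample(clouds[0], cloud_labels[0], method=VOXEL_SAMPLE, value=0.02)
# sparse = normalize(sparse, method=MINMAX)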
def test_train_split(pointcloud_list, test_split=33, seed=42):
"""
Test Split is how much (in percent) of the dataset should be turned into a testing dataset.
The training dataset will be the remainder of the split
Seed is the test/train split seed. If the same seed is used, the test-train splits will be the same (if the same split value)
42 is default because it's the answer to life and everything
"""
pass
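# A minimal sketch of what this stub could do (not the original implementation),
# reusing sklearn's train_test_split which is already imported above:
# def test_train_split(pointcloud_list, test_split=33, seed=42):
#     return train_test_split(pointcloud_list, test_size=test_split / 100.0, random_state=seed)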
def export_hdf5(filename, cloud_list, labels, point_num, max_points):
"""
test_list: list of pointclouds which are split into the test sample
train_list: list of pointclouds which are split into the train sample
test_labels: A list which contains per-point labels(int) for each pointcloud. Eg [cloud1[0,0,0 ... 0], cloud2[2,2 .. 2] ]. Each inner list contains per-point labels
"""
data_h5 = np.zeros((len(cloud_list), max_points, 3))
colors_h5 = np.zeros((len(cloud_list), max_points, 3))
normals_h5 = np.zeros((len(cloud_list), max_points, 3))
labels_h5 = np.zeros((len(cloud_list), max_points))
for index, cloud in enumerate(cloud_list):
np_cloud_points, np_cloud_colors, np_cloud_normals = o3d_to_numpy(cloud)
for point_index, point in enumerate(np_cloud_points):
# print(point)
# print(np_cloud_normals)
data_h5[index, point_index] = point
colors_h5[index, point_index] = np_cloud_colors[point_index]
normals_h5[index, point_index] = np_cloud_normals[point_index]
labels_h5[index, point_index] = labels[index][point_index]
# data_h5[:np_cloud_points.shape[0],:np_cloud_points.shape[1]] = np_cloud_points
# print(data_h5)
# print(colors_h5)
# print(normals_h5)
# point_list.append(np_cloud_points)
# color_list.append(np_cloud_colors)
# normal_list.append(np_cloud_normals)
# print(filename +"_train.h5")
train_points, test_points, train_colors, test_colors, train_normals, test_normals, train_labels, test_labels, point_num_train, point_num_test = train_test_split(data_h5, colors_h5, normals_h5, labels_h5, point_num, test_size=0.33, random_state=42)
train_filename = (filename +"_train.h5")
print(train_filename)
hdf_train = h5py.File(train_filename, "w")
#dataset = f.create_dataset("data", data = point_data)
hdf_train.create_dataset("data", data = train_points)
hdf_train.create_dataset("data_num", data = point_num_train)
# hdf_test.create_dataset("label", data = test_labels) #Here we are just saying the labels belong to only one object (stick man, raccoon...)
hdf_train.create_dataset("label_seg", data = train_labels) #?
hdf_train.create_dataset("color", data = train_colors)
hdf_train.create_dataset("normal", data = train_normals)
hdf_train.flush()
hdf_train.close()
test_filename = filename +"_test.h5"
hdf_test = h5py.File(test_filename, "w")
#dataset = f.create_dataset("data", data = point_data)
hdf_test.create_dataset("data", data = test_points)
hdf_test.create_dataset("data_num", data = point_num_test)
# hdf_test.create_dataset("label", data = test_labels) #Here we are just saying the labels belong to only one object (stick man, raccoon...)
hdf_test.create_dataset("label_seg", data = test_labels) #?
hdf_test.create_dataset("color", data = test_colors)
hdf_test.create_dataset("normal", data = test_normals)
hdf_test.flush()
hdf_test.close()
print("HDF5 DATASET COMPLETE")
# def export_
def get_max_points(pointcloud_list):
"""
Receives a list of Open3D Pointclouds
Returns the point count of the largest pointcloud (most points)
"""
max_number = 0
for cloud in pointcloud_list:
#get the number of points in the pointcloud
number = get_num_points(cloud)
if number > max_number:
max_number = number
return max_number
def get_num_points(cloud):
return np.asarray(cloud.points).shape[0]
def estimate_normals(cloud, radius=0.1, max_nn=30):
cloud.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius, max_nn))
return cloud
def write_settings(self, setting, value):
pass
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_layer_widget.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Layer(object):
def setupUi(self, Layer):
Layer.setObjectName("Layer")
Layer.resize(1030, 51)
self.horizontalLayout = QtWidgets.QHBoxLayout(Layer)
self.horizontalLayout.setObjectName("horizontalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.comboBoxVol = QtWidgets.QComboBox(Layer)
self.comboBoxVol.setObjectName("comboBoxVol")
self.gridLayout.addWidget(self.comboBoxVol, 0, 0, 1, 1)
self.comboBoxLut = QtWidgets.QComboBox(Layer)
self.comboBoxLut.setObjectName("comboBoxLut")
self.gridLayout.addWidget(self.comboBoxLut, 0, 1, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
self.retranslateUi(Layer)
QtCore.QMetaObject.connectSlotsByName(Layer)
def retranslateUi(self, Layer):
_translate = QtCore.QCoreApplication.translate
Layer.setWindowTitle(_translate("Layer", "Form"))
|
from .type import Type, Base, JSONable, AbstractJSONable
from .functions import to_json
__version__ = "0.2.1"
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 by Brian Horn, trycatchhorn@gmail.com.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Provides a general data structure for container.
"""
from abc import abstractmethod
from py_alg_dat.string_visitor import StringVisitor
__author__ = "Brian Horn"
__copyright__ = "Copyright (c) 2015 Brian Horn"
__credits__ = "Brian Horn"
__license__ = "MIT"
__version__ = "1.0.2"
__maintainer__ = "Brian Horn"
__email__ = "trycatchhorn@gmail.com"
__status__ = "Prototype"
class Container(object):
"""
The interface of a container.
"""
def __init__(self):
"""
Constructs a container and initializes its
count value to zero.
"""
self.count = 0
def __str__(self):
"""
Returns a string representation of this container
by using a visitor.
@return: String representation of the container.
@rtype: C{str}
"""
visitor = StringVisitor()
self.visit(visitor)
return "%s {%s}" % (self.__class__.__name__, str(visitor))
def __hash__(self):
"""
Returns the hash value of this container.
@return: Hash value of the container.
@rtype: C{int}
"""
result = hash(self.__class__)
for obj in self:
result = (result + hash(obj))
return result
@abstractmethod
def __iter__(self):
"""
Abstract method used to support the iterator
protocol.
"""
pass
def get_count(self):
"""
Returns the number of elements, represented by
the count field, in this container.
@return: Number of elements in the container.
@rtype: C{int}
"""
return self.count
def is_empty(self):
"""
Returns whether the container is empty.
@return: True if the container is empty, false otherwise.
@rtype: C{bool}
"""
return self.count == 0
def visit(self, visitor):
"""
Makes the specified visitor visit all the elements
in this container.
@param visitor: The visitor applied to each element.
@type: L{Visitor}
"""
for obj in self:
visitor.visit(obj)
def element(self):
"""
Generator that yields the objects in this container.
"""
for obj in self:
yield obj
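# Illustrative sketch (not part of py_alg_dat): a minimal concrete container showing
# how a subclass supplies __iter__ and maintains count for visit(), __str__ and is_empty():
# class ListContainer(Container):
#     def __init__(self, items=None):
#         super(ListContainer, self).__init__()
#         self._items = list(items or [])
#         self.count = len(self._items)
#     def __iter__(self):
#         return iter(self._items)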
|
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Segmentation model
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
from os import makedirs
from os.path import exists
import time
import tensorflow as tf
import sys
import numpy as np
import shutil
import os
# Convolution functions
from models.D3Feat import assemble_FCNN_blocks
from utils.loss import cdist, LOSS_CHOICES
# ----------------------------------------------------------------------------------------------------------------------
#
# Model Class
# \*****************/
#
class KernelPointFCNN:
def __init__(self, flat_inputs, config):
"""
Initiate the model
:param flat_inputs: List of input tensors (flatten)
:param config: configuration class
"""
# Model parameters
self.config = config
self.tensorboard_root = ''
# Path of the result folder
if self.config.saving:
if self.config.saving_path == None:
# self.saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
self.saving_path = time.strftime('results/Log_%m%d%H%M')
if self.config.is_test:
experiment_id = "D3Feat" + time.strftime('%m%d%H%M') + "test"
else:
experiment_id = "D3Feat" + time.strftime('%m%d%H%M')
snapshot_root = 'snapshot/%s' % experiment_id
os.makedirs(snapshot_root, exist_ok=True)
tensorboard_root = 'tensorboard/%s' % experiment_id
os.makedirs(tensorboard_root, exist_ok=True)
shutil.copy2(os.path.join('.', 'training_3DMatch.py'), os.path.join(snapshot_root, 'train.py'))
shutil.copy2(os.path.join('.', 'utils/trainer.py'), os.path.join(snapshot_root, 'trainer.py'))
shutil.copy2(os.path.join('.', 'models/D3Feat.py'), os.path.join(snapshot_root, 'model.py'))
shutil.copy2(os.path.join('.', 'utils/loss.py'), os.path.join(snapshot_root, 'loss.py'))
self.tensorboard_root = tensorboard_root
else:
self.saving_path = self.config.saving_path
if not exists(self.saving_path):
makedirs(self.saving_path)
########
# Inputs
########
# Sort flatten inputs in a dictionary
with tf.variable_scope('anchor_inputs'):
self.anchor_inputs = dict()
self.anchor_inputs['points'] = flat_inputs[:config.num_layers]
self.anchor_inputs['neighbors'] = flat_inputs[config.num_layers:2 * config.num_layers]
self.anchor_inputs['pools'] = flat_inputs[2 * config.num_layers:3 * config.num_layers]
self.anchor_inputs['upsamples'] = flat_inputs[3 * config.num_layers:4 * config.num_layers]
ind = 4 * config.num_layers
self.anchor_inputs['features'] = flat_inputs[ind]
ind += 1
self.anchor_inputs['batch_weights'] = flat_inputs[ind]
ind += 1
self.anchor_inputs['in_batches'] = flat_inputs[ind]
ind += 1
self.anchor_inputs['out_batches'] = flat_inputs[ind]
ind += 1
# self.anchor_inputs['augment_scales'] = flat_inputs[ind]
# ind += 1
# self.anchor_inputs['augment_rotations'] = flat_inputs[ind]
# ind += 1
# self.anchor_inputs['object_inds'] = flat_inputs[ind]
# ind += 1
self.anchor_inputs['stack_lengths'] = flat_inputs[ind]
ind += 1
self.anc_keypts_inds = tf.squeeze(flat_inputs[ind])
ind += 1
self.pos_keypts_inds = tf.squeeze(flat_inputs[ind])
ind += 1
self.anc_id = flat_inputs[ind][0]
self.pos_id = flat_inputs[ind][1]
ind += 1
self.anchor_inputs['backup_points'] = flat_inputs[ind]
if config.dataset == 'KITTI':
ind += 1
self.anchor_inputs['trans'] = flat_inputs[ind]
# self.object_inds = self.anchor_inputs['object_inds']
self.dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
########
# Layers
########
# Create layers
# with tf.device('/gpu:%d' % config.gpu_id):
with tf.variable_scope('KernelPointNetwork', reuse=False) as scope:
self.out_features, self.out_scores = assemble_FCNN_blocks(self.anchor_inputs, self.config, self.dropout_prob)
anc_keypts = tf.gather(self.anchor_inputs['backup_points'], self.anc_keypts_inds)
self.keypts_distance = cdist(anc_keypts, anc_keypts, metric='euclidean')
# self.anchor_keypts_inds, self.positive_keypts_inds, self.keypts_distance = self.anc_key, self.pos_key, self.keypts_distance
# show all the trainable variables
# all_trainable_vars = tf.trainable_variables()
# for i in range(len(all_trainable_vars)):
# print(i, all_trainable_vars[i])
########
# Losses
########
with tf.variable_scope('loss'):
# calculate the distance between anchor and positive in feature space.
positiveIDS = tf.range(tf.size(self.anc_keypts_inds))
positiveIDS = tf.reshape(positiveIDS, [tf.size(self.anc_keypts_inds)])
self.anc_features = tf.gather(self.out_features, self.anc_keypts_inds)
self.pos_features = tf.gather(self.out_features, self.pos_keypts_inds)
dists = cdist(self.anc_features, self.pos_features, metric='euclidean')
self.dists = dists
# find false negative pairs (within the safe radius).
same_identity_mask = tf.equal(tf.expand_dims(positiveIDS, axis=1), tf.expand_dims(positiveIDS, axis=0))
distance_lessthan_threshold_mask = tf.less(self.keypts_distance, config.safe_radius)
false_negative_mask = tf.logical_and(distance_lessthan_threshold_mask, tf.logical_not(same_identity_mask))
# calculate the contrastive loss using the dist
self.desc_loss, self.accuracy, self.ave_d_pos, self.ave_d_neg = LOSS_CHOICES['circle_loss'](self.dists,
positiveIDS,
pos_margin=0.1,
neg_margin=1.4,
false_negative_mask=false_negative_mask)
# calculate the score loss.
if config.det_loss_weight != 0:
self.anc_scores = tf.gather(self.out_scores, self.anc_keypts_inds)
self.pos_scores = tf.gather(self.out_scores, self.pos_keypts_inds)
self.det_loss = LOSS_CHOICES['det_loss'](self.dists, self.anc_scores, self.pos_scores, positiveIDS)
self.det_loss = tf.scalar_mul(self.config.det_loss_weight, self.det_loss)
else:
self.det_loss = tf.constant(0, dtype=self.desc_loss.dtype)
# if the number of correspondences is less than half of keypts_num, then skip
enough_keypts_num = tf.constant(0.5 * config.keypts_num)
condition = tf.less_equal(enough_keypts_num, tf.cast(tf.size(self.anc_keypts_inds), tf.float32))
def true_fn():
return self.desc_loss, self.det_loss, self.accuracy, self.ave_d_pos, self.ave_d_neg
def false_fn():
return tf.constant(0, dtype=self.desc_loss.dtype), \
tf.constant(0, dtype=self.det_loss.dtype), \
tf.constant(-1, dtype=self.accuracy.dtype), \
tf.constant(0, dtype=self.ave_d_pos.dtype), \
tf.constant(0, dtype=self.ave_d_neg.dtype)
self.desc_loss, self.det_loss, self.accuracy, self.ave_d_pos, self.ave_d_neg = tf.cond(condition, true_fn, false_fn)
# Get L2 norm of all weights
regularization_losses = [tf.nn.l2_loss(v) for v in tf.global_variables() if 'weights' in v.name]
self.regularization_loss = self.config.weights_decay * tf.add_n(regularization_losses)
self.loss = self.desc_loss + self.det_loss + self.regularization_loss
tf.summary.scalar('desc loss', self.desc_loss)
tf.summary.scalar('accuracy', self.accuracy)
tf.summary.scalar('det loss', self.det_loss)
tf.summary.scalar('d_pos', self.ave_d_pos)
tf.summary.scalar('d_neg', self.ave_d_neg)
self.merged = tf.summary.merge_all()
if self.tensorboard_root != '':
self.train_writer = tf.summary.FileWriter(self.tensorboard_root + '/train/')
self.val_writer = tf.summary.FileWriter(self.tensorboard_root + '/val/')
return
def regularization_losses(self):
#####################
        # Regularization loss
#####################
# Get L2 norm of all weights
regularization_losses = [tf.nn.l2_loss(v) for v in tf.global_variables() if 'weights' in v.name]
self.regularization_loss = self.config.weights_decay * tf.add_n(regularization_losses)
##############################
# Gaussian regularization loss
##############################
gaussian_losses = []
for v in tf.global_variables():
if 'kernel_extents' in v.name:
# Layer index
layer = int(v.name.split('/')[1].split('_')[-1])
# Radius of convolution for this layer
conv_radius = self.config.first_subsampling_dl * self.config.density_parameter * (2 ** (layer - 1))
# Target extent
target_extent = conv_radius / 1.5
gaussian_losses += [tf.nn.l2_loss(v - target_extent)]
if len(gaussian_losses) > 0:
self.gaussian_loss = self.config.gaussian_decay * tf.add_n(gaussian_losses)
else:
self.gaussian_loss = tf.constant(0, dtype=tf.float32)
#############################
# Offsets regularization loss
#############################
offset_losses = []
if self.config.offsets_loss == 'permissive':
for op in tf.get_default_graph().get_operations():
if op.name.endswith('deformed_KP'):
# Get deformed positions
deformed_positions = op.outputs[0]
# Layer index
layer = int(op.name.split('/')[1].split('_')[-1])
# Radius of deformed convolution for this layer
conv_radius = self.config.first_subsampling_dl * self.config.density_parameter * (2 ** layer)
# Normalized KP locations
KP_locs = deformed_positions / conv_radius
# Loss will be zeros inside radius and linear outside radius
# Mean => loss independent from the number of input points
radius_outside = tf.maximum(0.0, tf.norm(KP_locs, axis=2) - 1.0)
offset_losses += [tf.reduce_mean(radius_outside)]
elif self.config.offsets_loss == 'fitting':
for op in tf.get_default_graph().get_operations():
if op.name.endswith('deformed_d2'):
# Get deformed distances
deformed_d2 = op.outputs[0]
# Layer index
layer = int(op.name.split('/')[1].split('_')[-1])
# Radius of deformed convolution for this layer
KP_extent = self.config.first_subsampling_dl * self.config.KP_extent * (2 ** layer)
# Get the distance to closest input point
KP_min_d2 = tf.reduce_min(deformed_d2, axis=1)
                    # Normalize KP locations to be independent of the layer
KP_min_d2 = KP_min_d2 / (KP_extent ** 2)
# Loss will be the square distance to closest input point.
# Mean => loss independent from the number of input points
offset_losses += [tf.reduce_mean(KP_min_d2)]
if op.name.endswith('deformed_KP'):
# Get deformed positions
deformed_KP = op.outputs[0]
# Layer index
layer = int(op.name.split('/')[1].split('_')[-1])
# Radius of deformed convolution for this layer
KP_extent = self.config.first_subsampling_dl * self.config.KP_extent * (2 ** layer)
# Normalized KP locations
KP_locs = deformed_KP / KP_extent
                    # Points should not be too close to each other
for i in range(self.config.num_kernel_points):
other_KP = tf.stop_gradient(tf.concat([KP_locs[:, :i, :], KP_locs[:, i + 1:, :]], axis=1))
distances = tf.sqrt(1e-10 + tf.reduce_sum(tf.square(other_KP - KP_locs[:, i:i + 1, :]), axis=2))
repulsive_losses = tf.reduce_sum(tf.square(tf.maximum(0.0, 1.5 - distances)), axis=1)
offset_losses += [tf.reduce_mean(repulsive_losses)]
elif self.config.offsets_loss != 'none':
raise ValueError('Unknown offset loss')
if len(offset_losses) > 0:
self.offsets_loss = self.config.offsets_decay * tf.add_n(offset_losses)
else:
self.offsets_loss = tf.constant(0, dtype=tf.float32)
return self.offsets_loss + self.gaussian_loss + self.regularization_loss
def parameters_log(self):
self.config.save(self.saving_path)
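# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original model code): the loss block above
# builds a false-negative mask so that keypoint pairs which are different
# correspondences but lie within `safe_radius` of each other are not pushed
# apart by the metric loss. The same masking logic in plain NumPy, using a
# small toy distance matrix:
def _false_negative_mask_example():
    import numpy as np
    keypts_distance = np.array([[0.00, 0.05, 2.00],
                                [0.05, 0.00, 1.50],
                                [2.00, 1.50, 0.00]])
    safe_radius = 0.1
    ids = np.arange(keypts_distance.shape[0])
    same_identity = ids[:, None] == ids[None, :]
    within_radius = keypts_distance < safe_radius
    # close in space, but not the same correspondence -> treat as false negative
    return np.logical_and(within_radius, ~same_identity)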
|
import glob
import errno
import os, sys
import pickle
import numpy as np
from codecs import open as codecs_open
import re
from collections import Counter, defaultdict
UNK_TOKEN = '<unk>' # unknown word
PAD_TOKEN = '<pad>' # pad symbol
WS_TOKEN = '<ws>' # white space (for character embeddings)
RANDOM_SEED = 1234
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
def mkdir_p(path):
try:
if os.path.isdir(path):
return
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
re_html = re.compile('<.*?>')
re_num = re.compile('[0-9]')
re_punct = re.compile('[.,:|/"_\[\]()]')
re_xling_symbols = re.compile('[♫♪%–]')
re_spaces = re.compile(' {2,}')
def sanitize_char(text):
text = re_html.sub('', text)
text = re_num.sub('', text)
text = re_punct.sub('', text)
text = re_xling_symbols.sub('', text)
    text = re_spaces.sub(' ', text)
# text = re.sub('<.*?>', '', text) # html tags in subtitles
# text = re.sub('[0-9]', '', text) # arabic numbers
# text = re.sub('[.,:|/"_\[\]()]', '', text) # punctuations
# text = re.sub('[♫♪%–]', '', text) # cross-lingual symbols
    # text = re.sub(' {2,}', ' ', text) # two or more consecutive spaces
return text.strip().lower()
def char_tokenizer(text):
seq = list(sanitize_char(text))
seq = [(WS_TOKEN if x == ' ' else x) for x in seq]
return seq
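# Hedged illustration (not part of the original module): sanitize_char drops
# markup, digits and the listed punctuation, lowercases the text, and
# char_tokenizer then splits it into characters, mapping spaces to <ws>.
def _char_tokenizer_example():
    toks = char_tokenizer('Hello world!')
    assert toks == ['h', 'e', 'l', 'l', 'o', WS_TOKEN, 'w', 'o', 'r', 'l', 'd', '!']
    return toks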
def load_vocab(filename, encoding='utf-8'):
dic = dict()
with codecs_open(filename, 'r', encoding=encoding) as f:
for i, line in enumerate(f.readlines()):
if len(line.strip('\n')) > 0:
dic[line.strip('\n')] = i
return dic
def load_embedding(emb_file, vocab_file, vocab_size, encoding='utf-8'):
import gensim
print('Reading pretrained word vectors from file ...')
word2id = load_vocab(vocab_file)
word_vecs = gensim.models.KeyedVectors.load_word2vec_format(emb_file, encoding=encoding, binary=False)
emb_size = word_vecs.syn0.shape[1]
embedding = np.zeros((vocab_size, emb_size))
    for word, j in word2id.items():
if j < vocab_size:
if word in word_vecs:
embedding[j, :] = word_vecs[word]
else:
embedding[j, :] = np.random.uniform(-0.25, 0.25, emb_size)
print('Generated embeddings with shape ' + str(embedding.shape))
return embedding
def load_unicode_block():
"""
unicode block name table downloaded from
https://en.wikipedia.org/wiki/Unicode_block
"""
ret = []
path = os.path.join(THIS_DIR, 'unicode_block.tsv')
with open(path, 'r') as f:
for line in f.readlines():
l = line.strip('\n').split('\t', 1)
m = re.match(r"(U\+[A-F0-9]{4})\t(U\+[A-F0-9]{4})", l[1])
if m:
g = m.group(1, 2)
                # build '\uXXXX' escapes so the character class spans the block
                start = '\\u' + g[0][2:]
                end = '\\u' + g[1][2:]
                c = re.compile('[%s-%s]' % (start, end))
ret.append((l[0], c))
return ret
def save_vocab(data_dir, dic, max_size, encoding='utf-8'):
# save vocab
vocab_file = os.path.join(data_dir, 'vocab.txt')
with codecs_open(vocab_file, 'w', encoding=encoding) as f:
for char, idx in sorted(dic.items(), key=lambda x: x[1])[:max_size]:
f.write(char + '\n')
# save metadata
unicode_block = load_unicode_block()
def script(char):
if char not in [UNK_TOKEN, PAD_TOKEN, WS_TOKEN]:
for key, c in unicode_block:
if c.match(char):
category = key
if category in ['Common', 'Inherited']:
category = 'Others'
return category
return 'Others'
meta_file = os.path.join(data_dir, 'metadata.tsv')
with codecs_open(meta_file, 'w', encoding=encoding) as f:
f.write('Char\tScript\n')
for char, idx in sorted(dic.items(), key=lambda x: x[1])[:max_size]:
f.write(char + '\t' + script(char) + '\n')
def save(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def latest_file(filepath):
files = glob.glob(filepath + "*")
last_file = max(files, key=os.path.getctime)
return last_file
def load(path):
out = None
with open(path, 'rb') as f:
out = pickle.load(f)
return out
class VocabLoader(object):
"""Load vocabulary"""
def __init__(self, data_dir):
self.word2id = None
self.max_sent_len = None
self.class_names = None
self.restore(data_dir)
def restore(self, data_dir):
from utils import load
class_file = os.path.join(data_dir, 'preprocess.cPickle')
restore_params = load(class_file)
self.class_names = restore_params['class_names']
self.max_sent_len = restore_params['max_sent_len']
print('Loaded target classes (length %d).' % len(self.class_names))
vocab_file = os.path.join(data_dir, 'vocab.txt')
self.word2id = load_vocab(vocab_file)
print('Loaded vocabulary (size %d).' % len(self.word2id))
def text2id(self, raw_text, auto_trim=True):
"""
Generate id data from one raw sentence
"""
if not self.max_sent_len:
raise Exception('max_sent_len is not set.')
if not self.word2id:
raise Exception('word2id is not set.')
max_sent_len = self.max_sent_len
toks = char_tokenizer(raw_text)
toks_len = len(toks)
pad_left = 0
pad_right = 0
if toks_len <= max_sent_len:
pad_left = int(int((max_sent_len - toks_len)) / int(2))
pad_right = int(np.ceil((max_sent_len - toks_len) / 2.0))
else:
if auto_trim:
toks = toks[:max_sent_len]
toks_len = len(toks)
else:
return None
toks_ids = [1 for _ in range(pad_left)] + \
[self.word2id[t] if t in self.word2id else 0 for t in toks] + \
[1 for _ in range(pad_right)]
return toks_ids
class TextReader(object):
"""Read raw text"""
def __init__(self, data_dir, class_names):
self.data_dir = data_dir
self.class_names = list(set(class_names))
self.num_classes = len(set(class_names))
self.data_files = None
self.init()
def init(self):
if not os.path.exists(self.data_dir):
sys.exit('Data directory does not exist.')
self.set_filenames()
def set_filenames(self):
data_files = {}
for f in os.listdir(self.data_dir):
f = os.path.join(self.data_dir, f)
if os.path.isfile(f):
chunks = f.split('.')
class_name = chunks[-1]
if class_name in self.class_names:
data_files[class_name] = f
assert data_files
self.data_files = data_files
def prepare_dict(self, vocab_size=10000, encoding='utf-8'):
max_sent_len = 0
c = Counter()
# store the preprocessed raw text to avoid cleaning it again
self.tok_text = defaultdict(list)
for label in self.data_files:
f = self.data_files[label]
with codecs_open(f, 'r', encoding=encoding) as infile:
for line in infile:
toks = char_tokenizer(line)
if len(toks) > max_sent_len:
max_sent_len = len(toks)
for t in toks:
c[t] += 1
self.tok_text[label].append(' '.join(toks))
total_words = len(c)
assert total_words >= vocab_size
word_list = [p[0] for p in c.most_common(vocab_size - 2)]
word_list.insert(0, PAD_TOKEN)
word_list.insert(0, UNK_TOKEN)
self.word2freq = c
self.word2id = dict()
self.max_sent_len = max_sent_len
for idx, w in enumerate(word_list):
self.word2id[w] = idx
save_vocab(self.data_dir, self.word2id, vocab_size)
print('%d words found in training set. Truncated to vocabulary size %d.' % (total_words, vocab_size))
print('Max sentence length in data is %d.' % (max_sent_len))
return
def generate_id_data(self):
self.id_text = defaultdict(list)
for label in self.tok_text:
sequences = self.tok_text[label]
for seq in sequences:
toks = seq.split()
toks_len = len(toks)
pad_left = 0
if toks_len <= self.max_sent_len:
pad_left = int((self.max_sent_len - toks_len) / 2)
pad_right = int(np.ceil((self.max_sent_len - toks_len) / 2.0))
else:
continue
toks_ids = [1 for _ in range(pad_left)] \
+ [self.word2id[t] if t in self.word2id else 0 for t in toks] \
+ [1 for _ in range(pad_right)]
self.id_text[label].append(toks_ids)
return
def shuffle_and_split(self, test_size=50, shuffle=True):
train_x = []
train_y = []
test_x = []
test_y = []
for label in self.id_text:
sequences = self.id_text[label]
length = len(sequences)
train_size = length - test_size
if shuffle:
np.random.seed(RANDOM_SEED)
permutation = np.random.permutation(length)
sequences = [sequences[i] for i in permutation]
# one-hot encoding
label_id = [0] * self.num_classes
label_id[self.class_names.index(label)] = 1
test_x.extend(sequences[train_size:])
test_y.extend([label_id for _ in range(test_size)])
train_x.extend(sequences[:train_size])
train_y.extend([label_id for _ in range(train_size)])
assert len(train_x) == len(train_y)
assert len(test_x) == len(test_y)
train_path = os.path.join(self.data_dir, 'train.cPickle')
test_path = os.path.join(self.data_dir, 'test.cPickle')
save((train_x, train_y), train_path)
save((test_x, test_y), test_path)
print('Split dataset into train/test set: %d for training, %d for evaluation.' % (len(train_y), len(test_y)))
return len(train_y), len(test_y)
def prepare_data(self, vocab_size=10000, test_size=50, shuffle=True):
# test_size <- per class
self.prepare_dict(vocab_size)
self.generate_id_data()
train_size, test_size = self.shuffle_and_split(test_size, shuffle)
# test_size <- total
preprocess_log = {
'vocab_size': vocab_size,
'class_names': self.class_names,
'max_sent_len': self.max_sent_len,
'test_size': test_size,
'train_size': train_size
}
preprocess_path = os.path.join(self.data_dir, 'preprocess.cPickle')
save(preprocess_log, preprocess_path)
return
class DataLoader(object):
"""Load preprocessed data"""
def __init__(self, data_dir, filename, batch_size=100, shuffle=True):
from utils import load
self._x = None
self._y = None
self.shuffle = shuffle
self.load_and_shuffle(data_dir, filename)
self._pointer = 0
self._num_examples = len(self._x)
self.batch_size = batch_size if batch_size > 0 else self._num_examples
self.num_batch = int(np.ceil(self._num_examples / float(self.batch_size)))
self.sent_len = len(self._x[0])
self.num_classes = len(self._y[0])
self.class_names = load(os.path.join(data_dir, 'preprocess.cPickle'))['class_names']
assert len(self.class_names) == self.num_classes
print('Loaded target classes (length %d).' % len(self.class_names))
print('Loaded data with %d examples. %d examples per batch will be used.' % \
(self._num_examples, self.batch_size))
def load_and_shuffle(self, data_dir, filename):
from utils import load
_x, _y = load(os.path.join(data_dir, filename))
assert len(_x) == len(_y)
if self.shuffle:
np.random.seed(RANDOM_SEED)
permutation = np.random.permutation(len(_y))
_x = np.array(_x)[permutation]
_y = np.array(_y)[permutation]
self._x = np.array(_x)
self._y = np.array(_y)
return
def next_batch(self):
if self.batch_size + self._pointer >= self._num_examples:
batch_x, batch_y = self._x[self._pointer:], self._y[self._pointer:]
return batch_x, batch_y
self._pointer += self.batch_size
return (self._x[self._pointer - self.batch_size:self._pointer],
self._y[self._pointer - self.batch_size:self._pointer])
def reset_pointer(self):
self._pointer = 0
if self.shuffle:
np.random.seed(RANDOM_SEED)
permutation = np.random.permutation(self._num_examples)
self._x = self._x[permutation]
self._y = self._y[permutation]
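# Hedged usage sketch (not part of the original module): a typical epoch loop
# over DataLoader, assuming a data directory that already contains the
# preprocessed train.cPickle and preprocess.cPickle files.
def _dataloader_epoch_example(data_dir='data'):
    loader = DataLoader(data_dir, 'train.cPickle', batch_size=32)
    for _ in range(loader.num_batch):
        batch_x, batch_y = loader.next_batch()
        # ... feed batch_x / batch_y to the model here ...
    loader.reset_pointer()  # reshuffle and start the next epoch from the beginning
    return loader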
|
from collections import defaultdict
from pandas import DataFrame
from diagrams.base import *
def format_time(data, column_name):
data[column_name + "_f"] = (data[column_name] / 1000).round(2)
return data
def add_highest_scheduling_difference(data):
column_names = data.columns.values
scheduled_columns = list(filter(lambda s: s.startswith("Scheduled"), column_names))
def applyFunc(r):
scheduled = get_values(r, scheduled_columns)
scheduled = list(filter(lambda v: v != 0, scheduled))
return max(scheduled) - min(scheduled)
data["scheduling_delay"] = data.apply(applyFunc, axis=1)
return data
def add_end_time_difference(data):
column_names = data.columns.values
end_columns = list(filter(lambda s: s.startswith("AlgoEnd"), column_names))
def Max(r):
ends = get_values(r, end_columns)
ends = list(filter(lambda v: v != 0, ends))
return max(ends)
def Min(r):
ends = get_values(r, end_columns)
ends = list(filter(lambda v: v != 0, ends))
return min(ends)
def average(r):
ends = get_values(r, end_columns)
ends = list(filter(lambda v: v != 0, ends))
return sum(ends) / len(ends)
data["max_end"] = data.apply(Max, axis=1)
data["min_end"] = data.apply(Min, axis=1)
data["avg_end"] = data.apply(average, axis=1)
data["skew"] = data["max_end"] - data["avg_end"]
data["skew-rel"] = data["skew"] / data["wcoj_time"]
return data
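# Hedged illustration (not part of the original script): add_end_time_difference
# expects one "AlgoEnd-<worker>" column per worker plus a "wcoj_time" column,
# and assumes get_values(row, cols) returns the row's values for those columns.
# For a single row with AlgoEnd-0=10, AlgoEnd-1=14 and wcoj_time=20 it would add
# max_end=14, min_end=10, avg_end=12, skew=2 and skew-rel=0.1.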
def add_taks_skew(data):
column_names = data.columns.values
tasks_columns = list(filter(lambda s: s.startswith("Tasks"), column_names))
def applyFunc(r):
ends = get_values(r, tasks_columns)
ends = list(filter(lambda v: v != 0, ends))
return max(ends) / min(ends)
def max_index(r):
ends = get_values(r, tasks_columns)
ends = list(filter(lambda v: v != 0, ends))
max_v = max(ends)
max_i = ends.index(max_v)
return max_i
def Max(r):
ends = get_values(r, tasks_columns)
ends = list(filter(lambda v: v != 0, ends))
return max(ends)
def Min(r):
ends = get_values(r, tasks_columns)
ends = list(filter(lambda v: v != 0, ends))
return min(ends)
data["task_skew"] = data.apply(applyFunc, axis=1)
data["most_tasks"] = data.apply(max_index, axis=1)
data["max_tasks"] = data.apply(Max, axis=1)
data["min_tasks"] = data.apply(Min, axis=1)
return data
def read_data(csv_file):
data = pd.read_csv(csv_file, sep=",", comment="#")
fix_count(data)
split_partitioning(data)
data = data[data["partitioning_base"] == WORKSTEALING]
data = add_wcoj_time(data)
data = add_end_time_difference(data)
return data
def output_table(output_filename, data):
grouped = data.groupby(["Query", "Parallelism"])
median = grouped.median()
parallelism = [16, 32, 48, 64, 96]
queries = ["3-clique", "4-clique", "5-clique"]
columns = ["Query"]
for p in parallelism:
columns.append("skew-%i" % p)
rows = defaultdict(lambda: list())
for q in queries:
rows["Query"].append(q)
for p in parallelism:
rows["skew-%i" % p].append("%.1f / %.2f" % (median["skew"][q][p] / 1000, median["skew-rel"][q][p] * 100))
table_frame = DataFrame(rows)
table_frame.to_latex(buf=open(output_filename, "w"),
columns=columns, header=["Query"] + list(map(lambda p: "%i [s] / [%%]" % p, parallelism)),
index=False)
DATASET_LIVEJ = DATASET_FOLDER + "final/graphWCOJ-scaling/liveJ-scaling.csv"
DATASET_ORKUT = DATASET_FOLDER + "final/graphWCOJ-scaling/orkut-scaling.csv"
data = read_data(DATASET_ORKUT)
output_table(GENERATED_PATH + "skew-orkut.tex", data)
data = read_data(DATASET_LIVEJ)
output_table(GENERATED_PATH + "skew-liveJ.tex", data)
# for i in range(0, 96):
# data = format_time(data, "worker-time-" + str(i))
#
# worker_times = data[data.columns.intersection(list(filter(lambda n: n.startswith("worker-time-") and n.endswith("_f"),
# data.columns.values)))]
# worker_times.to_csv("worker-times.csv")
# p = 48
# f = data[data["Query"] == "5-clique"]
# f = f[f["Parallelism"] == p]
# f = f[f["partitioning_base"] == WORKSTEALING]
#
# x = list(range(p))
# keys = f.keys().get_values()
# for r1 in f.itertuples():
# print("row")
# r = dict(zip(keys, r1[1:]))
# duration = r["max_end"] - r["min_end"]
# if (duration != 0):
# # tasks_range = r["max_tasks"] - r["min_tasks"]
# print(duration)
# # print(tasks_range)
# time_ratios = []
# # task_ratios = []
# for w in range(p):
# time_ratios.append(float(r["AlgoEnd-%i" % w] - r["min_end"]) / duration)
# # task_ratios.append(float(r["Tasks-%i" % w] - r["min_tasks"]) / tasks_range)
#
# # ratios = zip(time_ratios, task_ratios)
# ratios = sorted(time_ratios)
# plt.scatter(x, ratios, marker="d")
# # plt.scatter(x, list(map(lambda t: t[1], ratios)), marker="o")
#
#
# plt.tight_layout()
# plt.show()
|
from .models import User
from django.utils.translation import gettext, gettext_lazy as _
from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation',
widget=forms.PasswordInput
)
class Meta:
model = User
fields = ('email', 'phone')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField(
label=_("Password"),
help_text=_(
'Raw passwords are not stored, so there is no way to see this '
'user’s password, but you can change the password using '
'<a href=\"../password/\">this form</a>.'
),
)
class Meta:
model = User
fields = ('email', 'phone')
def clean_password(self):
return self.initial["password"]
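# Hedged sketch (not part of the original app): these forms are typically wired
# into the Django admin through a custom ModelAdmin, along the lines of:
#
#   from django.contrib import admin
#   from django.contrib.auth.admin import UserAdmin
#
#   class CustomUserAdmin(UserAdmin):
#       add_form = UserCreationForm
#       form = UserChangeForm
#       model = User
#
#   admin.site.register(User, CustomUserAdmin)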
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__title__ = ''
__author__ = 'xuzhao'
__email__ = 'xuzhao@zhique.design'
from django.contrib.auth import get_user_model
from rest_framework import serializers
User = get_user_model()
LOGIN_TYPE = (
    ('account', '账户密码'),  # account + password
    ('email', '邮箱验证码')  # e-mail verification code
)
class UserSerializer(serializers.ModelSerializer):
"""用户模列化"""
class Meta:
model = User
exclude = ('password',)
read_only_fields = ('avatar', 'last_login', 'last_login_ip', 'active')
|
'''
Created on Mar 12, 2019
@author: lhadhazy
'''
from sklearn.base import TransformerMixin
from sklearn import preprocessing
class StandardScaler(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
scaler = preprocessing.StandardScaler()
return scaler.fit_transform(X)
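# Hedged note (not part of the original code): the wrapper above re-fits a new
# StandardScaler on every transform() call, so train and test data are scaled
# with different statistics. The conventional scikit-learn pattern learns the
# statistics once in fit() and reuses them, roughly:
#
#   class FittedStandardScaler(TransformerMixin):
#       def fit(self, X, y=None):
#           self.scaler_ = preprocessing.StandardScaler().fit(X)
#           return self
#       def transform(self, X):
#           return self.scaler_.transform(X)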
|
from django.shortcuts import get_object_or_404
from paypal.standard.models import ST_PP_COMPLETED
from paypal.standard.ipn.signals import valid_ipn_received
from orders.models import Order
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from django.conf import settings
import weasyprint
from io import BytesIO
# TODO: still have to check what this does and how
def payment_notification(sender, **kwargs):
ipn_object = sender
if ipn_object.payment_status == ST_PP_COMPLETED:
# payment was successful
order = get_object_or_404(Order, id=ipn_object.invoice)
# mark the order as paid
order.paid = True
order.save()
        # create invoice e-mail
subject = 'My Shop - Invoice no. {}'.format(order.id)
message = 'Please, find attached the invoice for your recent purchase.'
email = EmailMessage(subject,
message,
'devenc234@gmail.com',
[order.email])
# generate PDF
html = render_to_string('orders/order/pdf.html', {'order': order})
out = BytesIO()
stylesheets = [weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')]
weasyprint.HTML(string=html).write_pdf(out,
stylesheets=stylesheets)
# attach PDF file
email.attach('order_{}.pdf'.format(order.id),
out.getvalue(),
'application/pdf')
# send e-mail
email.send()
valid_ipn_received.connect(payment_notification)
|
"""
"""
import time
import numpy as np
import matplotlib.pyplot as plt
import os
os.chdir("/Users/wham/Dropbox/_code/python/vessel_segmentation")
import vessel_sim_commands as vsm
import tree_processing as tp
import sklearn.decomposition as decomp
data_objects = []
data_cols = []
counter = 0
num_samples = 5
num_pts_list = [100,200,300]
data_cols_list = ["r","g","b"]
m_fig, m_ax = plt.subplots(nrows= 3,ncols = 3)
for i in range(len(num_pts_list)):
for k in range(num_samples):
num_pts = num_pts_list[i]
#run the simulation
print("starting sim {}/{}".format(counter,300))
tic = time.time()
max_iter = 1000
num_iter = 1
growth_type_para = 1
if growth_type_para:
growth_type = "average"
else:
growth_type = "nearest"
        fovea_radius = 0.2
        lens_depth = 0.3
D_step_para = 0.3
shell_vol = 4.*np.pi*0.5
approx_cover_rad = 0.1*np.sqrt((shell_vol/num_pts)*(3./4.)/np.pi)
weighted_para = False
tic = time.time()
result = vsm.vascular_growth_sim(num_iterations = num_iter,noisy = False,fovea_radius = fovea_radius,lens_depth = lens_depth,max_iter = max_iter,init_num_pts = num_pts,inner_rad = 0.7,outer_rad = 1.2, growth_type = growth_type,weighted_stepsizes = weighted_para, D_step = D_step_para,death_dist = approx_cover_rad)
toc = time.time()
print("time to complete sim with {} pts, growth type {}, and {} iters was: {:.2f}".format(num_pts,growth_type,max_iter,toc-tic))
print("step size was {:.2f}".format(D_step_para))
#save the data, draw the picture
pts = vsm.convert_from_product(result[0])
init_sample = vsm.convert_from_product(result[-1])/1.2
branches = result[1]
branch_membership = result[2]
new_branches, new_branch_membership = vsm.restrict_branches(pts,branches,branch_membership)
vein_radii = vsm.get_vein_radii(len(pts),new_branches,init_radii = 0.2,branch_power = 3)
#save the first three sims
if k <= 2:
#draw the image
for br in new_branches:
#isolate the branch pieces below the xy axes
if len(br)>0:
m_ax[i,k].plot(pts[br,0],pts[br,1],c="k",linewidth = np.mean(vein_radii[br]))
#rescale everything
m_ax[i,k].set_xlim([-1.0,1.0])
m_ax[i,k].set_ylim([-1.0,1.0])
#take away boundary buffers?
m_ax[i,k].axis('off')
c_circle = [0.6/1.2,0.0]; r_circle = 0.15
plot_pts = np.array([[r_circle*np.cos(t)+c_circle[0],r_circle*np.sin(t)+c_circle[1]] for t in np.linspace(-np.pi,np.pi,100)])
m_ax[i,k].plot(plot_pts[:,0],plot_pts[:,1])
#run the TMD filtration
tree = tp.sim_to_tree(pts,branches)
tree_inout = tp.tree_to_inout_nbrs(tree)
leaves = tp.get_leaves(tree_inout)
#getting the radial distance to the root
f_vals_test = [np.linalg.norm(tree_inout[0][1] - obj[1]) for obj in tree_inout]
tmd_test = tp.TMD(tree_inout, f_vals_test)
bc = np.array(tmd_test)
test_im = tp.unweighted_persistent_image(tmd_test, sigma = 0.05, bounds = [[0.,np.max(bc)],[0.,np.max(bc)]],num_grid_pts_edge = 40)
data_objects.append(test_im)
data_cols.append(data_cols_list[i])
#update the counter for our own keeping track purposes
counter +=1
#show the sample images
plt.show()
flattened_data = [np.ravel(obj) for obj in data_objects]
# center the persistence images on their mean before running PCA
flattened_data_mean = sum([np.ravel(obj) for obj in data_objects]) / len(data_objects)
flattened_data = [obj - flattened_data_mean for obj in flattened_data]
#run PCA on the persistent surfaces
pca_embed = decomp.PCA(n_components = 2)
pca2 = pca_embed.fit_transform(np.array(flattened_data))
plt.scatter(pca2[:,0],pca2[:,1],c = data_cols); plt.show()
#visualize some of the heat maps
m_fig, m_ax = plt.subplots(nrows= 3,ncols = 3)
for i in range(len(num_pts_list)):
for k in range(3):
m_ax[i,k].imshow(data_objects[i*num_samples + k])
plt.show()
|
"""Test for the testing module"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from pytest import raises
from imblearn.base import SamplerMixin
from imblearn.utils.testing import all_estimators
from imblearn.utils.testing import warns
def test_all_estimators():
# check if the filtering is working with a list or a single string
type_filter = 'sampler'
all_estimators(type_filter=type_filter)
type_filter = ['sampler']
estimators = all_estimators(type_filter=type_filter)
for estimator in estimators:
# check that all estimators are sampler
assert issubclass(estimator[1], SamplerMixin)
# check that an error is raised when the type is unknown
type_filter = 'rnd'
with raises(ValueError, match="Parameter type_filter must be 'sampler'"):
all_estimators(type_filter=type_filter)
def test_warns():
import warnings
with warns(UserWarning, match=r'must be \d+$'):
warnings.warn("value must be 42", UserWarning)
with raises(AssertionError, match='pattern not found'):
with warns(UserWarning, match=r'must be \d+$'):
warnings.warn("this is not here", UserWarning)
with warns(UserWarning, match=r'aaa'):
warnings.warn("cccccccccc", UserWarning)
warnings.warn("bbbbbbbbbb", UserWarning)
warnings.warn("aaaaaaaaaa", UserWarning)
a, b, c = ('aaa', 'bbbbbbbbbb', 'cccccccccc')
    expected_msg = r"'{}' pattern not found in \['{}', '{}'\]".format(a, b, c)
with raises(AssertionError, match=expected_msg):
with warns(UserWarning, match=r'aaa'):
warnings.warn("bbbbbbbbbb", UserWarning)
warnings.warn("cccccccccc", UserWarning)
|
"""
Constants
~~~~~~~~~
Constants and translations used in WoT replay files and the API.
"""
MAP_EN_NAME_BY_ID = {
"01_karelia": "Karelia",
"02_malinovka": "Malinovka",
"04_himmelsdorf": "Himmelsdorf",
"05_prohorovka": "Prokhorovka",
"07_lakeville": "Lakeville",
"06_ensk": "Ensk",
"11_murovanka": "Murovanka",
"13_erlenberg": "Erlenberg",
"10_hills": "Mines",
"15_komarin": "Komarin",
"18_cliff": "Cliff",
"19_monastery": "Abbey",
"28_desert": "Sand River",
"35_steppes": "Steppes",
"37_caucasus": "Mountain Pass",
"33_fjord": "Fjords",
"34_redshire": "Redshire",
"36_fishing_bay": "Fisherman's Bay",
"38_mannerheim_line": "Arctic Region",
"08_ruinberg": "Ruinberg",
"14_siegfried_line": "Siegfried Line",
"22_slough": "Swamp",
"23_westfeld": "Westfield",
"29_el_hallouf": "El Halluf",
"31_airfield": "Airfield",
"03_campania": "Province",
"17_munchen": "Widepark",
"44_north_america": "Live Oaks",
"39_crimea": "South Coast",
"45_north_america": "Highway",
"42_north_america": "Port",
"51_asia": "Dragon Ridge",
"47_canada_a": "Serene Coast",
"85_winter": "Belogorsk-19",
"73_asia_korea": "Sacred Valley",
"60_asia_miao": "Pearl River",
"00_tank_tutorial": "Training area",
"86_himmelsdorf_winter": "Himmelsdorf Winter",
"87_ruinberg_on_fire": "Ruinberg on Fire",
"63_tundra": "Tundra",
"84_winter": "Windstorm",
"83_kharkiv": "Kharkov"
}
WOT_TANKS = {
u'A-20': {'tier': 4},
u'A-32': {'tier': 4},
u'A104_M4A3E8A': {'tier': 6},
u'A43': {'tier': 6},
u'A44': {'tier': 7},
u'AMX38': {'tier': 3},
u'AMX40': {'tier': 4},
u'AMX50_Foch': {'tier': 9},
u'AMX_105AM': {'tier': 5},
u'AMX_12t': {'tier': 6},
u'AMX_13F3AM': {'tier': 6},
u'AMX_13_75': {'tier': 7},
u'AMX_13_90': {'tier': 8},
u'AMX_50Fosh_155': {'tier': 10},
u'AMX_50_100': {'tier': 8},
u'AMX_50_120': {'tier': 9},
u'AMX_50_68t': {'tier': 10},
u'AMX_AC_Mle1946': {'tier': 7},
u'AMX_AC_Mle1948': {'tier': 8},
u'AMX_M4_1945': {'tier': 7},
u'AMX_Ob_Am105': {'tier': 4},
u'ARL_44': {'tier': 6},
u'ARL_V39': {'tier': 6},
u'AT-1': {'tier': 2},
u'Auf_Panther': {'tier': 7},
u'B-1bis_captured': {'tier': 4},
u'B1': {'tier': 4},
u'BDR_G1B': {'tier': 5},
u'BT-2': {'tier': 2},
u'BT-7': {'tier': 3},
u'BT-SV': {'tier': 3},
u'Bat_Chatillon155': {'tier': 10},
u'Bat_Chatillon155_55': {'tier': 9},
u'Bat_Chatillon25t': {'tier': 10},
u'Bison_I': {'tier': 3},
u'Ch01_Type59': {'tier': 8},
u'Ch02_Type62': {'tier': 7},
u'Ch04_T34_1': {'tier': 7},
u'Ch05_T34_2': {'tier': 8},
u'Ch06_Renault_NC31': {'tier': 1},
u'Ch07_Vickers_MkE_Type_BT26': {'tier': 2},
u'Ch08_Type97_Chi_Ha': {'tier': 3},
u'Ch09_M5': {'tier': 4},
u'Ch10_IS2': {'tier': 7},
u'Ch11_110': {'tier': 8},
u'Ch12_111_1_2_3': {'tier': 9},
u'Ch14_T34_3': {'tier': 8},
u'Ch15_59_16': {'tier': 6},
u'Ch16_WZ_131': {'tier': 7},
u'Ch17_WZ131_1_WZ132': {'tier': 8},
u'Ch18_WZ-120': {'tier': 9},
u'Ch19_121': {'tier': 10},
u'Ch20_Type58': {'tier': 6},
u'Ch21_T34': {'tier': 5},
u'Ch22_113': {'tier': 10},
u'Ch23_112': {'tier': 8},
u'Ch24_Type64': {'tier': 6},
u'Chi_Ha': {'tier': 3},
u'Chi_He': {'tier': 4},
u'Chi_Ni': {'tier': 2},
u'Chi_Nu': {'tier': 5},
u'Chi_Nu_Kai': {'tier': 5},
u'Chi_Ri': {'tier': 7},
u'Chi_To': {'tier': 6},
u'Churchill_LL': {'tier': 5},
u'D1': {'tier': 2},
u'D2': {'tier': 3},
u'DW_II': {'tier': 4},
u'DickerMax': {'tier': 6},
u'E-100': {'tier': 10},
u'E-25': {'tier': 7},
u'E-50': {'tier': 9},
u'E-75': {'tier': 9},
u'E50_Ausf_M': {'tier': 10},
u'ELC_AMX': {'tier': 5},
u'FCM_36Pak40': {'tier': 3},
u'FCM_50t': {'tier': 8},
u'Ferdinand': {'tier': 8},
u'G101_StuG_III': {'tier': 4},
u'G103_RU_251': {'tier': 8},
u'G20_Marder_II': {'tier': 3},
u'GAZ-74b': {'tier': 4},
u'GB01_Medium_Mark_I': {'tier': 1},
u'GB03_Cruiser_Mk_I': {'tier': 2},
u'GB04_Valentine': {'tier': 4},
u'GB05_Vickers_Medium_Mk_II': {'tier': 2},
u'GB06_Vickers_Medium_Mk_III': {'tier': 3},
u'GB07_Matilda': {'tier': 4},
u'GB08_Churchill_I': {'tier': 5},
u'GB09_Churchill_VII': {'tier': 6},
u'GB10_Black_Prince': {'tier': 7},
u'GB11_Caernarvon': {'tier': 8},
u'GB12_Conqueror': {'tier': 9},
u'GB13_FV215b': {'tier': 10},
u'GB20_Crusader': {'tier': 5},
u'GB21_Cromwell': {'tier': 6},
u'GB22_Comet': {'tier': 7},
u'GB23_Centurion': {'tier': 8},
u'GB24_Centurion_Mk3': {'tier': 9},
u'GB25_Loyd_Carrier': {'tier': 2},
u'GB26_Birch_Gun': {'tier': 4},
u'GB27_Sexton': {'tier': 3},
u'GB28_Bishop': {'tier': 5},
u'GB29_Crusader_5inch': {'tier': 7},
u'GB30_FV3805': {'tier': 9},
u'GB31_Conqueror_Gun': {'tier': 10},
u'GB32_Tortoise': {'tier': 9},
u'GB39_Universal_CarrierQF2': {'tier': 2},
u'GB40_Gun_Carrier_Churchill': {'tier': 6},
u'GB42_Valentine_AT': {'tier': 3},
u'GB48_FV215b_183': {'tier': 10},
u'GB51_Excelsior': {'tier': 5},
u'GB57_Alecto': {'tier': 4},
u'GB58_Cruiser_Mk_III': {'tier': 2},
u'GB59_Cruiser_Mk_IV': {'tier': 3},
u'GB60_Covenanter': {'tier': 4},
u'GB63_TOG_II': {'tier': 6},
u'GB68_Matilda_Black_Prince': {'tier': 5},
u'GB69_Cruiser_Mk_II': {'tier': 3},
u'GB70_FV4202_105': {'tier': 10},
u'GB71_AT_15A': {'tier': 7},
u'GB72_AT15': {'tier': 8},
u'GB73_AT2': {'tier': 5},
u'GB74_AT8': {'tier': 6},
u'GB75_AT7': {'tier': 7},
u'GB76_Mk_VIC': {'tier': 2},
u'GB77_FV304': {'tier': 6},
u'GB78_Sexton_I': {'tier': 3},
u'GB79_FV206': {'tier': 8},
u'GW_Mk_VIe': {'tier': 2},
u'GW_Tiger_P': {'tier': 8},
u'G_E': {'tier': 10},
u'G_Panther': {'tier': 7},
u'G_Tiger': {'tier': 9},
u'Grille': {'tier': 5},
u'H39_captured': {'tier': 2},
u'Ha_Go': {'tier': 2},
u'Hetzer': {'tier': 4},
u'Hummel': {'tier': 6},
u'IS': {'tier': 7},
u'IS-3': {'tier': 8},
u'IS-4': {'tier': 10},
u'IS-6': {'tier': 8},
u'IS-7': {'tier': 10},
u'IS8': {'tier': 9},
u'ISU-152': {'tier': 8},
u'Indien_Panzer': {'tier': 8},
u'JagdPanther': {'tier': 7},
u'JagdPantherII': {'tier': 8},
u'JagdPzIV': {'tier': 6},
u'JagdPz_E100': {'tier': 10},
u'JagdTiger': {'tier': 9},
u'JagdTiger_SdKfz_185': {'tier': 8},
u'KV-13': {'tier': 7},
u'KV-1s': {'tier': 5},
u'KV-220': {'tier': 5},
u'KV-220_test': {'tier': 5},
u'KV-3': {'tier': 7},
u'KV-5': {'tier': 8},
u'KV1': {'tier': 5},
u'KV2': {'tier': 6},
u'KV4': {'tier': 8},
u'Ke_Ho': {'tier': 4},
u'Ke_Ni': {'tier': 3},
u'LTP': {'tier': 3},
u'Leopard1': {'tier': 10},
u'Lorraine155_50': {'tier': 7},
u'Lorraine155_51': {'tier': 8},
u'Lorraine39_L_AM': {'tier': 3},
u'Lorraine40t': {'tier': 9},
u'Lowe': {'tier': 8},
u'Ltraktor': {'tier': 1},
u'M103': {'tier': 9},
u'M10_Wolverine': {'tier': 5},
u'M12': {'tier': 7},
u'M18_Hellcat': {'tier': 6},
u'M22_Locust': {'tier': 3},
u'M24_Chaffee': {'tier': 5},
u'M24_Chaffee_GT': {'tier': 1},
u'M2_lt': {'tier': 2},
u'M2_med': {'tier': 3},
u'M36_Slagger': {'tier': 6},
u'M37': {'tier': 4},
u'M3_Grant': {'tier': 4},
u'M3_Stuart': {'tier': 3},
u'M3_Stuart_LL': {'tier': 3},
u'M40M43': {'tier': 8},
u'M41': {'tier': 5},
u'M41_Bulldog': {'tier': 7},
u'M46_Patton': {'tier': 9},
u'M48A1': {'tier': 10},
u'M4A2E4': {'tier': 5},
u'M4A3E8_Sherman': {'tier': 6},
u'M4_Sherman': {'tier': 5},
u'M53_55': {'tier': 9},
u'M5_Stuart': {'tier': 4},
u'M6': {'tier': 6},
u'M60': {'tier': 10},
u'M6A2E1': {'tier': 8},
u'M7_Priest': {'tier': 3},
u'M7_med': {'tier': 5},
u'M8A1': {'tier': 4},
u'MS-1': {'tier': 1},
u'MT25': {'tier': 6},
u'Marder_III': {'tier': 4},
u'Matilda_II_LL': {'tier': 5},
u'Maus': {'tier': 10},
u'NC27': {'tier': 1},
u'Nashorn': {'tier': 6},
u'Object263': {'tier': 10},
u'Object268': {'tier': 10},
u'Object416': {'tier': 8},
u'Object_140': {'tier': 10},
u'Object_212': {'tier': 9},
u'Object_261': {'tier': 10},
u'Object_430': {'tier': 10},
u'Object_704': {'tier': 9},
u'Object_907': {'tier': 10},
u'Panther_II': {'tier': 8},
u'Panther_M10': {'tier': 7},
u'PanzerJager_I': {'tier': 2},
u'Pershing': {'tier': 8},
u'Pro_Ag_A': {'tier': 9},
u'Pz35t': {'tier': 2},
u'Pz38_NA': {'tier': 4},
u'Pz38t': {'tier': 3},
u'PzI': {'tier': 2},
u'PzII': {'tier': 2},
u'PzIII_A': {'tier': 3},
u'PzIII_AusfJ': {'tier': 4},
u'PzIII_IV': {'tier': 5},
u'PzII_J': {'tier': 3},
u'PzII_Luchs': {'tier': 4},
u'PzIV_Hydro': {'tier': 5},
u'PzIV_schmalturm': {'tier': 6},
u'PzI_ausf_C': {'tier': 3},
u'PzV': {'tier': 7},
u'PzVI': {'tier': 7},
u'PzVIB_Tiger_II': {'tier': 8},
u'PzVI_Tiger_P': {'tier': 7},
u'PzV_PzIV': {'tier': 6},
u'PzV_PzIV_ausf_Alfa': {'tier': 6},
u'Pz_II_AusfG': {'tier': 3},
u'Pz_IV_AusfA': {'tier': 3},
u'Pz_IV_AusfD': {'tier': 4},
u'Pz_IV_AusfH': {'tier': 5},
u'Pz_Sfl_IVb': {'tier': 4},
u'Pz_Sfl_IVc': {'tier': 5},
u'R104_Object_430_II': {'tier': 9},
u'R106_KV85': {'tier': 6},
u'R107_LTB': {'tier': 7},
u'R109_T54S': {'tier': 8},
u'Ram-II': {'tier': 5},
u'RenaultBS': {'tier': 2},
u'RenaultFT': {'tier': 1},
u'RenaultFT_AC': {'tier': 2},
u'RenaultUE57': {'tier': 3},
u'RhB_Waffentrager': {'tier': 8},
u'S-51': {'tier': 7},
u'S35_captured': {'tier': 3},
u'STA_1': {'tier': 8},
u'ST_B1': {'tier': 10},
u'ST_I': {'tier': 9},
u'SU-100': {'tier': 6},
u'SU-101': {'tier': 8},
u'SU-14': {'tier': 8},
u'SU-152': {'tier': 7},
u'SU-18': {'tier': 2},
u'SU-26': {'tier': 3},
u'SU-5': {'tier': 4},
u'SU-76': {'tier': 3},
u'SU-8': {'tier': 6},
u'SU-85': {'tier': 5},
u'SU100M1': {'tier': 7},
u'SU100Y': {'tier': 6},
u'SU122A': {'tier': 5},
u'SU122_44': {'tier': 7},
u'SU122_54': {'tier': 9},
u'SU14_1': {'tier': 7},
u'SU_85I': {'tier': 5},
u'S_35CA': {'tier': 5},
u'Sherman_Jumbo': {'tier': 6},
u'Somua_Sau_40': {'tier': 4},
u'StuG_40_AusfG': {'tier': 5},
u'Sturer_Emil': {'tier': 7},
u'Sturmpanzer_II': {'tier': 4},
u'T-127': {'tier': 3},
u'T-15': {'tier': 3},
u'T-25': {'tier': 5},
u'T-26': {'tier': 2},
u'T-28': {'tier': 4},
u'T-34': {'tier': 5},
u'T-34-85': {'tier': 6},
u'T-43': {'tier': 7},
u'T-44': {'tier': 8},
u'T-46': {'tier': 3},
u'T-50': {'tier': 4},
u'T-54': {'tier': 9},
u'T-60': {'tier': 2},
u'T-70': {'tier': 3},
u'T110': {'tier': 10},
u'T110E3': {'tier': 10},
u'T110E4': {'tier': 10},
u'T14': {'tier': 5},
u'T150': {'tier': 6},
u'T18': {'tier': 2},
u'T1_Cunningham': {'tier': 1},
u'T1_E6': {'tier': 2},
u'T1_hvy': {'tier': 5},
u'T20': {'tier': 7},
u'T21': {'tier': 6},
u'T23E3': {'tier': 7},
u'T25_2': {'tier': 7},
u'T25_AT': {'tier': 7},
u'T26_E4_SuperPershing': {'tier': 8},
u'T28': {'tier': 8},
u'T28_Prototype': {'tier': 8},
u'T29': {'tier': 7},
u'T2_lt': {'tier': 2},
u'T2_med': {'tier': 2},
u'T30': {'tier': 9},
u'T32': {'tier': 8},
u'T34_hvy': {'tier': 8},
u'T37': {'tier': 6},
u'T40': {'tier': 4},
u'T49': {'tier': 8},
u'T54E1': {'tier': 9},
u'T57': {'tier': 2},
u'T57_58': {'tier': 10},
u'T62A': {'tier': 10},
u'T67': {'tier': 5},
u'T69': {'tier': 8},
u'T71': {'tier': 7},
u'T7_Combat_Car': {'tier': 2},
u'T80': {'tier': 4},
u'T82': {'tier': 3},
u'T92': {'tier': 10},
u'T95': {'tier': 9},
u'Te_Ke': {'tier': 2},
u'Tetrarch_LL': {'tier': 2},
u'Type_61': {'tier': 9},
u'VK1602': {'tier': 5},
u'VK2001DB': {'tier': 4},
u'VK2801': {'tier': 6},
u'VK3001H': {'tier': 5},
u'VK3001P': {'tier': 6},
u'VK3002DB': {'tier': 7},
u'VK3002DB_V1': {'tier': 6},
u'VK3002M': {'tier': 6},
u'VK3601H': {'tier': 6},
u'VK4502A': {'tier': 8},
u'VK4502P': {'tier': 9},
u'VK7201': {'tier': 10},
u'Valentine_LL': {'tier': 4},
u'Waffentrager_E100': {'tier': 10},
u'Waffentrager_IV': {'tier': 9},
u'Wespe': {'tier': 3},
u'_105_leFH18B2': {'tier': 5},
u'_Hotchkiss_H35': {'tier': 2},
u'_M44': {'tier': 6}
}
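# Hedged usage sketch (not part of the original module): typical lookups into
# the tables above, resolving a map's English name and a tank's tier.
def _constants_lookup_example():
    map_name = MAP_EN_NAME_BY_ID.get("05_prohorovka", "Unknown")  # 'Prokhorovka'
    tier = WOT_TANKS.get(u'T-34', {}).get('tier')  # 5
    return map_name, tier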
|
# coding: utf-8
"""
DedupeApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DedupeApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_dedupe_dedupe_summary(self, **kwargs):
"""
Return summary information about dedupe.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dedupe_dedupe_summary(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: DedupeDedupeSummary
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dedupe_dedupe_summary" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/dedupe/dedupe-summary'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DedupeDedupeSummary',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_dedupe_report(self, dedupe_report_id, **kwargs):
"""
Retrieve a report for a single dedupe job.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dedupe_report(dedupe_report_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str dedupe_report_id: Retrieve a report for a single dedupe job. (required)
:param str scope: If specified as \"effective\" or not specified, all fields are returned. If specified as \"user\", only fields with non-default values are shown. If specified as \"default\", the original values are returned.
:return: DedupeReports
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dedupe_report_id', 'scope']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dedupe_report" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dedupe_report_id' is set
if ('dedupe_report_id' not in params) or (params['dedupe_report_id'] is None):
raise ValueError("Missing the required parameter `dedupe_report_id` when calling `get_dedupe_report`")
resource_path = '/platform/1/dedupe/reports/{DedupeReportId}'.replace('{format}', 'json')
path_params = {}
if 'dedupe_report_id' in params:
path_params['DedupeReportId'] = params['dedupe_report_id']
query_params = {}
if 'scope' in params:
query_params['scope'] = params['scope']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DedupeReports',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_dedupe_reports(self, **kwargs):
"""
List dedupe reports.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dedupe_reports(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str sort: The field that will be used for sorting.
:param int begin: Restrict the query to reports at or after the given time, in seconds since the Epoch.
:param int end: Restrict the query to reports at or before the given time, in seconds since the Epoch.
:param int job_id: Restrict the query to the given job ID.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:param str job_type: Restrict the query to the given job type.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:return: DedupeReportsExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort', 'begin', 'end', 'job_id', 'resume', 'job_type', 'limit', 'dir']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dedupe_reports" % key
)
params[key] = val
del params['kwargs']
if 'limit' in params and params['limit'] < 1.0:
raise ValueError("Invalid value for parameter `limit` when calling `get_dedupe_reports`, must be a value greater than or equal to `1.0`")
resource_path = '/platform/1/dedupe/reports'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'sort' in params:
query_params['sort'] = params['sort']
if 'begin' in params:
query_params['begin'] = params['begin']
if 'end' in params:
query_params['end'] = params['end']
if 'job_id' in params:
query_params['job_id'] = params['job_id']
if 'resume' in params:
query_params['resume'] = params['resume']
if 'job_type' in params:
query_params['job_type'] = params['job_type']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'dir' in params:
query_params['dir'] = params['dir']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DedupeReportsExtended',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_dedupe_settings(self, **kwargs):
"""
Retrieve the dedupe settings.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_dedupe_settings(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: DedupeSettings
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dedupe_settings" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/dedupe/settings'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DedupeSettings',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def update_dedupe_settings(self, dedupe_settings, **kwargs):
"""
Modify the dedupe settings. All input fields are optional, but one or more must be supplied.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_dedupe_settings(dedupe_settings, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param DedupeSettingsExtended dedupe_settings: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['dedupe_settings']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_dedupe_settings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'dedupe_settings' is set
if ('dedupe_settings' not in params) or (params['dedupe_settings'] is None):
raise ValueError("Missing the required parameter `dedupe_settings` when calling `update_dedupe_settings`")
resource_path = '/platform/1/dedupe/settings'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'dedupe_settings' in params:
body_params = params['dedupe_settings']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
|
# _*_ coding:utf-8 _*_
import xadmin
from .models import Courses
# from teacheres.models import Teacheres
# class TeachersChoice(object):
# model = Teacheres
# extra = 0
# Courses
class CoursesAdmin(object):
list_display = ['name', 'coursesAbstract', 'teacherid']
search_fields = ['name']
list_filter = ['name']
    # allow editing directly in the changelist page
list_editable = ['name']
model_icon = 'fa fa-graduation-cap'
# inlines = [TeachersChoice]
xadmin.site.register(Courses, CoursesAdmin)
|
from setuptools import setup, find_packages
import selfea
VERSION = selfea.__version__
# with open("README.rst", "r") as fh:
# long_description = fh.read()
# setup(
# name="selfea",
# version=VERSION,
# author="Jay Kim",
# description="Lazy computation directed acyclic graph builder",
# long_description=long_description,
# long_description_content_type="text/x-rst",
# url="https://github.com/mozjay0619/selfea",
# license="DSB 3-clause",
# packages=find_packages(),
# install_requires=["graphviz>=0.13.2", "bokeh>=2.0.1", "scipy>=1.4.1",
# "scikit-image>=0.17.2", "numpy>=1.18.2", "pandas>=0.25.3"]
# )
setup(
name="selfea",
version=VERSION,
author="Jay Kim",
description="",
# long_description=long_description,
# long_description_content_type="text/x-rst",
url=None,
license="DSB 3-clause",
packages=find_packages(),
# install_requires=["graphviz>=0.13.2"]
)
|
import argparse
import requests
import json
from lxml import html
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_average_price(url):
url_without_sort = url.replace('&_sop=15', '')
url_completed_listings = url_without_sort + \
'&LH_Sold=1&LH_Complete=1&_sop=13&_ipg=50'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
failed = False
# Retries for handling network errors
for _ in range(5):
# print ("Retrieving %s"%(url_completed_listings))
response = requests.get(url_completed_listings,
headers=headers, verify=False)
parser = html.fromstring(response.text)
if response.status_code != 200:
failed = True
continue
else:
failed = False
break
if failed:
return []
product_listings = parser.xpath('//li[contains(@id,"results-listing")]')
raw_result_count = parser.xpath(
"//h1[contains(@class,'count-heading')]//text()")
# result_count = ''.join(raw_result_count).strip()
# print("Found {0} for {1}".format(result_count, url_completed_listings))
sum_price = 0
lowest_price = 99999
highest_price = 0
total_item_count = str(raw_result_count[0])
item_name_search_query = raw_result_count[2]
count = 0
for product in product_listings:
        # only count the first total_item_count listings; eBay appends loosely matching results beyond that
if count < int(total_item_count):
count += 1
raw_price = product.xpath(
'.//span[contains(@class,"s-item__price")]//text()')
price_float_string = raw_price[0].replace('$', '')
price_float_string = price_float_string.replace(',', '')
price_float = round(float(price_float_string), 2)
lowest_price = min(lowest_price, price_float)
highest_price = max(highest_price, price_float)
sum_price += price_float
if int(total_item_count) < len(product_listings):
if int(total_item_count) == 0:
average_price = "N/A"
lowest_price = "N/A"
highest_price = "N/A"
total_item_count = 0
else:
average_price = str(round((sum_price / int(total_item_count)), 2))
else:
if len(product_listings) == 0:
average_price = "N/A"
lowest_price = "N/A"
highest_price = "N/A"
total_item_count = 0
else:
average_price = str(round((sum_price / len(product_listings)), 2))
result = {"lowest": lowest_price, "highest": highest_price, "average": average_price,
"item_count": total_item_count, "item_name": item_name_search_query}
return result
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument('url', help='URL')
args = argparser.parse_args()
url = args.url
scraped_data = get_average_price(url)
if scraped_data:
scraped_data_json = json.dumps(scraped_data)
print(scraped_data_json)
else:
print("No data scraped")
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import os
import pathlib
import typing
import numpy as np
import pandas as pd
import requests
from google.cloud import storage
def main(
source_url: str,
year_report: str,
api_naming_convention: str,
target_file: pathlib.Path,
target_gcs_bucket: str,
target_gcs_path: str,
headers: typing.List[str],
rename_mappings: dict,
pipeline_name: str,
geography: str,
report_level: str,
concat_col: typing.List[str],
) -> None:
logging.info(
f"ACS {pipeline_name} process started at "
+ str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
)
logging.info("Creating 'files' folder")
pathlib.Path("./files").mkdir(parents=True, exist_ok=True)
json_obj_group_id = open("group_ids.json")
group_id = json.load(json_obj_group_id)
json_obj_state_code = open("state_codes.json")
state_code = json.load(json_obj_state_code)
logging.info("Extracting the data from API and loading into dataframe...")
if report_level == "national_level":
df = extract_data_and_convert_to_df_national_level(
group_id, year_report, api_naming_convention, source_url
)
elif report_level == "state_level":
df = extract_data_and_convert_to_df_state_level(
group_id, state_code, year_report, api_naming_convention, source_url
)
logging.info("Replacing values...")
df = df.replace(to_replace={"KPI_Name": group_id})
logging.info("Renaming headers...")
rename_headers(df, rename_mappings)
logging.info("Creating column geo_id...")
if geography == "censustract" or geography == "blockgroup":
df["tract"] = df["tract"].apply(pad_zeroes_to_the_left, args=(6,))
df["state"] = df["state"].apply(pad_zeroes_to_the_left, args=(2,))
df["county"] = df["county"].apply(pad_zeroes_to_the_left, args=(3,))
df = create_geo_id(df, concat_col)
logging.info("Pivoting the dataframe...")
df = df[["geo_id", "KPI_Name", "KPI_Value"]]
df = df.pivot_table(
index="geo_id", columns="KPI_Name", values="KPI_Value", aggfunc=np.sum
).reset_index()
logging.info("Reordering headers...")
df = df[headers]
logging.info(f"Saving to output file.. {target_file}")
try:
save_to_new_file(df, file_path=str(target_file))
except Exception as e:
logging.error(f"Error saving output file: {e}.")
logging.info(
f"Uploading output file to.. gs://{target_gcs_bucket}/{target_gcs_path}"
)
upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path)
logging.info(
f"ACS {pipeline_name} process completed at "
+ str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
)
def string_replace(source_url: str, replace: dict) -> str:
    # Apply every replacement cumulatively; the previous version restarted from the
    # original source_url on each iteration, so only the last substitution survived.
    for k, v in replace.items():
        source_url = source_url.replace(k, v)
    return source_url
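# Illustrative call (the URL and placeholder keys below are made-up examples, not
# values taken from this pipeline's configuration):
#   string_replace("https://api.example.com/~year_report~/acs/~group_id~",
#                  {"~year_report~": "2019", "~group_id~": "B01001"})
#   -> "https://api.example.com/2019/acs/B01001"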
def extract_data_and_convert_to_df_national_level(
group_id: dict, year_report: str, api_naming_convention: str, source_url: str
) -> pd.DataFrame:
list_temp = []
for key in group_id:
logging.info(f"reading data from API for KPI {key}...")
str1 = source_url.replace("~year_report~", year_report)
str2 = str1.replace("~group_id~", key[0:-3])
str3 = str2.replace("~row_position~", key[-3:])
source_url_new = str3.replace("~api_naming_convention~", api_naming_convention)
try:
r = requests.get(source_url_new, stream=True)
logging.info(f"Source url : {source_url_new}")
logging.info(f"status code : {r.status_code}")
if r.status_code == 200:
text = r.json()
frame = load_nested_list_into_df_without_headers(text)
frame["KPI_Name"] = key
list_temp.append(frame)
except OSError as e:
logging.info(f"error : {e}")
pass
logging.info("creating the dataframe...")
df = pd.concat(list_temp)
return df
def load_nested_list_into_df_without_headers(text: typing.List) -> pd.DataFrame:
frame = pd.DataFrame(text)
frame = frame.iloc[1:, :]
return frame
def extract_data_and_convert_to_df_state_level(
group_id: dict,
state_code: dict,
year_report: str,
api_naming_convention: str,
source_url: str,
) -> pd.DataFrame:
list_temp = []
for key in group_id:
for sc in state_code:
logging.info(f"reading data from API for KPI {key}...")
logging.info(f"reading data from API for KPI {sc}...")
str1 = source_url.replace("~year_report~", year_report)
str2 = str1.replace("~group_id~", key[0:-3])
str3 = str2.replace("~row_position~", key[-3:])
str4 = str3.replace("~api_naming_convention~", api_naming_convention)
source_url_new = str4.replace("~state_code~", sc)
try:
r = requests.get(source_url_new, stream=True)
logging.info(f"Source url : {source_url_new}")
logging.info(f"status code : {r.status_code}")
if r.status_code == 200:
text = r.json()
frame = load_nested_list_into_df_without_headers(text)
frame["KPI_Name"] = key
list_temp.append(frame)
except OSError as e:
logging.info(f"error : {e}")
pass
logging.info("creating the dataframe...")
df = pd.concat(list_temp)
return df
def create_geo_id(df: pd.DataFrame, concat_col: typing.List[str]) -> pd.DataFrame:
df["geo_id"] = ""
for col in concat_col:
df["geo_id"] = df["geo_id"] + df[col]
return df
def pad_zeroes_to_the_left(val: str, length: int) -> str:
if len(str(val)) < length:
return ("0" * (length - len(str(val)))) + str(val)
else:
return str(val)
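# Examples for pad_zeroes_to_the_left; for non-negative values this matches str.zfill:
#   pad_zeroes_to_the_left("42", 6) -> "000042"
#   pad_zeroes_to_the_left(7, 3)    -> "007"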
def rename_headers(df: pd.DataFrame, rename_mappings: dict) -> None:
rename_mappings = {int(k): str(v) for k, v in rename_mappings.items()}
df.rename(columns=rename_mappings, inplace=True)
def save_to_new_file(df: pd.DataFrame, file_path: str) -> None:
df.to_csv(file_path, index=False)
def upload_file_to_gcs(file_path: pathlib.Path, gcs_bucket: str, gcs_path: str) -> None:
storage_client = storage.Client()
bucket = storage_client.bucket(gcs_bucket)
blob = bucket.blob(gcs_path)
blob.upload_from_filename(file_path)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_url=os.environ["SOURCE_URL"],
year_report=os.environ["YEAR_REPORT"],
api_naming_convention=os.environ["API_NAMING_CONVENTION"],
target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(),
target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"],
target_gcs_path=os.environ["TARGET_GCS_PATH"],
headers=json.loads(os.environ["CSV_HEADERS"]),
rename_mappings=json.loads(os.environ["RENAME_MAPPINGS"]),
pipeline_name=os.environ["PIPELINE_NAME"],
geography=os.environ["GEOGRAPHY"],
report_level=os.environ["REPORT_LEVEL"],
concat_col=json.loads(os.environ["CONCAT_COL"]),
)
|
from django.shortcuts import render
from django.utils import timezone
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .models import Announcement
from contestsuite.settings import CACHE_TIMEOUT
# Create your views here.
class AnnouncementListView(ListView):
model = Announcement
paginate_by = 5
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
context['cache_timeout'] = CACHE_TIMEOUT
return context
class AnnouncementDetailView(DetailView):
model = Announcement
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
context['cache_timeout'] = CACHE_TIMEOUT
return context
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 21:50:08 2019
@Description : Mostly about lists
@author: manish
"""
primes = [2, 3, 5, 7]
hands = [
['J', 'Q', 'K'],
['2', '2', '2'],
['6', 'A', 'K'], # (Comma after the last element is optional)
]
print(len(hands))
print(primes,hands)
print(type(primes))
planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune']
print(planets[0])
print(planets[1:3]) # slicing is inclusive of the start index and exclusive of the end index
print(planets[:3]) # first 3 planets (indices 0, 1, 2)
print(planets[3:]) # everything from index 3 onwards
print(planets[1:-1]) # print all but the first and last
print(planets[-3:]) # last 3 planets
print(planets[-3:6]) # prints just Saturn: from index -3 (third from the end) up to, but not including, index 6
print(planets[-3:-2]) # prints Saturn again, using a negative end index
planets[:3] = ['Mur', 'Vee', 'Ur']
print(planets)
planets[:4] = ['Mercury', 'Venus', 'Earth']
print(planets)
print(len(planets))
planets.append('Pluto')
print(planets)
print(planets.pop())
print(planets)
print(planets.index('Earth'))
# Following will throw an error :ValueError: 'Pluto' is not in list
#print(planets.index('Pluto'))
print("Earth" in planets)
#Tuples
#
#Tuples are almost exactly the same as lists. They differ in just two ways.
#1: The syntax for creating them uses parentheses instead of square brackets
t = (1, 2, 3)
t = 1, 2, 3 # equivalent to above
t
#(1, 2, 3)
#2: They cannot be modified (they are immutable).
#t[0] = 100
print(len(planets))
planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune']
for planet in planets:
print(planet, end=' ') # print all on same line
multiplicands = (2, 2, 2, 3, 3, 5)
product = 1
for mult in multiplicands:
product = product * mult
print(product)
s = 'steganograpHy is the practicE of conceaLing a file, message, image, or video within another fiLe, message, image, Or video.'
msg = ''
# print all the uppercase letters in s, one at a time
for char in s:
if char.isupper():
print(char, end='')
for i in range(5):
print("Doing important work. i =", i)
i = 0
while i < 10:
print(i, end=' ')
i += 1
#List comprehensions
print('')
squares = []
for n in range(10):
squares.append(n**2)
print(squares)
squares = [n**2 for n in range(10)]
print(squares)
short_planets = [planet for planet in planets if len(planet) < 6]
print(short_planets)
# str.upper() returns an all-caps version of a string
loud_short_planets = [planet.upper() + '!' for planet in planets if len(planet) < 6]
print(loud_short_planets)
print([32 for planet in planets])
def count_negatives(nums):
# Reminder: in the "booleans and conditionals" exercises, we learned about a quirk of
# Python where it calculates something like True + True + False + True to be equal to 3.
return sum([num < 0 for num in nums])
print(count_negatives([-1,-2,-3,5,6,9,10,-100,-34]))
## List comparison, following will throw an error
#[1, 2, 3, 4] > 2
def elementwise_greater_than(L, thresh):
return [ele > thresh for ele in L]
# Turns out that range(0) == range(-1) - they're both empty. So if meals has length 0 or 1, we just won't do any iterations of our for loop.
def menu_is_boring(meals):
"""Given a list of meals served over some period of time, return True if the
same meal has ever been served two days in a row, and False otherwise.
"""
for index in range(len(meals)-1):
if meals[index] == meals[index+1]:
return True
return False
#In addition, Python's triple quote syntax for strings lets us include newlines literally (i.e. by just hitting 'Enter' on our keyboard, rather than using the special '\n' sequence). We've already seen this in the docstrings we use to document our functions, but we can use them anywhere we want to define a string.
triplequoted_hello = """hello
world"""
print(triplequoted_hello)
# Strings behave like sequences of characters, so we can even loop over them
planet = 'Pluto'  # (re)define planet so the example matches the expected output below
print([char+'! ' for char in planet])
#['P! ', 'l! ', 'u! ', 't! ', 'o! ']
#But a major way in which they differ from lists is that they are immutable. We can't modify them.
#planet[0] = 'B'
# planet.append doesn't work either
#---------------------------------------------------------------------------
#TypeError Traceback (most recent call last)
#<ipython-input-12-6ca42463b9f9> in <module>()
#----> 1 planet[0] = 'B'
# 2 # planet.append doesn't work either
#
#
#TypeError: 'str' object does not support item assignment
# ALL CAPS
claim = "Pluto is a planet!"
claim.upper()
#'PLUTO IS A PLANET!'
# all lowercase
claim.lower()
#Going between strings and lists: .split() and .join()
#str.split() turns a string into a list of smaller strings, breaking on whitespace by default. This is super useful for taking you from one big string to a list of words.
words = claim.split()
print(words)
#Occasionally you'll want to split on something other than whitespace:
datestr = '1956-01-31'
year, month, day = datestr.split('-')
#str.join() takes us in the other direction, sewing a list of strings up into one long string, using the string it was called on as a separator.
print('/'.join([month, day, year]))
position = 9
print(planet + ", you'll always be the " + str(position) + "th planet to me.")
#This is getting hard to read and annoying to type. str.format() to the rescue.
print("{}, you'll always be the {}th planet to me.".format(planet, position))
#"Pluto, you'll always be the 9th planet to me."
#Notice how we didn't even have to call str() to convert position from an int. format() takes care of that for us.
pluto_mass = 1.303 * 10**22
earth_mass = 5.9722 * 10**24
population = 52910390
# 2 significant figures for the mass, percent with 3 decimal places, commas as thousands separators
print("{} weighs about {:.2} kilograms ({:.3%} of Earth's mass). It is home to {:,} Plutonians.".format(
planet, pluto_mass, pluto_mass / earth_mass, population,
))
# Referring to format() arguments by index, starting from 0
s = """Pluto's a {0}.
No, it's a {1}.
{0}!
{1}!""".format('planet', 'dwarf planet')
print(s)
#Python has dictionary comprehensions with a syntax similar to the list comprehensions we saw in the previous tutorial.
planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune']
planet_to_initial = {planet: planet[0] for planet in planets}
print(planet_to_initial)
numbers = {'one':1, 'two':2, 'three':3}
for k in numbers:
print("{} = {}".format(k, numbers[k]))
# Get all the initials, sort them alphabetically, and put them in a space-separated string.
print(' '.join(sorted(planet_to_initial.values())))
#The very useful dict.items() method lets us iterate over the keys and values of a dictionary simultaneously. (In Python jargon, an item refers to a key, value pair)
for planet, initial in planet_to_initial.items():
print("{} begins with \"{}\"".format(planet.rjust(10), initial))
#Your function should meet the following criteria
#- Do not include documents where the keyword string shows up only as a part of a larger word. For example, if she were looking for the keyword “closed”, you would not include the string “enclosed.”
#- She does not want you to distinguish upper case from lower case letters. So the phrase “Closed the case.” would be included when the keyword is “closed”
#- Do not let periods or commas affect what is matched. “It is closed.” would be included when the keyword is “closed”. But you can assume there are no other types of punctuation.
def word_search(documents, keyword):
"""
Takes a list of documents (each document is a string) and a keyword.
Returns list of the index values into the original list for all documents
containing the keyword.
Example:
doc_list = ["The Learn Python Challenge Casino.", "They bought a car", "Casinoville"]
>>> word_search(doc_list, 'casino')
>>> [0]
"""
# list to hold the indices of matching documents
indices = []
# Iterate through the indices (i) and elements (doc) of documents
for i, doc in enumerate(documents):
# Split the string doc into a list of words (according to whitespace)
tokens = doc.split()
# Make a transformed list where we 'normalize' each word to facilitate matching.
# Periods and commas are removed from the end of each word, and it's set to all lowercase.
normalized = [token.rstrip('.,').lower() for token in tokens]
# Is there a match? If so, update the list of matching indices.
if keyword.lower() in normalized:
indices.append(i)
return indices
#Wouldn't it be great if we could refer to all the variables in the math module by themselves? i.e. if we could just refer to pi instead of math.pi or mt.pi? Good news: we can do that.
from math import *
print(pi, log(32, 2))
|
graph = dict()
graph['A'] = ['B', 'C']
graph['B'] = ['E','C', 'A']
graph['C'] = ['A', 'B', 'E','F']
graph['E'] = ['B', 'C']
graph['F'] = ['C']
matrix_elements = sorted(graph.keys())
cols = rows = len(matrix_elements)
adjacency_matrix = [[0 for x in range(rows)] for y in range(cols)]
edges_list = []
for key in matrix_elements:
for neighbor in graph[key]:
edges_list.append((key,neighbor))
print(edges_list)
for edge in edges_list:
index_of_first_vertex = matrix_elements.index(edge[0])
index_of_second_vertex = matrix_elements.index(edge[1])
adjacency_matrix[index_of_first_vertex][index_of_second_vertex] = 1
print(adjacency_matrix)
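# For the graph above (vertices sorted as A, B, C, E, F) the printed adjacency
# matrix should be:
#   [[0, 1, 1, 0, 0],
#    [1, 0, 1, 1, 0],
#    [1, 1, 0, 1, 1],
#    [0, 1, 1, 0, 0],
#    [0, 0, 1, 0, 0]]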
|
#!/bin/python3
import math
import os
import random
import re
import sys
def binary_search(left, right, n):
    # print(left, right, n)  # debug trace, disabled so it does not mix with the printed results
if left == right:
return left, False
mid = (left+right)//2
middle = scores[mid]
if middle == n:
return mid, True
elif middle > n:
return binary_search(mid+1, right, n)
elif middle < n:
return binary_search(left, mid, n)
# Complete the climbingLeaderboard function below.
def climbingLeaderboard(scores, alice):
answer = []
ranking = 1
rank_list = []
for i in range(len(scores)-1):
next_s, s = scores[i+1], scores[i]
if next_s != s:
rank_list.append(ranking)
ranking += 1
else:
rank_list.append(ranking)
rank_list.append(ranking)
for a in alice:
alice_idx, same = binary_search(0, len(scores), a)
if same:
answer.append(rank_list[alice_idx])
else:
if 0 <= alice_idx < len(scores):
answer.append(rank_list[alice_idx])
else:
answer.append(rank_list[alice_idx-1]+1)
return answer
if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
scores_count = int(input())
scores = list(map(int, input().rstrip().split()))
alice_count = int(input())
alice = list(map(int, input().rstrip().split()))
result = climbingLeaderboard(scores, alice)
    # OUTPUT_PATH/fptr is disabled above, so print the computed ranks to stdout instead
    print('\n'.join(map(str, result)))
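    # Expected stdin format (matching the input() calls above): the number of
    # leaderboard scores, the scores in decreasing order (as the binary search
    # assumes), the number of Alice's games, and Alice's scores in increasing
    # order, one line each; the resulting ranks are printed one per line.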
|
#!/usr/bin/env python
# Use Python 3
# Install dependencies: pip install tensorflow flask
# Run server: python nsfw_server.py
# Open in browser: http://localhost:8082/classify?image_path=/home/user/image.jpg
import sys
import argparse
import tensorflow as tf
from model import OpenNsfwModel, InputType
from image_utils import create_yahoo_image_loader
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/classify', methods=['GET'])
def classify():
filename = request.args["image_path"]
image = create_yahoo_image_loader()(filename)
predictions = sess.run(model.predictions, feed_dict={model.input: image})
# print("\tSFW score:\t{}\n\tNSFW score:\t{}".format(*predictions[0]))
predictions = predictions[0].tolist()
return jsonify(dict(sfw=predictions[0], nsfw=predictions[1]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=8082, help="server http port")
args = parser.parse_args()
model = OpenNsfwModel()
with tf.compat.v1.Session() as sess:
model.build(weights_path="data/open_nsfw-weights.npy", input_type=InputType["TENSOR"])
sess.run(tf.compat.v1.global_variables_initializer())
app.run(port=args.port)
|
from setuptools import setup, find_packages
install_requires = ["python-socketio==4.4.0", "Flask==1.1.1"]
setup(
name="pytest-visualizer",
use_scm_version={"write_to": "src/visual/_version.py"},
long_description=open('README.md').read(),
    license="MIT",
    install_requires=install_requires,
    setup_requires=["setuptools_scm"],
packages=find_packages(where="src"),
package_dir={"": "src"},
# the following makes a plugin available to pytest
entry_points={"pytest11": ["visual = visual.plugin"]},
# custom PyPI classifier for pytest plugins
classifiers=["Framework :: Pytest",
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
)
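# Typical local development flow for a pytest plugin packaged this way (standard
# setuptools/pytest usage; nothing here is specific to this project's tooling):
#   pip install -e .   # editable install; setuptools_scm derives the version
#   pytest             # pytest auto-loads the plugin via the "pytest11" entry point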
|
import sys, random
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
xfactor = random.randint(1,10)
yfactor = -1
count = 0
while xfactor != yfactor:
    count = count + 1
    yfactor = input("what is the number? ")
    yfactor = int(yfactor)
    if xfactor == yfactor:
        print("congrats, you got it")
    elif xfactor > yfactor:
        print("try again, choose a bigger number")
    elif xfactor < yfactor:
        print("try again, choose a smaller number")
print("number of tries: " + str(count))
|
from typing import Tuple, Optional, Type
class VSM:
init_args: Tuple = ()
def __init__(self):
self.states = []
self.current_state: 'Optional[VSM]' = None
self.help_text = ""
def enter(self, parent_state: 'VSM'):
pass
@staticmethod
def _init_state(state: 'Type[VSM]'):
args = () if not state.init_args else state.init_args
return state(*args)
def add(self, state):
self.states.append(state)
def enter_state(self, state: 'Type[VSM]'):
new_state = VSM._init_state(state)
new_state.enter(self)
self.current_state = new_state
def run(self, sm: 'VSM') -> 'Optional[Type[VSM]]':
if not self.current_state:
return
new_state = self.current_state.run(self)
if new_state:
self.enter_state(new_state)
return new_state
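# A minimal usage sketch of the VSM class above. The Start/Done states below are
# hypothetical examples, not part of any real state machine in this project: each
# state overrides run() and returns the class of the next state (or None to stay).
if __name__ == "__main__":
    class Done(VSM):
        def run(self, sm: 'VSM') -> 'Optional[Type[VSM]]':
            return None  # terminal state: no further transition

    class Start(VSM):
        def run(self, sm: 'VSM') -> 'Optional[Type[VSM]]':
            return Done  # transition straight to Done

    machine = VSM()
    machine.enter_state(Start)          # instantiate Start and make it current
    machine.run(machine)                # Start.run returns Done, so we enter Done
    print(type(machine.current_state).__name__)  # -> Done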
|
import unittest
import pandas as pd
import numpy as np
import collections as col
import copy
import parser
import rule_set as rs
from relation import *
from connection import *
class TestparserMethods(unittest.TestCase):
def test_parse_interval(self):
#good examples
inter1 = "(0.8,4.2)" #neither
inter2 = "(1.8, inf]" #right
inter3 = "]0,2]" #right
inter4 = "]0.8, inf[" #open
inter5 = "(-0.325,0[" #open
inter6 = "[-inf, -4.2]" #both
inter7 = "(-inf,inf)" #neither
inter8 = "]448,448.2)" #neither
self.assertEqual(parser.parse_interval(inter1), pd.Interval(0.8, 4.2, 'neither'))
self.assertEqual(parser.parse_interval(inter2), pd.Interval(1.8, float('inf'), 'right'))
self.assertEqual(parser.parse_interval(inter3), pd.Interval(0,2,'right'))
self.assertEqual(parser.parse_interval(inter4), pd.Interval(0.8, float('inf'), 'neither'))
self.assertEqual(parser.parse_interval(inter5), pd.Interval(-0.325, 0, 'neither'))
self.assertEqual(parser.parse_interval(inter6), pd.Interval(float('-inf'), -4.2, 'both'))
self.assertEqual(parser.parse_interval(inter7), pd.Interval(float('-inf'), float('inf'), 'neither'))
self.assertEqual(parser.parse_interval(inter8), pd.Interval(448, 448.2, 'neither'))
def test_parse_csv_mini(self):
        ''' Check that parse_csv() produces a list of rules that yields an equivalent DataFrame when used to initialize a RuleSet (column order is ignored).'''
#reference ruleset
r1 = {'AvgNightCons':pd.Interval(150.0,200.0, 'neither'),'InCommunity':False, 'Rec':'rec1'}
r2 = {'AvgDayCons':pd.Interval(30.0,100.0), 'Rec':'rec2'}
r3 = {'AvgDayCons':pd.Interval(30.0,120.0),'AvgNightCons':pd.Interval(50.0,150.0),'InCommunity':False, 'Rec':'rec2'}
rules = [r1, r2, r3]
ref = rs.RuleSet(rules)
#tested ruleset
csv_name = "data/RuleSetMini.csv"
parsed = parser.parse_csv(csv_name)
checked = rs.RuleSet(parsed)
self.assertTrue(checked.set.sort_index(axis=1).equals(ref.set.sort_index(axis=1)))
def test_parse_csv_small(self):
        ''' Check that parse_csv() produces a list of rules that yields an equivalent DataFrame when used to initialize a RuleSet (column order is ignored).'''
#reference ruleset
r1 = col.OrderedDict({'Recommendation': 'Rec1', 'A': pd.Interval(0.0,50.0, 'both'), 'B':pd.Interval(60.0,100.0, 'both'), 'E': False})
r2 = col.OrderedDict({'Recommendation': 'Rec2', 'A': pd.Interval(0.0,50.0, 'both'), 'E': True})
r3 = col.OrderedDict({'Recommendation': 'Rec3', 'A': pd.Interval(0.0,50.0, 'both'), 'E':False})
r4 = col.OrderedDict({'Recommendation': 'Rec4', 'A': float('nan'), 'C':pd.Interval(30.0,70.0, 'both')})
r5 = col.OrderedDict({'Recommendation': 'Rec1', 'A': pd.Interval(10.0,30.0, 'both'), 'D':pd.Interval(10.0,30.0, 'both'), 'E':False})
r6 = col.OrderedDict({'Recommendation': 'Rec4', 'C':pd.Interval(30.0,70.0, 'both')})
r7 = col.OrderedDict({'Recommendation': 'Rec2', 'A':pd.Interval(30.0,60.0, 'both'), 'D':pd.Interval(70.0,120.0, 'both'), 'E':False})
r8 = col.OrderedDict({'Recommendation': 'Rec3', 'A':pd.Interval(30.0,70.0, 'both'), 'B':pd.Interval(60.0,100.0, 'both'), 'E':False})
r9 = col.OrderedDict({'Recommendation': 'Rec4', 'C':pd.Interval(70.0,90.0, 'both')})
rules = [r1, r2, r3, r4, r5, r6, r7, r8, r9]
ref = rs.RuleSet(rules)
#tested ruleset
csv_name = "data/RuleSetSmall.csv"
parsed = parser.parse_csv(csv_name)
checked = rs.RuleSet(parsed)
#print(ref.set.sort_index(axis=1))
#print(checked.set.sort_index(axis=1))
self.assertTrue(checked.set.sort_index(axis=1).equals(ref.set.sort_index(axis=1)))
class TestMiniSet(unittest.TestCase):
def setUp(self):
self.csv_name = "data/RuleSetMini.csv"
self.rules = parser.parse_csv(self.csv_name)
self.ruleset = rs.RuleSet(self.rules)
def test_init(self):
self.assertEqual(self.ruleset.m,len(self.rules[0]))
self.assertEqual(self.ruleset.n,len(self.rules))
self.assertEqual(len(self.ruleset.idm),0)
self.assertEqual(len(self.ruleset.pm),0)
self.assertEqual(self.ruleset.attr_names,['Rec', 'AvgDayCons', 'AvgNightCons', 'InCommunity'])
self.assertEqual(type(self.ruleset.attr_names),list)
def test_build_IDM_PM(self):
a = 4; b = 3; c = 3
ref_IDM = np.zeros((a,b,c))
ref_IDM[0,0,1] = -1; ref_IDM[0,0,2] = -1; ref_IDM[0,1,2] = 1 #Rec
ref_IDM[1,0,1] = Relation.INCLUSION_JI.value; ref_IDM[1,0,2] = Relation.INCLUSION_JI.value; ref_IDM[1,1,2] = Relation.INCLUSION_IJ.value #AvgDayCons
ref_IDM[2,0,1] = Relation.INCLUSION_IJ.value; ref_IDM[2,0,2] = Relation.DIFFERENCE.value; ref_IDM[2,1,2] = Relation.INCLUSION_JI.value #AvgNightCons
ref_IDM[3,0,1] = Relation.INCLUSION_IJ.value; ref_IDM[3,0,2] = Relation.EQUALITY.value; ref_IDM[3,1,2] = Relation.INCLUSION_JI.value #InCommunity
ref_PM = np.zeros((b,c))
ref_PM[0,1] = -12; ref_PM[1,2] = 18
#print("---ref idm original ---")
#print(ref_IDM)
#print("---ref pm original ---")
#print(ref_PM)
#building pm with empty idm
self.assertFalse(self.ruleset.build_PM())
self.assertEqual(len(self.ruleset.pm),0)
#building idm
self.ruleset.build_IDM()
checked = self.ruleset.idm
#print("ref:")
#print(ref)
#print("checked:")
#print(checked)
for i in range(a):
for j in range(b):
for k in range(c):
self.assertEqual(checked[i,j,k],ref_IDM[i,j,k])
#building pm with existing idm
self.assertTrue(self.ruleset.build_PM())
checked = self.ruleset.pm
for i in range(b):
for j in range(c):
self.assertEqual(checked[i,j],ref_PM[i,j])
def test_connection(self):
        #Warning: hardcoded values here would need to change if the values in class Relation change
self.assertEqual(self.ruleset.connection(0,0),Connection.ERROR)
dummy_pm1 = [[0, 1, -1],
[0, 0, 0],
[0, 0, 0]]
self.ruleset.pm = dummy_pm1
self.assertEqual(self.ruleset.connection(0,0),Connection.REFERENCE)
self.assertEqual(self.ruleset.connection(0,1),Connection.EQUAL_SAME)
self.assertEqual(self.ruleset.connection(2,0),Connection.EQUAL_DIFF)
self.assertEqual(self.ruleset.connection(1,2),Connection.DISCONNECTED)
self.assertRaises(ValueError,self.ruleset.connection, 3, 0)
self.assertRaises(ValueError,self.ruleset.connection, 1, 6)
dummy_pm2 = [[0, -2, 4],
[0, 0, -66],
[0, 0, 0]]
self.ruleset.pm = dummy_pm2
self.assertEqual(self.ruleset.connection(0,1),Connection.INCLUSION_DIFF)
self.assertEqual(self.ruleset.connection(0,2),Connection.INCLUSION_SAME)
self.assertEqual(self.ruleset.connection(2,1),Connection.OVERLAP_DIFF)
dummy_pm3 = [[0, 9, -27],
[0, 0, 18],
[0, 0, 0]]
self.ruleset.pm = dummy_pm3
self.assertEqual(self.ruleset.connection(1,0),Connection.INCLUSION_SAME)
self.assertEqual(self.ruleset.connection(2,0),Connection.INCLUSION_DIFF)
self.assertEqual(self.ruleset.connection(1,2),Connection.OVERLAP_SAME)
self.assertEqual(self.ruleset.connection(2,2),Connection.REFERENCE)
def test_val_IDC(self):
inter1 = pd.Interval(1,6,'both')
inter2 = pd.Interval(1,6,'neither')
inter3 = pd.Interval(1,3,'both')
inter4 = pd.Interval(6,8,'right')
inter5 = pd.Interval(8,12,'neither')
inter6 = pd.Interval(8,12,'left')
inter7 = pd.Interval(4,9,'left')
inter8 = pd.Interval(2,4,'neither')
inter9 = pd.Interval(3,6,'right')
inter10 = pd.Interval(1,6,'left')
inter11 = pd.Interval(1,6,'right')
inter12 = pd.Interval(1,10,'right')
        #same boundaries, different closedness
self.assertEqual(self.ruleset._val_IDC(inter1,inter1), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter2), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter2,inter1), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter10), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter10,inter1), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter11), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter11,inter1), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter2,inter10), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter10,inter2), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter2,inter11), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter11,inter2), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter10,inter11), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter11,inter10), Relation.OVERLAP.value)
#Other
self.assertEqual(self.ruleset._val_IDC(inter1,inter3), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter3,inter1), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter8), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter8,inter1), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter9), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter9,inter1), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter4), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter5), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(inter5,inter1), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(inter4,inter5), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(inter5,inter4), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(inter5,inter6), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter6,inter5), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter7), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter7,inter1), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter8,inter9), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter9,inter8), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter4,inter6), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter6,inter4), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter1,inter12), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter12,inter1), Relation.OVERLAP.value)
self.assertEqual(self.ruleset._val_IDC(inter2,inter12), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(inter12,inter2), Relation.INCLUSION_JI.value)
f1 = 3.4; f2 = 4.0; nf = np.array([3.4, 8.0])
self.assertEqual(self.ruleset._val_IDC(f1,f1), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(f1,f2), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(f2,f1), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(nf[0],nf[0]), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(nf[0],f1), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(f2,nf[1]), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(nf[0],nf[1]), Relation.DIFFERENCE.value)
nbool = np.array([False,True])
self.assertEqual(self.ruleset._val_IDC(True,True), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(False,False), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(True,False), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(False,True), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(nbool[1],True), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(nbool[1],nbool[1]), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(nbool[0],True), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(nbool[0],nbool[1]), Relation.DIFFERENCE.value)
nan = float('nan'); nnan = np.array([float('nan')])
self.assertEqual(self.ruleset._val_IDC(nan,nan), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(nan,inter1), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter3,nan), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(nan,f1), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(f2,nan), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(nan,True), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(False,nan), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(nan,nnan[0]), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(nnan[0],nnan[0]), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(nnan[0],inter1), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(inter3,nnan[0]), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(nnan[0],f1), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(f2,nnan[0]), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(nnan[0],nf[0]), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(nf[1],nnan[0]), Relation.INCLUSION_IJ.value)
self.assertEqual(self.ruleset._val_IDC(nnan[0],nbool[1]), Relation.INCLUSION_JI.value)
self.assertEqual(self.ruleset._val_IDC(nbool[0],nnan[0]), Relation.INCLUSION_IJ.value)
str1 = 'bla'; str2 = "bli"
self.assertEqual(self.ruleset._val_IDC(str1,str1), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(str1,str2), Relation.DIFFERENCE.value)
self.assertEqual(self.ruleset._val_IDC(str2,str1), Relation.DIFFERENCE.value)
self.assertRaises(TypeError,self.ruleset._val_IDC,f1,inter1)
self.assertRaises(TypeError,self.ruleset._val_IDC,nbool[0],inter1)
self.assertRaises(TypeError,self.ruleset._val_IDC,'tre',nf[0])
self.assertEqual(self.ruleset._val_IDC(2,2), Relation.EQUALITY.value)
self.assertEqual(self.ruleset._val_IDC(2,3), Relation.DIFFERENCE.value)
def test_get_val(self):
self.assertEqual(self.ruleset.get_val(0,0),'rec1')
self.assertEqual(self.ruleset.get_val(2,'AvgDayCons'),pd.Interval(30,120))
self.assertRaises(ValueError,self.ruleset.get_val,3,0)
self.assertRaises(ValueError,self.ruleset.get_val,0,'hello')
self.assertRaises(ValueError,self.ruleset.get_val,'AvgDayCons',0)
self.assertRaises(ValueError,self.ruleset.get_val,0,12)
def test_has_type(self):
b1 = True; b2 = False; b1np = np.array([True]); b2np = np.array([False])
f1 = 7.0; f2 = 4.2; fnp = np.array([6.9, 9.6])
inter1 = pd.Interval(5,6); inter2 = pd.Interval(7,8)
self.assertTrue(self.ruleset.has_type(b1,bool))
self.assertTrue(self.ruleset.has_type(b1,np.bool_))
self.assertTrue(self.ruleset.has_type(b1np[0],bool))
self.assertTrue(self.ruleset.has_type(b1np[0],np.bool_))
self.assertTrue(self.ruleset.has_type(f1,float))
self.assertTrue(self.ruleset.has_type(f1,np.float64))
self.assertTrue(self.ruleset.has_type(fnp[0],float))
self.assertTrue(self.ruleset.has_type(fnp[0],np.float64))
self.assertTrue(self.ruleset.has_type(inter1,pd._libs.interval.Interval))
self.assertFalse(self.ruleset.has_type(b1,float))
self.assertFalse(self.ruleset.has_type(f1,np.bool_))
self.assertFalse(self.ruleset.has_type(inter1,float))
def test_update_val(self):
        val1 = pd.Interval(30,100) #if put in (1,0), changes overlap to inclusion between r0 and r1
        val2 = True #if put in (2,3), changes inclusion to overlap between r1 vs r2 and r0 vs r2
        self.assertRaises(ValueError,self.ruleset.update_val,3,3,val1)
self.ruleset.update_val(0,1,val1)
self.assertEqual(self.ruleset.get_val(0,1),val1)
self.assertEqual(len(self.ruleset.idm),0)
self.ruleset.update_val(0,1,float('nan'))
self.assertTrue(pd.isna(self.ruleset.get_val(0,1)))
        #ruleset is back to its original state
self.ruleset.build_IDM()
self.ruleset.build_PM()
ref_idm = copy.copy(self.ruleset.idm)
ref_pm = copy.copy(self.ruleset.pm)
self.ruleset.update_val(2,3,val2,update=False)
self.assertEqual(self.ruleset.get_val(2,3),val2)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k,i,j])
self.ruleset.update_val(2,3,val2)
ref_idm[3,0,2] = 0
ref_pm[0,2] = 0
#print("-- ref idm 1 ---")
#print(ref_idm)
#print("--- real idm 1 ---")
#print(self.ruleset.idm)
self.assertEqual(self.ruleset.get_val(2,3),val2)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k,i,j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i,j])
self.ruleset.update_val(0,1,val1)
ref_idm[1,0,1] = 1; ref_idm[1,0,2] = 2
ref_pm[0,1] = -4
#print("--ref idm 2---")
#print(ref_idm)
#print("---real idm 2---")
#print(self.ruleset.idm)
self.assertEqual(self.ruleset.get_val(0,1),val1)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k,i,j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i,j])
self.ruleset.update_val(1,3,val2)
ref_idm[3,0,1] = 0; ref_idm[3,1,2] = 1
ref_pm[0,1] = 0; ref_pm[1,2] = 6
#print("--ref idm 3 ---")
#print(ref_idm)
#print("---real idm 3 ---")
#print(self.ruleset.idm)
self.assertEqual(self.ruleset.get_val(1,3),val2)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k,i,j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i,j])
self.ruleset.update_val(0,0,'rec2')
self.ruleset.update_val(2,0,'rec1')
ref_idm[0,0,1] = 1; ref_idm[0,1,2] = -1
ref_pm[1,2] = -6
self.assertEqual(self.ruleset.get_val(0,0),'rec2')
self.assertEqual(self.ruleset.get_val(2,0),'rec1')
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k,i,j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i,j])
def test_update_attr(self):
new_attr = ['Recommendation', 'Attr1', 'Attr2', 'Attr3']
self.ruleset.update_attr(new_attr)
self.assertEqual(self.ruleset.attr_names,new_attr)
self.assertEqual(self.ruleset.attr_names,new_attr)
bad_attr = ['bad_name', 'Attr1', 'Attr2', 'Attr3']
self.assertRaises(ValueError,self.ruleset.update_attr,bad_attr)
self.assertEqual(self.ruleset.attr_names,new_attr)
bad_attr = ['Rec', 'Attr1', '', 'Attr3']
self.assertRaises(ValueError,self.ruleset.update_attr,bad_attr)
self.assertEqual(self.ruleset.attr_names,new_attr)
def test_add_attr(self):
new_attr1 = 'New1'
old_attr = self.ruleset.attr_names
old_m = self.ruleset.m
self.ruleset.add_attr(new_attr1)
self.assertEqual(self.ruleset.attr_names, old_attr+[new_attr1])
self.assertEqual(self.ruleset.m,old_m+1)
self.assertTrue(pd.isna(self.ruleset.set['New1'][0]))
self.assertTrue(pd.isna(self.ruleset.set['New1'][1]))
self.assertTrue(pd.isna(self.ruleset.set['New1'][2]))
        self.assertEqual(len(self.ruleset.idm),0) #shows idm is not built when it was empty to start with
ref_idm = [[[0,-1,-1],[0,0,1],[0,0,0]],[[0,3,3],[0,0,2],[0,0,0]],[[0,2,0],[0,0,3],[0,0,0]],[[0,2,1],[0,0,3],[0,0,0]],[[0,1,1],[0,0,1],[0,0,0]]]
ref_pm = [[0,-12,0],[0,0,18],[0,0,0]]
self.ruleset.build_IDM()
self.ruleset.build_PM()
#print("-- ref idm 1 ---")
#print(ref_idm)
#print("--- real idm 1 ---")
#print(self.ruleset.idm)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
self.ruleset.update_val(0,2,pd.Interval(25,200,'neither'))
new_attr2 = 'New2'
inter1 = pd.Interval(0,200); inter2 = pd.Interval(float('-inf'),float('inf')); inter3 = pd.Interval(0,100)
new_vals = [inter1,inter2,inter3]
ref_idm = [[[0,-1,-1],[0,0,1],[0,0,0]],[[0,3,3],[0,0,2],[0,0,0]],[[0,2,3],[0,0,3],[0,0,0]],[[0,2,1],[0,0,3],[0,0,0]],[[0,1,1],[0,0,1],[0,0,0]],[[0,2,3],[0,0,3],[0,0,0]]]
ref_pm = [[0,-24,-27],[0,0,54],[0,0,0]]
self.ruleset.add_attr(new_attr2,val_list=new_vals)
#print("-- ref idm 2 ---")
#print(ref_idm)
#print("--- real idm 2 ---")
#print(self.ruleset.idm)
self.assertEqual(self.ruleset.attr_names, old_attr+[new_attr1]+[new_attr2])
self.assertEqual(self.ruleset.m,old_m+2)
self.assertEqual(self.ruleset.set['New2'][0],inter1)
self.assertEqual(self.ruleset.set['New2'][1],inter2)
self.assertEqual(self.ruleset.set['New2'][2],inter3)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
new_attr3 = 'New3'; attr_list = [1,2,3]
ref_idm = [[[0,-1,-1],[0,0,1],[0,0,0]],[[0,3,3],[0,0,2],[0,0,0]],[[0,2,3],[0,0,3],[0,0,0]],[[0,2,1],[0,0,3],[0,0,0]],[[0,1,1],[0,0,1],[0,0,0]],[[0,2,3],[0,0,3],[0,0,0]],[[0,0,0],[0,0,0],[0,0,0]]]
ref_pm = [[0,0,0],[0,0,0],[0,0,0]]
self.ruleset.add_attr(new_attr3,val_list=attr_list)
#print("-- ref idm 3 ---")
#print(ref_idm)
#print("--- real idm 3 ---")
#print(self.ruleset.idm)
self.assertEqual(self.ruleset.attr_names, old_attr+[new_attr1]+[new_attr2]+[new_attr3])
self.assertEqual(self.ruleset.m,old_m+3)
self.assertEqual(len(self.ruleset.idm),old_m+3) #shows idm was updated
self.assertEqual(self.ruleset.set['New3'][0],1)
self.assertEqual(self.ruleset.set['New3'][1],2)
self.assertEqual(self.ruleset.set['New3'][2],3)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
new_attr3 = 'New3'
self.assertRaises(ValueError,self.ruleset.add_attr,new_attr3)
self.assertEqual(self.ruleset.attr_names,old_attr+[new_attr1]+[new_attr2]+[new_attr3])
        new_attr4 = ''
        self.assertRaises(ValueError,self.ruleset.add_attr,new_attr4)
self.assertEqual(self.ruleset.attr_names,old_attr+[new_attr1]+[new_attr2]+[new_attr3])
def test_add_rule(self):
self.ruleset.build_IDM()
self.ruleset.build_PM()
old_n = self.ruleset.n
rec_name1 = 'NewRec'
self.ruleset.add_rule(rec_name1)
self.assertEqual(self.ruleset.n,old_n+1)
self.assertEqual(len(self.ruleset.set),old_n+1) #shows ruleset has one more rule
self.assertEqual(self.ruleset.get_val(old_n,0),rec_name1)
for i in range(1,self.ruleset.m):
self.assertTrue(pd.isna(self.ruleset.set.iloc[old_n,i]))
ref_idm = [[[0,-1,-1,-1],[0,0,1,-1],[0,0,0,-1],[0,0,0,0]],[[0,3,3,1],[0,0,2,2],[0,0,0,2],[0,0,0,0]],[[0,2,0,2],[0,0,3,1],[0,0,0,2],[0,0,0,0]],[[0,2,1,2],[0,0,3,1],[0,0,0,2],[0,0,0,0]]]
ref_pm = [[0,-12,0,-4],[0,0,18,-2],[0,0,0,-8],[0,0,0,0]]
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
rec_name2 = 'rec4'; new_vals = [pd.Interval(30,100),float('nan'),float('nan')]
self.ruleset.add_rule(rec_name2, new_vals)
ref_idm = [[[0,-1,-1,-1,-1],[0,0,1,-1,-1],[0,0,0,-1,-1],[0,0,0,0,-1],[0,0,0,0,0]],[[0,3,3,1,3],[0,0,2,2,1],[0,0,0,2,3],[0,0,0,0,3],[0,0,0,0,0]],[[0,2,0,2,2],[0,0,3,1,1],[0,0,0,2,2],[0,0,0,0,1],[0,0,0,0,0]],[[0,2,1,2,2],[0,0,3,1,1],[0,0,0,2,2],[0,0,0,0,1],[0,0,0,0,0]]]
ref_pm = [[0,-12,0,-4,-12],[0,0,18,-2,-1],[0,0,0,-8,-12],[0,0,0,0,-3],[0,0,0,0,0]]
self.assertEqual(self.ruleset.n,old_n+2)
self.assertEqual(self.ruleset.set['Rec'][self.ruleset.n-1],rec_name2)
self.assertEqual(self.ruleset.set['AvgDayCons'][self.ruleset.n-1],pd.Interval(30,100))
self.assertTrue(pd.isna(self.ruleset.set['AvgNightCons'][self.ruleset.n-1]))
self.assertTrue(pd.isna(self.ruleset.set['InCommunity'][self.ruleset.n-1]))
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
rec_name3 = 'rec1'; new_vals = [float('nan'),pd.Interval(100,175),False]
self.ruleset.add_rule(rec_name3, new_vals)
ref_idm = [[[0,-1,-1,-1,-1,1],[0,0,1,-1,-1,-1],[0,0,0,-1,-1,-1],[0,0,0,0,-1,-1],[0,0,0,0,0,-1],[0,0,0,0,0,0]],[[0,3,3,1,3,1],[0,0,2,2,1,2],[0,0,0,2,3,2],[0,0,0,0,3,1],[0,0,0,0,0,2],[0,0,0,0,0,0]],[[0,2,0,2,2,6],[0,0,3,1,1,3],[0,0,0,2,2,6],[0,0,0,0,1,3],[0,0,0,0,0,3],[0,0,0,0,0,0]],[[0,2,1,2,2,1],[0,0,3,1,1,3],[0,0,0,2,2,1],[0,0,0,0,1,3],[0,0,0,0,0,3],[0,0,0,0,0,0]]]
ref_pm = [[0,-12,0,-4,-12,6],[0,0,18,-2,-1,-18],[0,0,0,-8,-12,-12],[0,0,0,0,-3,-9],[0,0,0,0,0,-18],[0,0,0,0,0,0]]
self.assertEqual(self.ruleset.n,old_n+3)
self.assertEqual(self.ruleset.set['Rec'][self.ruleset.n-1],rec_name3)
self.assertTrue(pd.isna(self.ruleset.set['AvgDayCons'][self.ruleset.n-1]))
self.assertEqual(self.ruleset.set['AvgNightCons'][self.ruleset.n-1],pd.Interval(100,175))
self.assertEqual(self.ruleset.set['InCommunity'][self.ruleset.n-1],False)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
self.assertRaises(ValueError,self.ruleset.add_rule,'hello',[3.0])
def test_delete_attr(self):
old_m = self.ruleset.m
self.ruleset.build_IDM()
self.ruleset.build_PM()
del_attr1 = 'AvgDayCons'
self.ruleset.delete_attr(del_attr1)
#print("ruleset after del AvgDayCons")
#print(self.ruleset)
ref_idm = [[[0,-1,-1],[0,0,1],[0,0,0]],[[0,2,0],[0,0,3],[0,0,0]],[[0,2,1],[0,0,3],[0,0,0]]]
ref_pm = [[0,-4,0],[0,0,9],[0,0,0]]
self.assertTrue(del_attr1 not in self.ruleset.set.columns.tolist())
self.assertTrue(del_attr1 not in self.ruleset.attr_names)
self.assertEqual(self.ruleset.m,old_m-1)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
del_attr2 = 'InCommunity'
self.ruleset.delete_attr(del_attr2)
#print("ruleset after del InCommunity")
#print(self.ruleset)
ref_idm = [[[0,-1,-1],[0,0,1],[0,0,0]],[[0,2,0],[0,0,3],[0,0,0]]]
ref_pm = [[0,-2,0],[0,0,3],[0,0,0]]
self.assertTrue(del_attr2 not in self.ruleset.set.columns.tolist())
self.assertTrue(del_attr2 not in self.ruleset.attr_names)
self.assertEqual(self.ruleset.m,old_m-2)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
del_attr3 = 'AvgNightCons'
self.ruleset.delete_attr(1)
#print("ruleset after del AvgNightCons")
#print(self.ruleset)
ref_idm = [[[0,-1,-1],[0,0,1],[0,0,0]]]
ref_pm = [[0,-1,-1],[0,0,1],[0,0,0]]
self.assertTrue(del_attr3 not in self.ruleset.set.columns.tolist())
self.assertTrue(del_attr3 not in self.ruleset.attr_names)
self.assertEqual(self.ruleset.m,old_m-3)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
self.assertRaises(ValueError,self.ruleset.delete_attr,2)
self.assertRaises(ValueError,self.ruleset.delete_attr,'Hello')
self.assertRaises(ValueError,self.ruleset.delete_attr,'Rec')
def test_delete_rule_1(self):
old_n = self.ruleset.n
old_attr = self.ruleset.attr_names
self.ruleset.build_IDM()
self.ruleset.build_PM()
self.ruleset.delete_rule(1) #rule in the middle
ref_idm = [[[0,-1],[0,0]],[[0,3],[0,0]],[[0,0],[0,0]],[[0,1],[0,0]]]
ref_pm = [[0,0],[0,0]]
self.assertEqual(len(self.ruleset.set),old_n-1)
self.assertEqual(self.ruleset.n,old_n-1)
self.assertEqual(self.ruleset.set.columns.tolist(),old_attr)
self.assertEqual(self.ruleset.set.index.tolist(),[0,1])
#print("-- ref idm 1 ---")
#print(ref_idm)
#print("--- real idm 1 ---")
#print(self.ruleset.idm)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
self.ruleset.delete_rule(1) #rule at the end
ref_idm = [[[0],[0]],[[0],[0]],[[0],[0]],[[0],[0]]]
ref_pm = [[0]]
self.assertEqual(len(self.ruleset.set),old_n-2)
self.assertEqual(self.ruleset.n,old_n-2)
self.assertEqual(self.ruleset.set.columns.tolist(),old_attr)
self.assertEqual(self.ruleset.set.index.tolist(),[0])
self.assertEqual(len(self.ruleset.idm),0)
        self.assertEqual(len(self.ruleset.pm),0)
self.ruleset.delete_rule(0) #last remaning rule
self.assertEqual(len(self.ruleset.set),0)
self.assertEqual(self.ruleset.n,0)
self.assertEqual(self.ruleset.set.columns.tolist(),old_attr)
self.assertEqual(self.ruleset.set.index.tolist(),[])
self.assertFalse(self.ruleset.idm.any())
self.assertFalse(self.ruleset.pm.any())
self.assertEqual(len(self.ruleset.idm),0)
        self.assertEqual(len(self.ruleset.pm),0)
def test_delete_rule_2(self):
old_n = self.ruleset.n
old_attr = self.ruleset.attr_names
self.ruleset.build_IDM()
self.ruleset.build_PM()
self.ruleset.delete_rule(0) #first rule
ref_idm = [[[0,1],[0,0]],[[0,2],[0,0]],[[0,3],[0,0]],[[0,3],[0,0]]]
ref_pm = [[0,18],[0,0]]
self.assertEqual(len(self.ruleset.set),old_n-1)
self.assertEqual(self.ruleset.n,old_n-1)
self.assertEqual(self.ruleset.set.columns.tolist(),old_attr)
self.assertEqual(self.ruleset.set.index.tolist(),[0,1])
#print("-- ref idm 1 ---")
#print(ref_idm)
#print("--- real idm 1 ---")
#print(self.ruleset.idm)
for k in range(self.ruleset.m):
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.idm[k,i,j],ref_idm[k][i][j])
for i in range(self.ruleset.n):
for j in range(self.ruleset.n):
self.assertEqual(self.ruleset.pm[i,j],ref_pm[i][j])
def test_same_type(self):
b1 = True; b2 = False; b1np = np.array([True]); b2np = np.array([False])
f1 = 7.0; f2 = 4.2; fnp = np.array([6.9, 9.6])
inter1 = pd.Interval(5,6); inter2 = pd.Interval(7,8)
self.assertTrue(self.ruleset.same_type(b1,b2))
self.assertTrue(self.ruleset.same_type(b1,b1np[0]))
self.assertTrue(self.ruleset.same_type(b1np[0],b2np[0]))
self.assertTrue(self.ruleset.same_type(f1,f2))
self.assertTrue(self.ruleset.same_type(f1,fnp[0]))
self.assertTrue(self.ruleset.same_type(fnp[1],fnp[0]))
self.assertTrue(self.ruleset.same_type(inter1,inter2))
self.assertFalse(self.ruleset.same_type(b1,fnp[0]))
self.assertFalse(self.ruleset.same_type(f1,b2))
self.assertFalse(self.ruleset.same_type(inter2,fnp[0]))
class TestExtremeCases(unittest.TestCase):
def test_init_empty(self):
rset = rs.RuleSet([])
self.assertEqual(len(rset.set),0)
self.assertEqual(rset.m,0)
self.assertEqual(rset.n,0)
self.assertEqual(len(rset.idm),0)
self.assertEqual(len(rset.pm),0)
self.assertEqual(rset.attr_names,[])
def test_idm_pm(self):
# computation of idm and pm when there is only one rule in set
r1 = {'AvgNightCons':pd.Interval(150.0,200.0, 'neither'),'InCommunity':False, 'Rec':'rec1'}
r2 = {'AvgDayCons':pd.Interval(30.0,100.0), 'Rec':'rec2'}
r3 = {'AvgDayCons':pd.Interval(30.0,120.0),'AvgNightCons':pd.Interval(50.0,150.0),'InCommunity':False, 'Rec':'rec2'}
rules = [r1]
ruleset = rs.RuleSet(rules)
ruleset.build_IDM()
ruleset.build_PM()
self.assertEqual(len(ruleset.idm),0)
        self.assertEqual(len(ruleset.pm),0)
def test_add_rule(self):
#add rule to empty ruleset
rset = rs.RuleSet([])
rec_name1 = 'NewRec'
rset.add_rule(rec_name1)
self.assertEqual(rset.n,1)
self.assertEqual(rset.m,1)
self.assertEqual(rset.attr_names,['Recommendation'])
self.assertEqual(len(rset.set),1)
self.assertEqual(rset.set['Recommendation'][0],rec_name1)
rset = rs.RuleSet([])
rec_name2 = 'NewRec'; values = [float('nan'),pd.Interval(100,175),False]
rset.add_rule(rec_name2, values)
self.assertEqual(rset.n,1)
self.assertEqual(rset.m,4)
self.assertEqual(rset.attr_names,['Recommendation', 'Attr 1', 'Attr 2', 'Attr 3'])
self.assertEqual(len(rset.set),1)
self.assertEqual(rset.set['Recommendation'][0],rec_name2)
self.assertTrue(pd.isna(rset.set['Attr 1'][0]))
self.assertEqual(rset.set['Attr 2'][0],pd.Interval(100,175))
self.assertEqual(rset.set['Attr 3'][0],False)
if __name__ == '__main__':
unittest.main()
|
# constants
CHARS = '\ !%"#&\'()*+,-./0123456789:;?AÁẢÀÃẠÂẤẨẦẪẬĂẮẲẰẴẶBCDĐEÉẺÈẼẸÊẾỂỀỄỆFGHIÍỈÌĨỊJKLMNOÓỎÒÕỌÔỐỔỒỖỘƠỚỞỜỠỢPQRSTUÚỦÙŨỤƯỨỬỪỮỰVWXYÝỶỲỸỴZaáảàãạâấẩầẫậăắẳằẵặbcdđeéẻèẽẹêếểềễệfghiíỉìĩịjklmnoóỏòõọôốổồỗộơớởờỡợpqrstuúủùũụưứửừữựvwxyýỷỳỹỵz' # noqa
CHARS_ = [char for char in CHARS]
PIXEL_INDEX = 127
NO_GEN_IMAGES = 2**5
# sample params
TRAIN_SIZE = 0.95
MAX_LEN_TEXT = 256
IMAGE_SIZE = (1150, 32)
IMG_W, IMG_H = IMAGE_SIZE
NO_CHANNELS = 1
# if K.image_data_format() == 'channels_first':
# INPUT_SHAPE = (NO_CHANNELS, IMG_W, IMG_H)
# else:
# INPUT_SHAPE = (IMG_W, IMG_H, NO_CHANNELS)
INPUT_SHAPE = (IMG_W, IMG_H, NO_CHANNELS)
IMG_BG_TEXT = ("black", "white")
# model params
NO_EPOCHS = 25
NO_LABELS = 216
BATCH_SIZE = 16
CONV_FILTERS = 16
KERNEL_SIZE = (3, 3)
POOL_SIZE = 2
DOWNSAMPLE_FACTOR = POOL_SIZE ** 2
TIME_DENSE_SIZE = 256
RNN_SIZE = 256
# paths
BASE_DATA = ""
SAMPLES_DATA = ""
RAW_DATA = ""
PP_DATA = ""
GEN_DATA = ""
TRANSCRIPTION = ""
TRANSGEN = ""
# naming
|
#!/usr/bin/python
'''!
Program to compute the odds for the game of Baccarat.
@author <a href="email:fulkgl@gmail.com">George L Fulk</a>
'''
def bacc_value(num1, num2):
'''!
    Compute the baccarat value of two integer card ranks (1..13); ranks above 9
    count as zero and the total is taken modulo 10.
'''
if num1 > 9:
num1 = 0
if num2 > 9:
num2 = 0
num1 += num2
if num1 > 9:
num1 -= 10
return num1
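# Worked examples for bacc_value (derived directly from the rules above):
#   bacc_value(7, 8)   -> 5  # 7 + 8 = 15, keep only the last digit
#   bacc_value(13, 9)  -> 9  # a King (rank 13) counts as zero
#   bacc_value(10, 10) -> 0  # two ten-value cards total zero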
def comma(number):
'''!
    Convert an integer to a comma-separated string.
'''
str_int = ""
sign = ""
quo = number
if number < 0:
sign = '-'
quo = -number
while quo > 999:
rem = quo % 1000
str_int = ",%03d%s" % (rem, str_int)
quo = quo // 1000
return "%s%d%s" % (sign, quo, str_int)
class ComputeBaccaratOdds(object):
'''!
Compute the odds for the game of Baccarat.
'''
def __init__(self, number_decks=8):
'''!
Compute Baccarat odds for the given number of decks of cards.
The range of valid number of decks is limited to 12. The 12 limit
is an attempt to prevent attacks or bad coding using up resources.
        @param number_decks Number of decks used to initialize the odds.
            The valid range is 1 at a minimum up to 12.
        @throws ValueError
            Input argument number_decks is not a legal value.
'''
# validate args
if not isinstance(number_decks, int) or \
                (number_decks < 1) or (number_decks > 12):
raise ValueError("number_decks(%s) not a legal value" %
str(number_decks))
# create the shoe
self.saved_shoe = 13 * [4 * number_decks]
# save the dragon table
self.dragon_pay_table = 3 * [None]
self.dragon_natural_win = 10
self.dragon_natural_tie = 11
# 0, 1, 2, 3, 4, 5, 6, 7, 8 , 9,nat,nT
self.dragon_pay_table[1-1] = [-1, -1, -1, -1, 1, 2, 4, 6, 10, 30, 1, 0]
self.dragon_pay_table[2-1] = [-1, -1, -1, -1, 1, 3, 4, 7, 8, 20, 1, 0]
self.dragon_pay_table[3-1] = [-1, -1, -1, -1, 2, 2, 4, 4, 10, 30, 1, 0]
# ^ ^
# Number of hand combinations that result in Banker,Player,Tie wins.
self.count_banker = 0
self.count_player = 0
self.count_tie = 0
self.count_naturals = 0
self.count_pair = 0
self.count_nonpair = 0
self.count_banker_3card7 = 0
self.count_player_3card8 = 0
self.count_banker_dragon = [0, 0, 0]
self.freq_banker_dragon = [0, 0, 0]
self.count_player_dragon = [0, 0, 0]
self.freq_player_dragon = [0, 0, 0]
# perform the math computation
self.recompute(self.saved_shoe)
def record(self, value_banker, value_player, count,
is_naturals=True,
is_banker_3cards=False,
is_player_3cards=False):
'''!
Record the results of a hand combination.
'''
diff = value_banker - value_player
if value_player < value_banker:
# Banker wins
self.count_banker += count
if is_banker_3cards and value_banker == 7:
self.count_banker_3card7 += count
if is_naturals: # and not a tie
diff = self.dragon_natural_win
for table_num in range(3): # various dragon tables
dragon_pays = self.dragon_pay_table[table_num][diff]
self.count_banker_dragon[table_num] += count * dragon_pays
if dragon_pays >= 0:
self.freq_banker_dragon[table_num] += count
self.count_player_dragon[table_num] += -count
elif value_player > value_banker:
# Player wins
self.count_player += count
if is_player_3cards and value_player == 8:
self.count_player_3card8 += count
diff = -diff
if is_naturals: # and not a tie
diff = self.dragon_natural_win
for table_num in range(3): # various dragon tables
dragon_pays = self.dragon_pay_table[table_num][diff]
self.count_player_dragon[table_num] += count * dragon_pays
if dragon_pays >= 0:
self.freq_player_dragon[table_num] += count
self.count_banker_dragon[table_num] += -count
else:
# Tie wins
self.count_tie += count
if is_naturals:
diff = self.dragon_natural_tie
# special case, table 3 counts the pushes
self.freq_banker_dragon[3 - 1] += count
self.freq_player_dragon[3 - 1] += count
for table_num in range(3): # various dragon tables
dragon_pays = self.dragon_pay_table[table_num][diff]
self.count_player_dragon[table_num] += count * dragon_pays
self.count_banker_dragon[table_num] += count * dragon_pays
def not_naturals(self, value_p, value_b, shoe_size, shoe, count4):
'''!
Handle the not a naturals situation. Look for a third player and
third banker situation.
'''
# = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13]
draw_table = [3, 4, 4, 5, 5, 6, 6, 2, 3, 3, 3, 3, 3, 3]
if value_p <= 5:
# Player hits
for p3 in range(len(shoe)):
if shoe[p3] != 0:
if value_b <= draw_table[p3]:
# Banker hits
value_p3 = bacc_value(value_p, 1 + p3)
count5 = count4 * shoe[p3]
shoe[p3] -= 1
for b3 in range(len(shoe)):
if shoe[b3] != 0:
count6 = count5 * shoe[b3]
value_b3 = bacc_value(value_b, 1 + b3)
self.record(value_b3, value_p3, count6,
False, # not natural
True, # 3 card banker
True) # 3 card player
shoe[p3] += 1
else:
# Banker stands
count6 = count4 * shoe[p3] * (shoe_size - 1)
value_p3 = bacc_value(value_p, 1 + p3)
self.record(value_b, value_p3, count6,
False, # not natural
False, # not 3 card banker
True) # player 3 cards
else:
# Player stands
if value_b <= 5:
# Banker hits
for b3 in range(len(shoe)):
if shoe[b3] != 0:
value_b3 = bacc_value(value_b, 1 + b3)
count6 = count4 * shoe[b3] * (shoe_size - 1)
self.record(value_b3, value_p, count6,
False, # not natural
True, # 3 card banker
False) # no 3 card player
else:
# Banker stands
count6 = count4 * shoe_size * (shoe_size - 1)
self.record(value_b, value_p, count6, False) # False=!natural
def recompute(self, shoe):
'''!
Recompute the math for the given shoe contents.
The 13 indexed values will represent the number of each of the 13
cards in a suit. The shoe[0] is the number of aces, shoe[1] is the
number of twos, et cetera. Up to shoe[12] is the number of Kings.
@param shoe integer array of length 13
'''
        # validate shoe and compute its size
if not isinstance(shoe, list) or (len(shoe) != 13):
raise ValueError("int[13] required")
shoe_size = 0
for i in shoe:
if not isinstance(i, int) or (i < 0) or (i > 50):
raise ValueError("shoe does not contain valid values")
shoe_size += i
# init the counts
self.count_banker = 0
self.count_player = 0
self.count_tie = 0
self.count_naturals = 0
self.count_pair = 0
self.count_nonpair = 0
self.count_banker_3card7 = 0
self.count_player_3card8 = 0
self.count_banker_dragon = [0, 0, 0]
self.count_player_dragon = [0, 0, 0]
self.freq_banker_dragon = [0, 0, 0]
self.freq_player_dragon = [0, 0, 0]
# Loop through all possible card combinations
for p1 in range(len(shoe)):
if shoe[p1] > 0:
count1 = shoe[p1]
shoe[p1] -= 1
shoe_size -= 1
for b1 in range(len(shoe)):
if shoe[b1] != 0:
count2 = count1 * shoe[b1]
shoe[b1] -= 1
shoe_size -= 1
for p2 in range(len(shoe)):
if shoe[p2] != 0:
count3 = count2 * shoe[p2]
shoe[p2] -= 1
shoe_size -= 1
for b2 in range(len(shoe)):
if shoe[b2] != 0:
count4 = count3 * shoe[b2]
shoe[b2] -= 1
shoe_size -= 1
# -----
# First 2 cards dealt to each side.
#
# count the pair side bet
if p1 == p2:
self.count_pair += count4
else:
self.count_nonpair += count4
#
value_p = bacc_value(1 + p1, 1 + p2)
value_b = bacc_value(1 + b1, 1 + b2)
if (value_p >= 8) or (value_b >= 8):
count6 = count4 * shoe_size * \
(shoe_size - 1)
self.record(value_b, value_p,
count6)
self.count_naturals += count6
else: # not natural
self.not_naturals(value_p, value_b,
shoe_size, shoe,
count4)
# -----
shoe_size += 1
shoe[b2] += 1
# if b2
# for b2=
shoe_size += 1
shoe[p2] += 1
# if p2
# for p2=
shoe_size += 1
shoe[b1] += 1
# if b1
# for b1=
shoe_size += 1
shoe[p1] += 1
# if p1
# for p1=
def __str__(self):
'''!
Return the string representation of this object.
@return String
'''
output = []
total = self.count_banker + self.count_player + self.count_tie
line = "%5s=%22s%8.4f%%%8.4f%%%+9.4f%%" % (
'B', comma(self.count_banker),
self.count_banker * 100.0 / total,
self.count_banker * 100.0 / (self.count_banker + self.count_player),
(self.count_banker * 0.95 - self.count_player) * 100.0 / total)
output.append(line)
line = "%5s=%22s%8.4f%%%8.4f%%%+9.4f%%" % (
'P', comma(self.count_player),
self.count_player * 100.0 / total,
self.count_player * 100.0 / (self.count_banker + self.count_player),
(self.count_player - self.count_banker) * 100.0 / total)
output.append(line)
line = "%5s=%22s%8.4f%%%8.4fx%+9.4f%%" % (
'T', comma(self.count_tie),
self.count_tie * 100.0 / total,
total * 1.0 / self.count_tie,
(self.count_tie * 8.0 - self.count_banker - self.count_player) *
100.0 / total)
output.append(line)
line = "total=%22s" % comma(total)
output.append(line)
line = " #nat=%22s%8.4f%% T9x%+6.3f%%" % (
comma(self.count_naturals),
self.count_naturals * 100.0 / total,
100.0 * (self.count_tie * (2 + 8.0) - total) / total)
output.append(line)
line = "%5s=%22s%8.4f%%%8.4f%%%+9.4f%%" % (
'EZ-B', comma(self.count_banker - self.count_banker_3card7),
(self.count_banker - self.count_banker_3card7) * 100.0 / total,
(self.count_banker - self.count_banker_3card7) * 100.0 /
(self.count_banker + self.count_player),
(self.count_banker - self.count_banker_3card7 - self.count_player) *
100.0 / total)
output.append(line)
line = "%5s=%22s%8.4f%%%8.4fx%+9.4f%%" % (
'B3C7', comma(self.count_banker_3card7),
self.count_banker_3card7 * 100.0 / total,
total * 1.0 / self.count_banker_3card7,
(self.count_banker_3card7 * (1 + 40.0) - total) * 100.0 / total)
output.append(line)
line = "%5s=%22s%8.4f%%%8.4fx%+9.4f%%" % (
'P3C8', comma(self.count_player_3card8),
self.count_player_3card8 * 100.0 / total,
total * 1.0 / self.count_player_3card8,
(self.count_player_3card8 * (1 + 25.0) - total) * 100.0 / total)
output.append(line)
for table_num in range(3): # various dragon tables
comment = ""
if table_num == 2:
comment = "w/T"
line = "%5s=%22s%8.4f%% %3s %+9.4f%%" % (
"DB%d" % (1 + table_num),
comma(self.count_banker_dragon[table_num]),
self.freq_banker_dragon[table_num] * 100.0 / total,
comment,
self.count_banker_dragon[table_num] * 100.0 / total)
output.append(line)
for table_num in range(3): # various dragon tables
comment = ""
if table_num == 2:
comment = "w/T"
line = "%5s=%22s%8.4f%% %3s %+9.4f%%" % (
"DP%d" % (1 + table_num),
comma(self.count_player_dragon[table_num]),
self.freq_player_dragon[table_num] * 100.0 / total,
comment,
self.count_player_dragon[table_num] * 100.0 / total)
output.append(line)
output.append("%5s=%14s /%15s%8.4fx%+9.4f%%" % (
'pair', comma(self.count_pair),
comma(self.count_pair + self.count_nonpair),
self.count_nonpair * 1.0 / self.count_pair,
(self.count_pair * 11.0 - self.count_nonpair) * 100.0 /
(self.count_pair + self.count_nonpair)))
return "\n".join(output)
if __name__ == "__main__":
# command line entry point
ODDS = ComputeBaccaratOdds()
print(ODDS)
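    # A hedged usage sketch (not from the original source): recompute() also
    # accepts a partially dealt shoe. The list holds 13 per-rank counts
    # (aces through kings); e.g. an 8-deck shoe with every ace removed:
    #   ODDS.recompute([0] + 12 * [4 * 8])
    #   print(ODDS)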
|
import ctypes
import pytest
c_lib = ctypes.CDLL('../solutions/0344-reverse-string/reverse-string.so')
@pytest.mark.parametrize('string, ans',
[(b"Hello World", b"dlroW olleH"),
(b"Hannah", b"hannaH")])
def test_reverse_string(string, ans):
    # bytes literals are immutable, so hand the C function a mutable copy and
    # compare the reversed buffer contents with the expected result
    buf = ctypes.create_string_buffer(string, len(string))
    c_lib.reverseString(buf, len(string))
    assert buf.raw == ans
|
from datetime import datetime
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from .forms import jsForm, FCMForm, FCMCONCEPTForm, FiltersForm, SortMapsForm, FCMEDGEForm
from .models import FCM
from .models import FCM_CONCEPT
from .models import FCM_CONCEPT_INFO
from .models import FCM_EDGES_IN_FCM_CONCEPT
from .models import FCM_EDGE_INFO
from .models import Tags
from django.contrib import messages
from bs4 import BeautifulSoup
from django.shortcuts import get_object_or_404
from django import forms
import json, pdb
# import urllib.parse as urllib
import urllib2 as urllib
from django.http import HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.db import DatabaseError
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index(request):
context = {'a_var': "no_value"}
return render(request, 'fcm_app/index.html', context)
def browse(request):
post_query = False
    if request.method == 'POST':  # if the method is POST, mark this as a filter submission
        post_query = True
    if request.method == 'GET':  # if the method is GET, then
        if 'hasFilters' in request.GET:  # if 'hasFilters' is present in the request
            if bool(request.GET['hasFilters']) is True:  # if its value is truthy, then
if 'filter-post' in request.session:
del request.session['filter-post']
pass
else:
request.method = 'GET'
elif ('page' in request.GET) and ('filter-post' in request.session):
request.method = 'POST'
else:
pass
else:
request.session['filter-post'] = request.POST
if request.method == 'POST':
request.GET = request.GET.copy()
request.GET['hasFilters'] = 'true'
filter_form = FiltersForm(request.POST)
if 'filter-post' in request.session:
filter_form = FiltersForm(request.session['filter-post'])
if filter_form.is_valid():
filtered_title_and_or_description = filter_form.cleaned_data['filtered_title_and_or_description']
filtered_year = filter_form.cleaned_data['filtered_year']
filtered_country = filter_form.cleaned_data['filtered_country']
filtered_getmine = filter_form.cleaned_data['filtered_getmine']
filtered_tags = filter_form.cleaned_data['filtered_tags']
filtered_sorting_type = filter_form.cleaned_data['filtered_sorting_type']
filtered_sorting_order = filter_form.cleaned_data['filtered_sorting_order']
if request.user.is_authenticated:
all_fcms = FCM.objects.filter(Q(status='1') | Q(user=request.user)).order_by('-creation_date')
else:
all_fcms = FCM.objects.filter(Q(status='1')).order_by('-creation_date')
if filtered_year != "-":
all_fcms = all_fcms.filter(creation_date__year=filtered_year)
if filtered_country != "-":
all_fcms = all_fcms.filter(country=filtered_country)
all_fcms = all_fcms.filter(Q(title__icontains=filtered_title_and_or_description) | Q(
description__icontains=filtered_title_and_or_description)).distinct()
if filtered_tags:
queryset_list = []
for element in filtered_tags:
try:
queryset_list.append(Tags.objects.get(pk=str(element)).fcm_set.all())
except DatabaseError:
pass
except ObjectDoesNotExist:
pass
results_union = FCM.objects.none()
for q in queryset_list:
results_union = (results_union | q )
results_union = results_union.distinct()
all_fcms = results_union & all_fcms
if filtered_getmine:
all_fcms = all_fcms.filter(user_id=request.user.id)
if filtered_sorting_type == 'creation_date':
if filtered_sorting_order == 'ascending':
all_fcms = all_fcms.order_by('creation_date')
else:
all_fcms = all_fcms.order_by('-creation_date')
elif filtered_sorting_type == 'title':
if filtered_sorting_order == 'ascending':
all_fcms = all_fcms.order_by('title')
else:
all_fcms = all_fcms.order_by('-title')
data = {'filtered_title_and_or_description': filtered_title_and_or_description,
'filtered_year': filtered_year,
'filtered_country': filtered_country,
'filtered_getmine': filtered_getmine,
'filtered_tags': filtered_tags,
'filtered_sorting_type': filtered_sorting_type,
'filtered_sorting_order': filtered_sorting_order}
filter_form = FiltersForm(initial=data)
paginator = Paginator(all_fcms, 9)
if post_query == True:
page = 1
else:
page = request.GET.get('page')
try:
all_fcms = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
all_fcms = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
all_fcms = paginator.page(paginator.num_pages)
return render(request, 'fcm_app/browse.html',
{"all_fcms": all_fcms, "filter_form": filter_form, "filter_tags":filtered_tags})
else:
all_fcms = FCM.objects.filter(Q(status='1') | Q(user=request.user)).order_by('-creation_date')
return render(request, 'fcm_app/browse.html', {"all_fcms": all_fcms, "filter_form": filter_form})
else:
#all_fcms = FCM.objects.all()
if request.user.is_authenticated:
all_fcms = FCM.objects.filter(Q(status='1') | Q(user=request.user)).order_by('-creation_date')
else:
all_fcms = FCM.objects.filter(Q(status='1')).order_by('-creation_date')
filter_form = FiltersForm()
paginator = Paginator(all_fcms, 9)
page = request.GET.get('page')
try:
all_fcms = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
all_fcms = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
all_fcms = paginator.page(paginator.num_pages)
return render(request, 'fcm_app/browse.html', {"all_fcms": all_fcms, "filter_form": filter_form})
@login_required
def import_fcm(request):
storage = messages.get_messages(request)
storage.used = True
if request.method == 'POST':
form = FCMForm(request.POST, request.FILES)
if form.is_valid():
try:
print(request.user)
user = request.user
                soup = BeautifulSoup(form.cleaned_data['map_html'], "html.parser")  # either lxml or html.parser can be used
if len(soup.find("table", class_="yimagetable")) > 0:
print("src in html: " + soup.find("img", class_="yimage")['src'])
print("image name: " + form.cleaned_data['map_image'].name)
if urllib.unquote(soup.find("img", class_="yimage")['src']) == form.cleaned_data['map_image'].name:
if user.is_authenticated():
fcm = FCM(user=user,
title=form.cleaned_data['title'],
country = form.cleaned_data['country'],
status = form.cleaned_data['status'],
description=form.cleaned_data['description'],
creation_date=datetime.now(),
map_image=form.cleaned_data['map_image'],
map_html=form.cleaned_data['map_html'])
fcm.save()
tags = form.cleaned_data['tags']
for tag_element in tags:
try:
new_tag = Tags(name=str(tag_element))
new_tag.save()
except DatabaseError:
pass
fcm.tags.add(str(tag_element))
                            soup = BeautifulSoup(fcm.map_html, "html.parser")  # either lxml or html.parser can be used
x = soup.findAll("div", class_="tooltip")
for div in x:
if str(div['id']).startswith("n"):
fcm_concept = FCM_CONCEPT(fcm=fcm, title=div.text, id_in_fcm=div.get('id'))
fcm_concept.save()
else:
fcm_edge = FCM_EDGES_IN_FCM_CONCEPT(fcm=fcm, text=div.text, id_in_fcm=div.get('id'))
fcm_edge.save()
messages.success(request, 'Successfully imported the System Map. Add more info <a style="color: #a05017;" href="/fcm/view-fcm-concept/' + str(fcm.id) + '/"><u>here</u></a>, or you can browse the rest of the Maps <a style="color: #a05017;" href="/fcm/browse?hasFilters=false"><u>here</u></a>. ')
else:
messages.error(request, "You must login to import a map")
else:
messages.error(request, "The image you uploaded does not match with the html file")
else:
messages.error(request, "The html file was not exported from yEd")
except:
messages.error(request, "Import failed, please check the files you uploaed")
else:
messages.error(request, "form invalid")
form = FCMForm()
return render(request, 'fcm_app/import_fcm.html', {
'form': form
})
def view_fcm(request, fcm_id):
fcm = FCM.objects.get(pk=fcm_id)
# fcm.chartis = str(fcm.chartis)
if fcm.manual == False:
html = fcm.map_html.read()
soup = BeautifulSoup(html, 'html.parser')
body = soup.find('body').prettify().replace('<body>', '').replace('</body>', '')
src = [x['src'] for x in soup.findAll('img')][0]
# body = body.replace('src="' + src + '"', 'src="' + fcm.map_image.url + '" width="100%" class="img-responsive"') #<--ayto xalaei to area highlight
body = body.replace('src="' + src + '"', 'src="' + fcm.map_image.url + '" ')
body = body.replace('onmouseover="showTooltip(', 'onclick="showTooltip2(event, ')
body = body.replace('document.onmousemove = updateTooltip;', '')
# body = body.replace('shape="rect"', 'shape="rect" data-toggle="popover" data-content="Some content"')
script = soup.find('script').prettify()
concepts = FCM_CONCEPT.objects.filter(fcm=fcm)
print(concepts)
info_dict = dict()
for concepts_item in concepts:
try:
concept_info = FCM_CONCEPT_INFO.objects.get(fcm_concept=concepts_item)
info_dict[str(concepts_item.id_in_fcm)] = concept_info.info
except FCM_CONCEPT_INFO.DoesNotExist:
info_dict[str(concepts_item.id_in_fcm)] = 'No more information available'
edges = FCM_EDGES_IN_FCM_CONCEPT.objects.filter(fcm=fcm)
print(edges)
for edge_item in edges:
try:
edge_info = FCM_EDGE_INFO.objects.get(fcm_edge=edge_item)
info_dict[str(edge_item.id_in_fcm)] = edge_info.info
except FCM_EDGE_INFO.DoesNotExist:
info_dict[str(edge_item.id_in_fcm)] = 'No more information available'
print(info_dict)
return render(request, 'fcm_app/view_fcm.html', {
'map_body': body,
'map_image': fcm.map_image,
'script': script,
'fcm': fcm,
'info_dict': info_dict
})
else:
        x = fcm.chartis  # the stored graph string is passed to the HTML template for display
#data = {'title': "fd", 'description': x}
#form = jsForm(data)
concepts = FCM_CONCEPT.objects.filter(fcm=fcm)
print(concepts)
info_dict = dict()
for concepts_item in concepts:
try:
concept_info = FCM_CONCEPT_INFO.objects.get(fcm_concept=concepts_item)
info_dict[str(concepts_item.id_in_fcm)] = concept_info.info
except FCM_CONCEPT_INFO.DoesNotExist:
info_dict[str(concepts_item.id_in_fcm)] = 'No more information available'
print(info_dict)
edges = FCM_EDGES_IN_FCM_CONCEPT.objects.filter(fcm=fcm)
print('edges:')
# print(edges)
info_edge_dict = dict()
for edge_item in edges:
try:
edge_info = FCM_EDGE_INFO.objects.get(fcm_edge=edge_item)
info_edge_dict[str(edge_item.id_in_fcm)] = edge_info.info
except FCM_EDGE_INFO.DoesNotExist:
info_edge_dict[str(edge_item.id_in_fcm)] = 'No more information available'
print(info_edge_dict)
original_title = ''
original_username = ''
if fcm.original is not None:
original_id = int(fcm.original)
original_title = FCM.objects.get(pk=int(original_id)).title
original_username = FCM.objects.get(pk=int(original_id)).user.username
return render(request, 'fcm_app/view_fcm4.html', {
'fcm': fcm,
#'data1': x,
#'form': form,
'info_dict': info_dict,
'info_edge_dict': info_edge_dict,
'original_title': original_title,
'original_username': original_username
})
@login_required
def delete_fcm(request, fcm_id):
FCM.objects.get(pk=fcm_id).delete()
return render(request, 'fcm_app/index.html', {})
@login_required
def view_fcm_concept(request, fcm_id):
fcm = FCM.objects.get(pk=fcm_id)
if request.user == fcm.user:
concepts = FCM_CONCEPT.objects.filter(fcm=fcm_id)
relations = FCM_EDGES_IN_FCM_CONCEPT.objects.filter(fcm=fcm_id)
return render(request, 'fcm_app/view_fcm_concept.html', {"fcm_id": fcm_id, "concepts": concepts, "relations": relations})
return HttpResponseForbidden()
@login_required
def view_fcm_concept_info(request, fcm_id, concept_id):
storage = messages.get_messages(request)
storage.used = True
fcm = FCM.objects.get(pk=fcm_id)
if request.user == fcm.user:
concept = FCM_CONCEPT.objects.get(fcm=fcm_id, pk=concept_id)
concept_info = FCM_CONCEPT_INFO()
try:
concept_info = FCM_CONCEPT_INFO.objects.get(fcm_concept=concept_id)
data = {'concept_info': concept_info.info}
except concept_info.DoesNotExist:
data = {}
form = FCMCONCEPTForm(initial=data)
if request.method == 'POST':
form = FCMCONCEPTForm(request.POST)
if form.is_valid():
my_concept = get_object_or_404(FCM_CONCEPT, pk=concept_id)
fcm_concept_info = FCM_CONCEPT_INFO()
try:
fcm_concept_info = FCM_CONCEPT_INFO.objects.get(fcm_concept=my_concept)
fcm_concept_info.info = form.cleaned_data['concept_info']
except fcm_concept_info.DoesNotExist:
fcm_concept_info = FCM_CONCEPT_INFO(fcm_concept=my_concept, info=form.cleaned_data['concept_info'])
fcm_concept_info.save()
messages.success(request, 'edited successfully')
else:
messages.error(request, "an error occured")
return render(request, 'fcm_app/view_fcm_concept_info.html/', {
'form': form, 'concept': concept,
})
return HttpResponseForbidden()
@login_required
def view_fcm_edge_info(request, fcm_id, edge_id):
storage = messages.get_messages(request)
storage.used = True
fcm = FCM.objects.get(pk=fcm_id)
if request.user == fcm.user:
edge = FCM_EDGES_IN_FCM_CONCEPT.objects.get(fcm=fcm_id, pk=edge_id)
edge_info = FCM_EDGE_INFO()
try:
edge_info = FCM_EDGE_INFO.objects.get(fcm_edge=edge_id)
data = {'edge_info': edge_info.info}
except edge_info.DoesNotExist:
data = {}
form = FCMEDGEForm(initial=data)
if request.method == 'POST':
form = FCMEDGEForm(request.POST)
if form.is_valid():
my_edge = get_object_or_404(FCM_EDGES_IN_FCM_CONCEPT, pk=edge_id)
fcm_edge_info = FCM_EDGE_INFO()
try:
fcm_edge_info = FCM_EDGE_INFO.objects.get(fcm_edge=my_edge)
fcm_edge_info.info = form.cleaned_data['edge_info']
except fcm_edge_info.DoesNotExist:
fcm_edge_info = FCM_EDGE_INFO(fcm_edge=my_edge, info=form.cleaned_data['edge_info'])
fcm_edge_info.save()
messages.success(request, 'edited successfully')
else:
messages.error(request, "an error occured")
return render(request, 'fcm_app/view_fcm_edge_info.html/', {
'form': form, 'relation': edge,
})
return HttpResponseForbidden()
@login_required
def my_fcms(request):
if request.method == 'POST':
sort_maps_form = SortMapsForm(request.POST)
if sort_maps_form.is_valid():
my_fcms = []
user = request.user
if user.is_authenticated():
my_fcms = FCM.objects.filter(user=user)
sorting_type = sort_maps_form.cleaned_data['sorting_type']
if sorting_type == 'creation_date':
my_fcms = my_fcms.order_by('-creation_date')
else:
my_fcms = my_fcms.order_by('title')
return render(request, 'fcm_app/my_fcms.html/', {
'my_fcms': my_fcms,
"sort_maps_form": sort_maps_form
})
sort_maps_form = SortMapsForm()
my_fcms = []
user = request.user
if user.is_authenticated():
my_fcms = FCM.objects.filter(user=user)
return render(request, 'fcm_app/my_fcms.html/', {
'my_fcms': my_fcms,
"sort_maps_form": sort_maps_form
})
@login_required
def edit_fcm(request, fcm_id):
storage = messages.get_messages(request)
storage.used = True
another_user = False
fcm = FCM.objects.get(pk=fcm_id)
original_id=-1
if fcm.manual == False:
if request.user == fcm.user:
if request.method == 'POST':
data = {'map_image': fcm.map_image, 'map_html': fcm.map_html}
form = FCMForm(request.POST, data)
if form.is_valid():
print(request.user)
user = request.user
if user.is_authenticated():
fcm.title=form.cleaned_data['title']
fcm.description=form.cleaned_data['description']
fcm.country=form.cleaned_data['country']
fcm.status=form.cleaned_data['status']
fcm.save()
tags = form.cleaned_data['tags']
fcm.tags.clear()
for tag_element in fcm.tags.all():
tag_element.delete()
for tag_element in tags:
try:
new_tag = Tags(name=str(tag_element))
new_tag.save()
except DatabaseError:
pass
fcm.tags.add(str(tag_element))
messages.success(request, 'edited successfully')
else:
messages.error(request, "You must login to edit a map")
else:
messages.error(request, "form invalid")
tags = [t.name for t in fcm.tags.all()]
data = {'title': fcm.title, 'description': fcm.description, 'country': fcm.country, 'status': fcm.status}
print(tags)
form = FCMForm(initial=data)
# pdb.set_trace()
form.fields['map_image'].widget = forms.HiddenInput()
form.fields['map_html'].widget = forms.HiddenInput()
return render(request, 'fcm_app/edit_fcm.html', {
'form': form,
'fcm': fcm,
'tags': tags
})
return HttpResponseForbidden()
else:
if request.method == 'POST':
#data = {'map_image': fcm.map_image, 'map_html': fcm.map_html}
#data = {'chartis': fcm.chartis}
#print(data)
if request.user != fcm.user:
another_user = True
form = jsForm(request.POST)
# pdb.set_trace()
if form.is_valid():
print(request.user)
user = request.user
if user.is_authenticated():
if another_user:
original_id = fcm.id
fcm=FCM(user=user, creation_date=datetime.now(), manual = True, original=original_id)
# fcm.title = form.cleaned_data['title'] + ", updated by:" + str(user.username)
# else:
fcm.title=form.cleaned_data['title']
fcm.description=form.cleaned_data['description']
fcm.country=form.cleaned_data['country']
fcm.status=form.cleaned_data['status']
fcm.chartis = form.cleaned_data['chartis']
fcm.image_url = form.cleaned_data['image']
fcm.save()
tags = form.cleaned_data['tags']
fcm.tags.clear()
for tag_element in tags:
try:
new_tag = Tags(name=str(tag_element))
new_tag.save()
except DatabaseError:
pass
fcm.tags.add(str(tag_element))
description_json = json.loads(form.cleaned_data['chartis'])
print(description_json)
x = description_json
                    x1 = x['nodes']  # list of dictionaries
                    x2 = x['edges']  # list
for concept in FCM_CONCEPT.objects.filter(fcm=fcm):
concept.delete()
for edge in FCM_EDGES_IN_FCM_CONCEPT.objects.filter(fcm=fcm):
edge.delete()
for i in x1:
fcm_concept = FCM_CONCEPT(fcm=fcm, title=i['label'], id_in_fcm=i['id'], x_position=i['x'], y_position=i['y'])
fcm_concept.save()
if str(i['concept_info']).strip() != "":
fcm_concept_info = FCM_CONCEPT_INFO(fcm_concept=fcm_concept, info=str(i['concept_info']).strip())
fcm_concept_info.save()
for i in x2:
fcm_edges_in_fcm_concept = FCM_EDGES_IN_FCM_CONCEPT(fcm=fcm, id_in_fcm=i['id'], text=i['label'], from_concept=
FCM_CONCEPT.objects.filter(fcm=fcm).filter(id_in_fcm=i['from'])[0], to_concept=
FCM_CONCEPT.objects.filter(fcm=fcm).filter(id_in_fcm=i['to'])[0])
fcm_edges_in_fcm_concept.save()
if str(i['relation_info']).strip() != "":
fcm_relation_info = FCM_EDGE_INFO(fcm_edge=fcm_edges_in_fcm_concept, info=str(i['relation_info']).strip())
fcm_relation_info.save()
messages.success(request, 'edited successfully')
else:
messages.error(request, "You must login to edit a map")
else:
messages.error(request, "form invalid")
data = {'title': fcm.title, 'description': fcm.description, 'country': fcm.country, 'status': fcm.status, 'chartis': fcm.chartis}
form = jsForm(initial=data)
tags = [t.name for t in fcm.tags.all()]
print(tags)
#form.fields['chartis'].widget = forms.HiddenInput()
#form.fields['map_image'].widget = forms.HiddenInput()
#form.fields['map_html'].widget = forms.HiddenInput()
concept_info_form = FCMCONCEPTForm()
relation_info_form = FCMEDGEForm()
if another_user:
return redirect('/fcm/view-fcm/'+str(fcm.id)+'/')
else:
return render(request, 'fcm_app/edit_fcm2.html', {
'form': form,
'fcm': fcm,
'tags': tags,
'concept_info_form': concept_info_form,
'relation_info_form': relation_info_form
})
# return HttpResponseForbidden()
@login_required
def create_fcm(request):
# s = render_to_string('fcm_app/remove_messages.html', {}, request)
if request.method == 'POST':
form = jsForm(request.POST)
if form.is_valid():
print(request)
print(request.user)
user = request.user
if user.is_authenticated():
fcm = FCM(user=user,
title=form.cleaned_data['title'],
description=form.cleaned_data['description'],
country = form.cleaned_data['country'],
chartis = form.cleaned_data['chartis'],
image_url=form.cleaned_data['image'],
creation_date=datetime.now(),
manual = True)
fcm.save()
tags = form.cleaned_data['tags']
for tag_element in tags:
try:
new_tag = Tags(name=str(tag_element))
new_tag.save()
except DatabaseError:
pass
fcm.tags.add(str(tag_element))
                #searchTimi = request.POST.get('timi_pou_thelo', '')
                #searchTimi2 = request.POST.get('description', '')  # needs the field name, not the id
#print("Some output")
print(form.cleaned_data['chartis'])
description_json = json.loads(form.cleaned_data['chartis'])
#import pdb; pdb.set_trace()
print(description_json)
x = description_json
                x1 = x['nodes']  # list of dictionaries
                x2 = x['edges']  # list
                # NOTE: be careful if these lists are empty
for i in x1:
fcm_concept = FCM_CONCEPT(fcm=fcm, title = i['label'], id_in_fcm= i['id'], x_position = i['x'], y_position = i['y'])
fcm_concept.save()
if str(i['concept_info']).strip() != "":
fcm_concept_info = FCM_CONCEPT_INFO(fcm_concept=fcm_concept, info=str(i['concept_info']).strip())
fcm_concept_info.save()
for i in x2:
fcm_edges_in_fcm_concept = FCM_EDGES_IN_FCM_CONCEPT(fcm=fcm, id_in_fcm= i['id'], text=i['label'], from_concept=FCM_CONCEPT.objects.filter(fcm=fcm).filter(id_in_fcm=i['from'])[0], to_concept=FCM_CONCEPT.objects.filter(fcm=fcm).filter(id_in_fcm=i['to'])[0])
fcm_edges_in_fcm_concept.save()
if str(i['relation_info']).strip() != "":
fcm_relation_info = FCM_EDGE_INFO(fcm_edge=fcm_edges_in_fcm_concept, info=str(i['relation_info']).strip())
fcm_relation_info.save()
messages.success(request, 'Successfully created the System Map. You can browse the rest of the maps <a style="color: #a05017;" href="/fcm/browse?hasFilters=false"><u>here</u></a>. ')
else:
messages.error(request, "You must login to create a map")
else:
messages.error(request, "form invalid")
return redirect('/fcm/create_map')
form = jsForm()
concept_info_form = FCMCONCEPTForm()
relation_info_form = FCMEDGEForm()
return render(request, 'fcm_app/create_fcm.html', {
'form': form,
'concept_info_form': concept_info_form,
'relation_info_form': relation_info_form
})
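# A hedged, illustrative URLconf sketch (not part of the original file). The
# redirects and message links above reference paths such as /fcm/browse,
# /fcm/create_map, /fcm/view-fcm/<id>/ and /fcm/view-fcm-concept/<id>/, so a
# matching fcm_app/urls.py could plausibly look like the following (all names
# and exact patterns are assumptions, not taken from the source):
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^$', views.index, name='index'),
#       url(r'^browse$', views.browse, name='browse'),
#       url(r'^create_map$', views.create_fcm, name='create_fcm'),
#       url(r'^import$', views.import_fcm, name='import_fcm'),
#       url(r'^view-fcm/(?P<fcm_id>\d+)/$', views.view_fcm, name='view_fcm'),
#       url(r'^view-fcm-concept/(?P<fcm_id>\d+)/$', views.view_fcm_concept,
#           name='view_fcm_concept'),
#   ]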
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 19 02:20:34 2022
@author: maout
"""
import torch
import numpy as np
from matplotlib import pyplot as plt
from DeterministicParticleFlowControl import torched_DPFC
#import DeterministicParticleFlowControl as dpfc
from utils.utils_pytorch import set_device
### Limit cycle drift function and its analytic gradient, passed in for comparison calculations
def f(x,t=0):#LC
x0 = -x[1] + x[0]*(1-x[0]**2 -x[1]**2)
x1 = x[0] + x[1]*(1-x[0]**2 -x[1]**2)
return torch.cat((x0.view(1, -1) ,x1.view(1, -1) ), dim=0)
def f_numpy(x,t=0):#LC
x0 = -x[1] + x[0]*(1-x[0]**2 -x[1]**2)
x1 = x[0] + x[1]*(1-x[0]**2 -x[1]**2)
return np.array([x0,x1])
def glnfss(x,sigma):
x0 = - x[0]*(x[0]**2 + x[1]**2 - 1)/(0.5*sigma**2)
x1 = - x[1]*(x[0]**2 + x[1]**2 - 1)/(0.5*sigma**2)
return np.array([x0,x1])
DEVICE = set_device()
#simulation_precision
dt = 0.001
t_start = 0.
T = 50#0.
#x0 = np.array([1.81, -1.41])
x0 = torch.tensor([-0., -1.0], dtype=torch.float64, device=DEVICE )
timegridall = np.arange(0,T,dt)
F = np.zeros((2,timegridall.size))
#noise amplitude
g = 0.1
for ti,t in enumerate(timegridall):
if ti==0:
F[:,0] = x0.cpu()
else:
F[:,ti] = F[:,ti-1]+ dt* f_numpy(F[:,ti-1])+(g)*np.random.normal(loc=0.0, scale=np.sqrt(dt), size=(2,))
steps = 500 #steps between initial and terminal points
obs_dens = steps
N = 200
M = 40
t1 = timegridall[100]
t2 = timegridall[100+steps]
y1 = torch.tensor(F[:,100], dtype=torch.float64, device=DEVICE)
y2 = torch.tensor(F[:,100+steps], dtype=torch.float64, device=DEVICE)
##create object bridg2d that contains the simulated flows
bridg2d = torched_DPFC(t1,t2,y1,y2,f,g,N,M,dens_est='nonparametric', deterministic=True, device=DEVICE)
plt.figure(figsize=(10,10))
plt.plot(F[0],F[1],'.', alpha=0.05);
if DEVICE=='cpu':
#plt.plot(bridg2d.Z[0].detach().numpy().T,bridg2d.Z[1].detach().numpy().T,alpha=0.5,c='grey');
plt.plot(bridg2d.B[0].detach().numpy().T,bridg2d.B[1].detach().numpy().T,alpha=0.5,c='grey');
plt.plot(y1[0].detach().numpy(),y1[1].detach().numpy(),'g.',markersize=16);
plt.plot(y2[0].detach().numpy(),y2[1].detach().numpy(),'d',c='maroon',markersize=16);
plt.xlim(-0.5,1.5)
plt.ylim(-1.5,0)
else:
plt.plot(bridg2d.B[0].cpu().detach().numpy().T,bridg2d.B[1].cpu().detach().numpy().T,alpha=0.5,c='grey');
plt.plot(y1[0].cpu().detach().numpy(),y1[1].cpu().detach().numpy(),'g.',markersize=16);
plt.plot(y2[0].cpu().detach().numpy(),y2[1].cpu().detach().numpy(),'d',c='maroon',markersize=16);
plt.title('Invariant density of the limit cycle and backward flow');
|
import re
from fastest.utils import count_truthy
def used_as_int(statement, variable):
"""
example: used_as_int("a = 4", "a") -> 1 #
example: used_as_int("a + 4", "a") -> 1 #
example: used_as_int("a * 4", "a") -> 1 #
example: used_as_int("a - 4", "a") -> 1 #
:param statement:
:param variable:
:return:
"""
statement = statement.strip()
assignment = re.search(r'{variable}\s*=\s*\d+'.format(variable=variable), statement)
addition = re.search(r'{variable}\s*\+\s*'.format(variable=variable), statement)
addition_inc = re.search(r'{variable}\s*\+=\s*\d+'.format(variable=variable), statement)
multiplication = re.search(r'{variable}\s*\*\s*'.format(variable=variable), statement)
subtraction = re.search(r'{variable}\s*-\s*'.format(variable=variable), statement)
division = re.search(r'{variable}\s*/\s*'.format(variable=variable), statement)
return count_truthy([assignment, addition, subtraction,multiplication, division, addition_inc])
def used_as_str(statement, variable):
"""
example: used_as_str("string_var = 'something'", "string_var") -> 1 #
example: used_as_str("string_var + 'something'", "string_var") -> 1 #
example: used_as_str("string_var * 5", "string_var") -> 1 #
:param statement:
:param variable:
:return:
"""
statement = statement.strip()
    assignment = re.match(r'{variable}\s*=\s*("|\')\w*("|\')'.format(variable=variable), statement)
addition = re.match(r'{variable}\s*\+\s*'.format(variable=variable), statement)
multiplication = re.match(r'{variable}\s*\*\d*'.format(variable=variable), statement)
return count_truthy([assignment, addition, multiplication])
def used_as_iterable(statement, variable):
"""
example: used_as_iterable("for word in words", "words") -> 1 #
:param statement:
:param variable:
:return:
"""
statement = statement.strip()
loop = re.match(r'for \w+ in {variable}'.format(variable=variable), statement)
map_fn = re.search(r'map\(.*[^,)],\s*{variable}'.format(variable=variable), statement)
filter_fn = re.search(r'filter\(.*[^,)],\s*{variable}'.format(variable=variable), statement)
reduce_fn = re.search(r'reduce\(.*[^,)],\s*{variable}'.format(variable=variable), statement)
item_index = re.match(r'{variable}\[\d+\]'.format(variable=variable), statement)
return count_truthy([loop, map_fn, filter_fn, reduce_fn, item_index])
def used_as_list(statement, variable):
"""
example: used_as_list("apples.append(10)", "apples") -> 1 #
example: used_as_list("apples = [11, 12]", "apples") -> 1 #
:param statement:
:param variable:
:return:
"""
statement = statement.strip()
assignment = re.match(r'{variable}\s*=\s*\['.format(variable=variable), statement)
assignment_as_instance = re.match(r'{variable}\s*=\s*list\('.format(variable=variable), statement)
append = re.search(r'{variable}.append\('.format(variable=variable), statement)
return count_truthy([assignment_as_instance, assignment, append]) + used_as_iterable(statement, variable)
def used_as_tuple(statement, variable):
"""
example: used_as_tuple("words = (11, 2)", "words") -> 1 #
:param statement:
:param variable:
:return:
"""
statement = statement.strip()
assignment = re.match(r'{variable}\s*=\s*\('.format(variable=variable), statement)
assignment_as_instance = re.match(r'{variable}\s*=\s*tuple\('.format(variable=variable), statement)
insert = re.match(r'{variable}.insert\('.format(variable=variable), statement)
return count_truthy([assignment_as_instance, assignment, insert]) + used_as_iterable(statement, variable)
def used_as_dict(statement, variable):
"""
example: used_as_dict("dict_input['val']", "dict_input") -> 1 #
:param statement:
:param variable:
:return:
"""
statement = statement.strip()
assignment = re.search(r'{variable}\s*=\s*\{{'.format(variable=variable), statement)
    key_ref_str = re.search(r'{variable}\[("|\')\w+("|\')\]'.format(variable=variable), statement)
key_ref_var = re.search(r'{variable}\[\w+\]'.format(variable=variable), statement)
get_access = re.search(r'{variable}.get\('.format(variable=variable), statement)
return count_truthy([assignment, key_ref_str, key_ref_var, get_access])
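# A brief usage sketch (not part of the original module), mirroring the
# docstring examples above; each helper returns the number of usage patterns
# it detected for the given variable within a single statement:
#
#   used_as_int("a = 4", "a")                          # -> 1
#   used_as_list("apples.append(10)", "apples")        # -> 1
#   used_as_dict("dict_input['val']", "dict_input")    # -> 1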
|
from typing import Optional, List
from ..options import Options
from ..validation_rule import ValidationRule
from .content import Content
from ....utils.serializer import serialized
ValidationRules = List[ValidationRule]
class Checkbox(Content):
def __init__(
self,
content_id: str,
title: str,
default_state: bool = False,
options: Optional[Options] = None,
validations_rules: Optional[ValidationRules] = None,
):
super().__init__(content_id=content_id, content_type="checkbox")
self.title = title
self.default_state = default_state
if options is not None:
self.options = options.__dict__
if validations_rules is not None:
self.validations_rules = serialized(validations_rules)
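# A hedged usage sketch (not part of the original module), using only the
# constructor signature defined above; options and validation rules are
# optional, so a minimal checkbox could be built as:
#
#   checkbox = Checkbox(content_id="accept_terms",
#                       title="I accept the terms",
#                       default_state=True)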
|
import os
import sys
sys.path.insert(0, os.path.abspath("../src/investporto"))
# -- Project information -----------------------------------------------------
import investporto
project = "investporto"
copyright = "2020, Sebastian Fischer"
author = "Sebastian Fischer"
version = investporto.__version__.split(".")[0]
release = investporto.__version__
# -- General configuration ---------------------------------------------------
primary_domain = "py"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"sphinxcontrib.programoutput",
"sphinx_autodoc_typehints",
"sphinx_rtd_dark_mode",
]
source_suffix = {
".rst": "restructuredtext",
".txt": "restructuredtext",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Include documentation from both the class level and __init__
autoclass_content = "both"
# The default autodoc directive flags
autodoc_default_flags = ["members", "show-inheritance"]
|
#===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
""": OneOf property module."""
__version__ = "$Revision: #1 $"
#===========================================================================
from ..StyleProperty import StyleProperty
from .. import convert as cvt
#===========================================================================
__all__ = [ 'OneOf' ]
#===========================================================================
class OneOf( StyleProperty ):
""": A OneOf style property.
"""
#-----------------------------------------------------------------------
def __init__( self, converters, default = None, doc = None ):
""": Create a new OneOf object.
= INPUT VARIABLES
- default The default value that instances will be initialized with.
- doc The docstring for this property.
"""
if doc is None:
doc = "\nThe value must satisfy one of the following converters:"
for c in converters:
doc += " + '%s'\n" % ( c, )
doc += "\n"
self.converters = converters
validator = cvt.Converter( cvt.toOneOf, converters )
StyleProperty.__init__( self, default, validator, doc )
#-----------------------------------------------------------------------
def validate( self, value ):
""": Validate and return a valid value
= ERROR CONDITIONS
- Will throw an exception if the specified value is invalid.
= INPUT VARIABLES
- value The value to set the instance of this property to.
= RETURN VALUE
- Returns a valid value.
"""
# Since we know that the converters list is the first argument we are
# passing into the Converter CTOR up in __init__, we can reference it
# directly here. We need to make sure that the children types all
# have the 'name' set so the error messages don't get confusing. We
        # do this here instead of in the CTOR because the 'name' gets set just
# after __init__ finishes.
for cvtType in self.validator.args[0]:
if isinstance( cvtType, StyleProperty ):
cvtType._name = self.name
# Call the base class validate method
result = StyleProperty.validate( self, value )
return result
#-----------------------------------------------------------------------
|
"""Titan orbital module."""
from datetime import datetime as dt
import numpy as np
# Constantes
UA = 149597870.7 # 1 Astronomical unit (km)
# Default orbital parameters
ORBIT = {
'saturn_orbit': 15.945, # 1 Saturn orbit (days)
'sun_orbit': 10751, # 1 Sun orbit (days)
'obliquity': 26.730882944988142,
'vernal_equinox': '1980-02-22', # Date of the first vernal equinox (before Voyager 1)
'ellipse': {
'A': 6.1664830805512354,
'B': 6.0482745790986066,
'C': 101.03535416292833,
},
}
def readDate(date):
"""Read date as datetime64."""
if hasattr(date, 'time') and isinstance(date.time, dt):
return readDate(date.time)
if isinstance(date, dt):
return np.datetime64(date.date())
if isinstance(date, str):
return np.datetime64(
date.replace('/', '-').replace(' ', 'T').split('T')[0], 'D')
return date
class Orbit:
    '''Titan orbit functions and parameters'''
def __init__(self):
'''Init default parameters'''
self.Tday = ORBIT['saturn_orbit']
# Default orbit parameters calculated with NAIF Space Kernels
self.obl = ORBIT['obliquity']
self.orbit = np.timedelta64(ORBIT['sun_orbit'], 'D')
self.eq_v = np.datetime64(ORBIT['vernal_equinox'])
self.A = ORBIT['ellipse']['A']
self.B = ORBIT['ellipse']['B']
self.C = ORBIT['ellipse']['C']
return
def __repr__(self):
        return 'Titan orbit functions and parameters'
def Ls(self, date, eps=1.e-7, imax=25):
'''Calculate the solar longitude corresponding to a date.
Parameters
-----------
date : str, numpy.datetime64
Input date (YYYY-MM-DD or YYYY/MM/DD or YYYY-MM-DDThh:mm:ss.ms)
eps : float, optional
Precision of the convergence
        imax : int, optional
            Maximum number of iterations to reach convergence; a ValueError is raised otherwise.
Note
-----
The value of Ls is the solution of a transcendental equation which
is numerically solved with the Newton method:
L_s^0 = 360 · (Date - Eq^V)/Orbit) - B
L_s^(n+1) = L_s^n - (
L_s^n - L_s^0 + A · sin(2·pi·(L_s^n - C)/360)
)/(
1 + A · 2·pi/360 · cos(2·pi·(L_s^n - C)/360)
)
Return
-------
        Ls : real
            Solar longitude corresponding to the input date
'''
date = readDate(date)
Ls_0 = ( (360.*(date - self.eq_v).astype(int))/self.orbit.astype(float) - self.B ) % 360
Ls = Ls_0
for ii in range(imax):
            # Newton step: the derivative term in the denominator uses cos,
            # consistent with the expression given in the docstring above
            dLs = - (Ls - Ls_0 + self.A * np.sin(2*np.pi*(Ls - self.C)/360.)) \
                / (1 + self.A * 2*np.pi/360. * np.cos(2*np.pi*(Ls - self.C)/360.))
Ls = Ls + dLs
if np.abs(dLs) < eps:
break
else:
            raise ValueError('Maximum number of iterations reached without convergence.')
return Ls % 360
def date(self, Ls, Ty=0):
'''Calculate the date corresponding to a solar longitude.
Parameters
-----------
        Ls : real
            Input solar longitude
Ty : int, optional
Number of Titan year after 1980-02-22 (Vernal Equinox before Voyager 1)
Return
-------
        date : numpy.datetime64
            Date corresponding to the input solar longitude
'''
date = np.round(
self.orbit.astype(int)/360. * (
Ls + self.A * np.sin(2*np.pi*(Ls - self.C)/360.) + self.B + 360 * Ty
)
)
return self.eq_v + np.timedelta64(int(date), 'D')
orbit = Orbit()
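# A hedged usage sketch (not part of the original module), using the
# module-level `orbit` instance created above:
#
#   ls = orbit.Ls('2009-08-11')   # solar longitude (deg) for that date
#   d = orbit.date(90.0, Ty=1)    # date corresponding to Ls = 90 deg in Titan year Ty = 1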
|
import sys
from server import main
#from . import server
if __name__ == '__main__':
sys.exit(main())
|
from django.core.management.base import BaseCommand
from products.models import Product
class Command(BaseCommand):
help = 'Restock all products with the given quantity'
def add_arguments(self, parser):
parser.add_argument(
'-q',
'--quantity',
type=int,
action='store',
dest='quantity',
default=10,
help='Quantity to stock',
)
def handle(self, *args, **options):
for product in Product.objects.all():
if product.unitary:
product.stock = options['quantity']
else:
product.stock = options['quantity'] * 100
product.save()
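# A hedged usage note (not part of the original file): assuming this module
# lives at products/management/commands/restock.py, the command would be run as
#
#   python manage.py restock --quantity 25
#
# which restocks unitary products to 25 units and all other products to 2500.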
|
#Convert all the words to lower case
#Source https://github.com/saugatapaul1010/Amazon-Fine-Food-Reviews-Analysis
import re
def lower_case(x):
x = str(x).lower()
x = x.replace(",000,000", " m").replace(",000", " k").replace("′", "'").replace("’", "'")\
.replace("won't", " will not").replace("cannot", " can not").replace("can't", " can not")\
.replace("n't", " not").replace("what's", " what is").replace("it's", " it is")\
.replace("'ve", " have").replace("'m", " am").replace("'re", " are")\
.replace("he's", " he is").replace("she's", " she is").replace("'s", " own")\
.replace("%", " percent ").replace("₹", " rupee ").replace("$", " dollar ")\
.replace("€", " euro ").replace("'ll", " will").replace("how's"," how has").replace("y'all"," you all")\
.replace("o'clock"," of the clock").replace("ne'er"," never").replace("let's"," let us")\
.replace("finna"," fixing to").replace("gonna"," going to").replace("gimme"," give me").replace("gotta"," got to").replace("'d"," would")\
.replace("daresn't"," dare not").replace("dasn't"," dare not").replace("e'er"," ever").replace("everyone's"," everyone is")\
.replace("'cause'"," because")
x = re.sub(r"([0-9]+)000000", r"\1m", x)
x = re.sub(r"([0-9]+)000", r"\1k", x)
return x
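# A brief usage sketch (not part of the original file): the helper lower-cases
# the text and expands contractions, currency symbols and thousand suffixes
# before any downstream tokenisation, e.g. (roughly)
#
#   lower_case("I can't pay $5000")   # -> "i  can not pay  dollar 5k"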
|
from ._generic import SitemapScraper
class BBCFoodScraper(SitemapScraper):
"""
A scraper for bbc.co.uk/food
"""
NAME = "bbcfood"
RECIPE_URL_FORMAT = "https://www.bbc.co.uk/food/recipes/{id}/"
RECIPE_URL_RE = r"https://www.bbc.co.uk/food/recipes/(?P<id>[^/]+)/?$"
SITEMAP_URL = "https://www.bbc.co.uk/food/sitemap.xml"
|
import math
import os
import random
import re
import sys
from collections import Counter
#Complete the reverseShuffleMerge function below.
def reverseShuffleMerge(s):
    s = list(reversed(s))
    remaining_dict, required_dict, added_dict = {}, {}, {}
    for c in s:
        if c not in remaining_dict:
            remaining_dict[c] = 1
        else:
            remaining_dict[c] += 1
    for key, value in remaining_dict.items():
        required_dict[key] = value // 2
        added_dict[key] = 0
    char_list = []
    index = 0
    min_index = 0
    min_char = '|'
    while index < len(s):
        char = s[index]
        if required_dict[char] > added_dict[char]:
            if char < min_char:
                min_char = char
                min_index = index
            if remaining_dict[char] - 1 < required_dict[char] - added_dict[char]:
                while index > min_index:
                    index -= 1
                    char = s[index]
                    remaining_dict[char] += 1
                added_dict[char] += 1
                char_list.append(char)
                min_char = '|'
        remaining_dict[char] -= 1
        index += 1
    return "".join(char_list)
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = reverseShuffleMerge(s)
    fptr.write(result + '\n')
    fptr.close()
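# A hedged note (not part of the original submission): on the HackerRank
# sample input the greedy routine above yields
#
#   reverseShuffleMerge("eggegg")   # -> "egg"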
|
from PyQt4.Qt import QApplication
class DummyLauncher:
def __init__(self, parent):
self.parent = parent
def set_property(self, name, value):
pass
|
import numpy as np
import collections
import math
import json
from argparse import Namespace
from dataclasses import dataclass, field
import torch
import sacrebleu
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import collate_tokens
@dataclass
class PolicyGradientCriterionConfig(FairseqDataclass):
sample_beam: int = field(default=5, metadata={"help": "number of sample size"})
use_sample_based_baseline: bool = field(default=False)
use_beam_while_training: bool = field(default=False)
@register_criterion(
"policy_gradient", dataclass=PolicyGradientCriterionConfig
)
class PolicyGradientCriterion(FairseqCriterion):
def __init__(self, task, sample_beam, use_sample_based_baseline, use_beam_while_training):
super().__init__(task)
self.sample_beam = sample_beam
self.use_sample_based_baseline = use_sample_based_baseline
self.use_beam_while_training = use_beam_while_training
self.generator = None
def _decode(self, toks, escape_unk=False):
s = self.task.tgt_dict.string(
toks.int().cpu(),
self.task.cfg.eval_bleu_remove_bpe,
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.task.tokenizer:
s = self.task.tokenizer.decode(s)
return s
def forward(self, model, sample, reduce=True):
if self.generator is None:
gen_args = Namespace(**json.loads(self.task.cfg.eval_bleu_args))
gen_args.sample_beam = self.sample_beam
if not self.use_beam_while_training:
gen_args.sampling = True
gen_args.sampling_topp = 0.5
self.generator = self.task.build_generator([model], gen_args)
model.eval()
with torch.no_grad():
hypos = self.generator.generate([model], sample)
model.train()
rewards = []
pad_idx = self.task.tgt_dict.pad()
eos_idx = self.task.tgt_dict.eos()
num_hypos = len(hypos)
num_samples = len(hypos[0])
hypos = [[preds["tokens"] for preds in each] for each in hypos]
for hypo, rtarget in zip(hypos, sample["target"]):
rewards.append([])
ref = self._decode(
utils.strip_pad(rtarget, pad_idx),
escape_unk=True, # don't count <unk> as matches to the hypo
)
for preds in hypo:
hyp = self._decode(preds)
if self.task.cfg.eval_tokenized_bleu:
rewards[-1].append(sacrebleu.corpus_bleu([hyp], [[ref]], tokenize="none").score)
else:
rewards[-1].append(sacrebleu.corpus_bleu([hyp], [[ref]]).score)
hypos = [item for sublist in hypos for item in sublist]
vinputs = {"src_tokens": sample["net_input"]["src_tokens"].tile(
1, num_samples).view(num_hypos * num_samples, -1),
"src_lengths": sample["net_input"]["src_lengths"][:, None].tile(
1, num_samples).view(num_hypos * num_samples)}
vtargets = collate_tokens(hypos, pad_idx, eos_idx,
left_pad=self.task.cfg.left_pad_target)
vinputs["prev_output_tokens"] = collate_tokens(
hypos, pad_idx, eos_idx, left_pad=self.task.cfg.left_pad_target,
move_eos_to_beginning=True)
net_output = model(**vinputs)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprob = -lprobs.gather(dim=-1, index=vtargets[:, :, None])
non_pad_mask = vtargets.ne(pad_idx).view(num_hypos, num_samples, -1)
rewards = lprob.new_tensor(rewards).view(num_hypos, num_samples, 1)
if self.use_sample_based_baseline:
adv = rewards - rewards.mean(1, keepdim=True)
loss = (lprob.view(num_hypos, num_samples, -1) * adv)[non_pad_mask]
else:
loss = (lprob.view(num_hypos, num_samples, -1) * rewards)[non_pad_mask]
batch_tokens = loss.size(0) / num_samples
avg_rl_loss = torch.sum(loss) / batch_tokens
logging_output = {
'loss': utils.item(avg_rl_loss.data),
'sample_bleu': utils.item(torch.mean(rewards).data),
'ntokens': batch_tokens,
}
return avg_rl_loss, batch_tokens, logging_output
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
loss = sum(log.get("loss", 0) for log in logging_outputs)
sample_bleu = sum(log.get("sample_bleu", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss, ntokens)
metrics.log_scalar("sample_bleu", sample_bleu, ntokens)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
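# A hedged usage note (not part of the original file): with the dataclass-based
# registration above, the criterion could plausibly be selected from the
# fairseq CLI roughly as follows (flag spellings follow fairseq's usual
# dataclass-to-CLI mapping and are assumptions, not taken from this source):
#
#   fairseq-train <data-bin> --task translation \
#       --criterion policy_gradient --sample-beam 5 \
#       --use-sample-based-baseline \
#       --eval-bleu --eval-bleu-args '{"beam": 5}'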
|
import torch
from torch import nn
from torch.nn import functional as F
class ContrastiveEmbeddingLoss(nn.Module):
"""
Contrastive embedding loss
paper: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=1.0, reduction="mean"):
"""
Constructor method for the ContrastiveEmbeddingLoss class.
Args:
margin: margin parameter.
reduction: criterion reduction type.
"""
super().__init__()
self.margin = margin
self.reduction = reduction or "none"
def forward(self, embeddings_left, embeddings_right, distance_true):
"""
Forward propagation method for the contrastive loss.
Args:
embeddings_left: left objects embeddings
embeddings_right: right objects embeddings
distance_true: true distances
Returns:
loss
"""
        # euclidean distance
diff = embeddings_left - embeddings_right
distance_pred = torch.sqrt(torch.sum(torch.pow(diff, 2), 1))
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance_ = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(
distance_pred, 2
) + distance_true * torch.pow(margin_distance_, 2)
if self.reduction == "mean":
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == "sum":
loss = torch.sum(loss)
return loss
class ContrastiveDistanceLoss(nn.Module):
"""
Contrastive distance loss
"""
def __init__(self, margin=1.0, reduction="mean"):
"""
Constructor method for the ContrastiveDistanceLoss class.
Args:
margin: margin parameter.
reduction: criterion reduction type.
"""
super().__init__()
self.margin = margin
self.reduction = reduction or "none"
def forward(self, distance_pred, distance_true):
"""
Forward propagation method for the contrastive loss.
Args:
distance_pred: predicted distances
distance_true: true distances
Returns:
loss
"""
bs = len(distance_true)
margin_distance = self.margin - distance_pred
margin_distance_ = torch.clamp(margin_distance, min=0.0)
loss = (1 - distance_true) * torch.pow(
distance_pred, 2
) + distance_true * torch.pow(margin_distance_, 2)
if self.reduction == "mean":
loss = torch.sum(loss) / 2.0 / bs
elif self.reduction == "sum":
loss = torch.sum(loss)
return loss
class ContrastivePairwiseEmbeddingLoss(nn.Module):
"""
ContrastivePairwiseEmbeddingLoss – proof of concept criterion.
Still work in progress.
"""
def __init__(self, margin=1.0, reduction="mean"):
"""
Constructor method for the ContrastivePairwiseEmbeddingLoss class.
Args:
margin: margin parameter.
reduction: criterion reduction type.
"""
super().__init__()
self.margin = margin
self.reduction = reduction or "none"
def forward(self, embeddings_pred, embeddings_true):
"""
Work in progress.
Args:
embeddings_pred: predicted embeddings
embeddings_true: true embeddings
Returns:
loss
"""
device = embeddings_pred.device
# s - state space
# d - embeddings space
# a - action space
pairwise_similarity = torch.einsum(
"se,ae->sa", embeddings_pred, embeddings_true
)
bs = embeddings_pred.shape[0]
batch_idx = torch.arange(bs, device=device)
loss = F.cross_entropy(
pairwise_similarity, batch_idx, reduction=self.reduction
)
return loss
__all__ = [
"ContrastiveEmbeddingLoss",
"ContrastiveDistanceLoss",
"ContrastivePairwiseEmbeddingLoss",
]
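# A minimal, hedged usage sketch (not part of the original module): the
# embedding loss takes two batches of embeddings and a 0/1 "distance" target
# (0 = similar pair, 1 = dissimilar pair):
#
#   left = torch.randn(8, 128)
#   right = torch.randn(8, 128)
#   target = torch.randint(0, 2, (8,)).float()
#   criterion = ContrastiveEmbeddingLoss(margin=1.0, reduction="mean")
#   loss = criterion(left, right, target)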
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from snecs.typedefs import EntityID
from scripts.engine import library, world
from scripts.engine.action import Skill, init_action
from scripts.engine.component import Aesthetic, Position
from scripts.engine.core.constants import DamageType, DirectionType, PrimaryStat, Shape
from scripts.engine.effect import (
ApplyAfflictionEffect,
DamageEffect,
Effect,
MoveActorEffect,
ReduceSkillCooldownEffect,
)
from scripts.engine.world_objects.tile import Tile
if TYPE_CHECKING:
from typing import List, Optional
@init_action
class Move(Skill):
"""
Basic move for an entity.
"""
key = "move"
def __init__(self, user: EntityID, target_tile: Tile, direction):
"""
Only Move needs an init as it overrides the target tile
"""
from scripts.engine import world
# override target
position = world.get_entitys_component(user, Position)
tile = world.get_tile((position.x, position.y))
super().__init__(user, tile, direction)
def build_effects(self, entity: EntityID, potency: float = 1.0) -> List[MoveActorEffect]: # type:ignore
"""
Build the effects of this skill applying to a single entity.
"""
move_effect = MoveActorEffect(
origin=self.user,
target=entity,
success_effects=[],
failure_effects=[],
direction=self.direction,
move_amount=1,
)
return [move_effect]
def get_animation(self, aesthetic: Aesthetic):
# this special case is handled in the MoveActorEffect
return None
@init_action
class BasicAttack(Skill):
"""
Basic attack for an entity
"""
key = "basic_attack"
def build_effects(self, entity: EntityID, potency: float = 1.0) -> List[DamageEffect]: # type:ignore
"""
Build the effects of this skill applying to a single entity.
"""
damage_effect = DamageEffect(
origin=self.user,
success_effects=[],
failure_effects=[],
target=entity,
stat_to_target=PrimaryStat.VIGOUR,
accuracy=library.GAME_CONFIG.base_values.accuracy,
damage=int(library.GAME_CONFIG.base_values.damage * potency),
damage_type=DamageType.MUNDANE,
mod_stat=PrimaryStat.CLOUT,
mod_amount=0.1,
)
return [damage_effect]
def get_animation(self, aesthetic: Aesthetic):
# we can show animations depending on the direction with self.direction
return aesthetic.sprites.attack
@init_action
class Lunge(Skill):
"""
Lunge skill for an entity
"""
key = "lunge"
# FIXME - only applying damage when moving 2 spaces, anything less fails to apply.
def __init__(self, user: EntityID, tile: Tile, direction: DirectionType):
"""
Set the target tile as the current tile since we need to move.
N.B. ignores provided tile.
"""
position = world.get_entitys_component(user, Position)
if position:
_tile = world.get_tile((position.x, position.y))
else:
_tile = world.get_tile((0, 0)) # should always have position but just in case
super().__init__(user, _tile, direction)
self.move_amount = 2
def build_effects(self, entity: EntityID, potency: float = 1.0) -> List[Effect]:
"""
Build the skill effects
"""
# chain the effects conditionally
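        # resolution order: the move effect runs first; its success triggers the damage
        # effect, whose success in turn triggers the cooldown reduction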
cooldown_effect = self._build_cooldown_reduction_effect(entity=entity)
damage_effect = self._build_damage_effect(success_effects=[cooldown_effect], potency=potency)
move_effect = self._build_move_effect(entity=entity, success_effects=([damage_effect] if damage_effect else []))
return [move_effect]
def _build_move_effect(self, entity: EntityID, success_effects: List[Effect]) -> MoveActorEffect:
"""
Return the move effect for the lunge
"""
move_effect = MoveActorEffect(
origin=self.user,
target=entity,
success_effects=success_effects,
failure_effects=[],
direction=self.direction,
move_amount=self.move_amount,
)
return move_effect
def _build_damage_effect(self, success_effects: List[Effect], potency: float = 1.0) -> Optional[DamageEffect]:
"""
Return the damage effect for the lunge
"""
target = self._find_target()
damage_effect = None
if target:
damage_effect = DamageEffect(
origin=self.user,
success_effects=success_effects,
failure_effects=[],
target=target,
stat_to_target=PrimaryStat.VIGOUR,
accuracy=library.GAME_CONFIG.base_values.accuracy,
damage=int(library.GAME_CONFIG.base_values.damage * potency),
damage_type=DamageType.MUNDANE,
mod_stat=PrimaryStat.CLOUT,
mod_amount=0.1,
)
return damage_effect
def _find_target(self) -> Optional[EntityID]:
"""
Find the first entity that will be affected by the lunge
"""
increment = (self.direction[0] * (self.move_amount + 1), self.direction[1] * (self.move_amount + 1))
target_tile_pos = (self.target_tile.x + increment[0], self.target_tile.y + increment[1])
entities = world.get_entities_on_tile(world.get_tile(target_tile_pos))
if not entities:
return None
return entities[0]
def _build_cooldown_reduction_effect(self, entity: EntityID) -> ReduceSkillCooldownEffect:
"""
Returns an effect that executes the cooldown effect for the lunge
"""
cooldown_effect = ReduceSkillCooldownEffect(
origin=self.user, target=entity, skill_name=self.name, amount=2, success_effects=[], failure_effects=[]
)
return cooldown_effect
def get_animation(self, aesthetic: Aesthetic):
return aesthetic.sprites.attack
@init_action
class TarAndFeather(Skill):
"""
TarAndFeather skill for an entity
"""
key = "tar_and_feather"
def __init__(self, user: EntityID, target_tile: Tile, direction: DirectionType):
super().__init__(user, target_tile, direction)
self.affliction_name = "flaming"
self.affliction_duration = 5
self.reduced_modifier = 0.5
self.cone_size = 1
def build_effects(self, hit_entity: EntityID, potency: float = 1.0) -> List[Effect]:
"""
Build the skill effects
"""
# get position
position = world.get_entitys_component(hit_entity, Position)
if not position:
return []
# the cone should start where the hit occurred and in the direction of the projectile.
entities_in_cone = world.get_affected_entities(
(position.x, position.y), Shape.CONE, self.cone_size, self.direction
)
# we should also ignore the hit entity and the projectile from the extra effects
entities_in_cone = [x for x in entities_in_cone if x is not hit_entity and x is not self.projectile]
reduced_effects = []
for entity_in_cone in entities_in_cone:
reduced_effects += self._create_effects(target=entity_in_cone, modifier=self.reduced_modifier * potency)
logging.warning(f"creating effects for {entity_in_cone}")
first_hit_effects = self._create_effects(target=hit_entity, success_effects=reduced_effects, modifier=potency)
return first_hit_effects
def _create_effects(self, target: EntityID, success_effects: List[Effect] = None, modifier: float = 1.0):
damage_effect = self._build_damage_effect(target, success_effects or [], modifier)
flaming_effect = self._build_flaming_effect(target, modifier)
return [damage_effect, flaming_effect]
def _build_flaming_effect(self, entity: EntityID, modifier: float):
flaming_effect = ApplyAfflictionEffect(
origin=self.user,
target=entity,
affliction_name=self.affliction_name,
duration=max(1, int(self.affliction_duration * modifier)),
success_effects=[],
failure_effects=[],
)
return flaming_effect
def _build_damage_effect(self, entity: EntityID, success_effects: List[Effect], modifier: float):
damage_effect = DamageEffect(
origin=self.user,
success_effects=success_effects,
failure_effects=[],
target=entity,
stat_to_target=PrimaryStat.VIGOUR,
accuracy=library.GAME_CONFIG.base_values.accuracy,
damage=int(library.GAME_CONFIG.base_values.damage * modifier),
damage_type=DamageType.MUNDANE,
mod_stat=PrimaryStat.CLOUT,
mod_amount=0.1,
)
return damage_effect
def get_animation(self, aesthetic: Aesthetic):
return aesthetic.sprites.attack
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2014, Quixey Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import aliyun.ecs.connection
import aliyun.slb.connection
import unittest
class EcsReadOnlyTest(unittest.TestCase):
def setUp(self):
self.c = aliyun.ecs.connection.EcsConnection('cn-hangzhou')
def testRegions(self):
regions = self.c.get_all_regions()
regionids = self.c.get_all_region_ids()
self.assertEqual([r.region_id for r in regions], regionids)
def testZones(self):
zones = self.c.get_all_zones()
zoneids = self.c.get_all_zone_ids()
self.assertEqual([z.zone_id for z in zones], zoneids)
def testClusters(self):
clusters = self.c.get_all_clusters()
self.assertTrue(len(clusters)>0)
def testInstances(self):
instances = self.c.get_all_instance_status()
instanceids = self.c.get_all_instance_ids()
self.assertEqual([i.instance_id for i in instances], instanceids)
inst = self.c.get_instance(instanceids.pop())
self.assertTrue(inst is not None)
def testInstancesInZone(self):
zones = self.c.get_all_zones()
zone_id = zones.pop().zone_id
instances = self.c.get_all_instances(zone_id)
self.assertEqual(instances.pop().zone_id, zone_id)
def testDescribeDisks(self):
disks = self.c.describe_disks()
self.assertTrue(len(disks)>0)
def testDescribeSnapshots(self):
iid = self.c.get_all_instance_ids().pop()
disk = self.c.describe_instance_disks(iid).pop()
snaps = self.c.describe_snapshots(iid, disk)
if len(snaps)>0:
snap = self.c.describe_snapshot(snaps.pop())
self.assertTrue(snap is not None)
def testDescribeImages(self):
imgs = self.c.describe_images()
self.assertTrue(len(imgs)>0)
def testSecurityGroups(self):
groups = self.c.describe_security_groups()
gids = self.c.get_security_group_ids()
self.assertEqual([g.security_group_id for g in groups], gids)
group = self.c.get_security_group(gids.pop())
self.assertTrue(group is not None)
def testAutoSnapshotPolicy(self):
print(self.c.describe_auto_snapshot_policy())
if __name__ == '__main__':
unittest.main()
|
#7- Binary to Decimal and Back Converter - Develop a converter to convert a decimal number to binary or a binary number to its decimal equivalent.
def float_bin(number, places=3):
    whole, dec = str(number).split(".")
    whole = int(whole)
    dec = int(dec)
    res = bin(whole)[2:] + "."
    for x in range(places):
        # multiply by 2.0 so the intermediate result always contains a "."
        # (otherwise a terminating fraction, e.g. dec == 0, crashes the split)
        whole, dec = str(decimal_converter(dec) * 2.0).split(".")
        dec = int(dec)
        res += whole
    return res
def decimal_converter(num):
while num > 1:
num /= 10
return num
def binary(number):
s=str(number)
l=list(s)
g=l[::-1]
s=0
for i in range(len(g)):
s=s+(int(g[i])*(2**i))
return("Your binary number is",s)
n=int(input("Enter 1 for decimal to binary or 2 for binary to decimal"))
if n==1:
    foi=int(input("Enter 1 for floating point or 2 for integer type"))
if foi==1:
number=float(input("Enter your floating point number"))
p = int(input("Enter the number of decimal places of the result : \n"))
print(float_bin(number, places = p))
if foi==2:
number=int(input("Enter your number"))
print(bin(number)[2:])
elif n==2:
number = int(input("Enter your binary number"))
    print(binary(number))
#- Another program to calculate decimal to binary
'''
k=[]
def fun(number):
    if int(number)<=1:
        k.append(int(number))
        fun(number*2)
    return(k)
n=float(input())
a=int(n)
b=(n-a)*2
(fun(b))
b=bin(a)[2:]
print(b)
for i in k:
    s=''.join(map(str,k))
print(s)
print(str(b)+"."+s)
'''
|
'''
The windows_files module handles windows filesystem state, file uploads and template generation.
'''
from __future__ import unicode_literals
import ntpath
import os
from datetime import timedelta
import six
from pyinfra import logger
from pyinfra.api import (
FileUploadCommand,
operation,
OperationError,
OperationTypeError,
)
from pyinfra.api.util import get_file_sha1
from pyinfra.facts.windows import WindowsDate
from pyinfra.facts.windows_files import (
WindowsDirectory,
WindowsFile,
WindowsLink,
WindowsMd5File,
WindowsSha1File,
WindowsSha256File,
)
from .util.compat import fspath
from .util.files import ensure_mode_int
@operation(pipeline_facts={
'file': 'dest',
})
def download(
src, dest,
user=None, group=None, mode=None, cache_time=None, force=False,
sha256sum=None, sha1sum=None, md5sum=None,
state=None, host=None,
):
'''
Download files from remote locations using curl or wget.
+ src: source URL of the file
+ dest: where to save the file
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
+ cache_time: if the file exists already, re-download after this time (in seconds)
+ force: always download the file, even if it already exists
+ sha256sum: sha256 hash to checksum the downloaded file against
+ sha1sum: sha1 hash to checksum the downloaded file against
+ md5sum: md5 hash to checksum the downloaded file against
Example:
.. code:: python
        windows_files.download(
name='Download the Docker repo file',
src='https://download.docker.com/linux/centos/docker-ce.repo',
dest='C:\\docker',
)
'''
info = host.get_fact(WindowsFile, name=dest)
# Destination is a directory?
if info is False:
raise OperationError(
'Destination {0} already exists and is not a file'.format(dest),
)
# Do we download the file? Force by default
download = force
# Doesn't exist, lets download it
if info is None:
download = True
# Destination file exists & cache_time: check when the file was last modified,
# download if old
else:
if cache_time:
# Time on files is not tz-aware, and will be the same tz as the server's time,
# so we can safely remove the tzinfo from WindowsDate before comparison.
cache_time = (
host.get_fact(WindowsDate).replace(tzinfo=None) - timedelta(seconds=cache_time)
)
if info['mtime'] and info['mtime'] > cache_time:
download = True
if sha1sum:
if sha1sum != host.get_fact(WindowsSha1File, name=dest):
download = True
if sha256sum:
if sha256sum != host.get_fact(WindowsSha256File, name=dest):
download = True
if md5sum:
if md5sum != host.get_fact(WindowsMd5File, name=dest):
download = True
# If we download, always do user/group/mode as SSH user may be different
if download:
yield (
'$ProgressPreference = "SilentlyContinue"; '
'Invoke-WebRequest -Uri {0} -OutFile {1}'
).format(src, dest)
# if user or group:
# yield chown(dest, user, group)
# if mode:
# yield chmod(dest, mode)
if sha1sum:
yield (
'if ((Get-FileHash -Algorithm SHA1 "{0}").hash -ne {1}) {{ '
'Write-Error "SHA1 did not match!" '
'}}'
).format(dest, sha1sum)
if sha256sum:
yield (
'if ((Get-FileHash -Algorithm SHA256 "{0}").hash -ne {1}) {{ '
'Write-Error "SHA256 did not match!" '
'}}'
).format(dest, sha256sum)
if md5sum:
yield (
'if ((Get-FileHash -Algorithm MD5 "{0}").hash -ne {1}) {{ '
'Write-Error "MD5 did not match!" '
'}}'
).format(dest, md5sum)
else:
host.noop('file {0} has already been downloaded'.format(dest))
@operation(pipeline_facts={
'file': 'dest',
'sha1_file': 'dest',
})
def put(
src, dest,
user=None, group=None, mode=None, add_deploy_dir=True,
create_remote_dir=True, force=False, assume_exists=False,
state=None, host=None,
):
'''
Upload a local file to the remote system.
+ src: local filename to upload
+ dest: remote filename to upload to
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
+ add_deploy_dir: src is relative to the deploy directory
+ create_remote_dir: create the remote directory if it doesn't exist
+ force: always upload the file, even if the remote copy matches
+ assume_exists: whether to assume the local file exists
``create_remote_dir``:
If the remote directory does not exist it will be created using the same
user & group as passed to ``files.put``. The mode will *not* be copied over,
if this is required call ``files.directory`` separately.
Note:
This operation is not suitable for large files as it may involve copying
the file before uploading it.
Examples:
.. code:: python
        # Note: this requires a 'data/content.json' file on the local filesystem
        files.put(
            name='Upload the data content file',
src='data/content.json',
dest='C:\\data\\content.json'
)
'''
# Upload IO objects as-is
if hasattr(src, 'read'):
local_file = src
# Assume string filename
else:
# Add deploy directory?
if add_deploy_dir and state.deploy_dir:
src = os.path.join(state.deploy_dir, src)
local_file = src
if not assume_exists and not os.path.isfile(local_file):
raise IOError('No such file: {0}'.format(local_file))
mode = ensure_mode_int(mode)
remote_file = host.get_fact(WindowsFile, name=dest)
if create_remote_dir:
yield _create_remote_dir(state, host, dest, user, group)
# No remote file, always upload and user/group/mode if supplied
if not remote_file or force:
yield FileUploadCommand(local_file, dest)
# if user or group:
# yield chown(dest, user, group)
# if mode:
# yield chmod(dest, mode)
# File exists, check sum and check user/group/mode if supplied
else:
local_sum = get_file_sha1(src)
remote_sum = host.get_fact(WindowsSha1File, name=dest)
# Check sha1sum, upload if needed
if local_sum != remote_sum:
yield FileUploadCommand(local_file, dest)
# if user or group:
# yield chown(dest, user, group)
# if mode:
# yield chmod(dest, mode)
else:
changed = False
# Check mode
# if mode and remote_file['mode'] != mode:
# yield chmod(dest, mode)
# changed = True
# Check user/group
# if (
# (user and remote_file['user'] != user)
# or (group and remote_file['group'] != group)
# ):
# yield chown(dest, user, group)
# changed = True
if not changed:
host.noop('file {0} is already uploaded'.format(dest))
@operation(pipeline_facts={
'windows_file': 'name',
})
def file(
path,
present=True, assume_present=False,
user=None, group=None, mode=None, touch=False,
create_remote_dir=True,
state=None, host=None,
):
'''
Add/remove/update files.
+ path: path of the remote file
+ present: whether the file should exist
+ assume_present: whether to assume the file exists
+ TODO: user: user to own the files
+ TODO: group: group to own the files
+ TODO: mode: permissions of the files as an integer, eg: 755
+ touch: whether to touch the file
+ create_remote_dir: create the remote directory if it doesn't exist
``create_remote_dir``:
If the remote directory does not exist it will be created using the same
user & group as passed to ``files.put``. The mode will *not* be copied over,
if this is required call ``files.directory`` separately.
Example:
.. code:: python
files.file(
name='Create c:\\temp\\hello.txt',
path='c:\\temp\\hello.txt',
touch=True,
)
'''
if not isinstance(path, six.string_types):
raise OperationTypeError('Name must be a string')
# mode = ensure_mode_int(mode)
info = host.get_fact(WindowsFile, name=path)
# Not a file?!
if info is False:
raise OperationError('{0} exists and is not a file'.format(path))
# Doesn't exist & we want it
if not assume_present and info is None and present:
if create_remote_dir:
yield _create_remote_dir(state, host, path, user, group)
yield 'New-Item -ItemType file {0}'.format(path)
# if mode:
# yield chmod(path, mode)
# if user or group:
# yield chown(path, user, group)
# It exists and we don't want it
elif (assume_present or info) and not present:
yield 'Remove-Item {0}'.format(path)
# # It exists & we want to ensure its state
# elif (assume_present or info) and present:
# if touch:
# yield 'New-Item -ItemType file {0}'.format(path)
#
# # Check mode
# if mode and (not info or info['mode'] != mode):
# yield chmod(path, mode)
#
# # Check user/group
# if (
# (not info and (user or group))
# or (user and info['user'] != user)
# or (group and info['group'] != group)
# ):
# yield chown(path, user, group)
def windows_file(*args, **kwargs):
# COMPAT
# TODO: remove this
logger.warning((
'Use of `windows_files.windows_file` is deprecated, '
'please use `windows_files.file` instead.'
))
return file(*args, **kwargs)
def _create_remote_dir(state, host, remote_filename, user, group):
# Always use POSIX style path as local might be Windows, remote always *nix
remote_dirname = ntpath.dirname(remote_filename)
if remote_dirname:
yield directory(
remote_dirname,
state=state,
host=host,
user=user,
group=group,
)
@operation(pipeline_facts={
'windows_directory': 'name',
})
def directory(
path,
present=True, assume_present=False,
user=None, group=None, mode=None, recursive=False,
state=None, host=None,
):
'''
Add/remove/update directories.
+ path: path of the remote folder
+ present: whether the folder should exist
+ assume_present: whether to assume the directory exists
+ TODO: user: user to own the folder
+ TODO: group: group to own the folder
+ TODO: mode: permissions of the folder
+ TODO: recursive: recursively apply user/group/mode
Examples:
.. code:: python
files.directory(
name='Ensure the c:\\temp\\dir_that_we_want_removed is removed',
path='c:\\temp\\dir_that_we_want_removed',
present=False,
)
files.directory(
name='Ensure c:\\temp\\foo\\foo_dir exists',
path='c:\\temp\\foo\\foo_dir',
recursive=True,
)
# multiple directories
dirs = ['c:\\temp\\foo_dir1', 'c:\\temp\\foo_dir2']
for dir in dirs:
files.directory(
name='Ensure the directory `{}` exists'.format(dir),
path=dir,
)
'''
if not isinstance(path, six.string_types):
raise OperationTypeError('Name must be a string')
info = host.get_fact(WindowsDirectory, name=path)
# Not a directory?!
if info is False:
raise OperationError('{0} exists and is not a directory'.format(path))
# Doesn't exist & we want it
if not assume_present and info is None and present:
yield 'New-Item -Path {0} -ItemType Directory'.format(path)
# if mode:
# yield chmod(path, mode, recursive=recursive)
# if user or group:
# yield chown(path, user, group, recursive=recursive)
#
# Somewhat bare fact, should flesh out more
host.create_fact(
            WindowsDirectory,
kwargs={'name': path},
data={'type': 'directory'},
)
# It exists and we don't want it
elif (assume_present or info) and not present:
# TODO: how to ensure we use 'ps'?
# remove anything in the directory
yield 'Get-ChildItem {0} -Recurse | Remove-Item'.format(path)
# remove directory
yield 'Remove-Item {0}'.format(path)
# It exists & we want to ensure its state
# elif (assume_present or info) and present:
# # Check mode
# if mode and (not info or info['mode'] != mode):
# yield chmod(path, mode, recursive=recursive)
#
# # Check user/group
# if (
# (not info and (user or group))
# or (user and info['user'] != user)
# or (group and info['group'] != group)
# ):
# yield chown(path, user, group, recursive=recursive)
def windows_directory(*args, **kwargs):
# COMPAT
# TODO: remove this
logger.warning((
'Use of `windows_files.windows_directory` is deprecated, '
'please use `windows_files.directory` instead.'
))
return directory(*args, **kwargs)
def _validate_path(path):
try:
path = fspath(path)
except TypeError:
raise OperationTypeError('`path` must be a string or `os.PathLike` object')
@operation(pipeline_facts={
'link': 'path',
})
def link(
path,
target=None, present=True, assume_present=False,
user=None, group=None, symbolic=True, force=True,
create_remote_dir=True,
state=None, host=None,
):
'''
Add/remove/update links.
+ path: the name of the link
+ target: the file/directory the link points to
+ present: whether the link should exist
+ assume_present: whether to assume the link exists
+ user: user to own the link
+ group: group to own the link
+ symbolic: whether to make a symbolic link (vs hard link)
+ create_remote_dir: create the remote directory if it doesn't exist
``create_remote_dir``:
If the remote directory does not exist it will be created using the same
user & group as passed to ``files.put``. The mode will *not* be copied over,
if this is required call ``files.directory`` separately.
Source changes:
If the link exists and points to a different target, pyinfra will remove it and
    recreate a new one pointing to the new target.
Examples:
.. code:: python
# simple example showing how to link to a file
files.link(
name=r'Create link C:\\issue2 that points to C:\\issue',
path=r'C:\\issue2',
            target=r'C:\\issue',
)
'''
_validate_path(path)
if present and not target:
raise OperationError('If present is True target must be provided')
info = host.get_fact(WindowsLink, name=path)
# Not a link?
if info is not None and not info:
raise OperationError('{0} exists and is not a link'.format(path))
add_cmd = 'New-Item -ItemType {0} -Path {1} -Target {2} {3}'.format(
'SymbolicLink' if symbolic else 'HardLink',
path,
target,
'-Force' if force else '',
)
remove_cmd = '(Get-Item {0}).Delete()'.format(path)
# We will attempt to link regardless of current existence
# since we know by now the path is either a link already
# or does not exist
if (info is None or force) and present:
if create_remote_dir:
yield _create_remote_dir(state, host, path, user, group)
yield add_cmd
# if user or group:
# yield chown(path, user, group, dereference=False)
# host.create_fact(
# WindowsLink,
# kwargs={'name': path},
# data={'link_target': target, 'group': group, 'user': user},
# )
# It exists and we don't want it
elif (assume_present or info) and not present:
yield remove_cmd
# host.delete_fact(WindowsLink, kwargs={'name': path})
else:
host.noop('link {0} already exists and force=False'.format(path))
|
#! /usr/bin/env python
'''Generate a site.attrs file to prepare for an unattended installation of a Stacki Frontend.
Usage:
stacki_attrs.py list [options]
stacki_attrs.py [options]
Options:
-h --help Display usage.
--debug Print various data structures during runtime
--template=<template filename> Location of site.attrs.j2
--output_filename=<filename> Location to save site.attrs
--stdout Instead of saving the file, print it to stdout
--fqdn=<fqdn> FQDN of the frontend
--timezone=<timezone> Timezone string
--network=<network address> Network for Stacki traffic
--ip=<ip_address> IP address of frontend
--netmask=<subnet mask> Netmask of frontend
  --cidr=<bits in netmask>              The CIDR representation of the netmask
--gateway=<gateway address> Gateway of frontend
--broadcast=<broadcast address> Broadcast of frontend
--interface=<interface name> Device used for Stacki traffic
--mac_address=<mac address> MAC address of the interface
--password=<root password> Password to set for administration
--pass_encrypted Indicate that the password provided is already encrypted
--dns_servers=<server1[,server2]> DNS servers for frontend
'''
from __future__ import print_function
import sys
import os
import string
import random
import pytz
import jinja2
import socket
import hashlib
import subprocess
from pprint import pprint
from stacklib import docopt
from stacklib import ipaddress
# also requires the openssl binary installed!
default_ipv4 = {
'ip': '192.168.42.10',
'netmask': '255.255.255.0',
'broadcast': '',
'gateway': '',
'network': '',
'cidr': '',
}
defaults = {
'fqdn': 'stackifrontend.localdomain',
'interface': 'enp0s8',
'dns_servers': '8.8.8.8',
'timezone': 'America/Los_Angeles',
'password': 'password',
'pass_encrypted': False,
'mac_address': '08:00:d0:0d:c1:89',
'template': '/opt/stack/gen-site-attrs/site.attrs.j2',
'output_filename': './site.attrs',
}
class Attr():
''' Attr represents the logic for creating a valid site.attrs file based on `settings`. '''
# the basic attributes we'll need to set or generate
attr_keys = [
'hostname',
'domain',
'interface',
'network',
'ip',
'netmask',
'cidr',
'broadcast',
'gateway',
'dns_servers',
'timezone',
'password',
'mac_address',
'shadow_pass',
]
def __init__(self, settings):
''' build the object from `settings` '''
self.attrs = dict.fromkeys(Attr.attr_keys)
self.settings = settings
ipv4_settings = dict((k, self.settings[k]) for k in default_ipv4)
try:
self.set_timezone()
self.set_fqdn()
self.set_ethernet_dev()
self.set_mac_address()
for addr, value in ipv4_settings.items():
self.set_address(addr, value)
self.set_dns()
self.set_password()
self.render_attrs_file(settings['template'])
except ValueError as e:
raise
def render_attrs_file(self, template_file):
''' Render the stored attributes as a 'site.attrs', using template `template_file` '''
if not os.path.isfile(template_file):
template_file = './site.attrs.j2'
with open(template_file) as template:
rendered_attrs_file = jinja2.Template(template.read()).render({
'HOSTNAME': self.attrs['hostname'],
'DOMAIN': self.attrs['domain'],
'BACKEND_NETWORK_INTERFACE': self.attrs['interface'],
'BACKEND_NETWORK': self.attrs['network'],
'BACKEND_NETWORK_ADDRESS': self.attrs['ip'],
'BACKEND_NETMASK': self.attrs['netmask'],
'BACKEND_NETMASK_CIDR': self.attrs['cidr'],
'BACKEND_BROADCAST_ADDRESS': self.attrs['broadcast'],
'BACKEND_MAC_ADDRESS': self.attrs['mac_address'],
'GATEWAY': self.attrs['gateway'],
'DNS_SERVERS': self.attrs['dns_servers'],
'TIMEZONE': self.attrs['timezone'],
'SHADOW_PASSWORD': self.attrs['shadow_pass'],
})
self.output = rendered_attrs_file + '\n'
def set_timezone(self):
''' try to fit the timezone to a list of actual timezones '''
timezone = self.settings['timezone']
try:
pytz.timezone(timezone)
except pytz.exceptions.UnknownTimeZoneError:
raise ValueError('Error: Could not validate timezone, "%s"' % timezone)
self.attrs['timezone'] = timezone
def set_fqdn(self):
''' try to split a fqdn into host and domain '''
fqdn = self.settings['fqdn']
# split, assign, look for valueerror
try:
host, domain = fqdn.split('.', 1)
except ValueError as e:
raise ValueError('Error: "%s" is not a fully-qualified domain name' % fqdn)
self.attrs['hostname'] = host
self.attrs['domain'] = domain
def set_ethernet_dev(self):
''' ethernet device names are weird -- just check that it isn't empty '''
device = self.settings['interface']
if not device:
raise ValueError('Error: ethernet device name must not be empty')
self.attrs['interface'] = device
def set_mac_address(self):
''' try to parse the MAC in a few different formats '''
mac_addr = self.settings['mac_address']
if mac_addr.count(':') == 0 and len(mac_addr) == 12:
mac_addr = ':'.join(s.encode('hex') for s in mac_addr.decode('hex'))
elif mac_addr.count(':') == 5 and len(mac_addr) == 17:
# this is the format we want it in...
pass
else:
raise ValueError('Error: MAC address must either be 12 hex digits or 6 hexdigit pairs separated by colons')
self.attrs['mac_address'] = mac_addr
def set_address(self, key, address):
''' check that the address is a valid addressable ipv4 address '''
if key == 'cidr':
self.attrs[key] = address
return
if len(address.split('.')) != 4:
raise ValueError('Error: addresses must be specified in dotted-quad format: "%s"' % address)
try:
socket.inet_aton(address)
except socket.error as e:
raise ValueError('Error: "%s" is not a valid ipv4 address' % address)
# filter the ip through socket.inet_* to ensure legibility
self.attrs[key] = socket.inet_ntoa(socket.inet_aton(address))
def set_dns(self):
''' split string across commas, if any, and check the ip is valid '''
dns = self.settings['dns_servers']
valid_dns_servers = []
for address in dns.split(','):
if len(address.split('.')) != 4:
raise ValueError('Error: addresses must be specified in dotted-quad format: "%s"' % address)
try:
socket.inet_aton(address.strip())
except socket.error as e:
raise ValueError('Error: "%s" is not a valid ipv4 address' % address)
valid_dns_servers.append(socket.inet_ntoa(socket.inet_aton(address.strip())))
# filter the ip through socket.inet_* to get something legible
self.attrs['dns_servers'] = ','.join(valid_dns_servers)
def set_password(self):
''' encrypt the password in the 'crypt' format '''
password = self.settings['password']
if not password:
raise ValueError('Error: password must not be empty')
# PrivateRootPassword
# can't rely on MacOSX underlying C crypt() code
if not self.settings['pass_encrypted']:
openssl_cmd = 'openssl passwd -1 -salt %s %s' % (gen_salt(), password)
encrypted_pass = subprocess.check_output(openssl_cmd.split()).strip()
else:
encrypted_pass = password
self.attrs['shadow_pass'] = encrypted_pass
def gen_salt():
''' generate a best-effort random salt '''
# base list of characters, note len()==64
chars = string.ascii_letters + string.digits + './'
salt = ''
# generate a urandom byte, modulo it by len(chars), use result as index to pick char, append to salt
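    # len(chars) == 64 divides 256 evenly, so the modulo does not bias the character choice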
for i in range(0,8):
salt += chars[ord(os.urandom(1)) % len(chars)]
return salt
if __name__ == '__main__':
arguments = docopt.docopt(__doc__)
# prune out the '--'s
cleaned_args = dict((k.replace('--',''), v) for (k,v) in arguments.iteritems())
debug_flag = False
if cleaned_args['debug']:
del cleaned_args['debug']
debug_flag = True
# print_debug is literally noop if --debug was not passed
print_debug = print if debug_flag else lambda *a, **k: None
print_debug('cleaned_args', cleaned_args)
settings = defaults.copy()
settings.update(default_ipv4)
# grab only the ipv4 values, using 'default_ipv4' for the keys
user_ipv4_settings = dict((k, cleaned_args[k]) for k in default_ipv4)
print_debug('user ipv4 settings: ', user_ipv4_settings)
# overlay only the options actually specified
cleaned_args = dict((k, v) for (k,v) in cleaned_args.iteritems() if v)
print_debug('cleaned_args without Nones', cleaned_args)
settings.update(cleaned_args)
print_debug('combined settings', settings)
ip_addr = user_ipv4_settings['ip']
mask = user_ipv4_settings['netmask']
cidr = user_ipv4_settings['cidr']
if not cidr and not mask and ip_addr and '/' in ip_addr:
settings['ip'], mask = ip_addr.split('/')
if len(mask) > 2 and '.' in mask:
settings['netmask'] = mask
else:
settings['cidr'] = mask
elif ip_addr and mask:
# if they pass both, that's fine
pass
elif ip_addr and cidr:
pass
elif not ip_addr and not mask:
# if they pass neither, they get the defaults
pass
else:
# but if they pass one but not the other...
print('Error: if specifying one, you must specify both ip as well as netmask or cidr')
sys.exit(1)
if mask:
ipstring = unicode(settings['ip'] + '/' + mask)
elif cidr:
ipstring = unicode(settings['ip'] + '/' + cidr)
ip_addr = ipaddress.IPv4Network(ipstring, strict=False)
settings['netmask'] = str(ip_addr.with_netmask).split('/')[1]
settings['cidr'] = str(ip_addr.prefixlen)
    # calculate these only if the user didn't specify
if not user_ipv4_settings['network']:
settings['network'] = str(ip_addr.network_address)
# assume the gateway is the first host IP in the network
if not user_ipv4_settings['gateway']:
settings['gateway'] = str(ip_addr[1])
if not user_ipv4_settings['broadcast']:
settings['broadcast'] = str(ip_addr.broadcast_address)
# for 'list', pretty print the defaults overlayed with user args
if cleaned_args.has_key('list'):
del settings['list']
pprint(settings)
sys.exit(0)
# Actually attempt to set the attributes
try:
attrs = Attr(settings)
except ValueError as e:
print(e)
sys.exit(1)
print_debug('compiled attributes', attrs.attrs)
# and finally, render the file and save to disk
if cleaned_args.has_key('stdout'):
print(attrs.output)
else:
filename = settings['output_filename']
if os.path.isdir(filename):
filename = filename + '/' + os.path.basename(defaults['output_filename'])
with open(filename, 'wb') as outfile:
outfile.write(attrs.output)
sys.exit(0)
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
import tensorflow as tf
from .. import variables as vs
from .. import utils
from .. import initializations
def embedding(incoming, input_dim, output_dim, weights_init='truncated_normal',
trainable=True, restore=True, reuse=False, scope=None,
name="Embedding"):
""" Embedding.
Embedding layer for a sequence of ids.
Input:
2-D Tensor [samples, ids].
Output:
3-D Tensor [samples, embedded_ids, features].
Arguments:
incoming: Incoming 2-D Tensor.
        input_dim: `int`. Vocabulary size (number of ids).
        output_dim: `int`. Embedding size.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(see tflearn.initializations) Default: 'truncated_normal'.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model
name: A name for this layer (optional). Default: 'Embedding'.
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) == 2, "Incoming Tensor shape must be 2-D"
n_inputs = int(np.prod(input_shape[1:]))
W_init = weights_init
if isinstance(weights_init, str):
W_init = initializations.get(weights_init)()
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
name = scope.name
with tf.device('/cpu:0'):
W = vs.variable("W", shape=[input_dim, output_dim],
initializer=W_init, trainable=trainable,
restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
inference = tf.cast(incoming, tf.int32)
inference = tf.nn.embedding_lookup(W, inference)
inference = tf.transpose(inference, [1, 0, 2])
inference = tf.reshape(inference, shape=[-1, output_dim])
inference = tf.split(0, n_inputs, inference)
# TODO: easy access those var
# inference.W = W
# inference.scope = scope
return inference
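# Usage sketch (illustrative; the input tensor and sizes are assumptions, not from this file):
#   ids = tf.placeholder(tf.float32, [None, 100])
#   emb = embedding(ids, input_dim=20000, output_dim=128)
# As written above, the return value is a Python list of 100 tensors of shape
# [samples, 128] (one per id position), suitable for the old sequence/RNN API.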
|
from contract import DappMethodAdmin,DappMethodInfo,Test
from privateKey import my_address, private_key
from web3.auto import w3
DAPP_ID = 1
ONE_ETHER = 10 ** 18
def getInfo():
count = DappMethodInfo.functions.getStoreMethodCount(DAPP_ID).call()
print("当前DAPP的方法数量为:",count)
for i in range(count):
infos = DappMethodInfo.functions.getMethodInfoByIndex(DAPP_ID,i).call()
print("索引为%d当前方法信息为:"%i)
print("当前方法调用合约地址:",infos[0])
print("当前方法是否可支付ETH:",infos[1])
print("当前方法支付的ETH最小数量:",infos[2]/ONE_ETHER)
print("当前方法的一些信息:",infos[3])
print("当前方法的默认编码数据:",infos[4])
print("当前方法是否可见:",not infos[5])
print("--------------------------------------")
def addMethod():
args = (DAPP_ID,Test.address,False,0,"registerName|User register his name|string|none",b'')
nonce = w3.eth.getTransactionCount(my_address)
unicorn_txn = DappMethodAdmin.functions.addMethod(*args).buildTransaction({
'nonce': nonce,
'gasPrice': w3.toWei(10, 'gwei'),
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=private_key)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("增加方法交易已经发送")
getInfo()
addMethod()
getInfo()
|
import requests
import json
from PXDetector import PXDetector
motion_url = 'http://localhost:5000/detect_motion'
frame_url = 'http://localhost:5000/get_still'
#Detect motion for 30s:
payload = {'timeout':30,}
r=requests.post(motion_url,json=payload).json()
print(r)
#Grab still image:
r=requests.post(frame_url) #Returns image as part of response.
with open("test.jpg","wb") as f:
f.write(r.content)
f.close()
|
from tesi_ao import sandbox, package_data
import matplotlib.pyplot as plt
import numpy as np
def plot_calibration_reproducibility():
'''
    mcl0.fits, mcl1.fits and mcl2.fits are three calibration measurements repeated in rapid succession
'''
fname0 = package_data.file_name_mcl('mcl0')
fname1 = package_data.file_name_mcl('mcl1')
fname2 = package_data.file_name_mcl('mcl2')
mcl0 = sandbox.MemsCommandLinearization.load(fname0)
mcl1 = sandbox.MemsCommandLinearization.load(fname1)
mcl2 = sandbox.MemsCommandLinearization.load(fname2)
plt.plot(mcl0._cmd_vector[3], mcl1._deflection[3] -
mcl0._deflection[3], '.-', label='meas1')
plt.plot(mcl0._cmd_vector[3], mcl2._deflection[3] -
mcl0._deflection[3], '.-', label='meas2')
plt.legend()
plt.grid(True)
plt.xlabel('Command [au]')
plt.ylabel('Deflection error wrt meas0 [au]')
def main_calibrate_all_actuators():
wyko, bmc = sandbox.create_devices()
mcl, cplm, cpla = sandbox.main_calibration(
wyko, bmc, mcl_fname='/tmp/mcl_all.fits', scan_fname='/tmp/cpl_all.fits')
return mcl, cplm, cpla
def max_wavefront(wf):
coord_max = np.argwhere(
np.abs(wf) == np.max(np.abs(wf)))[0]
return wf[coord_max[0], coord_max[1]], coord_max
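# Illustrative example (not from the original file): for wf = np.array([[1., -5.], [2., 3.]])
# max_wavefront(wf) returns (-5.0, array([0, 1])), i.e. the signed peak and its (row, col) index.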
|
"""Module for custom callbacks, especially visualization(UMAP)."""
import logging
import pathlib
from typing import Any, Dict, List, Optional, Union
import anndata
import numpy as np
import scanpy as sc
import tensorflow as tf
from scipy import sparse as scsparse
from tensorflow import config as tfconfig
from tensorflow.keras import callbacks
from discern import functions, io
def _plot_umap(cells: anndata.AnnData,
logdir: pathlib.Path,
epoch: Union[int, str],
disable_pca: bool = False):
n_comps = min(20, min(cells.shape) - 1)
if not disable_pca:
sc.tl.pca(cells, svd_solver='arpack', n_comps=n_comps)
sc.pp.neighbors(cells, use_rep='X' if disable_pca else "X_pca")
sc.tl.umap(cells)
for key in cells.obs.columns[cells.obs.dtypes == "category"]:
sc.pl.umap(cells,
color=key,
title=str(logdir),
show=False,
size=50.0,
sort_order=False,
save='_epoch_{}_{}.png'.format(epoch, key))
class VisualisationCallback(callbacks.Callback): # pylint: disable=too-few-public-methods
"""Redo prediction on datasets and visualize via UMAP.
Args:
outdir (pathlib.Path): Output directory for the figures.
data (anndata.AnnData): Input cells.
        batch_size (int): Number of cells to visualize.
freq (int): Frequency for computing visualisations in epochs. Defaults 10.
"""
_outdir: pathlib.Path
_initial_data: anndata.AnnData
_labels: np.ndarray
_data: Dict[Union[str, int], tf.data.Dataset]
_batch_size: int
_freq: int
def __init__(self,
outdir: Union[str, pathlib.Path],
data: anndata.AnnData,
batch_size: int,
freq: int = 10):
"""Initialize the callback and do one UMAP plot with original data."""
#pylint: disable=too-many-arguments
super().__init__()
self._outdir = pathlib.Path(outdir).joinpath("UMAP")
self._initial_data = data
self._batch_size = batch_size
self._data = dict()
self._freq = freq
n_threads = tfconfig.threading.get_inter_op_parallelism_threads()
n_threads += tfconfig.threading.get_intra_op_parallelism_threads()
if n_threads > 0:
sc.settings.n_jobs = n_threads
sc.settings.autosave = True
self._outdir.mkdir(exist_ok=True, parents=True)
def on_train_begin(self, logs: Optional[Dict[str, float]] = None): # pylint: disable=unused-argument
"""Run on training start.
Args:
logs (Optional[Dict[str, float]]): logs, not used only for compatibility reasons.
"""
n_labels = self.model.input_shape["batch_input_enc"][-1]
input_enc = self._initial_data.obs.batch.cat.codes.values.astype(
np.int32)
input_enc = tf.one_hot(input_enc, depth=n_labels, dtype=tf.float32)
cells = self._initial_data.X
if scsparse.issparse(cells):
cells = cells.todense()
cells = tf.cast(cells, tf.float32)
self._data["original"] = {
'input_data': cells,
'batch_input_enc': input_enc,
'batch_input_dec': input_enc,
}
self._data["latent"] = {
'encoder_input': cells,
'encoder_labels': input_enc,
}
name_to_code = {
name: code
for code, name in enumerate(
self._initial_data.obs.batch.cat.categories)
}
labels = self._initial_data.obs.batch.value_counts(
sort=False, dropna=True).index.values
for name in labels:
tmp = np.zeros_like(input_enc)
tmp[:, name_to_code[name]] = 1
self._data[name] = {
'input_data': cells,
'batch_input_enc': input_enc,
'batch_input_dec': tf.cast(tmp, tf.float32),
}
logdir = self._outdir.joinpath("projected_to_original")
logdir.mkdir(exist_ok=True, parents=True)
sc.settings.figdir = logdir
_plot_umap(self._initial_data, logdir, 0)
def _do_prediction_and_plotting(self, epoch: Union[str, int],
batch_size: int):
loglevel = logging.getLogger(__name__).getEffectiveLevel()
logging.getLogger("anndata").setLevel(loglevel)
for dataset, dataiterator in self._data.items():
if dataiterator.keys() != self._data["original"].keys():
continue
predictions = self.model.predict(dataiterator,
batch_size=batch_size)[:2]
predictions = functions.sample_counts(counts=predictions[0],
probabilities=predictions[1],
var=self._initial_data.var,
uns=self._initial_data.uns)
logdir = self._outdir.joinpath("projected_to_{}".format(dataset))
logdir.mkdir(parents=True, exist_ok=True)
sc.settings.figdir = logdir
predictions = anndata.AnnData(predictions,
obs=self._initial_data.obs,
var=self._initial_data.var)
merged = predictions.concatenate(
self._initial_data[self._initial_data.obs.batch ==
dataset].copy(),
join='inner',
batch_categories=['_autoencoded', '_valid'],
batch_key='origin')
merged.obs.batch = merged.obs.apply(
lambda row: row.batch + row.origin, axis=1).astype("category")
if "celltype" in merged.obs.columns:
merged.obs.celltype = merged.obs.celltype.astype("category")
merged.obs = merged.obs.drop(columns=["origin"], errors="ignore")
_plot_umap(merged, logdir, epoch)
encoder = self.model.get_layer("encoder")
logdir = self._outdir.joinpath("latent_codes")
logdir.mkdir(exist_ok=True, parents=True)
sc.settings.figdir = logdir
latent = encoder.predict(self._data["latent"],
batch_size=batch_size)[0]
latent = anndata.AnnData(latent, obs=self._initial_data.obs)
_plot_umap(latent, logdir, epoch, disable_pca=True)
def on_epoch_end(self,
epoch: int,
logs: Optional[Dict[str, float]] = None): # pylint: disable=unused-argument
"""Run on epoch end. Executes only at specified frequency.
Args:
            epoch (int): Epoch number.
            logs (Optional[Dict[str, float]]): losses and metrics passed by tensorflow fit.
Defaults to None.
"""
if epoch > 0 and epoch % self._freq == 0:
self._do_prediction_and_plotting(epoch + 1, self._batch_size)
def on_train_end(self, logs: Optional[Dict[str, float]] = None): # pylint: disable=unused-argument
"""Run on training end.
Args:
            logs (Optional[Dict[str, float]]): losses and metrics passed by tensorflow fit.
Defaults to None.
"""
self._do_prediction_and_plotting("end", self._batch_size)
def create_callbacks(early_stopping_limits: Dict[str, Any],
exp_folder: pathlib.Path,
inputdata: Optional[io.DISCERNData] = None,
umap_cells_no: Optional[int] = None,
profile_batch: int = 2,
freq_of_viz: int = 30) -> List[callbacks.Callback]:
"""Generate list of callbacks used by tensorflow model.fit.
Args:
        early_stopping_limits (Dict[str, Any]):
            Patience, min_delta, and delay for early stopping.
exp_folder (str): Folder where everything is saved.
inputdata (io.DISCERNData, optional): Input data to use. Defaults to None
umap_cells_no (int): Number of cells for UMAP.
profile_batch (int): Number of the batch to do extensive profiling.
Defaults to 2. (see tf.keras.callbacks.Tensorboard)
freq_of_viz (int): Frequency of visualization callback in epochs. Defaults to 30.
Returns:
List[callbacks.Callback]: callbacks used by tensorflow model.fit.
"""
# pylint: disable=too-many-arguments
logdir = pathlib.Path(exp_folder).joinpath("job")
used_callbacks = list()
used_callbacks.append(callbacks.TerminateOnNaN())
used_callbacks.append(DelayedEarlyStopping(**early_stopping_limits))
used_callbacks.append(
callbacks.TensorBoard(log_dir=str(logdir),
histogram_freq=20,
profile_batch=profile_batch,
update_freq='epoch'))
if inputdata is not None:
batch_size = inputdata.batch_size
data = inputdata[inputdata.obs.split == "valid"].copy()
data.obs = data.obs.drop(columns=["split", "barcodes"],
errors="ignore")
labels = data.obs.batch.value_counts(sort=True, dropna=True)
labels = labels[:10].index.values
data = data[data.obs.batch.isin(labels)]
umap_cells_no = min(umap_cells_no, data.X.shape[0])
idx = np.random.choice(np.arange(data.X.shape[0]),
umap_cells_no,
replace=False)
used_callbacks.append(
VisualisationCallback(logdir, data[idx], batch_size, freq_of_viz))
return used_callbacks
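# Usage sketch (values and paths are illustrative assumptions):
#   cbs = create_callbacks(
#       early_stopping_limits={"patience": 20, "min_delta": 0.01, "delay": 10},
#       exp_folder=pathlib.Path("results/experiment_1"),
#       inputdata=None,  # without input data the UMAP visualisation callback is skipped
#       umap_cells_no=3000,
#   )
#   model.fit(train_dataset, epochs=100, callbacks=cbs)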
class DelayedEarlyStopping(tf.keras.callbacks.EarlyStopping):
"""Stop when a monitored quantity has stopped improving after some delay time.
Args:
delay (int): Number of epochs to wait until applying early stopping.
Defaults to 0, which means standard early stopping.
        monitor (str): Quantity to be monitored. Defaults to `val_loss`.
        min_delta (float): Minimum change in the monitored quantity
            to qualify as an improvement, i.e. an absolute
            change of less than min_delta will count as no
            improvement. Defaults to 0.
        patience (int): Number of epochs with no improvement
            after which training will be stopped. Defaults to 0.
verbose (int): verbosity mode. Defaults to 0.
mode (str): One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity. Defaults to `auto`.
baseline (float, optional): Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline. Defaults to None.
restore_best_weights (bool): Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used. Defaults to False.
"""
# pylint: disable=too-few-public-methods
_delay: int
def __init__(self,
delay: int = 0,
monitor: str = 'val_loss',
min_delta: float = 0.,
patience: int = 0,
verbose: int = 0,
mode: str = 'auto',
baseline: Optional[float] = None,
restore_best_weights: bool = False):
"""Initialize the callback."""
# pylint: disable=too-many-arguments
self._delay = int(delay)
super().__init__(min_delta=min_delta,
monitor=monitor,
patience=patience,
verbose=verbose,
mode=mode,
baseline=baseline,
restore_best_weights=restore_best_weights)
def on_epoch_end(self, epoch: int, logs: Optional[Dict[str, Any]] = None):
"""Call on epoch end to check for early stopping."""
if epoch < self._delay:
return
super().on_epoch_end(epoch=epoch, logs=logs)
return
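# Usage sketch (values are illustrative): ignore early stopping for the first 30 epochs,
# then stop after 10 further epochs without val_loss improvement.
#   early_stop = DelayedEarlyStopping(delay=30, monitor="val_loss", patience=10,
#                                     restore_best_weights=True)
#   model.fit(train_dataset, validation_data=val_dataset, callbacks=[early_stop])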
|
'''
The worst rogue-like in existence
By: Owen Wattenmaker, Max Lambek
background taken from: http://legend-tony980.deviantart.com/art/Alternate-Kingdom-W1-Castle-Background-382965761
character model taken from: http://piq.codeus.net/picture/33378/chibi_knight
'''
#TODO
##################################################################
### -Make the character not stop after finishing the attack ###
### -add in attack value ###
### -scrolling camera ###
### -Attacking enemys ###
##################################################################
import pygame, sys, time, random
from pygame.locals import *
# set up pygame
pygame.init()
mainClock = pygame.time.Clock()
playerImage = pygame.image.load('character\player1\player1_right_stationary.png')
background = pygame.image.load('background.png')
computerimage = pygame.image.load('cherry.png')
# set up the window
WINDOWWIDTH = 1280
WINDOWHEIGHT = 570
r=0
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('Worst Rouge Like')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
player = pygame.Rect(1, 300, 5, 5)
computer = pygame.Rect(750,400,10,10)
orientation = 'right'
airborn = False
moveLeft = False
moveRight = False
jump = False
MOVESPEED = 6
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
# change the keyboard variables
if event.key == K_LEFT:
moveRight = False
moveLeft = True
playerImage = pygame.image.load('knight_left.png')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
orientation = 'left'
if event.key == K_RIGHT:
moveLeft = False
moveRight = True
playerImage = pygame.image.load('knight_right.png')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
orientation = 'right'
if event.key == K_UP and airborn == False:
airborn = True
verticalVelocity = 10
player.top -= 1
if event.key == ord('z'):
moveLeft = False
moveRight = False
if orientation == 'right':
playerImage = pygame.image.load('knight_right_attack.png')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
if orientation == 'left':
playerImage = pygame.image.load('knight_left_attack.png')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
if event.type == KEYUP:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == K_LEFT:
moveLeft = False
if event.key == K_RIGHT:
moveRight = False
if event.key == ord('z'):
if orientation == 'right':
playerImage = pygame.image.load('knight_right.png')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
if orientation == 'left':
playerImage = pygame.image.load('knight_left.png')
playerStretchedImage = pygame.transform.scale(playerImage, (300, 300))
computer.left -= MOVESPEED
    print(player.top)
    print(player.left)
# move the player
if moveLeft and player.left > 0:
player.left -= MOVESPEED
if moveRight and player.right < WINDOWWIDTH:
player.right += MOVESPEED
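    # simple jump physics: vertical velocity starts at 10 and decreases by 0.56 per frame
    # (gravity) until the player falls back to the ground line at y = 300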
    if airborn:
        print(airborn)
        verticalVelocity -= .56
        player.top -= verticalVelocity
        if player.top > 300:
            airborn = False
            print(airborn)
if player.top > 300:
player.top = 300
windowSurface.fill((r,0,0))
windowSurface.blit(background,(0,0))
#windowSurface.blit(player,(x - CameraX,y - CameraY))
# draw the block onto the surface
windowSurface.blit(playerStretchedImage, player)
windowSurface.blit(computerimage, computer)
# draw the window onto the screen
pygame.display.update()
mainClock.tick(60)
|
# -*- coding: utf-8 -*-
"""
Display a scrollable history list of commands.
"""
from typing import List, Callable
from PyQt5.QtWidgets import QBoxLayout, QListWidget, QListWidgetItem, QLabel
from py_hanabi.commands.command import Command
__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"
class CommandListItem(QListWidgetItem):
def __init__(self, *__args):
super().__init__(*__args)
self.command: Command = None
self.index: int = 0
class WidgetHistory:
def __init__(self):
self.list_widget: QListWidget = None
self.layout: QBoxLayout = None
self.list_widget: QListWidget = None
self.action_set_command_index: Callable[[int], None] = None
pass
def setup(self, layout: QBoxLayout, action_set_command_index: Callable[[int], None]):
self.action_set_command_index = action_set_command_index
self.layout = layout
self.list_widget = QListWidget()
label = QLabel("History")
self.layout.addWidget(label)
self.layout.addWidget(self.list_widget)
self.list_widget.currentItemChanged.connect(self.on_item_changed)
def on_item_changed(self, item):
if item is not None:
self.action_set_command_index(item.index)
else:
self.action_set_command_index(None)
def update(self, history: List[Command]):
self.list_widget.clear()
for i, command in enumerate(history):
item = CommandListItem(self.list_widget)
item.command = command
item.index = i
command_number = str(i + 1).zfill(3)
item.setText(f"{command_number}: {command.name}")
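# Usage sketch (illustrative; the layout, callback and command list are assumptions):
#   history_widget = WidgetHistory()
#   history_widget.setup(layout, action_set_command_index=viewer.set_command_index)
#   history_widget.update(commands)   # repopulates the list as "001: <command name>", ...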
|
print("data structs baby!!")
# asdadadadadadad
|
#!/bin/bash
for SUBID in 04 05 06 07 08 09 10 11 12 13 14
do
sbatch submission_connectivity.sh $SUBID
done
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.takeout_service."""
from __future__ import annotations
import datetime
import json
from core import feconf
from core import utils
from core.constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import rights_domain
from core.domain import takeout_domain
from core.domain import takeout_service
from core.domain import topic_domain
from core.platform import models
from core.tests import test_utils
(
app_feedback_report_models, auth_models, base_models, blog_models,
collection_models, config_models, email_models, exploration_models,
feedback_models, improvements_models, question_models, skill_models,
story_models, subtopic_models, suggestion_models, topic_models, user_models
) = models.Registry.import_models([
models.NAMES.app_feedback_report, models.NAMES.auth,
models.NAMES.base_model, models.NAMES.blog, models.NAMES.collection,
models.NAMES.config, models.NAMES.email, models.NAMES.exploration,
models.NAMES.feedback, models.NAMES.improvements, models.NAMES.question,
models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic,
models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user
])
class TakeoutServiceProfileUserUnitTests(test_utils.GenericTestBase):
"""Tests for the takeout service for profile user."""
USER_ID_1 = 'user_1'
PROFILE_ID_1 = 'profile_1'
USER_1_ROLE = feconf.ROLE_ID_CURRICULUM_ADMIN
PROFILE_1_ROLE = feconf.ROLE_ID_MOBILE_LEARNER
USER_1_EMAIL = 'user1@example.com'
GENERIC_USERNAME = 'user'
GENERIC_DATE = datetime.datetime(2019, 5, 20)
GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE)
GENERIC_IMAGE_URL = 'www.example.com/example.png'
GENERIC_USER_BIO = 'I am a user of Oppia!'
GENERIC_SUBJECT_INTERESTS = ['Math', 'Science']
GENERIC_LANGUAGE_CODES = ['en', 'es']
GENERIC_DISPLAY_ALIAS = 'display_alias'
GENERIC_DISPLAY_ALIAS_2 = 'display_alias2'
EXPLORATION_IDS = ['exp_1']
EXPLORATION_IDS_2 = ['exp_2']
COLLECTION_IDS = ['23', '42', '4']
COLLECTION_IDS_2 = ['32', '44', '6']
STORY_IDS = ['12', '22', '32']
STORY_IDS_2 = ['42', '52', '62']
TOPIC_IDS = ['11', '21', '31']
TOPIC_IDS_2 = ['41', '51', '61']
SKILL_ID_1 = 'skill_id_1'
SKILL_ID_2 = 'skill_id_2'
SKILL_ID_3 = 'skill_id_3'
DEGREE_OF_MASTERY = 0.5
DEGREE_OF_MASTERY_2 = 0.6
EXP_VERSION = 1
STATE_NAME = 'state_name'
STORY_ID_1 = 'story_id_1'
COMPLETED_NODE_IDS_1 = ['node_id_1', 'node_id_2']
def set_up_non_trivial(self):
"""Set up all models for use in testing.
1) Simulates skill mastery of user_1 and profile_1.
2) Simulates completion of some activities of user_1 and profile_1.
3) Simulates incomplete status of some activities.
4) Creates user LearnerGoalsModel.
5) Populates ExpUserLastPlaythroughModel of user.
        6) Creates user LearnerPlaylistModel.
7) Simulates collection progress of user.
8) Simulates story progress of user.
9) Creates new collection rights.
10) Simulates a general suggestion.
11) Creates new exploration rights.
12) Populates user settings.
"""
        # Setup for UserSkillMasteryModel.
user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
self.USER_ID_1, self.SKILL_ID_3),
user_id=self.USER_ID_1,
skill_id=self.SKILL_ID_3,
degree_of_mastery=self.DEGREE_OF_MASTERY_2).put()
user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
self.PROFILE_ID_1, self.SKILL_ID_1),
user_id=self.PROFILE_ID_1,
skill_id=self.SKILL_ID_1,
degree_of_mastery=self.DEGREE_OF_MASTERY).put()
# Setup for CompletedActivitiesModel.
user_models.CompletedActivitiesModel(
id=self.USER_ID_1,
exploration_ids=self.EXPLORATION_IDS_2,
collection_ids=self.COLLECTION_IDS_2,
story_ids=self.STORY_IDS_2,
learnt_topic_ids=self.TOPIC_IDS_2).put()
user_models.CompletedActivitiesModel(
id=self.PROFILE_ID_1,
exploration_ids=self.EXPLORATION_IDS,
collection_ids=self.COLLECTION_IDS,
story_ids=self.STORY_IDS,
learnt_topic_ids=self.TOPIC_IDS).put()
        # Setup for IncompleteActivitiesModel.
user_models.IncompleteActivitiesModel(
id=self.PROFILE_ID_1,
exploration_ids=self.EXPLORATION_IDS,
collection_ids=self.COLLECTION_IDS,
story_ids=self.STORY_IDS_2,
partially_learnt_topic_ids=self.TOPIC_IDS).put()
# Setup for ExpUserLastPlaythroughModel.
user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (self.PROFILE_ID_1, self.EXPLORATION_IDS[0]),
user_id=self.PROFILE_ID_1, exploration_id=self.EXPLORATION_IDS[0],
last_played_exp_version=self.EXP_VERSION,
last_played_state_name=self.STATE_NAME).put()
# Setup for LearnerGoalsModel.
user_models.LearnerGoalsModel(
id=self.PROFILE_ID_1,
topic_ids_to_learn=self.TOPIC_IDS).put()
# Setup for LearnerPlaylistModel.
user_models.LearnerPlaylistModel(
id=self.PROFILE_ID_1,
exploration_ids=self.EXPLORATION_IDS,
collection_ids=self.COLLECTION_IDS).put()
# Setup for CollectionProgressModel.
user_models.CollectionProgressModel(
id='%s.%s' % (self.PROFILE_ID_1, self.COLLECTION_IDS[0]),
user_id=self.PROFILE_ID_1,
collection_id=self.COLLECTION_IDS[0],
completed_explorations=self.EXPLORATION_IDS).put()
# Setup for StoryProgressModel.
user_models.StoryProgressModel(
id='%s.%s' % (self.PROFILE_ID_1, self.STORY_ID_1),
user_id=self.PROFILE_ID_1,
story_id=self.STORY_ID_1,
completed_node_ids=self.COMPLETED_NODE_IDS_1).put()
# Setup for UserSettingsModel.
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.USER_1_EMAIL,
roles=[self.USER_1_ROLE],
username=self.GENERIC_USERNAME,
normalized_username=self.GENERIC_USERNAME,
last_agreed_to_terms=self.GENERIC_DATE,
last_started_state_editor_tutorial=self.GENERIC_DATE,
last_started_state_translation_tutorial=self.GENERIC_DATE,
last_logged_in=self.GENERIC_DATE,
last_created_an_exploration=self.GENERIC_DATE,
last_edited_an_exploration=self.GENERIC_DATE,
profile_picture_data_url=self.GENERIC_IMAGE_URL,
default_dashboard='learner', creator_dashboard_display_pref='card',
user_bio=self.GENERIC_USER_BIO,
subject_interests=self.GENERIC_SUBJECT_INTERESTS,
first_contribution_msec=1,
preferred_language_codes=self.GENERIC_LANGUAGE_CODES,
preferred_site_language_code=self.GENERIC_LANGUAGE_CODES[0],
preferred_audio_language_code=self.GENERIC_LANGUAGE_CODES[0],
display_alias=self.GENERIC_DISPLAY_ALIAS
).put()
user_models.UserSettingsModel(
id=self.PROFILE_ID_1,
email=self.USER_1_EMAIL,
roles=[self.PROFILE_1_ROLE],
username=None,
normalized_username=None,
last_agreed_to_terms=self.GENERIC_DATE,
last_started_state_editor_tutorial=None,
last_started_state_translation_tutorial=None,
last_logged_in=self.GENERIC_DATE,
last_created_an_exploration=None,
last_edited_an_exploration=None,
profile_picture_data_url=None,
default_dashboard='learner', creator_dashboard_display_pref='card',
user_bio=self.GENERIC_USER_BIO,
subject_interests=self.GENERIC_SUBJECT_INTERESTS,
first_contribution_msec=None,
preferred_language_codes=self.GENERIC_LANGUAGE_CODES,
preferred_site_language_code=self.GENERIC_LANGUAGE_CODES[0],
preferred_audio_language_code=self.GENERIC_LANGUAGE_CODES[0],
display_alias=self.GENERIC_DISPLAY_ALIAS_2
).put()
def set_up_trivial(self):
"""Setup for trivial test of export_data functionality."""
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.USER_1_EMAIL,
roles=[self.USER_1_ROLE]
).put()
user_models.UserSettingsModel(
id=self.PROFILE_ID_1,
email=self.USER_1_EMAIL,
roles=[self.PROFILE_1_ROLE]
).put()
def test_export_data_for_profile_user_trivial_raises_error(self):
"""Trivial test of export_data functionality."""
self.set_up_trivial()
error_msg = 'Takeout for profile users is not yet supported.'
with self.assertRaisesRegex(NotImplementedError, error_msg):
takeout_service.export_data_for_user(self.PROFILE_ID_1)
def test_export_data_for_profile_user_nontrivial_raises_error(self):
"""Nontrivial test of export_data functionality."""
self.set_up_non_trivial()
error_msg = 'Takeout for profile users is not yet supported.'
with self.assertRaisesRegex(NotImplementedError, error_msg):
takeout_service.export_data_for_user(self.PROFILE_ID_1)
class TakeoutServiceFullUserUnitTests(test_utils.GenericTestBase):
"""Tests for the takeout service for full user."""
USER_ID_1 = 'user_1'
PROFILE_ID_1 = 'profile_1'
THREAD_ID_1 = 'thread_id_1'
THREAD_ID_2 = 'thread_id_2'
BLOG_POST_ID_1 = 'blog_post_id_1'
BLOG_POST_ID_2 = 'blog_post_id_2'
TOPIC_ID_1 = 'topic_id_1'
TOPIC_ID_2 = 'topic_id_2'
USER_1_ROLE = feconf.ROLE_ID_CURRICULUM_ADMIN
PROFILE_1_ROLE = feconf.ROLE_ID_MOBILE_LEARNER
USER_1_EMAIL = 'user1@example.com'
GENERIC_USERNAME = 'user'
GENERIC_PIN = '12345'
GENERIC_DATE = datetime.datetime(2019, 5, 20)
GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE)
GENERIC_IMAGE_URL = 'www.example.com/example.png'
GENERIC_USER_BIO = 'I am a user of Oppia!'
GENERIC_SUBJECT_INTERESTS = ['Math', 'Science']
GENERIC_LANGUAGE_CODES = ['en', 'es']
GENERIC_DISPLAY_ALIAS = 'display_alias'
GENERIC_DISPLAY_ALIAS_2 = 'display_alias2'
USER_1_IMPACT_SCORE = 0.87
USER_1_TOTAL_PLAYS = 33
USER_1_AVERAGE_RATINGS = 4.37
USER_1_NUM_RATINGS = 22
USER_1_WEEKLY_CREATOR_STATS_LIST = [
{
'2019-05-21': {
'average_ratings': 4.00,
'total_plays': 5
}
},
{
'2019-05-28': {
'average_ratings': 4.95,
'total_plays': 10
}
}
]
EXPLORATION_IDS = ['exp_1']
EXPLORATION_IDS_2 = ['exp_2']
STORY_IDS = ['12', '22', '32']
STORY_IDS_2 = ['42', '52', '62']
TOPIC_IDS = ['11', '21', '31']
TOPIC_IDS_2 = ['41', '51', '61']
CREATOR_IDS = ['4', '8', '16']
CREATOR_USERNAMES = ['username4', 'username8', 'username16']
COLLECTION_IDS = ['23', '42', '4']
COLLECTION_IDS_2 = ['32', '44', '6']
GENERAL_FEEDBACK_THREAD_IDS = ['42', '4', '8']
MESSAGE_IDS_READ_BY_USER = [0, 1]
SKILL_ID_1 = 'skill_id_1'
SKILL_ID_2 = 'skill_id_2'
SKILL_ID_3 = 'skill_id_3'
DEGREE_OF_MASTERY = 0.5
DEGREE_OF_MASTERY_2 = 0.6
EXP_VERSION = 1
STATE_NAME = 'state_name'
STORY_ID_1 = 'story_id_1'
STORY_ID_2 = 'story_id_2'
COMPLETED_NODE_IDS_1 = ['node_id_1', 'node_id_2']
COMPLETED_NODE_IDS_2 = ['node_id_3', 'node_id_4']
THREAD_ENTITY_TYPE = feconf.ENTITY_TYPE_EXPLORATION
THREAD_ENTITY_ID = 'exp_id_2'
THREAD_STATUS = 'open'
THREAD_SUBJECT = 'dummy subject'
THREAD_HAS_SUGGESTION = True
THREAD_SUMMARY = 'This is a great summary.'
THREAD_MESSAGE_COUNT = 0
MESSAGE_TEXT = 'Export test text.'
MESSAGE_RECEIVED_VIA_EMAIL = False
CHANGE_CMD = {}
SCORE_CATEGORY_1 = 'category_1'
SCORE_CATEGORY_2 = 'category_2'
SCORE_CATEGORY = (
suggestion_models.SCORE_TYPE_TRANSLATION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'English')
GENERIC_MODEL_ID = 'model-id-1'
COMMIT_TYPE = 'create'
COMMIT_MESSAGE = 'This is a commit.'
COMMIT_CMDS = [
{'cmd': 'some_command'},
{'cmd2': 'another_command'}
]
PLATFORM_ANDROID = 'android'
# Timestamp in sec since epoch for Mar 7 2021 21:17:16 UTC.
REPORT_SUBMITTED_TIMESTAMP = datetime.datetime.fromtimestamp(1615151836)
# Timestamp in sec since epoch for Mar 19 2021 17:10:36 UTC.
TICKET_CREATION_TIMESTAMP = datetime.datetime.fromtimestamp(1616173836)
TICKET_ID = '%s.%s.%s' % (
'random_hash', TICKET_CREATION_TIMESTAMP.second, '16CharString1234')
REPORT_TYPE_SUGGESTION = 'suggestion'
CATEGORY_OTHER = 'other'
PLATFORM_VERSION = '0.1-alpha-abcdef1234'
DEVICE_COUNTRY_LOCALE_CODE_INDIA = 'in'
ANDROID_DEVICE_MODEL = 'Pixel 4a'
ANDROID_SDK_VERSION = 28
ENTRY_POINT_NAVIGATION_DRAWER = 'navigation_drawer'
TEXT_LANGUAGE_CODE_ENGLISH = 'en'
AUDIO_LANGUAGE_CODE_ENGLISH = 'en'
ANDROID_REPORT_INFO = {
'user_feedback_other_text_input': 'add an admin',
'event_logs': ['event1', 'event2'],
'logcat_logs': ['logcat1', 'logcat2'],
'package_version_code': 1,
'language_locale_code': 'en',
'entry_point_info': {
'entry_point_name': 'crash',
},
'text_size': 'MEDIUM_TEXT_SIZE',
'only_allows_wifi_download_and_update': True,
'automatically_update_topics': False,
'is_curriculum_admin': False
}
ANDROID_REPORT_INFO_SCHEMA_VERSION = 1
SUGGESTION_LANGUAGE_CODE = 'en'
SUBMITTED_TRANSLATIONS_COUNT = 2
SUBMITTED_TRANSLATION_WORD_COUNT = 100
ACCEPTED_TRANSLATIONS_COUNT = 1
ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT = 0
ACCEPTED_TRANSLATION_WORD_COUNT = 50
REJECTED_TRANSLATIONS_COUNT = 0
REJECTED_TRANSLATION_WORD_COUNT = 0
# Timestamp dates in sec since epoch for Mar 19 2021 UTC.
CONTRIBUTION_DATES = [
datetime.date.fromtimestamp(1616173836),
datetime.date.fromtimestamp(1616173837)
]
def set_up_non_trivial(self):
"""Set up all models for use in testing.
1) Simulates the creation of a user, user_1, and their stats model.
2) Simulates skill mastery of user_1 with two skills.
3) Simulates subscriptions to threads, activities, and collections.
4) Simulates creation and edit of an exploration by user_1.
5) Creates an ExplorationUserDataModel.
6) Simulates completion of some activities.
7) Simulates incomplete status of some activities.
8) Creates user LearnerGoalsModel.
9) Populates ExpUserLastPlaythroughModel of user.
10) Creates user LearnerPlaylistModels.
11) Simulates collection progress of user.
12) Simulates story progress of user.
13) Creates new collection rights.
14) Simulates a general suggestion.
15) Creates new exploration rights.
16) Populates user settings.
17) Creates voiceover applications, contribution rights and proficiency
models, and various snapshot metadata models.
18) Creates a task entry resolved by the user.
19) Simulates user_1 scrubbing a report.
20) Creates new BlogPostModel and BlogPostRightsModel.
21) Creates a TranslationContributionStatsModel.
"""
# Setup for UserStatsModel.
user_models.UserStatsModel(
id=self.USER_ID_1,
impact_score=self.USER_1_IMPACT_SCORE,
total_plays=self.USER_1_TOTAL_PLAYS,
average_ratings=self.USER_1_AVERAGE_RATINGS,
num_ratings=self.USER_1_NUM_RATINGS,
weekly_creator_stats_list=self.USER_1_WEEKLY_CREATOR_STATS_LIST
).put()
# Setup for UserSkillModel.
user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
self.USER_ID_1, self.SKILL_ID_1),
user_id=self.USER_ID_1,
skill_id=self.SKILL_ID_1,
degree_of_mastery=self.DEGREE_OF_MASTERY).put()
user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
self.USER_ID_1, self.SKILL_ID_2),
user_id=self.USER_ID_1,
skill_id=self.SKILL_ID_2,
degree_of_mastery=self.DEGREE_OF_MASTERY).put()
user_models.UserSkillMasteryModel(
id=user_models.UserSkillMasteryModel.construct_model_id(
self.PROFILE_ID_1, self.SKILL_ID_3),
user_id=self.PROFILE_ID_1,
skill_id=self.SKILL_ID_3,
degree_of_mastery=self.DEGREE_OF_MASTERY_2).put()
# Setup for UserSubscriptionsModel.
for creator_id in self.CREATOR_IDS:
user_models.UserSettingsModel(
id=creator_id,
username='username' + creator_id,
email=creator_id + '@example.com'
).put()
user_models.UserSubscriptionsModel(
id=self.USER_ID_1, creator_ids=self.CREATOR_IDS,
collection_ids=self.COLLECTION_IDS,
exploration_ids=self.EXPLORATION_IDS,
general_feedback_thread_ids=self.GENERAL_FEEDBACK_THREAD_IDS,
last_checked=self.GENERIC_DATE).put()
# Setup for UserContributionsModel.
self.save_new_valid_exploration(
self.EXPLORATION_IDS[0], self.USER_ID_1, end_state_name='End')
exp_services.update_exploration(
self.USER_ID_1, self.EXPLORATION_IDS[0],
[exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
# Setup for ExplorationUserDataModel.
user_models.ExplorationUserDataModel(
id='%s.%s' % (self.USER_ID_1, self.EXPLORATION_IDS[0]),
user_id=self.USER_ID_1,
exploration_id=self.EXPLORATION_IDS[0], rating=2,
rated_on=self.GENERIC_DATE,
draft_change_list={'new_content': {}},
draft_change_list_last_updated=self.GENERIC_DATE,
draft_change_list_exp_version=3,
draft_change_list_id=1).put()
# Setup for CompletedActivitiesModel.
user_models.CompletedActivitiesModel(
id=self.USER_ID_1,
exploration_ids=self.EXPLORATION_IDS,
collection_ids=self.COLLECTION_IDS,
story_ids=self.STORY_IDS,
learnt_topic_ids=self.TOPIC_IDS).put()
user_models.CompletedActivitiesModel(
id=self.PROFILE_ID_1,
exploration_ids=self.EXPLORATION_IDS_2,
collection_ids=self.COLLECTION_IDS_2,
story_ids=self.STORY_IDS_2,
learnt_topic_ids=self.TOPIC_IDS_2).put()
# Setup for IncompleteActivitiesModel.
user_models.IncompleteActivitiesModel(
id=self.USER_ID_1,
exploration_ids=self.EXPLORATION_IDS,
collection_ids=self.COLLECTION_IDS,
story_ids=self.STORY_IDS,
partially_learnt_topic_ids=self.TOPIC_IDS).put()
# Setup for ExpUserLastPlaythroughModel.
user_models.ExpUserLastPlaythroughModel(
id='%s.%s' % (self.USER_ID_1, self.EXPLORATION_IDS[0]),
user_id=self.USER_ID_1, exploration_id=self.EXPLORATION_IDS[0],
last_played_exp_version=self.EXP_VERSION,
last_played_state_name=self.STATE_NAME).put()
# Setup for LearnerGoalsModel.
user_models.LearnerGoalsModel(
id=self.USER_ID_1,
topic_ids_to_learn=self.TOPIC_IDS).put()
user_models.LearnerGoalsModel(
id=self.PROFILE_ID_1,
topic_ids_to_learn=self.TOPIC_IDS_2).put()
# Setup for LearnerPlaylistModel.
user_models.LearnerPlaylistModel(
id=self.USER_ID_1,
exploration_ids=self.EXPLORATION_IDS,
collection_ids=self.COLLECTION_IDS).put()
user_models.LearnerPlaylistModel(
id=self.PROFILE_ID_1,
exploration_ids=self.EXPLORATION_IDS_2,
collection_ids=self.COLLECTION_IDS_2).put()
# Setup for CollectionProgressModel.
user_models.CollectionProgressModel(
id='%s.%s' % (self.USER_ID_1, self.COLLECTION_IDS[0]),
user_id=self.USER_ID_1,
collection_id=self.COLLECTION_IDS[0],
completed_explorations=self.EXPLORATION_IDS).put()
user_models.CollectionProgressModel(
id='%s.%s' % (self.PROFILE_ID_1, self.COLLECTION_IDS_2[0]),
user_id=self.PROFILE_ID_1,
collection_id=self.COLLECTION_IDS_2[0],
completed_explorations=self.EXPLORATION_IDS_2).put()
# Setup for StoryProgressModel.
user_models.StoryProgressModel(
id='%s.%s' % (self.USER_ID_1, self.STORY_ID_1),
user_id=self.USER_ID_1,
story_id=self.STORY_ID_1,
completed_node_ids=self.COMPLETED_NODE_IDS_1).put()
user_models.StoryProgressModel(
id='%s.%s' % (self.PROFILE_ID_1, self.STORY_ID_2),
user_id=self.PROFILE_ID_1,
story_id=self.STORY_ID_2,
completed_node_ids=self.COMPLETED_NODE_IDS_2).put()
# Setup for CollectionRightsModel.
collection_models.CollectionRightsModel(
id=self.COLLECTION_IDS[0],
owner_ids=[self.USER_ID_1],
editor_ids=[self.USER_ID_1],
voice_artist_ids=[self.USER_ID_1],
viewer_ids=[self.USER_ID_1],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
).save(
'cid', 'Created new collection right',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
# Setup for GeneralSuggestionModel.
suggestion_models.GeneralSuggestionModel.create(
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
feconf.ENTITY_TYPE_EXPLORATION,
self.EXPLORATION_IDS[0], 1,
suggestion_models.STATUS_IN_REVIEW, self.USER_ID_1,
'reviewer_1', self.CHANGE_CMD, self.SCORE_CATEGORY,
'exploration.exp1.thread_1', None)
# Setup for TopicRightsModel.
topic_models.TopicRightsModel(
id=self.TOPIC_ID_1,
manager_ids=[self.USER_ID_1],
topic_is_published=True
).commit(
'committer_id',
'New topic rights',
[{'cmd': topic_domain.CMD_CREATE_NEW}])
topic_models.TopicRightsModel(
id=self.TOPIC_ID_2,
manager_ids=[self.USER_ID_1],
topic_is_published=True
).commit(
'committer_id',
'New topic rights',
[{'cmd': topic_domain.CMD_CREATE_NEW}])
# Setup for ExplorationRightsModel.
exploration_models.ExplorationRightsModel(
id=self.EXPLORATION_IDS[0],
owner_ids=[self.USER_ID_1],
editor_ids=[self.USER_ID_1],
voice_artist_ids=[self.USER_ID_1],
viewer_ids=[self.USER_ID_1],
community_owned=False,
status=constants.ACTIVITY_STATUS_PUBLIC,
viewable_if_private=False,
first_published_msec=0.0
).save(
'cid', 'Created new exploration right',
[{'cmd': rights_domain.CMD_CREATE_NEW}])
# Setup for UserSettingsModel.
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.USER_1_EMAIL,
roles=[self.USER_1_ROLE],
username=self.GENERIC_USERNAME,
normalized_username=self.GENERIC_USERNAME,
last_agreed_to_terms=self.GENERIC_DATE,
last_started_state_editor_tutorial=self.GENERIC_DATE,
last_started_state_translation_tutorial=self.GENERIC_DATE,
last_logged_in=self.GENERIC_DATE,
last_created_an_exploration=self.GENERIC_DATE,
last_edited_an_exploration=self.GENERIC_DATE,
profile_picture_data_url=self.GENERIC_IMAGE_URL,
default_dashboard='learner', creator_dashboard_display_pref='card',
user_bio=self.GENERIC_USER_BIO,
subject_interests=self.GENERIC_SUBJECT_INTERESTS,
first_contribution_msec=1,
preferred_language_codes=self.GENERIC_LANGUAGE_CODES,
preferred_site_language_code=self.GENERIC_LANGUAGE_CODES[0],
preferred_audio_language_code=self.GENERIC_LANGUAGE_CODES[0],
display_alias=self.GENERIC_DISPLAY_ALIAS,
pin=self.GENERIC_PIN
).put()
user_models.UserSettingsModel(
id=self.PROFILE_ID_1,
email=self.USER_1_EMAIL,
roles=[self.PROFILE_1_ROLE],
username=None,
normalized_username=None,
last_agreed_to_terms=self.GENERIC_DATE,
last_started_state_editor_tutorial=None,
last_started_state_translation_tutorial=None,
last_logged_in=self.GENERIC_DATE,
last_created_an_exploration=None,
last_edited_an_exploration=None,
profile_picture_data_url=None,
default_dashboard='learner', creator_dashboard_display_pref='card',
user_bio=self.GENERIC_USER_BIO,
subject_interests=self.GENERIC_SUBJECT_INTERESTS,
first_contribution_msec=None,
preferred_language_codes=self.GENERIC_LANGUAGE_CODES,
preferred_site_language_code=self.GENERIC_LANGUAGE_CODES[0],
preferred_audio_language_code=self.GENERIC_LANGUAGE_CODES[0],
display_alias=self.GENERIC_DISPLAY_ALIAS_2
).put()
suggestion_models.GeneralVoiceoverApplicationModel(
id='application_1_id',
target_type='exploration',
target_id='exp_id',
status=suggestion_models.STATUS_IN_REVIEW,
author_id=self.USER_ID_1,
final_reviewer_id='reviewer_id',
language_code=self.SUGGESTION_LANGUAGE_CODE,
filename='application_audio.mp3',
content='<p>Some content</p>',
rejection_message=None).put()
suggestion_models.GeneralVoiceoverApplicationModel(
id='application_2_id',
target_type='exploration',
target_id='exp_id',
status=suggestion_models.STATUS_IN_REVIEW,
author_id=self.USER_ID_1,
final_reviewer_id=None,
language_code=self.SUGGESTION_LANGUAGE_CODE,
filename='application_audio.mp3',
content='<p>Some content</p>',
rejection_message=None).put()
suggestion_models.TranslationContributionStatsModel.create(
language_code=self.SUGGESTION_LANGUAGE_CODE,
contributor_user_id=self.USER_ID_1,
topic_id=self.TOPIC_ID_1,
submitted_translations_count=self.SUBMITTED_TRANSLATIONS_COUNT,
submitted_translation_word_count=(
self.SUBMITTED_TRANSLATION_WORD_COUNT),
accepted_translations_count=self.ACCEPTED_TRANSLATIONS_COUNT,
accepted_translations_without_reviewer_edits_count=(
self.ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT),
accepted_translation_word_count=(
self.ACCEPTED_TRANSLATION_WORD_COUNT),
rejected_translations_count=self.REJECTED_TRANSLATIONS_COUNT,
rejected_translation_word_count=(
self.REJECTED_TRANSLATION_WORD_COUNT),
contribution_dates=self.CONTRIBUTION_DATES
)
user_models.UserContributionRightsModel(
id=self.USER_ID_1,
can_review_translation_for_language_codes=['hi', 'en'],
can_review_voiceover_for_language_codes=['hi'],
can_review_questions=True).put()
user_models.UserContributionProficiencyModel(
id='%s.%s' % (self.SCORE_CATEGORY_1, self.USER_ID_1),
user_id=self.USER_ID_1,
score_category=self.SCORE_CATEGORY_1,
score=1.5,
onboarding_email_sent=False
).put()
user_models.UserContributionProficiencyModel(
id='%s.%s' % (self.SCORE_CATEGORY_2, self.USER_ID_1),
user_id=self.USER_ID_1,
score_category=self.SCORE_CATEGORY_2,
score=2,
onboarding_email_sent=False
).put()
collection_models.CollectionRightsSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
collection_models.CollectionSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
skill_models.SkillSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
subtopic_models.SubtopicPageSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
topic_models.TopicRightsSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
topic_models.TopicSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
story_models.StorySnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
question_models.QuestionSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
config_models.ConfigPropertySnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
exploration_models.ExplorationRightsSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
improvements_models.TaskEntryModel(
id=self.GENERIC_MODEL_ID,
composite_entity_id=self.GENERIC_MODEL_ID,
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.GENERIC_MODEL_ID,
entity_version=1,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=self.GENERIC_MODEL_ID,
status=improvements_models.TASK_STATUS_OPEN,
resolver_id=self.USER_ID_1
).put()
config_models.PlatformParameterSnapshotMetadataModel(
id=self.GENERIC_MODEL_ID, committer_id=self.USER_ID_1,
commit_type=self.COMMIT_TYPE, commit_message=self.COMMIT_MESSAGE,
commit_cmds=self.COMMIT_CMDS
).put()
user_models.UserEmailPreferencesModel(
id=self.USER_ID_1,
site_updates=False,
editor_role_notifications=False,
feedback_message_notifications=False,
subscription_notifications=False
).put()
auth_models.UserAuthDetailsModel(
id=self.USER_ID_1,
parent_user_id=self.PROFILE_ID_1
).put()
# Set-up for AppFeedbackReportModel scrubbed by user.
report_id = '%s.%s.%s' % (
self.PLATFORM_ANDROID, self.REPORT_SUBMITTED_TIMESTAMP.second,
'randomInteger123')
app_feedback_report_models.AppFeedbackReportModel(
id=report_id,
platform=self.PLATFORM_ANDROID,
scrubbed_by=None,
ticket_id='%s.%s.%s' % (
'random_hash', self.TICKET_CREATION_TIMESTAMP.second,
'16CharString1234'),
submitted_on=self.REPORT_SUBMITTED_TIMESTAMP,
local_timezone_offset_hrs=0,
report_type=self.REPORT_TYPE_SUGGESTION,
category=self.CATEGORY_OTHER,
platform_version=self.PLATFORM_VERSION,
android_device_country_locale_code=(
self.DEVICE_COUNTRY_LOCALE_CODE_INDIA),
android_device_model=self.ANDROID_DEVICE_MODEL,
android_sdk_version=self.ANDROID_SDK_VERSION,
entry_point=self.ENTRY_POINT_NAVIGATION_DRAWER,
text_language_code=self.TEXT_LANGUAGE_CODE_ENGLISH,
audio_language_code=self.AUDIO_LANGUAGE_CODE_ENGLISH,
android_report_info=self.ANDROID_REPORT_INFO,
android_report_info_schema_version=(
self.ANDROID_REPORT_INFO_SCHEMA_VERSION)
).put()
report_entity = (
app_feedback_report_models.AppFeedbackReportModel.get_by_id(
report_id))
report_entity.scrubbed_by = self.USER_ID_1
report_entity.update_timestamps()
report_entity.put()
# Set-up for the BlogPostModel.
blog_post_model = blog_models.BlogPostModel(
id=self.BLOG_POST_ID_1,
author_id=self.USER_ID_1,
content='content sample',
title='sample title',
published_on=datetime.datetime.utcnow(),
url_fragment='sample-url-fragment',
tags=['tag', 'one'],
thumbnail_filename='thumbnail'
)
blog_post_model.update_timestamps()
blog_post_model.put()
blog_post_rights_for_post_1 = blog_models.BlogPostRightsModel(
id=self.BLOG_POST_ID_1,
editor_ids=[self.USER_ID_1],
blog_post_is_published=True,
)
blog_post_rights_for_post_1.update_timestamps()
blog_post_rights_for_post_1.put()
blog_post_rights_for_post_2 = blog_models.BlogPostRightsModel(
id=self.BLOG_POST_ID_2,
editor_ids=[self.USER_ID_1],
blog_post_is_published=False,
)
blog_post_rights_for_post_2.update_timestamps()
blog_post_rights_for_post_2.put()
def set_up_trivial(self):
"""Setup for trivial test of export_data functionality."""
user_models.UserSettingsModel(
id=self.USER_ID_1,
email=self.USER_1_EMAIL,
roles=[self.USER_1_ROLE]
).put()
user_models.UserSettingsModel(
id=self.PROFILE_ID_1,
email=self.USER_1_EMAIL,
roles=[self.PROFILE_1_ROLE]
).put()
user_models.UserSubscriptionsModel(id=self.USER_ID_1).put()
def test_export_nonexistent_full_user_raises_error(self):
"""Setup for nonexistent user test of export_data functionality."""
with self.assertRaisesRegex(
user_models.UserSettingsModel.EntityNotFoundError,
'Entity for class UserSettingsModel with id fake_user_id '
'not found'):
takeout_service.export_data_for_user('fake_user_id')
def test_export_data_for_full_user_trivial_is_correct(self):
"""Trivial test of export_data functionality."""
self.set_up_trivial()
self.maxDiff = None
# Generate expected output.
app_feedback_report = {}
collection_progress_data = {}
collection_rights_data = {
'editable_collection_ids': [],
'owned_collection_ids': [],
'viewable_collection_ids': [],
'voiced_collection_ids': []
}
completed_activities_data = {}
contribution_data = {}
exploration_rights_data = {
'editable_exploration_ids': [],
'owned_exploration_ids': [],
'viewable_exploration_ids': [],
'voiced_exploration_ids': []
}
exploration_data = {}
general_feedback_message_data = {}
general_feedback_thread_data = {}
general_feedback_thread_user_data = {}
general_suggestion_data = {}
last_playthrough_data = {}
learner_goals_data = {}
learner_playlist_data = {}
incomplete_activities_data = {}
user_settings_data = {
'email': 'user1@example.com',
'roles': [feconf.ROLE_ID_CURRICULUM_ADMIN],
'banned': False,
'username': None,
'normalized_username': None,
'last_agreed_to_terms_msec': None,
'last_started_state_editor_tutorial_msec': None,
'last_started_state_translation_tutorial_msec': None,
'last_logged_in_msec': None,
'last_edited_an_exploration_msec': None,
'last_created_an_exploration_msec': None,
'profile_picture_filename': None,
'default_dashboard': 'learner',
'creator_dashboard_display_pref': 'card',
'user_bio': None,
'subject_interests': [],
'first_contribution_msec': None,
'preferred_language_codes': [],
'preferred_site_language_code': None,
'preferred_audio_language_code': None,
'display_alias': None,
}
skill_data = {}
stats_data = {}
story_progress_data = {}
subscriptions_data = {
'exploration_ids': [],
'collection_ids': [],
'creator_usernames': [],
'general_feedback_thread_ids': [],
'last_checked_msec': None
}
task_entry_data = {
'task_ids_resolved_by_user': [],
'issue_descriptions': [],
'resolution_msecs': [],
'statuses': []
}
topic_rights_data = {
'managed_topic_ids': []
}
expected_voiceover_application_data = {}
expected_contrib_proficiency_data = {}
expected_contribution_rights_data = {}
expected_collection_rights_sm = {}
expected_collection_sm = {}
expected_skill_sm = {}
expected_subtopic_page_sm = {}
expected_topic_rights_sm = {}
expected_topic_sm = {}
expected_translation_contribution_stats = {}
expected_story_sm = {}
expected_question_sm = {}
expected_config_property_sm = {}
expected_exploration_rights_sm = {}
expected_exploration_sm = {}
expected_platform_parameter_sm = {}
expected_user_auth_details = {}
expected_user_email_preferences = {}
expected_blog_post_data = {}
expected_blog_post_rights = {
'editable_blog_post_ids': []
}
expected_user_data = {
'app_feedback_report': app_feedback_report,
'blog_post': expected_blog_post_data,
'blog_post_rights': expected_blog_post_rights,
'user_stats': stats_data,
'user_settings': user_settings_data,
'user_subscriptions': subscriptions_data,
'user_skill_mastery': skill_data,
'user_contributions': contribution_data,
'exploration_user_data': exploration_data,
'completed_activities': completed_activities_data,
'incomplete_activities': incomplete_activities_data,
'exp_user_last_playthrough': last_playthrough_data,
'learner_goals': learner_goals_data,
'learner_playlist': learner_playlist_data,
'task_entry': task_entry_data,
'topic_rights': topic_rights_data,
'collection_progress': collection_progress_data,
'story_progress': story_progress_data,
'general_feedback_thread': general_feedback_thread_data,
'general_feedback_thread_user':
general_feedback_thread_user_data,
'general_feedback_message': general_feedback_message_data,
'collection_rights': collection_rights_data,
'general_suggestion': general_suggestion_data,
'exploration_rights': exploration_rights_data,
'general_voiceover_application':
expected_voiceover_application_data,
'user_contribution_proficiency': expected_contrib_proficiency_data,
'user_contribution_rights': expected_contribution_rights_data,
'collection_rights_snapshot_metadata':
expected_collection_rights_sm,
'collection_snapshot_metadata':
expected_collection_sm,
'skill_snapshot_metadata':
expected_skill_sm,
'subtopic_page_snapshot_metadata':
expected_subtopic_page_sm,
'topic_rights_snapshot_metadata':
expected_topic_rights_sm,
'topic_snapshot_metadata': expected_topic_sm,
'translation_contribution_stats':
expected_translation_contribution_stats,
'story_snapshot_metadata': expected_story_sm,
'question_snapshot_metadata': expected_question_sm,
'config_property_snapshot_metadata':
expected_config_property_sm,
'exploration_rights_snapshot_metadata':
expected_exploration_rights_sm,
'exploration_snapshot_metadata': expected_exploration_sm,
'platform_parameter_snapshot_metadata':
expected_platform_parameter_sm,
'user_auth_details': expected_user_auth_details,
'user_email_preferences': expected_user_email_preferences
}
# Perform export and compare.
user_takeout_object = takeout_service.export_data_for_user(
self.USER_ID_1)
observed_data = user_takeout_object.user_data
observed_images = user_takeout_object.user_images
self.assertEqual(expected_user_data, observed_data)
observed_json = json.dumps(observed_data)
expected_json = json.dumps(expected_user_data)
self.assertEqual(json.loads(expected_json), json.loads(observed_json))
expected_images = []
self.assertEqual(expected_images, observed_images)
def test_exports_have_single_takeout_dict_key(self):
"""Test to ensure that all export policies that specify a key for the
Takeout dict are also models that specify this policy are type
MULTIPLE_INSTANCES_PER_USER.
"""
self.set_up_non_trivial()
# We set up the feedback_thread_model here so that this user has
# associated feedback models populated.
feedback_thread_model = feedback_models.GeneralFeedbackThreadModel(
entity_type=self.THREAD_ENTITY_TYPE,
entity_id=self.THREAD_ENTITY_ID,
original_author_id=self.USER_ID_1,
status=self.THREAD_STATUS,
subject=self.THREAD_SUBJECT,
has_suggestion=self.THREAD_HAS_SUGGESTION,
summary=self.THREAD_SUMMARY,
message_count=self.THREAD_MESSAGE_COUNT
)
feedback_thread_model.put()
thread_id = feedback_services.create_thread(
self.THREAD_ENTITY_TYPE,
self.THREAD_ENTITY_ID,
self.USER_ID_1,
self.THREAD_SUBJECT,
self.MESSAGE_TEXT
)
feedback_services.create_message(
thread_id,
self.USER_ID_1,
self.THREAD_STATUS,
self.THREAD_SUBJECT,
self.MESSAGE_TEXT
)
# Retrieve all models for export.
all_models = [
clazz
for clazz in test_utils.get_storage_model_classes()
if (not clazz.__name__ in
test_utils.BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES)
]
for model in all_models:
export_method = model.get_model_association_to_user()
export_policy = model.get_export_policy()
num_takeout_keys = 0
for field_export_policy in export_policy.values():
if (field_export_policy ==
base_models
.EXPORT_POLICY
.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT):
num_takeout_keys += 1
if (export_method ==
base_models.MODEL_ASSOCIATION_TO_USER
.MULTIPLE_INSTANCES_PER_USER):
# If the id is used as a Takeout key, then we should not
# have any fields exported as the key for the Takeout.
self.assertEqual(
num_takeout_keys,
0 if model.ID_IS_USED_AS_TAKEOUT_KEY else 1)
else:
self.assertEqual(num_takeout_keys, 0)
def test_exports_follow_export_policies(self):
"""Test to ensure that all fields that should be exported
per the export policy are exported, and exported in the proper format.
"""
self.set_up_non_trivial()
# We set up the feedback_thread_model here so that this user has
# associated feedback models populated.
feedback_thread_model = feedback_models.GeneralFeedbackThreadModel(
entity_type=self.THREAD_ENTITY_TYPE,
entity_id=self.THREAD_ENTITY_ID,
original_author_id=self.USER_ID_1,
status=self.THREAD_STATUS,
subject=self.THREAD_SUBJECT,
has_suggestion=self.THREAD_HAS_SUGGESTION,
summary=self.THREAD_SUMMARY,
message_count=self.THREAD_MESSAGE_COUNT
)
feedback_thread_model.put()
thread_id = feedback_services.create_thread(
self.THREAD_ENTITY_TYPE,
self.THREAD_ENTITY_ID,
self.USER_ID_1,
self.THREAD_SUBJECT,
self.MESSAGE_TEXT
)
feedback_services.create_message(
thread_id,
self.USER_ID_1,
self.THREAD_STATUS,
self.THREAD_SUBJECT,
self.MESSAGE_TEXT
)
# Retrieve all models for export.
all_models = [
clazz
for clazz in test_utils.get_storage_model_classes()
if (not clazz.__name__ in
test_utils.BASE_MODEL_CLASSES_WITHOUT_DATA_POLICIES)
]
# Iterate over models and test export policies.
for model in all_models:
export_method = model.get_model_association_to_user()
export_policy = model.get_export_policy()
renamed_export_keys = model.get_field_names_for_takeout()
exported_field_names = []
field_used_as_key_for_takeout_dict = None
for field_name in model._properties: # pylint: disable=protected-access
if (export_policy[field_name] ==
base_models.EXPORT_POLICY.EXPORTED):
if field_name in renamed_export_keys:
exported_field_names.append(
renamed_export_keys[field_name]
)
else:
exported_field_names.append(field_name)
elif (export_policy[field_name] ==
base_models
.EXPORT_POLICY.EXPORTED_AS_KEY_FOR_TAKEOUT_DICT):
field_used_as_key_for_takeout_dict = field_name
if (export_method ==
base_models
.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER):
self.assertEqual(len(exported_field_names), 0)
elif (export_method ==
base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER):
exported_data = model.export_data(self.USER_ID_1)
self.assertEqual(
sorted([str(key) for key in exported_data.keys()]),
sorted(exported_field_names)
)
elif (export_method ==
base_models
.MODEL_ASSOCIATION_TO_USER
.ONE_INSTANCE_SHARED_ACROSS_USERS):
self.assertIsNotNone(
model.get_field_name_mapping_to_takeout_keys)
exported_data = model.export_data(self.USER_ID_1)
field_mapping = model.get_field_name_mapping_to_takeout_keys()
self.assertEqual(
sorted(exported_field_names),
sorted(field_mapping.keys())
)
self.assertEqual(
sorted(exported_data.keys()),
sorted(field_mapping.values())
)
elif (export_method ==
base_models
.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER):
exported_data = model.export_data(self.USER_ID_1)
for model_id in exported_data.keys():
# If we are using a field as a Takeout key.
if field_used_as_key_for_takeout_dict:
# Ensure that we export the field.
self.assertEqual(
model_id,
getattr(
model,
field_used_as_key_for_takeout_dict)
)
self.assertEqual(
sorted([
str(key)
for key in exported_data[model_id].keys()]),
sorted(exported_field_names)
)
def test_export_data_for_full_user_nontrivial_is_correct(self):
"""Nontrivial test of export_data functionality."""
self.set_up_non_trivial()
# We set up the feedback_thread_model here so that we can easily
# access it when computing the expected data later.
feedback_thread_model = feedback_models.GeneralFeedbackThreadModel(
entity_type=self.THREAD_ENTITY_TYPE,
entity_id=self.THREAD_ENTITY_ID,
original_author_id=self.USER_ID_1,
status=self.THREAD_STATUS,
subject=self.THREAD_SUBJECT,
has_suggestion=self.THREAD_HAS_SUGGESTION,
summary=self.THREAD_SUMMARY,
message_count=self.THREAD_MESSAGE_COUNT
)
feedback_thread_model.update_timestamps()
feedback_thread_model.put()
blog_post_model = blog_models.BlogPostModel(
id=self.BLOG_POST_ID_1,
author_id=self.USER_ID_1,
content='content sample',
title='sample title',
published_on=datetime.datetime.utcnow(),
url_fragment='sample-url-fragment',
tags=['tag', 'one'],
thumbnail_filename='thumbnail'
)
blog_post_model.update_timestamps()
blog_post_model.put()
expected_stats_data = {
'impact_score': self.USER_1_IMPACT_SCORE,
'total_plays': self.USER_1_TOTAL_PLAYS,
'average_ratings': self.USER_1_AVERAGE_RATINGS,
'num_ratings': self.USER_1_NUM_RATINGS,
'weekly_creator_stats_list': self.USER_1_WEEKLY_CREATOR_STATS_LIST
}
expected_user_skill_data = {
self.SKILL_ID_1: self.DEGREE_OF_MASTERY,
self.SKILL_ID_2: self.DEGREE_OF_MASTERY
}
expected_contribution_data = {
'created_exploration_ids': [self.EXPLORATION_IDS[0]],
'edited_exploration_ids': [self.EXPLORATION_IDS[0]]
}
expected_exploration_data = {
self.EXPLORATION_IDS[0]: {
'rating': 2,
'rated_on_msec': self.GENERIC_EPOCH,
'draft_change_list': {'new_content': {}},
'draft_change_list_last_updated_msec': self.GENERIC_EPOCH,
'draft_change_list_exp_version': 3,
'draft_change_list_id': 1,
'mute_suggestion_notifications': (
feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),
'mute_feedback_notifications': (
feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)
}
}
expected_completed_activities_data = {
'completed_exploration_ids': self.EXPLORATION_IDS,
'completed_collection_ids': self.COLLECTION_IDS,
'completed_story_ids': self.STORY_IDS,
'learnt_topic_ids': self.TOPIC_IDS
}
expected_incomplete_activities_data = {
'incomplete_exploration_ids': self.EXPLORATION_IDS,
'incomplete_collection_ids': self.COLLECTION_IDS,
'incomplete_story_ids': self.STORY_IDS,
'partially_learnt_topic_ids': self.TOPIC_IDS
}
expected_last_playthrough_data = {
self.EXPLORATION_IDS[0]: {
'exp_version': self.EXP_VERSION,
'state_name': self.STATE_NAME
}
}
expected_learner_goals_data = {
'topic_ids_to_learn': self.TOPIC_IDS
}
expected_learner_playlist_data = {
'playlist_exploration_ids': self.EXPLORATION_IDS,
'playlist_collection_ids': self.COLLECTION_IDS
}
expected_collection_progress_data = {
self.COLLECTION_IDS[0]: self.EXPLORATION_IDS
}
expected_story_progress_data = {
self.STORY_ID_1: self.COMPLETED_NODE_IDS_1
}
thread_id = feedback_services.create_thread(
self.THREAD_ENTITY_TYPE,
self.THREAD_ENTITY_ID,
self.USER_ID_1,
self.THREAD_SUBJECT,
self.MESSAGE_TEXT
)
feedback_services.create_message(
thread_id,
self.USER_ID_1,
self.THREAD_STATUS,
self.THREAD_SUBJECT,
self.MESSAGE_TEXT
)
expected_general_feedback_thread_data = {
feedback_thread_model.id: {
'entity_type': self.THREAD_ENTITY_TYPE,
'entity_id': self.THREAD_ENTITY_ID,
'status': self.THREAD_STATUS,
'subject': self.THREAD_SUBJECT,
'has_suggestion': self.THREAD_HAS_SUGGESTION,
'summary': self.THREAD_SUMMARY,
'message_count': self.THREAD_MESSAGE_COUNT,
'last_updated_msec': utils.get_time_in_millisecs(
feedback_thread_model.last_updated)
},
thread_id: {
'entity_type': self.THREAD_ENTITY_TYPE,
'entity_id': self.THREAD_ENTITY_ID,
'status': self.THREAD_STATUS,
'subject': self.THREAD_SUBJECT,
'has_suggestion': False,
'summary': None,
'message_count': 2,
'last_updated_msec': utils.get_time_in_millisecs(
feedback_models.
GeneralFeedbackThreadModel.
get_by_id(thread_id).last_updated)
}
}
expected_general_feedback_thread_user_data = {
thread_id: {
'message_ids_read_by_user': self.MESSAGE_IDS_READ_BY_USER
}
}
expected_general_feedback_message_data = {
thread_id + '.0': {
'thread_id': thread_id,
'message_id': 0,
'updated_status': self.THREAD_STATUS,
'updated_subject': self.THREAD_SUBJECT,
'text': self.MESSAGE_TEXT,
'received_via_email': self.MESSAGE_RECEIVED_VIA_EMAIL
},
thread_id + '.1': {
'thread_id': thread_id,
'message_id': 1,
'updated_status': self.THREAD_STATUS,
'updated_subject': self.THREAD_SUBJECT,
'text': self.MESSAGE_TEXT,
'received_via_email': self.MESSAGE_RECEIVED_VIA_EMAIL
}
}
expected_collection_rights_data = {
'owned_collection_ids': (
[self.COLLECTION_IDS[0]]),
'editable_collection_ids': (
[self.COLLECTION_IDS[0]]),
'voiced_collection_ids': (
[self.COLLECTION_IDS[0]]),
'viewable_collection_ids': [self.COLLECTION_IDS[0]]
}
expected_general_suggestion_data = {
'exploration.exp1.thread_1': {
'suggestion_type': (
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': self.EXPLORATION_IDS[0],
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_IN_REVIEW,
'change_cmd': self.CHANGE_CMD
}
}
expected_exploration_rights_data = {
'owned_exploration_ids': (
[self.EXPLORATION_IDS[0]]),
'editable_exploration_ids': (
[self.EXPLORATION_IDS[0]]),
'voiced_exploration_ids': (
[self.EXPLORATION_IDS[0]]),
'viewable_exploration_ids': [self.EXPLORATION_IDS[0]]
}
expected_user_settings_data = {
'email': self.USER_1_EMAIL,
'roles': [feconf.ROLE_ID_CURRICULUM_ADMIN],
'username': self.GENERIC_USERNAME,
'normalized_username': self.GENERIC_USERNAME,
'last_agreed_to_terms_msec': self.GENERIC_EPOCH,
'last_started_state_editor_tutorial_msec': self.GENERIC_EPOCH,
'last_started_state_translation_tutorial_msec': self.GENERIC_EPOCH,
'last_logged_in_msec': self.GENERIC_EPOCH,
'last_edited_an_exploration_msec': self.GENERIC_EPOCH,
'last_created_an_exploration_msec': self.GENERIC_EPOCH,
'profile_picture_filename': 'user_settings_profile_picture.png',
'default_dashboard': 'learner',
'creator_dashboard_display_pref': 'card',
'user_bio': self.GENERIC_USER_BIO,
'subject_interests': self.GENERIC_SUBJECT_INTERESTS,
'first_contribution_msec': 1,
'preferred_language_codes': self.GENERIC_LANGUAGE_CODES,
'preferred_site_language_code': self.GENERIC_LANGUAGE_CODES[0],
'preferred_audio_language_code': self.GENERIC_LANGUAGE_CODES[0],
'display_alias': self.GENERIC_DISPLAY_ALIAS,
}
expected_subscriptions_data = {
'creator_usernames': self.CREATOR_USERNAMES,
'collection_ids': self.COLLECTION_IDS,
'exploration_ids': self.EXPLORATION_IDS,
'general_feedback_thread_ids': self.GENERAL_FEEDBACK_THREAD_IDS +
[thread_id],
'last_checked_msec': self.GENERIC_EPOCH
}
expected_task_entry_data = {
'task_ids_resolved_by_user': [self.GENERIC_MODEL_ID]
}
expected_topic_data = {
'managed_topic_ids': [self.TOPIC_ID_1, self.TOPIC_ID_2]
}
expected_voiceover_application_data = {
'application_1_id': {
'target_type': 'exploration',
'target_id': 'exp_id',
'status': 'review',
'language_code': 'en',
'filename': 'application_audio.mp3',
'content': '<p>Some content</p>',
'rejection_message': None
},
'application_2_id': {
'target_type': 'exploration',
'target_id': 'exp_id',
'status': 'review',
'language_code': 'en',
'filename': 'application_audio.mp3',
'content': '<p>Some content</p>',
'rejection_message': None
}
}
expected_contribution_rights_data = {
'can_review_translation_for_language_codes': ['hi', 'en'],
'can_review_voiceover_for_language_codes': ['hi'],
'can_review_questions': True
}
expected_contrib_proficiency_data = {
self.SCORE_CATEGORY_1: {
'onboarding_email_sent': False,
'score': 1.5
},
self.SCORE_CATEGORY_2: {
'onboarding_email_sent': False,
'score': 2
}
}
expected_collection_rights_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_collection_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_skill_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_subtopic_page_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_topic_rights_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_topic_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_story_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_question_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_config_property_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_exploration_rights_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_exploration_sm = {
'exp_1-1': {
'commit_type': 'create',
'commit_message':
'New exploration created with title \'A title\'.'
},
'exp_1-2': {
'commit_type': 'edit',
'commit_message': 'Test edit'
}
}
expected_platform_parameter_sm = {
self.GENERIC_MODEL_ID: {
'commit_type': self.COMMIT_TYPE,
'commit_message': self.COMMIT_MESSAGE,
}
}
expected_user_email_preferences = {}
expected_user_auth_details = {}
expected_app_feedback_report = {
'%s.%s.%s' % (
self.PLATFORM_ANDROID, self.REPORT_SUBMITTED_TIMESTAMP.second,
'randomInteger123'): {
'scrubbed_by': self.USER_ID_1,
'ticket_id': self.TICKET_ID,
'submitted_on': self.REPORT_SUBMITTED_TIMESTAMP.isoformat(),
'local_timezone_offset_hrs': 0,
'report_type': self.REPORT_TYPE_SUGGESTION,
'category': self.CATEGORY_OTHER,
'platform_version': self.PLATFORM_VERSION}}
expected_blog_post_data = {
'content': 'content sample',
'title': 'sample title',
'published_on': utils.get_time_in_millisecs(
blog_post_model.published_on),
'url_fragment': 'sample-url-fragment',
'tags': ['tag', 'one'],
'thumbnail_filename': 'thumbnail'
}
expected_blog_post_rights = {
'editable_blog_post_ids': [
self.BLOG_POST_ID_1,
self.BLOG_POST_ID_2
],
}
expected_translation_contribution_stats_data = {
'%s.%s.%s' % (
self.SUGGESTION_LANGUAGE_CODE, self.USER_ID_1,
self.TOPIC_ID_1): {
'language_code': self.SUGGESTION_LANGUAGE_CODE,
'topic_id': self.TOPIC_ID_1,
'submitted_translations_count': (
self.SUBMITTED_TRANSLATIONS_COUNT),
'submitted_translation_word_count': (
self.SUBMITTED_TRANSLATION_WORD_COUNT),
'accepted_translations_count': (
self.ACCEPTED_TRANSLATIONS_COUNT),
'accepted_translations_without_reviewer_edits_count': (
self
.ACCEPTED_TRANSLATIONS_WITHOUT_REVIEWER_EDITS_COUNT),
'accepted_translation_word_count': (
self.ACCEPTED_TRANSLATION_WORD_COUNT),
'rejected_translations_count': (
self.REJECTED_TRANSLATIONS_COUNT),
'rejected_translation_word_count': (
self.REJECTED_TRANSLATION_WORD_COUNT),
'contribution_dates': [
date.isoformat() for date in self.CONTRIBUTION_DATES]
}
}
expected_user_data = {
'user_stats': expected_stats_data,
'user_settings': expected_user_settings_data,
'user_subscriptions': expected_subscriptions_data,
'user_skill_mastery': expected_user_skill_data,
'user_contributions': expected_contribution_data,
'exploration_user_data': expected_exploration_data,
'completed_activities': expected_completed_activities_data,
'incomplete_activities': expected_incomplete_activities_data,
'exp_user_last_playthrough': expected_last_playthrough_data,
'learner_goals': expected_learner_goals_data,
'learner_playlist': expected_learner_playlist_data,
'task_entry': expected_task_entry_data,
'topic_rights': expected_topic_data,
'collection_progress': expected_collection_progress_data,
'story_progress': expected_story_progress_data,
'general_feedback_thread':
expected_general_feedback_thread_data,
'general_feedback_thread_user':
expected_general_feedback_thread_user_data,
'general_feedback_message':
expected_general_feedback_message_data,
'collection_rights':
expected_collection_rights_data,
'general_suggestion': expected_general_suggestion_data,
'exploration_rights': expected_exploration_rights_data,
'general_voiceover_application':
expected_voiceover_application_data,
'user_contribution_proficiency': expected_contrib_proficiency_data,
'user_contribution_rights': expected_contribution_rights_data,
'collection_rights_snapshot_metadata':
expected_collection_rights_sm,
'collection_snapshot_metadata':
expected_collection_sm,
'skill_snapshot_metadata':
expected_skill_sm,
'subtopic_page_snapshot_metadata':
expected_subtopic_page_sm,
'topic_rights_snapshot_metadata':
expected_topic_rights_sm,
'topic_snapshot_metadata': expected_topic_sm,
'translation_contribution_stats':
expected_translation_contribution_stats_data,
'story_snapshot_metadata': expected_story_sm,
'question_snapshot_metadata': expected_question_sm,
'config_property_snapshot_metadata':
expected_config_property_sm,
'exploration_rights_snapshot_metadata':
expected_exploration_rights_sm,
'exploration_snapshot_metadata': expected_exploration_sm,
'platform_parameter_snapshot_metadata':
expected_platform_parameter_sm,
'user_email_preferences': expected_user_email_preferences,
'user_auth_details': expected_user_auth_details,
'app_feedback_report': expected_app_feedback_report,
'blog_post': expected_blog_post_data,
'blog_post_rights': expected_blog_post_rights
}
user_takeout_object = takeout_service.export_data_for_user(
self.USER_ID_1)
observed_data = user_takeout_object.user_data
observed_images = user_takeout_object.user_images
self.assertItemsEqual(observed_data, expected_user_data)
observed_json = json.dumps(observed_data)
expected_json = json.dumps(expected_user_data)
self.assertItemsEqual(
json.loads(observed_json), json.loads(expected_json))
expected_images = [
takeout_domain.TakeoutImage(
self.GENERIC_IMAGE_URL, 'user_settings_profile_picture.png')
]
self.assertEqual(len(expected_images), len(observed_images))
for i, _ in enumerate(expected_images):
self.assertEqual(
expected_images[i].b64_image_data,
observed_images[i].b64_image_data
)
self.assertEqual(
expected_images[i].image_export_path,
observed_images[i].image_export_path
)
def test_export_for_full_user_does_not_export_profile_data(self):
"""Test that exporting data for a full user does not export
data for any profile user, at least for the models that were
populated for the profile user.
"""
self.set_up_non_trivial()
profile_user_settings_data = {
'email': self.USER_1_EMAIL,
'roles': [self.PROFILE_1_ROLE],
'username': None,
'normalized_username': None,
'last_agreed_to_terms_msec': self.GENERIC_EPOCH,
'last_started_state_editor_tutorial_msec': None,
'last_started_state_translation_tutorial_msec': None,
'last_logged_in_msec': self.GENERIC_EPOCH,
'last_created_an_exploration_msec': None,
'last_edited_an_exploration_msec': None,
'profile_picture_filename': None,
'default_dashboard': 'learner',
'creator_dashboard_display_pref': 'card',
'user_bio': self.GENERIC_USER_BIO,
'subject_interests': self.GENERIC_SUBJECT_INTERESTS,
'first_contribution_msec': None,
'preferred_language_codes': self.GENERIC_LANGUAGE_CODES,
'preferred_site_language_code': self.GENERIC_LANGUAGE_CODES[0],
'preferred_audio_language_code': self.GENERIC_LANGUAGE_CODES[0],
'display_alias': self.GENERIC_DISPLAY_ALIAS_2
}
user_skill_data = {
self.SKILL_ID_3: self.DEGREE_OF_MASTERY_2
}
completed_activities_data = {
'completed_exploration_ids': self.EXPLORATION_IDS_2,
'completed_collection_ids': self.COLLECTION_IDS_2,
'completed_story_ids': self.STORY_IDS_2,
'learnt_topic_ids': self.TOPIC_IDS_2
}
incomplete_activities_data = {}
last_playthrough_data = {}
learner_goals_data = {'topic_ids_to_learn': self.TOPIC_IDS_2}
learner_playlist_data = {
'playlist_exploration_ids': self.EXPLORATION_IDS_2,
'playlist_collection_ids': self.COLLECTION_IDS_2
}
collection_progress_data = {
self.COLLECTION_IDS_2[0]: self.EXPLORATION_IDS_2
}
story_progress_data = {
self.STORY_ID_2: self.COMPLETED_NODE_IDS_2
}
profile_user_data = {
'user_settings': profile_user_settings_data,
'user_skill_mastery': user_skill_data,
'completed_activities': completed_activities_data,
'incomplete_activities': incomplete_activities_data,
'exp_user_last_playthrough': last_playthrough_data,
'learner_goals': learner_goals_data,
'learner_playlist': learner_playlist_data,
'collection_progress': collection_progress_data,
'story_progress': story_progress_data,
}
user_takeout_object = takeout_service.export_data_for_user(
self.USER_ID_1)
observed_data = user_takeout_object.user_data
for key, value in profile_user_data.items():
self.assertNotEqual(value, observed_data[key])
|
from podium_api.account import make_account_get
from podium_api.events import (
make_events_get, make_event_create, make_event_get, make_event_delete,
make_event_update
)
from podium_api.devices import (
make_device_get, make_device_create, make_device_update,
make_device_delete, make_devices_get
)
from podium_api.friendships import (
make_friendship_get, make_friendships_get, make_friendship_create,
make_friendship_delete
)
from podium_api.users import make_user_get
from podium_api.eventdevices import (
make_eventdevices_get, make_eventdevice_create, make_eventdevice_update,
make_eventdevice_get, make_eventdevice_delete
)
from podium_api.alertmessages import (
make_alertmessages_get, make_alertmessage_get, make_alertmessage_create
)
from podium_api.venues import (
make_venues_get, make_venue_get
)
from podium_api.laps import make_laps_get, make_lap_get
class PodiumAPI(object):
"""
The PodiumApi object holds references to the interfaces to the
various asynchronous requests. You should provide a PodiumToken received
from **podium_api.login.make_login_post** to create this object.
Keep in mind all API requests are asynchronous; you need to provide
callback functions that will receive the data once the request has
completed. Most requests return their results in the on_success callback,
but some creation requests return their success as a redirect to the
newly created resource's URI. Reference the documentation for each
function for more details.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
**account** (PodiumAccountAPI): API object for account requests.
**events** (PodiumEventsAPI): API object for event requests.
**devices** (PodiumDevicesAPI): API object for device requests.
**friendships** (PodiumFriendshipsAPI): API object for friendship
requests.
**users** (PodiumUsersAPI): API object for user requests.
**eventdevices** (PodiumEventDevicesAPI): API object for event-device
requests.
**laps** (PodiumLapsAPI): API object for lap requests.
**alertmessages** (PodiumAlertMessagesAPI): API object for alertmessage requests.
"""
def __init__(self, token):
self.token = token
self.account = PodiumAccountAPI(token)
self.events = PodiumEventsAPI(token)
self.devices = PodiumDevicesAPI(token)
self.friendships = PodiumFriendshipsAPI(token)
self.users = PodiumUsersAPI(token)
self.eventdevices = PodiumEventDevicesAPI(token)
self.laps = PodiumLapsAPI(token)
self.alertmessages = PodiumAlertMessagesAPI(token)
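# Illustrative usage sketch (an assumption for documentation purposes, not
# part of the library): shows how a caller might construct a PodiumAPI from a
# previously obtained PodiumToken and issue an asynchronous request with
# callbacks, as described in the class docstring above. The endpoint argument
# and the callback bodies below are placeholders.
def _example_list_laps(token, laps_endpoint):
    """Hypothetical helper: request a page of laps and report the outcome."""
    def on_success(paged_response):
        # Called with a PodiumPagedResponse once the request completes.
        print('Received laps page:', paged_response)
    def on_failure(failure_type, result, data):
        # failure_type is either 'error' or 'failure'.
        print('Laps request failed:', failure_type, result)
    api = PodiumAPI(token)
    # The request is asynchronous; results arrive via the callbacks above.
    return api.laps.list(
        laps_endpoint,
        success_callback=on_success,
        failure_callback=on_failure)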
class PodiumLapsAPI(object):
"""
Object that handles lap requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of laps.
Args:
endpoint (str): The endpoint to make the request to.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
start (int): Starting index for the laps list. 0 indexed.
per_page (int): Number per page of results, max of 100.
Return:
UrlRequest: The request being made.
"""
return make_laps_get(self.token, *args, **kwargs)
def get(self, *args, **kwargs):
"""
Request that returns a PodiumLap that represents a specific
lap found at the URI.
Args:
endpoint (str): The URI for the lap.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumLap)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
return make_lap_get(self.token, *args, **kwargs)
class PodiumEventDevicesAPI(object):
"""
Object that handles event-device requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of event devices.
By default a get request to
'https://podium.live/api/v1/events/{event_id}/devices' will be made.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict),
data (dict))
Values for failure type are: 'error', 'failure'.
Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int),
data (dict))
Defaults to None.
start (int): Starting index for events list. 0 indexed.
per_page (int): Number per page of results, max of 100.
endpoint (str): If provided this endpoint will be used instead
of the default:
'https://podium.live/api/v1/events/{event_id}/devices'
event_id (int): If an endpoint is not provided you should
provide the id of the event for which you want to look up
the devices.
Return:
UrlRequest: The request being made.
"""
make_eventdevices_get(self.token, *args, **kwargs)
def create(self, *args, **kwargs):
"""
Request that creates a new PodiumEventDevice.
The uri for the newly created event device will be provided to the
redirect_callback if one is provided in the form of a PodiumRedirect.
Args:
event_id (int): Id of the event to add the device to.
device_id (int): Id of the device to add to the event.
name (str): Name of the device for this particular event, allows for
car number/name to change between events. If blank/missing, will
default to device name.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), data (dict))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_eventdevice_create(self.token, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Request that updates a PodiumEventDevice.
Args:
eventdevice_uri (str): URI for the eventdevice you are updating.
Kwargs:
name (str): Name of the device for this particular event, allows for
car number/name to change between events. If blank/missing, will
default to device name.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), updated_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_eventdevice_update(self.token, *args, **kwargs)
def get(self, *args, **kwargs):
"""
Request that returns a PodiumEventDevice for the provided
eventdevice_uri
Args:
eventdevice_uri (str): URI for the eventdevice you want.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumEventDevice)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_eventdevice_get(self.token, *args, **kwargs)
def delete(self, *args, **kwargs):
"""
Deletes the device for the provided URI.
Args:
eventdevice_uri (str): URI for the eventdevice you want.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(deleted_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_eventdevice_delete(self.token, *args, **kwargs)
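# Illustrative usage sketch (not part of the client itself): how the
# event-device calls above might be driven once a PodiumToken has been
# obtained elsewhere. The ids and name below are placeholders.
#
#   eventdevices = PodiumEventDevicesAPI(token)
#   eventdevices.list(event_id=1234,
#                     success_callback=lambda paged: print(paged),
#                     failure_callback=lambda kind, result, data: print(kind, result))
#   eventdevices.create(event_id=1234, device_id=42, name='Car 7')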
class PodiumUsersAPI(object):
"""
Object that handles user requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def get(self, *args, **kwargs):
"""
Returns a PodiumUser object found at the uri provided in the endpoint
arg.
Args:
endpoint (str): The URI to make the request to. Typically should be
provided by some api object.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to False
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(user (PodiumUser))
Defaults to None.
failure_callback (function): Callback for redirects, failures, and
errors. Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'redirect', 'failure'.
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_user_get(self.token, *args, **kwargs)
class PodiumFriendshipsAPI(object):
"""
Object that handles friendship requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def get(self, *args, **kwargs):
"""
Request that returns a PodiumFriendship that represents a specific
friendship found at the URI.
Args:
endpoint (str): The URI for the friendship.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumFriendship)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_friendship_get(self.token, *args, **kwargs)
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of friendships.
Args:
endpoint (str): The endpoint to make the request to.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
start (int): Starting index for events list. 0 indexed.
per_page (int): Number per page of results, max of 100.
Return:
UrlRequest: The request being made.
"""
make_friendships_get(self.token, *args, **kwargs)
def create(self, *args, **kwargs):
"""
Request that adds a friendship for the user whose token is in use.
The uri for the newly created event will be provided to the
redirect_callback if one is provided in the form of a PodiumRedirect.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), data (dict))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_friendship_create(self.token, *args, **kwargs)
def delete(self, *args, **kwargs):
"""
Deletes the friendship for the provided URI.
Args:
friendship_uri (str): URI for the friendship you want to delete.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(deleted_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_friendship_delete(self.token, *args, **kwargs)
class PodiumAccountAPI(object):
"""
Object that handles account requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def get(self, *args, **kwargs):
"""
Request that returns the account for the provided authentication token.
Hits the api/v1/account endpoint with a GET request.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to False
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(account (PodiumAccount))
Defaults to None.
failure_callback (function): Callback for redirects, failures, and
errors. Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'redirect', 'failure'.
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_account_get(self.token, *args, **kwargs)
class PodiumDevicesAPI(object):
"""
Object that handles device requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def create(self, *args, **kwargs):
"""
Request that creates a new PodiumDevice.
The uri for the newly created event will be provided to the
redirect_callback if one is provided in the form of a PodiumRedirect.
Args:
name(str): Name of the device.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), data (dict))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_device_create(self.token, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Request that updates a PodiumDevice
Args:
device_uri (str): URI for the device you are updating.
Kwargs:
name(str): Name of the device.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), updated_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_device_update(self.token, *args, **kwargs)
def get(self, *args, **kwargs):
"""
Request that returns a PodiumDevice for the provided device_uri
Args:
device_uri (str): URI for the device you want.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumDevice)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_device_get(self.token, *args, **kwargs)
def delete(self, *args, **kwargs):
"""
Deletes the device for the provided URI.
Args:
device_uri (str): URI for the device you want.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(deleted_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_device_delete(self.token, *args, **kwargs)
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of PodiumDevice.
Args:
endpoint (str): the endpoint to make the request to.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict),
data (dict))
Values for failure type are: 'error', 'failure'.
Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int),
data (dict))
Defaults to None.
start (int): Starting index for events list. 0 indexed.
per_page (int): Number per page of results, max of 100.
Return:
UrlRequest: The request being made.
"""
make_devices_get(self.token, *args, **kwargs)
class PodiumEventsAPI(object):
"""
Object that handles event requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of events.
By default a get request to
'https://podium.live/api/v1/events' will be made.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
start (int): Starting index for events list. 0 indexed.
per_page (int): Number per page of results, max of 100.
endpoint (str): If provided, the start, per_page, expand, and quiet
params will not be used; instead the request will be made against the
provided endpoint.
Return:
UrlRequest: The request being made.
"""
make_events_get(self.token, *args, **kwargs)
def get(self, *args, **kwargs):
"""
Request that returns a PodiumEvent for the provided event_uri.
Args:
event_uri (str): URI for the event you want.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumEvent)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_event_get(self.token, *args, **kwargs)
def delete(self, *args, **kwargs):
"""
Deletes the event for the provided URI.
Args:
event_uri (str): URI for the event you want.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(deleted_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_event_delete(self.token, *args, **kwargs)
def create(self, *args, **kwargs):
"""
Request that creates a new PodiumEvent.
The uri for the newly created event will be provided to the
redirect_callback if one is provided in the form of a PodiumRedirect.
Args:
title (str): Title for the event.
start_time (str): Starting time, use ISO 8601 format.
end_time (str): Ending time, use ISO 8601 format.
Kwargs:
venue_id (str): ID for the venue of the event.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), data (dict))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_event_create(self.token, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Request that updates a PodiumEvent.
The uri for the newly created event will be provided to the
redirect_callback if one is provided in the form of a PodiumRedirect.
Args:
event_uri (str): URI for the event you are updating.
Kwargs:
venue_id (str): ID for the venue of the event.
title (str): Title for the event.
start_time (str): Starting time, use ISO 8601 format.
end_time (str): Ending time, use ISO 8601 format.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), updated_uri (str))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_event_update(self.token, *args, **kwargs)
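# Illustrative usage sketch (not part of the client itself): creating and
# listing events with the wrapper above. The title and ISO 8601 times are
# placeholders; the callbacks follow the signatures documented in the
# docstrings.
#
#   events = PodiumEventsAPI(token)
#   events.create(title='Track Day',
#                 start_time='2021-05-01T09:00:00Z',
#                 end_time='2021-05-01T17:00:00Z',
#                 redirect_callback=lambda redirect: print(redirect))
#   events.list(per_page=25,
#               success_callback=lambda paged: print(paged))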
class PodiumAlertMessagesAPI(object):
"""
Object that handles alert message requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of alert messages.
By default a GET request to the alert messages listing endpoint
will be made.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
start (int): Starting index for events list. 0 indexed.
per_page (int): Number per page of results, max of 100.
endpoint (str): If provided, the start, per_page, expand, and quiet
params will not be used; instead the request will be made against the
provided endpoint.
Return:
UrlRequest: The request being made.
"""
make_alertmessages_get(self.token, *args, **kwargs)
def get(self, *args, **kwargs):
"""
Request that returns an AlertMessage for the provided alertmessage_uri.
Args:
endpoint (str): URI for the alertmessage you want.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumAlertMessage)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_alertmessage_get(self.token, *args, **kwargs)
def create(self, *args, **kwargs):
"""
Request that creates a new PodiumAlertMessage.
The uri for the newly created alertmessage will be provided to the
redirect_callback if one is provided in the form of a PodiumRedirect.
Args:
message (str): Message text for the alert message.
priority (int): Priority of the alert message.
Kwargs:
success_callback (function): Callback for a successful request,
will have the signature:
on_success(result (dict), data (dict))
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(redirect_object (PodiumRedirect))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_alertmessage_create(self.token, *args, **kwargs)
class PodiumVenuesAPI(object):
"""
Object that handles venue requests and keeps track of the
authentication token necessary to do so. Usually accessed via
PodiumAPI object.
**Attributes:**
**token** (PodiumToken): The token for the logged in user.
"""
def __init__(self, token):
self.token = token
def list(self, *args, **kwargs):
"""
Request that returns a PodiumPagedRequest of venues.
By default a GET request to the venues listing endpoint
will be made.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumPagedResponse)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
start (int): Starting index for events list. 0 indexed.
per_page (int): Number per page of results, max of 100.
endpoint (str): If provided, the start, per_page, expand, and quiet
params will not be used; instead the request will be made against the
provided endpoint.
Return:
UrlRequest: The request being made.
"""
make_venues_get(self.token, *args, **kwargs)
def get(self, *args, **kwargs):
"""
Request that returns a Venue for the provided endpoint.
Args:
endpoint (str): URI for the venue you want.
Kwargs:
expand (bool): Expand all objects in response output.
Defaults to True
quiet (object): If not None HTML layout will not render endpoint
description. Defaults to None.
success_callback (function): Callback for a successful request,
will have the signature:
on_success(PodiumVenue)
Defaults to None.
failure_callback (function): Callback for failures and errors.
Will have the signature:
on_failure(failure_type (string), result (dict), data (dict))
Values for failure type are: 'error', 'failure'. Defaults to None.
redirect_callback (function): Callback for redirect,
Will have the signature:
on_redirect(result (dict), data (dict))
Defaults to None.
progress_callback (function): Callback for progress updates,
will have the signature:
on_progress(current_size (int), total_size (int), data (dict))
Defaults to None.
Return:
UrlRequest: The request being made.
"""
make_venue_get(self.token, *args, **kwargs)
|
#!/usr/bin/env python
try:
# <= python 2.5
import simplejson as json
except ImportError:
# >= python 2.6
import json
versions = ['2.0.0','2.0.1', '2.0.2', '2.1.0', '2.1.1', '2.3.0', 'latest']
for v in versions:
    print('-- testing %s/reference.json' % v)
reference = json.load(open('%s/reference.json' % v, 'r'))
assert reference
assert reference['version'] == v,"%s not eq to %s" % (reference['version'],v)
for sym in reference['symbolizers'].items():
assert sym[1]
for i in sym[1].items():
if sym[0] not in ['map','*']:
group_name = sym[0]
if group_name == 'markers':
group_name = 'marker'
css_name = i[1]['css']
assert group_name in css_name, "'%s' not properly prefixed by '%s'" % (css_name,group_name)
assert 'type' in i[1].keys(), '%s: type not in %s' % (sym[0], i[0])
assert 'doc' in i[1].keys(), '%s: doc string not in %s' % (sym[0], i[0])
assert 'css' in i[1].keys(), '%s: css not in %s' % (sym[0], i[0])
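# For orientation, the assertions above imply each reference.json looks roughly
# like the following (the property names and doc strings here are made up):
#
#   {
#     "version": "2.1.0",
#     "symbolizers": {
#       "line": {
#         "stroke-width": {"css": "line-width", "type": "float", "doc": "..."}
#       }
#     }
#   }
#
# i.e. every property carries 'css', 'type' and 'doc' keys, and the css name is
# prefixed with its group name (with the 'markers' group mapping to a 'marker'
# prefix).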
|
import unittest
import faker
from aiohttp.test_utils import unittest_run_loop
from ..test_case import AuthenticatedClericusTestCase
fake = faker.Faker()
class LoginTestCase(AuthenticatedClericusTestCase):
@unittest_run_loop
async def testLogin(self):
resp = await self.client.request("GET", "/me/")
# not logged in
self.assertEqual(resp.status, 401)
data = await resp.json()
user = {
"username": fake.user_name(),
"email": fake.email(),
"password": fake.password(),
}
resp = await self.client.request(
"POST",
"/sign-up/",
json=user,
)
# sign up
self.assertEqual(resp.status, 200)
resp = await self.client.request("GET", "/me/")
# logged in from signup
self.assertEqual(resp.status, 200)
data = await resp.json()
self.assertEqual(data["currentUser"]["username"], user["username"])
resp = await self.client.request("GET", "/log-out/")
# log out
self.assertEqual(resp.status, 200)
data = await resp.json()
# logged out from log-out
resp = await self.client.request("GET", "/me/")
self.assertEqual(resp.status, 401)
resp = await self.client.request(
"POST",
"/log-in/",
json={
"email": user["email"],
"password": user["password"],
},
)
# log in
self.assertEqual(resp.status, 200)
data = await resp.json()
# logged in successfully
resp = await self.client.request("GET", "/me/")
self.assertEqual(resp.status, 200)
@unittest_run_loop
async def testInvalidPassword(self):
user = {
"username": fake.user_name(),
"email": fake.email(),
"password": fake.password(),
}
resp = await self.client.request(
"POST",
"/sign-up/",
json=user,
)
# sign up
self.assertEqual(resp.status, 200)
resp = await self.client.request("GET", "/log-out/")
# log out
self.assertEqual(resp.status, 200)
resp = await self.client.request(
"POST",
"/log-in/",
json={
"email": user["email"],
"password": user["password"] + "moo",
},
)
# log in
self.assertEqual(resp.status, 401)
data = await resp.json()
# login rejected, still not logged in
resp = await self.client.request("GET", "/me/")
self.assertEqual(resp.status, 401)
@unittest_run_loop
async def testInvalidEmail(self):
user = {
"username": fake.user_name(),
"email": fake.email(),
"password": fake.password(),
}
resp = await self.client.request(
"POST",
"/sign-up/",
json=user,
)
# sign up
self.assertEqual(resp.status, 200)
resp = await self.client.request("GET", "/log-out/")
# log out
self.assertEqual(resp.status, 200)
resp = await self.client.request(
"POST",
"/log-in/",
json={
"email": fake.email(),
"password": user["password"],
},
)
# log in
self.assertEqual(resp.status, 401)
data = await resp.json()
# login rejected, still not logged in
resp = await self.client.request("GET", "/me/")
self.assertEqual(resp.status, 401)
@unittest_run_loop
async def testEmptyBody(self):
resp = await self.client.request(
"POST",
"/sign-up/",
json={},
)
# sign up
self.assertEqual(resp.status, 422)
body = await resp.json()
self.assertEqual(len(body["errors"]), 3)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
from math import ceil
import numpy as np
from opensoundscape.spectrogram import Spectrogram
import torch
from torchvision import transforms
def split_audio(audio_obj, seg_duration=5, seg_overlap=1):
    # Split the audio into seg_duration-second segments that overlap by
    # seg_overlap seconds; the final segment is aligned to the end of the clip.
duration = audio_obj.duration()
times = np.arange(0.0, duration, duration / audio_obj.samples.shape[0])
num_segments = ceil((duration - seg_overlap) / (seg_duration - seg_overlap))
outputs = [None] * num_segments
for idx in range(num_segments):
if idx == num_segments - 1:
end = duration
begin = end - seg_duration
else:
begin = seg_duration * idx - seg_overlap * idx
end = begin + seg_duration
audio_segment_obj = audio_obj.trim(begin, end)
outputs[idx] = audio_segment_obj
return outputs
class BasicDataset(torch.utils.data.Dataset):
def __init__(self, images):
self.images = images
self.mean = torch.tensor([0.5 for _ in range(3)])
self.stddev = torch.tensor([0.5 for _ in range(3)])
self.transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(self.mean, self.stddev)]
)
def __len__(self):
return len(self.images)
def __getitem__(self, item_idx):
img = self.images[item_idx]
return {"X": self.transform(img)}
|
from .api import *
from .db import *
from .utils import *
from .domain import *
|
from sidmpy.CrossSections.cross_section import InteractionCrossSection
class VelocityIndependentCrossSection(InteractionCrossSection):
def __init__(self, norm):
"""
This class implements a velocity-independent cross section with a constant value specified by norm
:param norm: the cross section normalization in cm^2 / gram
"""
super(VelocityIndependentCrossSection, self).__init__(norm, self._velocity_dependence_kernel)
def _velocity_dependence_kernel(self, v):
return 1.
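# Illustrative note: because the velocity kernel above is identically 1, this
# cross section evaluates to the constant `norm` at every relative velocity, e.g.
#
#   sigma = VelocityIndependentCrossSection(norm=1.0)  # 1 cm^2 / gram at all v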
|
import os
import errno
from io import BytesIO
import time
from flask import abort, current_app, jsonify, request, url_for
from flask.views import MethodView
from werkzeug.exceptions import NotFound, Forbidden
from werkzeug.urls import url_quote
from ..constants import COMPLETE, FILENAME, SIZE
from ..utils.date_funcs import get_maxlife
from ..utils.http import ContentRange, redirect_next
from ..utils.name import ItemName
from ..utils.permissions import CREATE, may
from ..utils.upload import Upload, create_item, background_compute_hash
class UploadView(MethodView):
def post(self):
if not may(CREATE):
raise Forbidden()
f = request.files.get('file')
t = request.form.get('text')
# note: "and f.filename" is needed due to missing __bool__ method in
# werkzeug.datastructures.FileStorage, to work around it crashing
# on Python 3.x.
if f and f.filename:
# Check Content-Range, disallow its usage
if ContentRange.from_request():
abort(416)
# Check Content-Type, default to application/octet-stream
content_type = (
f.headers.get('Content-Type') or
request.headers.get('Content-Type'))
content_type_hint = 'application/octet-stream'
filename = f.filename
# Get size of temporary file
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0)
elif t is not None:
# t is already unicode, but we want utf-8 for storage
t = t.encode('utf-8')
content_type = request.form.get('contenttype') # TODO: add coding
content_type_hint = 'text/plain'
size = len(t)
f = BytesIO(t)
filename = request.form.get('filename')
else:
raise NotImplementedError
# set max lifetime
maxtime = get_maxlife(request.form, underscore=False)
maxlife_timestamp = int(time.time()) + maxtime if maxtime > 0 else maxtime
name = create_item(f, filename, size, content_type, content_type_hint, maxlife_stamp=maxlife_timestamp)
kw = {}
kw['_anchor'] = url_quote(filename)
if content_type == 'text/x-bepasty-redirect':
# after creating a redirect, we want to stay on the bepasty
# redirect display, so the user can copy the URL.
kw['delay'] = '9999'
return redirect_next('bepasty.display', name=name, **kw)
class UploadNewView(MethodView):
def post(self):
if not may(CREATE):
raise Forbidden()
data = request.get_json()
data_filename = data['filename']
data_size = int(data['size'])
data_type = data['type']
# set max lifetime
maxtime = get_maxlife(data, underscore=True)
maxlife_timestamp = int(time.time()) + maxtime if maxtime > 0 else maxtime
name = ItemName.create(current_app.storage)
with current_app.storage.create(name, data_size) as item:
# Save meta-data
Upload.meta_new(item, data_size, data_filename, data_type,
'application/octet-stream', name, maxlife_stamp=maxlife_timestamp)
return jsonify({'url': url_for('bepasty.upload_continue', name=name),
'name': name})
class UploadContinueView(MethodView):
def post(self, name):
if not may(CREATE):
raise Forbidden()
f = request.files['file']
if not f:
raise NotImplementedError
# Check Content-Range
content_range = ContentRange.from_request()
with current_app.storage.openwrite(name) as item:
if content_range:
# note: we ignore the hash as it is only for 1 chunk, not for the whole upload.
# also, we can not continue computing the hash as we can't save the internal
# state of the hash object
size_written, _ = Upload.data(item, f, content_range.size, content_range.begin)
file_hash = ''
is_complete = content_range.is_complete
else:
# Get size of temporary file
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0)
size_written, file_hash = Upload.data(item, f, size)
is_complete = True
if is_complete:
Upload.meta_complete(item, file_hash)
result = jsonify({'files': [{
'name': name,
'filename': item.meta[FILENAME],
'size': item.meta[SIZE],
'url': "{}#{}".format(url_for('bepasty.display', name=name), item.meta[FILENAME]),
}]})
if is_complete and not file_hash:
background_compute_hash(current_app.storage, name)
return result
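# Rough client-side flow implied by the two views above (a sketch, not an
# official protocol description):
#   1. POST JSON {"filename": ..., "size": ..., "type": ...} to the
#      UploadNewView route; the response carries the continue URL and the
#      item name.
#   2. POST the file data (optionally in chunks, each with a Content-Range
#      header) to that URL. Without Content-Range the whole file is written
#      and hashed in one request; with Content-Range the hash is computed in
#      the background once the final chunk marks the upload complete.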
class UploadAbortView(MethodView):
def get(self, name):
if not may(CREATE):
raise Forbidden()
try:
item = current_app.storage.open(name)
except OSError as e:
if e.errno == errno.ENOENT:
return 'No file found.', 404
raise
if item.meta[COMPLETE]:
error = 'Upload complete. Cannot delete fileupload garbage.'
else:
error = None
if error:
return error, 409
try:
item = current_app.storage.remove(name)
except OSError as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
return 'Upload aborted'
|
import re
def check_domain(url):
    # Raw string so the regex backslashes are not interpreted as string escapes.
    return re.search(r'(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9][/]', url).group(0)
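# Illustrative behaviour (not part of the original module):
#   check_domain('https://example.com/path')  ->  'example.com/'
# Note: the pattern requires a '/' after the domain, so a URL without one
# produces no match and .group(0) raises AttributeError.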
|
# -*- coding: UTF-8 -*-
from django.contrib import admin
from .models import Article, Category, Tag, BlogComment
from pagedown.widgets import AdminPagedownWidget
from django import forms
# from DjangoUeditor.forms import UEditorField
class ArticleForm(forms.ModelForm):
body = forms.CharField(widget=AdminPagedownWidget())
# body = UEditorField('content',height=100,width=500,imagePath="upload/thumbnail/",toolbars='mini',filePath='upload')
class Meta:
model = Article
fields = '__all__'
class ArticleAdmin(admin.ModelAdmin):
form = ArticleForm
# class ArticleAdmin(admin.ModelAdmin):
# # fields = ['title', 'body', 'thumbnail', 'status', 'abstract', 'navigation', 'category', 'tags']
# fields = '__all__'
admin.site.register(Article, ArticleAdmin)
class CategoryAdmin(admin.ModelAdmin):
fields = ['name']
admin.site.register(Category, CategoryAdmin)
class TagAdmin(admin.ModelAdmin):
fields = ['name']
admin.site.register(Tag, TagAdmin)
class BlogCommentAdmin(admin.ModelAdmin):
fields = ['user_name', 'user_email', 'body']
admin.site.register(BlogComment, BlogCommentAdmin)
|
from pprint import pprint
import asyncio
from panoramisk import Manager
async def extension_status():
manager = Manager(loop=asyncio.get_event_loop(),
host='127.0.0.1', port=5038,
username='username', secret='mysecret')
await manager.connect()
action = {
'Action': 'ExtensionState',
'Exten': '2001',
'Context': 'default',
}
extension = await manager.send_action(action)
pprint(extension)
manager.close()
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(extension_status())
loop.close()
if __name__ == '__main__':
main()
|
import json, os
from os.path import relpath
class JSONOutput:
def __init__(self, machine):
self.machine = machine
self.settings = machine.settings
self.logger = self.settings['logger']
def export(self):
img_obj = self.load_machine_image()
# add machine properties to it
self.logger.info('updating image objects with machine (%s) element info' %
self.machine.title)
for e in self.machine.elements:
img_obj = self.update_image_from_element(img_obj, e)
if not self.machine.toplevel and self.machine.is_mem:
for e in self.machine.mem_elements:
img_obj = self.update_image_from_element(img_obj, e)
# add sub-machines to it
img_obj['ext_objects'] = []
for e in self.machine.ext_elements:
# update submachine titles in this machine
self.update_pattern_from_text(img_obj, e, self.machine.ext_elements[e].title)
img_obj['ext_objects'] += [JSONOutput(self.machine.ext_elements[e]).export()]
return img_obj
def update_pattern_from_text(self, img, pattern, value):
for o in img['objects']:
if o['type'] == 'text':
if o['string'] == pattern:
o['string'] = value
def update_image_from_element(self, img, e):
for o in img['objects']:
o['can_toggle'] = e.can_toggle
o['toggled'] = False
if o['type'] == 'text':
if o['string'] == '%TITLE%': # special case for the title
o['string'] = self.machine.title
elif o['string'] == e.name:
o['metaname'] = o['string']
o['string'] = e.value
if e.changed: o['changed'] = True
else:
                if 'metaname' in o and o['metaname'] == e.name:
if e.changed: o['changed'] = True
return img
def load_machine_image(self):
# find machine image and load it
image_filename = os.path.join(self.machine.dirname, self.machine.image)
if os.access(image_filename, os.R_OK):
with open(image_filename, 'r') as f: img_obj = json.load(f)
self.logger.info('loaded machine image from %s' % relpath(image_filename))
else:
image_filename = os.path.join(self.settings['images'], self.machine.image)
if os.access(image_filename, os.R_OK):
with open(image_filename, 'r') as f: img_obj = json.load(f)
self.logger.info('loaded machine image from %s' % relpath(image_filename))
else:
self.logger.die('cannot open image file %s' % self.machine.image)
return img_obj
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: fzk
# @Time 10:59
import json
from flask import request, render_template, flash
from app.web.blueprint import web
from app.forms.book import SearchForm
from app.kernel.param_deal import FishBook
from app.view_models.book import BookViewModel
@web.route('/book/search')
def search():
form = SearchForm(request.values)
if form.validate():
q = form.q.data.strip()
page = form.page.data
books = FishBook()
books.search_data(query=q, page=page)
return render_template('search_result.html', books=books)
# result = BookViewModel.package_collection(q, fb)
else:
        flash('Invalid parameters, please try again')
return render_template('search_result.html', books=[])
@web.route('/book/<isbn>/detail')
def book_detail(isbn):
book = FishBook()
book.search_data(isbn)
result = BookViewModel(book.first)
return render_template('book_detail.html', book=result, wishes=[], gifts=[])
|
import ply.yacc as yacc
import sys
from Utils.Cool.ast import *
from Sintax.lexer import create_lexer
#-------------------------Parser----------------------------------#
class CoolParsX(object):
def __init__(self):
self.tokens = None
self.lexer = None
self.parser = None
self.error_list = []
#-----------------------Grammar Rules----------------------------#
def p_program(self, p):
"""
program : classes
"""
p[0] = Program(classes = p[1])
def p_classes(self, p):
"""
classes : classes class SEMICOLON
| class SEMICOLON
"""
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_class(self, p):
"""
class : CLASS TYPE LBRACE features_list_init RBRACE
"""
p[0] = Class(name = p[2], parent = "Object", feature_list = p[4])
def p_class_inherits(self, p):
"""
class : CLASS TYPE INHERITS TYPE LBRACE features_list_init RBRACE
"""
p[0] = Class(name = p[2], parent = p[4], feature_list = p[6])
def p_feature_list_init(self, p):
"""
features_list_init : features_list
| empty
"""
p[0] = [] if p.slice[1].type == "empty" else p[1]
def p_feature_list(self, p):
"""
features_list : features_list feature SEMICOLON
| feature SEMICOLON
"""
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_feature_method(self, p):
"""
feature : ID LPAREN params_list RPAREN COLON TYPE LBRACE expression RBRACE
"""
p[0] = ClassMethod(name=p[1], params=p[3], return_type=p[6], body=p[8])
def p_feature_method_no_params(self, p):
"""
feature : ID LPAREN RPAREN COLON TYPE LBRACE expression RBRACE
"""
p[0] = ClassMethod(name=p[1], params = list(), return_type = p[5], body = p[7])
def p_feature_attribute_initialized(self, p):
"""
feature : ID COLON TYPE ASSIGN expression
"""
p[0] = ClassAttribute(name = p[1], attribute_type = p[3], initializer_expr = p[5])
def p_feature_attr(self, p):
"""
feature : ID COLON TYPE
"""
p[0] = ClassAttribute(name=p[1], attribute_type=p[3], initializer_expr = None)
def p_params_list(self, p):
"""
params_list : params_list COMMA params
| params
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_param(self, p):
"""
params : ID COLON TYPE
"""
p[0] = Parameter(name=p[1], p_type=p[3])
def p_expression_object_identifier(self, p):
"""
expression : ID
"""
p[0] = Object(name = p[1])
# def p_expression_self_type(self, p):
# """
# expression : SELF_TYPE
# """
# p[0] = SelfType()
def p_expression_integer(self, p):
"""
expression : INTEGER
"""
p[0] = Integer(value=p[1])
def p_expression_boolean(self, p):
"""
expression : TRUE
expression : FALSE
"""
p[0] = Boolean(value=p[1])
def p_expression_string(self, p):
"""
expression : STRING
"""
p[0] = String(value=p[1])
def p_expr_self(self, p):
"""
expression : SELF
"""
p[0] = Self()
def p_expr_block(self, p):
"""
expression : LBRACE block RBRACE
"""
p[0] = Block(expr_block=p[2])
# def p_block_init(self,p):
# """
# iblock : block
# """
# p[0] = Block(p[1])
# def p_block_init_expr(self,p):
# """
# iblock : expression
# """
# p[0] = p[1]
def p_block(self, p):
"""
block : block expression SEMICOLON
| expression SEMICOLON
"""
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_expr_assignment(self, p):
"""
expression : ID ASSIGN expression
"""
p[0] = Assingment(object_inst = Object(name=p[1]), expr = p[3])
def p_expr_dispatch(self, p):
"""
expression : expression DOT ID LPAREN arguments_list_init RPAREN
"""
p[0] = DynamicDispatch(object_inst = p[1], method = p[3], params = p[5])
def p_arguments_list_init(self, p):
"""
arguments_list_init : arguments_list
| empty
"""
p[0] = list() if p.slice[1].type == "empty" else p[1]
def p_arguments_list(self, p):
"""
arguments_list : arguments_list COMMA expression
| expression
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_expr_static_dispatch(self, p):
"""
expression : expression AT TYPE DOT ID LPAREN arguments_list_init RPAREN
"""
p[0] = StaticDispatch(object_inst=p[1], obj_type=p[3], method=p[5], params=p[7])
def p_expr_self_dispatch(self, p):
"""
expression : ID LPAREN arguments_list_init RPAREN
"""
p[0] = DynamicDispatch(object_inst = Object("self"), method = p[1], params = p[3])
def p_expr_math_operations(self, p):
"""
expression : expression PLUS expression
| expression MINUS expression
| expression MULTIPLY expression
| expression DIVIDE expression
"""
if p[2] == '+':
p[0] = Add(left=p[1], right=p[3])
elif p[2] == '-':
p[0] = Sub(left=p[1], right=p[3])
elif p[2] == '*':
p[0] = Mul(left=p[1], right=p[3])
elif p[2] == '/':
p[0] = Div(left=p[1], right=p[3])
def p_expr_math_comparisons(self, p):
"""
expression : expression LT expression
| expression LTEQ expression
| expression EQ expression
"""
if p[2] == '<':
p[0] = LessThan(left=p[1], right=p[3])
elif p[2] == '<=':
p[0] = LessThanOrEqual(left=p[1], right=p[3])
elif p[2] == '=':
p[0] = Equal(left=p[1], right=p[3])
def p_expr_with_parenthesis(self, p):
"""
expression : LPAREN expression RPAREN
"""
p[0] = p[2]
def p_expr_if_conditional(self, p):
"""
expression : IF expression THEN expression ELSE expression FI
"""
p[0] = If(predicate = p[2], then_body = p[4], else_body = p[6])
def p_expr_while_loop(self, p):
"""
expression : WHILE expression LOOP expression POOL
"""
p[0] = WhileLoop(predicate = p[2], body = p[4])
def p_expr_let(self, p):
"""
expression : let_expression
"""
p[0] = p[1]
def p_expr_let_heads(self, p): #new 1
"""
let_expression_heads : let_expression_head_i COMMA let_expression_heads
| let_expression_head COMMA let_expression_heads
"""
p[0] = [p[1]] + p[3]
def p_expr_let_heads_end(self, p): #new 1
"""
let_expression_heads : let_expression_head_i
| let_expression_head
"""
p[0] = [p[1]]
def p_expr_let_head_i(self, p): #new 1
"""
let_expression_head_i : ID COLON TYPE ASSIGN expression
"""
p[0] = Let(obj_inst = p[1], return_type = p[3], init_expr = p[5], body = None)
def p_expr_let_head(self, p): #new 1
"""
let_expression_head : ID COLON TYPE
"""
p[0] = Let(obj_inst = p[1], return_type = p[3], init_expr = None, body = None)
def p_expr_let_simple(self, p): #updated 1
"""
let_expression : LET ID COLON TYPE COMMA let_expression_heads IN expression
| LET ID COLON TYPE IN expression
"""
if p[5] == ",":
p[0] = Let(obj_inst = p[2], return_type = p[4], init_expr = None, body = p[8], nested_lets = p[6])
else:
p[0] = Let(obj_inst = p[2], return_type = p[4], init_expr = None, body = p[6])
def p_expr_let_initialized(self, p): #updated
"""
let_expression : LET ID COLON TYPE ASSIGN expression COMMA let_expression_heads IN expression
| LET ID COLON TYPE ASSIGN expression IN expression
"""
if p[7] == ",":
p[0] = Let(obj_inst = p[2], return_type = p[4], init_expr = p[6], body = p[10], nested_lets = p[8])
else:
p[0] = Let(obj_inst = p[2], return_type = p[4], init_expr = p[6], body = p[8])
def p_expr_case(self, p):
"""
expression : CASE expression OF actions ESAC
"""
p[0] = Case(expr=p[2], actions=p[4])
def p_actions_list(self, p):
"""
actions : actions action
| action
"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_action_expr(self, p):
"""
action : ID COLON TYPE ARROW expression SEMICOLON
"""
p[0] = Action(r_object = p[1],r_type = p[3], expr = p[5])
def p_expr_new(self, p):
"""
expression : NEW TYPE
"""
p[0] = New(new_object_type = p[2])
def p_expr_isvoid(self, p):
"""
expression : ISVOID expression
"""
p[0] = IsVoid(p[2])
def p_expr_integer_complement(self, p):
"""
expression : INT_COMP expression
"""
p[0] = IntegerComplement(p[2])
def p_expr_boolean_complement(self, p):
"""
expression : NOT expression
"""
p[0] = BooleanComplement(p[2])
def p_empty(self, p):
"""
empty :
"""
p[0] = None
def p_error(self, p):
"""
Error rule for Syntax Errors handling and reporting.
"""
if p is None:
print("Error! Unexpected end of input!")
else:
error = "Syntax error! Line: {}, position: {}, character: {}, type: {}".format(
p.lineno, p.lexpos, p.value, p.type)
self.error_list.append(error)
self.parser.errok()
def build(self, lexer = None):
"""
if no lexer is provided a new one will be created
"""
if not lexer:
self.lexer = create_lexer()
else:
self.lexer = lexer
self.tokens = self.lexer.tokens
self.parser = yacc.yacc(module = self)
def parse(self, program_source_code):
if self.parser is None:
            raise ValueError("Parser was not built; build it first with the build() method.")
return self.parser.parse(program_source_code)
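if __name__ == '__main__':
    # Minimal usage sketch (assumes create_lexer wires up a PLY lexer as the
    # imports suggest): build the parser and parse a trivial Cool program.
    # The program text below is only an illustration.
    cool_parser = CoolParsX()
    cool_parser.build()
    ast_root = cool_parser.parse("class Main { main() : Int { 0 }; };")
    print(ast_root)
    print(cool_parser.error_list)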
|
import databases
import sqlalchemy
from fastapi import FastAPI
from ormar import Integer, Model, ModelMeta, String
from pytest import fixture
from fastapi_pagination import LimitOffsetPage, Page, add_pagination
from fastapi_pagination.ext.ormar import paginate
from ..base import BasePaginationTestCase
from ..utils import faker
@fixture(scope="session")
def db(database_url):
return databases.Database(database_url)
@fixture(scope="session")
def meta(database_url):
return sqlalchemy.MetaData()
@fixture(scope="session")
def User(meta, db):
class User(Model):
class Meta(ModelMeta):
database = db
metadata = meta
id = Integer(primary_key=True)
name = String(max_length=100)
return User
@fixture(
scope="session",
params=[True, False],
ids=["model", "query"],
)
def query(request, User):
if request.param:
return User
else:
return User.objects
@fixture(scope="session")
def app(db, meta, User, query, model_cls):
app = FastAPI()
app.add_event_handler("startup", db.connect)
app.add_event_handler("shutdown", db.disconnect)
@app.get("/default", response_model=Page[model_cls])
@app.get("/limit-offset", response_model=LimitOffsetPage[model_cls])
async def route():
return await paginate(query)
return add_pagination(app)
class TestOrmar(BasePaginationTestCase):
@fixture(scope="class")
async def entities(self, User, query, client):
await User.objects.bulk_create(User(name=faker.name()) for _ in range(100))
return await User.objects.all()
|
from tempfile import NamedTemporaryFile
import consts
import pytest
from assisted_service_client.rest import ApiException
from tests.base_test import BaseTest, random_name
class TestGeneral(BaseTest):
def test_create_cluster(self, api_client, cluster):
c = cluster()
assert c.id in map(lambda cluster: cluster['id'], api_client.clusters_list())
assert api_client.cluster_get(c.id)
assert api_client.get_events(c.id)
def test_delete_cluster(self, api_client, cluster):
c = cluster()
assert api_client.cluster_get(c.id)
api_client.delete_cluster(c.id)
assert c.id not in map(lambda cluster: cluster['id'], api_client.clusters_list())
with pytest.raises(ApiException):
assert api_client.cluster_get(c.id)
@pytest.mark.xfail
def test_cluster_unique_name(self, api_client, cluster):
cluster_name = random_name()
_ = cluster(cluster_name)
with pytest.raises(ApiException):
cluster(cluster_name)
def test_discovery(self, api_client, cluster, nodes):
cluster_id = cluster().id
self.generate_and_download_image(cluster_id=cluster_id, api_client=api_client)
nodes.start_all()
self.wait_until_hosts_are_discovered(cluster_id=cluster_id, api_client=api_client)
return cluster_id
def test_select_roles(self, api_client, cluster, nodes):
cluster_id = self.test_discovery(api_client, cluster, nodes)
self.set_host_roles(cluster_id=cluster_id, api_client=api_client)
hosts = api_client.get_cluster_hosts(cluster_id=cluster_id)
for host in hosts:
hostname = host["requested_hostname"]
role = host["role"]
if "master" in hostname:
assert role == consts.NodeRoles.MASTER
elif "worker" in hostname:
assert role == consts.NodeRoles.WORKER
|
def to_html(bibs):
return 'Hello'
|
import sys
def sol():
    # Find the k-th smallest value in the N x N multiplication table by
    # binary-searching the answer: for a candidate value mid, the number of
    # table entries <= mid is sum(min(mid // i, N)) over rows i = 1..N.
    input = sys.stdin.readline
N = int(input())
k = int(input())
left = 1
right = k
ans = 0
while left <= right:
mid = (left + right) // 2
cnt = 0
for i in range(1, N + 1):
cnt += min(mid // i, N)
if cnt >= k:
right = mid - 1
ans = mid
else:
left = mid + 1
print(ans)
if __name__ == "__main__":
sol()
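# Worked example (illustrative): N = 3, k = 7.
# The 3x3 table sorted is 1, 2, 2, 3, 3, 4, 6, 6, 9, so the 7th smallest is 6,
# which is the value the binary search above converges to.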
|
from django.contrib import admin
from .models import (AgentTemplate, SecurityPolicyTemplate, Service,
SecurityPolicy, Customer, Log,
Agent, Algorithm, AlgorithmTemplate)
# class SecurityPolicyInline(admin.TabularInline):
# model = SecurityPolicy
# extra = 3
#
# class ServiceAdmin(admin.ModelAdmin):
# inlines = [SecurityPolicyInline]
# class SecurityPolicyAdmin(admin.ModelAdmin):
# list_display = ('policy_id', 'policy_sla', 'policy_name',
# 'policy_description', 'last_modified')
admin.site.register(Service)
admin.site.register(SecurityPolicy)
admin.site.register(SecurityPolicyTemplate)
admin.site.register(Customer)
admin.site.register(Log)
admin.site.register(Agent)
admin.site.register(AgentTemplate)
admin.site.register(Algorithm)
admin.site.register(AlgorithmTemplate)
|
import random
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import messages
from .models import CaseStudy
# Create your views here.
def index_view(request):
""" Render index page with case study as success story """
template_name = "main.html"
obj = random.choice(CaseStudy.objects.all())
context = {
"object": obj
}
return render(request, template_name, context)
def about_view(request):
""" Render About Us page with a list of staff members """
template_name = "about.html"
object_list = User.objects.filter(is_staff=True)
context = {
"object_list": object_list
}
return render(request, template_name, context)
def detail_view(request, user):
""" Render profile page for staff member.
If no staff member by that username, return user to About page,
show user an error message "no staff member with this username". """
template_name = "staffmember.html"
try:
obj = User.objects.get(username=user)
print(obj)
if obj.is_staff:
context = {
"obj": obj
}
else:
messages.error(
request, 'No staff member with the username <em>' + user + '</em>.')
return redirect("about:about_list")
except User.DoesNotExist:
messages.error(
request, 'No staff member with the username <em>' + user + '</em>.')
return redirect("about:about_list")
return render(request, template_name, context)
def casestudy_list_view(request):
""" Render a list of case studies """
template_name = "casestudies.html"
queryset = CaseStudy.objects.all()
context = {
"queryset": queryset
}
return render(request, template_name, context)
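# Hypothetical URL wiring for the views above; the route names and paths are illustrative
# and not taken from this project's actual urls.py:
#
#   urlpatterns = [
#       path("", index_view, name="index"),
#       path("about/", about_view, name="about_list"),
#       path("about/<str:user>/", detail_view, name="about_detail"),
#       path("case-studies/", casestudy_list_view, name="casestudies"),
#   ]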
|
import os
import unittest
import numpy as np
from numpy.testing import assert_allclose
from gnes.encoder.base import PipelineEncoder
from gnes.encoder.numeric.pca import PCALocalEncoder
from gnes.encoder.numeric.pq import PQEncoder
from gnes.encoder.numeric.tf_pq import TFPQEncoder
class TestPCA(unittest.TestCase):
def setUp(self):
self.test_vecs = np.random.random([1000, 100]).astype('float32')
dirname = os.path.dirname(__file__)
self.lopq_yaml_np = os.path.join(dirname, 'yaml', 'lopq-encoder-2-np.yml')
self.lopq_yaml_tf = os.path.join(dirname, 'yaml', 'lopq-encoder-2-tf.yml')
self.lopq_yaml_np2 = os.path.join(dirname, 'yaml', 'lopq-encoder-3.yml')
def test_pq_assert(self):
self._test_pq_assert(PQEncoder)
self._test_pq_assert(TFPQEncoder)
def test_pq_tfpq_identity(self):
def _test_pq_tfpq_identity(pq1, pq2):
pq1.train(self.test_vecs)
out1 = pq1.encode(self.test_vecs)
pq2._copy_from(pq1)
out2 = pq2.encode(self.test_vecs)
assert_allclose(out1, out2)
_test_pq_tfpq_identity(PQEncoder(10), TFPQEncoder(10))
_test_pq_tfpq_identity(TFPQEncoder(10), PQEncoder(10))
def _test_pq_assert(self, cls):
self.assertRaises(AssertionError, cls, 100, 0)
self.assertRaises(AssertionError, cls, 100, 256)
pq = cls(8)
self.assertRaises(AssertionError, pq.train, self.test_vecs)
pq = cls(101)
self.assertRaises(AssertionError, pq.train, self.test_vecs)
def _simple_assert(self, out, num_bytes, num_clusters):
self.assertEqual(bytes, type(out))
self.assertEqual(self.test_vecs.shape[0] * num_bytes, len(out))
self.assertTrue(np.all(np.frombuffer(out, np.uint8) <= num_clusters))
def test_assert_pca(self):
self.assertRaises(AssertionError, PCALocalEncoder, 8, 3)
self.assertRaises(AssertionError, PCALocalEncoder, 2, 3)
pca = PCALocalEncoder(100, 2)
self.assertRaises(AssertionError, pca.train, self.test_vecs)
pca = PCALocalEncoder(8, 2)
self.assertRaises(AssertionError, pca.train, self.test_vecs[:7])
pca.train(self.test_vecs)
out = pca.encode(self.test_vecs)
self.assertEqual(out.shape[1], 8)
self.assertEqual(out.shape[0], self.test_vecs.shape[0])
def test_train_pca(self):
num_bytes = 8
num_clusters = 11
lopq = PipelineEncoder.load_yaml(self.lopq_yaml_np2)
lopq.train(self.test_vecs)
out = lopq.encode(self.test_vecs)
self._simple_assert(out, num_bytes, num_clusters)
# def test_train_pca_assert(self):
# # from PCA
# self.assertRaises(AssertionError, LOPQEncoder, num_bytes=100, pca_output_dim=20)
# # from PCA
# self.assertRaises(AssertionError, LOPQEncoder, num_bytes=7, pca_output_dim=20)
# # from LOPQ, cluster too large
# self.assertRaises(AssertionError, LOPQEncoder, num_bytes=4, pca_output_dim=20, cluster_per_byte=256)
def test_encode_backend(self):
num_bytes = 8
lopq = PipelineEncoder.load_yaml(self.lopq_yaml_tf)
lopq.train(self.test_vecs)
out = lopq.encode(self.test_vecs)
self._simple_assert(out, num_bytes, 255)
lopq2 = PipelineEncoder.load_yaml(self.lopq_yaml_np)
lopq2.train(self.test_vecs)
out = lopq2.encode(self.test_vecs)
self._simple_assert(out, num_bytes, 255)
# copy from lopq
lopq2._copy_from(lopq)
out2 = lopq2.encode(self.test_vecs)
        self._simple_assert(out2, num_bytes, 255)
self.assertEqual(out, out2)
def test_encode_batching(self):
num_bytes = 8
lopq = PipelineEncoder.load_yaml(self.lopq_yaml_tf)
lopq.train(self.test_vecs)
out = lopq.encode(self.test_vecs, batch_size=32)
self._simple_assert(out, num_bytes, 255)
out2 = lopq.encode(self.test_vecs, batch_size=64)
self.assertEqual(out, out2)
# def test_num_cluster(self):
# def _test_num_cluster(num_bytes, num_cluster, backend):
# lopq = LOPQEncoder(num_bytes,
# cluster_per_byte=num_cluster,
# pca_output_dim=20, pq_backend=backend)
# lopq.train(self.test_vecs)
# out = lopq.encode(self.test_vecs)
# self._simple_assert(out, num_bytes, num_cluster)
#
# _test_num_cluster(10, 3, 'numpy')
# _test_num_cluster(10, 3, 'tensorflow')
# _test_num_cluster(10, 5, 'numpy')
# _test_num_cluster(10, 5, 'tensorflow')
|
segment_to_number = {
"abcefg": "0",
"cf": "1",
"acdeg": "2",
"acdfg": "3",
"bcdf": "4",
"abdfg": "5",
"abdefg": "6",
"acf": "7",
"abcdefg": "8",
"abcdfg": "9",
}
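# decode() recovers the scrambled-wire -> true-segment mapping from the ten unique
# patterns in `key`: digits 1, 4, 7 and 8 are identified by pattern length alone; 3 is
# the five-segment pattern containing both wires of 1; 6 is the six-segment pattern
# containing only one of them. Successive set differences then pin down wires a-g, and
# each output pattern is translated back to a digit via segment_to_number.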
def decode(key, code):
one = tuple(k for k in key if len(k) == 2)[0]
four = tuple(k for k in key if len(k) == 4)[0]
seven = tuple(k for k in key if len(k) == 3)[0]
eight = tuple(k for k in key if len(k) == 7)[0]
three = tuple(k for k in key if len(k) == 5 and all(s in k for s in one))[0]
six = tuple(k for k in key if len(k) == 6 and sum(s in k for s in one) == 1)[0]
code_to_number = {}
code_to_number["a"] = (set(seven) - set(one)).pop()
code_to_number["b"] = (set(four) - set(three)).pop()
code_to_number["d"] = (set(four) - set(one) - set(code_to_number["b"])).pop()
code_to_number["c"] = (set(four) - set(six)).pop()
code_to_number["f"] = (set(four) - set(code_to_number.values())).pop()
code_to_number["g"] = (set(three) - set(code_to_number.values())).pop()
code_to_number["e"] = (set(eight) - set(code_to_number.values())).pop()
decode_segment = {v: k for k, v in code_to_number.items()}
return int("".join(segment_to_number["".join(sorted(decode_segment[x] for x in c))] for c in code))
input = tuple(tuple(segments.split() for segments in line.split(" | ")) for line in open("input").read().splitlines())
print(f"Answer part one: {sum(1 for _, code in input for segment in code if len(segment) in (2,3,4,7))}")
print(f"Answer part two: {sum(decode(*line) for line in input)}")
|
import time
def prune(args, model, sess, dataset):
print('|========= START PRUNING =========|')
t_start = time.time()
batch = dataset.get_next_batch('train', args.batch_size)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: True, model.is_train: False, model.pruned: False})
result = sess.run([model.outputs, model.sparsity], feed_dict)
print('Pruning: {:.3f} global sparsity (t:{:.1f})'.format(result[-1], time.time() - t_start))
|
from typing import Dict
from requests.models import Response
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import BackendApplicationClient
from previsionio.utils import NpEncoder
import json
import time
import requests
from . import logger
from . import config
from .utils import handle_error_response, parse_json, PrevisionException
PREVISION_TOKEN_URL = 'https://accounts.prevision.io/auth/realms/prevision.io/protocol/openid-connect/token'
class DeployedModel(object):
"""
DeployedModel class to interact with a deployed model.
Args:
        prevision_app_url (str): URL of the App. Can be retrieved on your app dashboard.
        client_id (str): Your app client id. Can be retrieved on your app dashboard.
        client_secret (str): Your app client secret. Can be retrieved on your app dashboard.
        prevision_token_url (str): URL of the token endpoint. Should be
            https://accounts.prevision.io/auth/realms/prevision.io/protocol/openid-connect/token
            if you're in the cloud, or a custom IP address if installed on-premise.
"""
def __init__(self, prevision_app_url: str, client_id: str, client_secret: str, prevision_token_url: str = None):
"""Init DeployedModel (and check that the connection is valid)."""
self.prevision_app_url = prevision_app_url
self.client_id = client_id
self.client_secret = client_secret
if prevision_token_url:
self.prevision_token_url = prevision_token_url
else:
self.prevision_token_url = PREVISION_TOKEN_URL
self.problem_type = None
self.token = None
self.url = None
self.access_token = None
try:
about_resp = self.request('/about', method=requests.get)
app_info = parse_json(about_resp)
self.problem_type = app_info['problem_type']
inputs_resp = self.request('/inputs', method=requests.get)
self.inputs = parse_json(inputs_resp)
outputs_resp = self.request('/outputs', method=requests.get)
self.outputs = parse_json(outputs_resp)
except Exception as e:
logger.error(e)
raise PrevisionException('Cannot connect: {}'.format(e))
def _generate_token(self):
client = BackendApplicationClient(client_id=self.client_id)
oauth = OAuth2Session(client=client)
token = oauth.fetch_token(token_url=self.prevision_token_url,
client_id=self.client_id,
client_secret=self.client_secret)
self.token = token
return token
def _get_token(self):
while self.token is None or time.time() > self.token['expires_at'] - 60:
try:
self._generate_token()
except Exception as e:
logger.warning(f'failed to generate token with error {e.__repr__()}')
def check_types(self, features):
for feature, value in features:
pass
def _check_token_url_app(self):
if not self.prevision_app_url:
raise PrevisionException('No url configured. Call client_app.init_client() to initialize')
if not self.client_id:
raise PrevisionException('No client id configured. Call client_app.init_client() to initialize')
if not self.client_secret:
raise PrevisionException('No client secret configured. Call client_app.init_client() to initialize')
def request(self, endpoint, method, files=None, data=None, allow_redirects=True, content_type=None,
check_response=True, message_prefix=None, **requests_kwargs):
"""
Make a request on the desired endpoint with the specified method & data.
Requires initialization.
Args:
            endpoint (str): api endpoint (e.g. /experiments, /prediction/file)
method (requests.{get,post,delete}): requests method
files (dict): files dict
data (dict): for single predict
content_type (str): force request content-type
allow_redirects (bool): passed to requests method
Returns:
request response
Raises:
Exception: Error if url/token not configured
"""
self._check_token_url_app()
url = self.prevision_app_url + endpoint
status_code = 502
retries = config.request_retries
n_tries = 0
resp = None
while (n_tries < retries) and (status_code in config.retry_codes):
n_tries += 1
try:
self._get_token()
assert self.token is not None
headers = {
"Authorization": "Bearer " + self.token['access_token'],
}
if content_type:
headers['content-type'] = content_type
resp = method(url,
headers=headers,
files=files,
allow_redirects=allow_redirects,
data=data,
**requests_kwargs)
status_code = resp.status_code
except Exception as e:
raise PrevisionException(f'Error requesting: {url} with error {e.__repr__()}')
if status_code in config.retry_codes:
logger.warning(f'Failed to request {url} with status code {status_code}.'
f' Retrying {retries - n_tries} times')
time.sleep(config.request_retry_time)
assert isinstance(resp, Response)
if check_response:
handle_error_response(resp, url, data, message_prefix=message_prefix, n_tries=n_tries)
return resp
def predict(self, predict_data: Dict, use_confidence: bool = False, explain: bool = False):
""" Get a prediction on a single instance using the best model of the experiment.
Args:
predict_data (dictionary): input data for prediction
            use_confidence (bool, optional): Whether to predict with confidence values
(default: ``False``)
explain (bool): Whether to explain prediction (default: ``False``)
Returns:
tuple(float, float, dict): Tuple containing the prediction value, confidence and explain.
In case of regression problem type, confidence format is a list.
In case of multiclassification problem type, prediction value format is a string.
"""
# FIXME add some checks for feature name with input api
features = [{'name': feature, 'value': value}
for feature, value in predict_data.items()]
predict_url = '/predict'
if explain or use_confidence:
predict_url += '?'
if explain:
predict_url += 'explain=true&'
if use_confidence:
predict_url += 'confidence=true'
predict_url = predict_url.rstrip('&')
resp = self.request(predict_url,
data=json.dumps(features, cls=NpEncoder),
method=requests.post,
message_prefix='Deployed model predict')
pred_response = resp.json()
target_name = self.outputs[0]['keyName']
preds = pred_response['response']['predictions']
prediction = preds[target_name]
        if use_confidence:
            if self.problem_type == 'regression':
                confidence_resp = [{key: value} for key, value in preds.items() if 'TARGET_quantile=' in key]
            elif 'confidence' in preds:
                confidence_resp = preds['confidence']
            else:
                confidence_resp = None
        else:
            confidence_resp = None
        if explain and 'explanation' in preds:
            explain_resp = preds['explanation']
        else:
            explain_resp = None
        return prediction, confidence_resp, explain_resp
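# A minimal usage sketch with placeholder credentials (not real values): constructing
# DeployedModel validates the connection, and predict() returns the tuple
# (prediction, confidence, explanation) described in its docstring.
#
#   model = DeployedModel(
#       prevision_app_url="https://my-app.example.prevision.io",
#       client_id="<client id>",
#       client_secret="<client secret>",
#   )
#   pred, confidence, explanation = model.predict({"feature_1": 3.2}, use_confidence=True)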
|
import warnings
from scraps.fitsS21 import hanger_resonator
warnings.warn(
DeprecationWarning(
"This module has been deprecated in favor of scraps.fitsS21.hanger_resonator"
)
)
def cmplxIQ_fit(paramsVec, res, residual=True, **kwargs):
"""Return complex S21 resonance model or, if data is specified, a residual.
This function is deprecated and will be removed in a future version. Use hanger_resonator.hanger_fit.
Parameters
----------
params : list-like
        An ``lmfit.Parameters`` object containing (df, f0, qc, qi, gain0, gain1, gain2, pgain0, pgain1, pgain2)
res : scraps.Resonator object
A Resonator object.
residual : bool
        Whether to return a residual (True) or to return the model calculated at the frequencies present in res (False).
Keyword Arguments
-----------------
freqs : list-like
A list of frequency points at which to calculate the model. Only used if `residual=False`
remove_baseline : bool
Whether or not to remove the baseline during calculation (i.e. ignore pgain and gain polynomials). Default is False.
only_baseline: bool
Whether or not to calculate and return only the baseline. Default is False.
Returns
-------
model or (model-data)/eps : ``numpy.array``
If residual=True is specified, the return is the residuals weighted by the uncertainties. If residual=False, the return is the model
values calculated at the frequency points. The returned array is in the form
``I + Q`` or ``residualI + residualQ``.
"""
warnings.warn(
DeprecationWarning(
"This function has been renamed hanger_resonator.hanger_fit. cmplxIQ_fit will be removed in a future version"
)
)
return hanger_resonator.hanger_fit(paramsVec, res, residual, **kwargs)
def cmplxIQ_params(res, **kwargs):
"""Initialize fitting parameters used by the cmplxIQ_fit function.
Parameters
----------
res : ``scraps.Resonator`` object
The object you want to calculate parameter guesses for.
Keyword Arguments
-----------------
fit_quadratic_phase : bool
This determines whether the phase baseline is fit by a line or a
quadratic function. Default is False for fitting only a line.
hardware : string {'VNA', 'mixer'}
This determines whether or not the Ioffset and Qoffset parameters are
allowed to vary by default.
use_filter : bool
Whether or not to use a smoothing filter on the data before calculating
parameter guesses. This is especially useful for very noisy data where
the noise spikes might be lower than the resonance minimum.
filter_win_length : int
        The length of the window used in the Savitzky-Golay filter that smooths
the data when ``use_filter == True``. Default is ``0.1 * len(data)`` or
3, whichever is larger.
Returns
-------
params : ``lmfit.Parameters`` object
"""
warnings.warn(
DeprecationWarning(
"This function has been renamed hanger_resonator.hanger_params. cmplxIQ_fit will be removed in a future version"
)
)
return hanger_resonator.hanger_params(res, **kwargs)
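# Migration sketch based on the deprecation messages above (res is whatever
# scraps.Resonator object you already have):
#
#   params = hanger_resonator.hanger_params(res)         # was: cmplxIQ_params(res)
#   residual = hanger_resonator.hanger_fit(params, res)  # was: cmplxIQ_fit(params, res)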
|
import time
import asyncio
import discord  # assumed dependency: the handlers below use a discord.py `client` that this excerpt never defines

# Hypothetical client setup so the excerpt is self-contained (adapt to your bot's config);
# counting member joins below requires the privileged members intent to be enabled.
intents = discord.Intents.default()
intents.members = True
client = discord.Client(intents=intents)
joined = 0
messages = 0
async def update_stats():
await client.wait_until_ready()
global messages, joined
client.loop.create_task(update_stats())
@client.event
async def on_message(message):
global messages # ADD TO TOP OF THIS FUNCTION
messages += 1 # ADD TO TOP OF THIS FUNCTION
...
@client.event
async def on_member_join(member):
global joined # ADD TO TOP OF THIS FUNCTION
joined += 1 # ADD TO TOP OF THIS FUNCTION
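# One possible completion of update_stats(), sketched as comments because the original
# body is elided above: periodically report the counters and reset them.
#
#   while not client.is_closed():
#       print(f"messages: {messages}, joined: {joined}")
#       messages = 0
#       joined = 0
#       await asyncio.sleep(60)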
|
import os
import pytest
from brownie import *
from integration_tests.utils import *
from src.rebaser import Rebaser
from src.utils import get_healthy_node
os.environ["DISCORD_WEBHOOK_URL"] = os.getenv("TEST_DISCORD_WEBHOOK_URL")
os.environ["ETH_USD_CHAINLINK"] = "0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419"
os.environ["DIGG_TOKEN_ADDRESS"] = "0x798D1bE841a82a273720CE31c822C61a67a601C3"
os.environ["DIGG_ORCHESTRATOR_ADDRESS"] = "0xbd5d9451e004fc495f105ceab40d6c955e4192ba"
os.environ["DIGG_POLICY_ADDRESS"] = "0x327a78D13eA74145cc0C63E6133D516ad3E974c3"
os.environ["UNIV2_DIGG_WBTC_ADDRESS"] = "0xe86204c4eddd2f70ee00ead6805f917671f56c52"
os.environ["SUSHI_DIGG_WBTC_ADDRESS"] = "0x9a13867048e01c663ce8ce2fe0cdae69ff9f35e3"
os.environ["GAS_LIMIT"] = "1000000"
@pytest.mark.require_network("mainnet-fork")
def test_correct_network():
pass
@pytest.fixture
def rebaser() -> Rebaser:
return Rebaser(
keeper_address=test_address,
keeper_key=test_key,
web3=get_healthy_node(Network.Ethereum),
)
def test_rebase(rebaser):
"""
Check if the contract should be harvestable, then call the harvest function
If the strategy should be harvested then claimable rewards should be positive before
and 0 after. If not then claimable rewards should be the same before and after
calling harvest
"""
accounts[0].transfer(test_address, "1 ether")
assert rebaser.rebase() == {}
def test_send_rebase_tx(rebaser):
accounts[0].transfer(test_address, "10 ether")
# TODO: mock send discord functions
    assert rebaser._Rebaser__process_rebase() == {}
|
import httplib, sys
import myparser
class search_google_labs:
def __init__(self,list):
self.results=""
self.totalresults=""
self.server="labs.google.com"
self.hostname="labs.google.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
id=0
self.set=""
for x in list:
id+=1
if id==1:
self.set=self.set+"q"+str(id)+"="+str(x)
else:
self.set=self.set+"&q"+str(id)+"="+str(x)
def do_search(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/sets?hl=en&"+self.set)
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults+= self.results
def get_set(self):
rawres=myparser.parser(self.totalresults,list)
return rawres.set()
def process(self):
self.do_search()
|
default_app_config = 'task.apps.TaskConfig'
|