###############################################################################
# Copyright 2018 The AnPyLar Team. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that
# can be found in the LICENSE file at http://anpylar.com/mit-license
###############################################################################
from .observable_base import ObservableOperator, _MetaObservableOperator
from .promise import Promise
__all__ = []
class _MetaToPromise(_MetaObservableOperator):
def __call__(cls, parent, *args, **kwargs):
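        # Note: instantiating this operator returns the Promise (not the
        # operator itself); on_next/on_error below resolve or reject that
        # promise with the values delivered by the parent observable.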
self = super().__call__(parent, *args, **kwargs) # create
self._promise = Promise()
self._parent._subscribe(self, self._get_sid())
return self._promise
class To_Promise_Operator(ObservableOperator, metaclass=_MetaToPromise):
def on_next(self, val, sid):
self._promise._resolve(val)
def on_error(self, error, sid):
self._promise._reject(error)
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 Hervé BREDIN
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Clustering(dict):
"""Clustering results
# create a new clustering result from scratch
>>> c = Clustering()
>>> c[1] = 0 # image #1 is in cluster #0
>>> c[2] = 0 # image #2 is in cluster #0
>>> c[3] = 1 # image #3 is in cluster #1
>>> c[4] = 1 # image #4 is in cluster #1
>>> c[5] = 2 # image #5 is in cluster #2
>>> c[6] = 2 # image #6 is in cluster #2
# load a clustering result from text file
# with one line per image: image_id cluster_id
>>> path = '/vol/corpora4/mediaeval/2014/evaluation/SED_2014_Dev_Task1_Test_Submission.txt'
>>> c = Clustering.load(path)
# save a clustering result to text file
>>> c.save('/tmp/result.txt')
# c can be seen as a standard 'dict'
# but it actually is a bi-directional 'dict'
>>> c
{1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2}
>>> c[5]
2
>>> c.clusters[1]
[3, 4]
>>> c.clusters[c[6]] # get all images in the same cluster as image #6
[5, 6]
"""
def __init__(self, *args, **kwargs):
super(Clustering, self).__init__(*args, **kwargs)
self.clusters = {}
        for key, value in self.items():
self.clusters.setdefault(value, []).append(key)
def __setitem__(self, key, value):
if key in self:
self.__delitem__(key)
super(Clustering, self).__setitem__(key, value)
self.clusters.setdefault(value, []).append(key)
def __delitem__(self, key):
self.clusters.setdefault(self[key], []).remove(key)
if self[key] in self.clusters and not self.clusters[self[key]]:
del self.clusters[self[key]]
super(Clustering, self).__delitem__(key)
@classmethod
def load(cls, path):
clustering = cls()
with open(path, 'r') as f:
for line in f:
key, value = line.strip().split()
clustering[int(key)] = int(value)
return clustering
def save(self, path):
with open(path, 'w') as f:
            for key, value in self.items():
f.write('{key:d} {value:d}\n'.format(key=key, value=value))
def to_list(self, items):
return [self[item] for item in items]
|
from panda3d.bullet import BulletWorld
from panda3d.core import BitMask32
from panda3d.core import ClockObject
from panda3d.core import CollisionHandlerEvent
from panda3d.core import CollisionTraverser
from panda3d.core import Vec3
from panda3d.physics import PhysicsCollisionHandler
from pandac.PandaModules import loadPrcFileData
from src.graphics import toggleSmileyFrowney
from src.logconfig import newLogger
from src.world_config import GRAVITY_ACCEL
log = newLogger(__name__)
# Each object belongs to zero or more of these groups. The matrix of which
# groups collide with which other groups is defined in initCollisionGroups.
COLLIDE_BIT_GROUND_PLANE = 0
COLLIDE_BIT_SCENERY = 1 # Floors, walls
COLLIDE_BIT_PLAYER = 2
COLLIDE_BIT_ENTITY = 3 # Misc entities flying around
COLLIDE_BIT_BULLET = 4
# Bullets are split off from other entities because they don't collide with
# the player. Really this group is for "things that otherwise collide as
# entities, except that they don't collide with the player". But I couldn't
# think of a short name for that.
COLLIDE_MASK_NONE = BitMask32(0x0)
COLLIDE_MASK_GROUND_PLANE = BitMask32.bit(COLLIDE_BIT_GROUND_PLANE)
COLLIDE_MASK_SCENERY = BitMask32.bit(COLLIDE_BIT_SCENERY )
COLLIDE_MASK_PLAYER = BitMask32.bit(COLLIDE_BIT_PLAYER )
COLLIDE_MASK_ENTITY = BitMask32.bit(COLLIDE_BIT_ENTITY )
COLLIDE_MASK_BULLET = BitMask32.bit(COLLIDE_BIT_BULLET )
# Not used yet, but still define it preemptively because we'll probably want
# it.
app = None
world = None
physicsCollisionHandler = None
eventCollisionHandler = None
def initPhysics(app_):
global app
app = app_
# Allow creating a matrix of Booleans to specify which collision groups
# collide with which other collision groups.
loadPrcFileData("", "bullet-filter-algorithm groups-mask")
global world
world = BulletWorld()
world.setGravity(Vec3(0, 0, -GRAVITY_ACCEL))
app.taskMgr.add(doPhysicsOneFrame, "doPhysics")
initCollisionGroups()
initCollisionHandling()
def doPhysicsOneFrame(task):
# TODO: This next line doesn't lint, but maybe it would be more efficient
# to cache the globalClock somehow instead of calling getGlobalClock()
# every frame? I suppose we could just suppress the pylint warning.
# dt = globalClock.getDt()
dt = ClockObject.getGlobalClock().getDt()
    # TODO[#3] This seems excessive, but until we fix recoil let's leave this
    # here for debugging purposes.
    # At most 90 substeps, with a fixed physics timestep of 1/600 seconds.
world.doPhysics(dt, 90, 1.0/600.0)
return task.cont
def initCollisionGroups():
"""
Setup the rules for which collision groups can collide with which other
collision groups.
"""
# Note: this matrix is required to be symmetric across the main diagonal: X
# can collide with Y if and only if Y can collide with X. Therefore, we
# only specify one half of the matrix.
#
# ground
# plane scenery player entity bullet
# ground plane 1 0 1 1 1
# scenery 0 1 1 1
# player 0 1 0
# entity 1 1
# bullet 1
setGroupCollisionFlags(COLLIDE_BIT_GROUND_PLANE,
[(COLLIDE_BIT_GROUND_PLANE, 1)])
setGroupCollisionFlags(COLLIDE_BIT_SCENERY,
[(COLLIDE_BIT_GROUND_PLANE, 0),
(COLLIDE_BIT_SCENERY, 0)])
setGroupCollisionFlags(COLLIDE_BIT_PLAYER,
[(COLLIDE_BIT_GROUND_PLANE, 1),
(COLLIDE_BIT_SCENERY, 1),
(COLLIDE_BIT_PLAYER, 0)])
setGroupCollisionFlags(COLLIDE_BIT_ENTITY,
[(COLLIDE_BIT_GROUND_PLANE, 1),
(COLLIDE_BIT_SCENERY, 1),
(COLLIDE_BIT_PLAYER, 1),
(COLLIDE_BIT_ENTITY, 1)])
# TODO: Maybe bullets shouldn't be able to collide with other bullets?
# Currently if you create too many, the framerate suffers badly.
setGroupCollisionFlags(COLLIDE_BIT_BULLET,
[(COLLIDE_BIT_GROUND_PLANE, 1),
(COLLIDE_BIT_SCENERY, 1),
(COLLIDE_BIT_PLAYER, 0),
(COLLIDE_BIT_ENTITY, 1),
(COLLIDE_BIT_BULLET, 1)])
def setGroupCollisionFlags(bit1, otherBitSpec):
for bit2, canCollide in otherBitSpec:
world.setGroupCollisionFlag(bit1, bit2, bool(canCollide))
def initCollisionHandling():
"""
Initialize the collision handlers. This must be run before any objects are
created.
"""
global physicsCollisionHandler
global eventCollisionHandler
    # Note: app already has a cTrav before this line, but it's set to the value
# 0. So we are not defining a new member outside of __init__; we're just
# overwriting an existing one.
app.cTrav = CollisionTraverser()
# Handle fast objects
app.cTrav.setRespectPrevTransform(True)
# TODO[#2]: Have physics.py expose a function to add colliders
# Used to handle collisions between physics-affected objects.
physicsCollisionHandler = PhysicsCollisionHandler()
# Used to run custom code on collisions.
# TODO[bullet]: This isn't used anymore. Reimplement custom collision
# detection so we can toggle between smiley and frowney when that object is
# shot.
eventCollisionHandler = CollisionHandlerEvent()
eventCollisionHandler.addInPattern("%fn-into-%in")
eventCollisionHandler.addOutPattern("%fn-out-%in")
# TODO[#2]: These don't belong here... where do they belong? initWorld?
app.accept("BulletColliderEvt-into-SmileyCollide", onCollideEventIn)
app.accept("BulletColliderEvt-out-SmileyCollide", onCollideEventOut)
def onCollideEventIn(entry):
log.debug("Collision detected IN.")
# There, pylint, I used the parameter. Happy?
log.debug(" %s", entry)
# Get rid of the bullet.
bullet = entry.getFromNode().getParent(0)
bullet.getParent(0).removeChild(bullet)
toggleSmileyFrowney()
def onCollideEventOut(entry):
# Note: I'm not sure we actually care about handling the "out" events.
log.debug("Collision detected OUT.")
log.debug(" %s", entry)
# TODO[bullet]: Provide a way to get/set the player velocity?
def addBulletColliders(bulletColliderPhys, bulletColliderEvt, physicsNP):
# Handle collisions through physics via bulletColliderPhys.
physicsCollisionHandler.addCollider(bulletColliderPhys, physicsNP)
app.cTrav.addCollider(bulletColliderPhys, physicsCollisionHandler)
# Handle collisions in custom manner via bulletColliderEvt.
app.cTrav.addCollider(bulletColliderEvt, eventCollisionHandler)
|
#!/bin/python
import sys
def print30(*args, **kargs):
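    # Emulates Python 3's print(): accepts the sep, end and file keyword
    # arguments while remaining usable from Python 2 code.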
sep = kargs.get('sep', ' ')
end = kargs.get('end','\n')
file = kargs.get('file',sys.stdout)
output = ''
first = True
for arg in args:
output += ('' if first else sep) + str(arg)
first = False
file.write(output + end)
|
"""by the given image of the billiard balls need to count them and approximately find their radius. Then compute the variance for the found radiuses."""
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
def count_balls(img, method="erode"):
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (7,7))
if method=="erode":
morphed_img = cv.erode(img, kernel)
else:
assert method == "opening"
morphed_img = cv.morphologyEx(img, cv.MORPH_OPEN, kernel, iterations=2)
    cv.imshow('After morphological filtering', cv.resize(morphed_img, (720, 560)))
contours, hierarchy = cv.findContours(morphed_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
count_balls = 0
area_store = []
non_noisy_cntrs = []
for i, c in enumerate(contours):
        # compute contour area
area = cv.contourArea(c)
# get rid of noise
if area > 3000:
non_noisy_cntrs.append(c)
            # if the area is larger than the empirical threshold,
            # the balls weren't separated
if area > 15000:
count_balls += 2
area_store.append(area / 2)
else:
count_balls += 1
area_store.append(area)
rads = []
for ar in area_store:
        # compute radius (pi*r**2 = S)
rads.append(np.sqrt(ar/np.pi))
return count_balls, rads, non_noisy_cntrs
def main():
    init_img = cv.imread(r"..\image_example\whiteballssample.jpg")
gray_scale_img = cv.cvtColor(init_img, cv.COLOR_BGR2GRAY)
img1 = cv.equalizeHist(gray_scale_img)
ret, img2 = cv.threshold(img1,240,255, cv.THRESH_BINARY)
blur = cv.GaussianBlur(gray_scale_img,(5,5),0)
ret3,th3 = cv.threshold(gray_scale_img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
th2 = cv.adaptiveThreshold(gray_scale_img,255,cv.ADAPTIVE_THRESH_MEAN_C,\
cv.THRESH_BINARY,33,2)
titles = ['Original Image', 'Binary Thresholding (trsh = 240)',
'Adaptive Mean Thresholding', 'Otsu']
images = [gray_scale_img, img2, th2, th3]
for i in range(4):
plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show()
for img, name in zip([th3, img2], ['Otsu', 'Binary']):
num_balls, rads, non_noisy_cntrs = count_balls(img, method="erode")
print("ball count on the image: {}, method: {}".format(num_balls, name))
print("average radius of the balls: {}, method: {}".format(np.mean(rads), name))
print("variance of the radius: {}, method: {}\n".format(np.var(rads), name))
contoured_image = gray_scale_img.copy()
cv.drawContours(contoured_image, non_noisy_cntrs, -1, (0,255,0), 3)
cv.imshow('contours drawing, method: {}'.format(name), cv.resize(contoured_image, (720, 560)))
cv.waitKey(0)
cv.destroyAllWindows()
if __name__ == "__main__":
main()
|
import pytest
from django.core.exceptions import ValidationError
from test_plus import TestCase
from maceoutliner.users.validators import validate_usernames_icase
class UserNameValidatorTest(TestCase):
"""
Tests for custom username validators.
"""
def setUp(self):
self.user1 = self.make_user("username_121")
self.data = {
"email": "somemonkey@gmail.com",
"username": "USeRNAME_121",
"password1": "ohsos67894783278932 sdfhasdfauifh&*(&)",
"password2": "ohsos67894783278932 sdfhasdfauifh&*(&)",
}
self.url_kwargs = {"data": self.data}
self.view_string = "account_signup"
def test_icase_username_search(self):
"""
Try to create another user with the same username in a different case.
"""
with pytest.raises(ValidationError):
validate_usernames_icase("USeRNAME_121")
def test_allauth_incorporates_validator(self):
"""
Ensure that submitting to the allauth view takes the new validator into account.
"""
self.post(self.view_string, **self.url_kwargs)
self.response_200()
form = self.get_context("form")
print(form.errors)
assert len(form.errors) == 1
assert (
form.errors["username"][0]
== "This username is already taken. Please select another."
)
|
import os.path
import gi
gi.require_versions({
'Gtk': '3.0',
# 'Pywebkit': '0.1'
})
from gi.repository import Gtk, Gdk, GObject, GLib#, Pywebkit
#from gi.repository.Pywebkit import Webview
from pymtk.WebView import WebView2
from pymtk.future import synced
from pymtk.ui import UI,DirectoryTree,radio_group
from pymtk.git import Git, GitFile
from pymtk import webkit2
from pymtk.webkit2 import idle_add
import pymtk
# set base path for local resource files
UI.set_directory(__file__)
WebView2.dir = os.path.dirname(os.path.realpath(__file__))
class Controller(object):
def __init__(self,*args):
self.last_action = self.onViewStatus
#create UI
self.ui = UI("diff.ui.xml", win="mainWindow")
# tree view
self.ui.tree.add_root( GitFile(os.getcwd()) )
# web view
self.ui.web.load_local_uri("diff.html")
# status bar
self.ui.status_bar( os.getcwd() )
# bind event handlers
self.ui.bind(self)
pymtk.webkit2.bind(self.ui.web,self)
# IPC channel
self.JavaScript = pymtk.webkit2.JavaScript(self.ui.web)
self.ui.show()
def selected_file(self):
f = self.ui.tree.get_selected_file().file_name
        f = f if f is not None else self.ui.tree.root.file_name
        f = f if f is not None else os.getcwd()
return f
def doGit(self, cmd, file=None, param=None, action=None, refresh=False, *args,**kargs):
if file is None:
file = self.selected_file()
git = Git(file)
params = () if param is None else (param,)
        c = None  # keep c defined so the return below does not raise NameError if cmd fails
        try:
            c = cmd(git, *params)
        except BaseException as e:
            print(e)
        if action is not None:
            self.last_action = action
        if refresh:
            self.onViewRefresh()
        return c
def doGitPlainText(self, cmd, file=None, param=None,action=None, refresh=False, *args,**kargs):
c = self.doGit(cmd,file,param,action,refresh,*args,**kargs)
self.JavaScript.setPlainText( *c )
def onDocumentLoad(self,*args):
self.doGitPlainText( Git.status, file = os.getcwd(), action=self.last_action )
def onFileOpen(self,*args):
dir = self.ui.showFileDialog(Gtk.FileChooserAction.SELECT_FOLDER,"Please choose a folder")
        if dir is not None:
self.ui.status_bar( dir )
self.ui.tree.clear()
self.ui.tree.add_root( GitFile(dir) )
self.doGitPlainText( Git.status, file=dir )
def onExit(self,*args):
Gtk.main_quit()
def onGitAdd(self,*args):
self.doGitPlainText( Git.add, refresh=True)
def onGitRestore(self,*args):
self.doGitPlainText( Git.restore, refresh=True )
def onGitRestoreStaged(self,*args):
self.doGitPlainText( Git.restore_staged, refresh=True )
def onGitRestoreOrigin(self,*args):
self.doGitPlainText( Git.restore_origin, refresh=True )
def onGitPull(self,*args):
self.doGitPlainText( Git.pull, refresh=True )
def onGitPush(self,*args):
self.doGitPlainText( Git.push, refresh=True )
def onGitShowBranches(self,*args):
c = self.doGit( Git.branches )
self.JavaScript.setBranches(c["current"], c["branches"])
def onCreateBranch(self,branch):
self.doGitPlainText( Git.create_branch, param=branch )
def onDeleteBranch(self,branch):
b = self.doGit( Git.default_branch ).strip()
if branch == b:
self.JavaScript.setPlainText( b, "cannot delete default branch" )
return
self.doGitPlainText( Git.delete_branch, param=branch )
def onGitCommit(self,*args):
c = self.doGit( Git.diff_cached )
self.JavaScript.setCommit( *c )
def onSubmitCommit(self,msg):
self.doGitPlainText( Git.commit, param=msg, refresh=True)
def onSelectBranch(self,branch):
self.doGitPlainText( Git.select_branch, param=branch, refresh=True )
def onGitDiffOrigin(self,*args):
c = self.doGit( Git.diff_origin )
self.JavaScript.setDiff("ORIGIN: " + c[0],c[1])
def onGitDiffCached(self,*args):
c = self.doGit( Git.diff_cached )
self.JavaScript.setDiff("Indexed but not committed: " + c[0],c[1])
@radio_group(menu="ViewDiffMenuItem", tb="tb_diff")
def onViewDiff(self,*args):
c = self.doGit( Git.diff, action=self.onViewDiff )
self.JavaScript.setDiff( *c )
@radio_group(menu="ViewStatusMenuItem", tb="tb_status")
def onViewStatus(self,*args):
self.doGitPlainText( Git.status, action=self.onViewStatus )
@radio_group(menu="ViewFileMenuItem", tb="tb_file")
def onViewFile(self,*args):
self.doGitPlainText( Git.view_file, action=self.onViewFile )
def onContext(self,treeview, event,*args):
if event.button == 3: # right click
m = self.ui.GitSubMenu
Gtk.Menu.popup_at_pointer(m,event)
return False
    def onWebContext(self, web, menu, event, *args):
        m = self.ui.ViewSubMenu
        Gtk.Menu.popup_at_pointer(m, event)
        # suppress the standard webview context menu
return True
def onSelect(self,*args):
f = self.last_action
        if f is not None:
self.last_action = None
f()
self.last_action = f
def onViewRefresh(self,*args):
self.ui.tree.refresh()
def onHelp(self,*args):
self.ui.alert("This is the simple pygtk diff viewer using webkit2 based HTML rendering.")
#create controller
controller = Controller()
# start the GUI event main loop
Gtk.main()
|
class BulletParams:
def __init__(self, speed, size, damage):
self.speed = speed
self.size = size
self.damage = damage
@staticmethod
def read_from(stream):
speed = stream.read_double()
size = stream.read_double()
damage = stream.read_int()
return BulletParams(speed, size, damage)
def write_to(self, stream):
stream.write_double(self.speed)
stream.write_double(self.size)
stream.write_int(self.damage)
def __repr__(self):
return "BulletParams(" + \
repr(self.speed) + "," + \
repr(self.size) + "," + \
repr(self.damage) + \
")"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by charlie on 18-4-17
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
import os
import random
import pickle
from data_provider.THUMOS14 import THUMOS14
from sklearn.svm import SVR
from sklearn.linear_model import SGDRegressor, LogisticRegression
from sklearn.metrics import log_loss, accuracy_score
import time
import tensorflow as tf
from proposal.evaluate import cal_iou, cal_average_recall
class DataSetUtils(object):
def __init__(self, batch_size=256):
self.dataset = THUMOS14()
self.batch_size = batch_size
self.visual_feature_dim = 4096
self.feat_dir = self.dataset.FEATURE_DIR
self.unit_duration = 1.0
self.training_samples, self.testing_sample = self.prepare_sample()
print(len(self.training_samples), len(self.testing_sample))
self.idx = 0
def prepare_sample(self):
samples = []
train, test = self.dataset.load_in_info()
for vid in train:
used = set()
info = train[vid]
proposals = info['proposals']
movie_name = vid
for proposal in proposals:
start = proposal['start']
end = proposal['end']
round_start = np.floor(start / self.unit_duration) * self.unit_duration
round_end = np.floor(end / self.unit_duration) * self.unit_duration
if round_start == round_end:
samples.append((movie_name, int(round_start), 1))
used.add(int(round_start))
else:
while round_start <= round_end:
samples.append((movie_name, int(round_start), 1))
used.add(int(round_start))
round_start += self.unit_duration
clip_start = 0
clip_end = clip_start + self.unit_duration
while clip_end < info['duration']:
if int(clip_start) not in used:
samples.append((movie_name, int(clip_start), 0))
clip_start += self.unit_duration
clip_end = clip_start + self.unit_duration
train_samples = sorted(samples)
samples = []
for vid in test:
used = set()
info = test[vid]
proposals = info['proposals']
movie_name = vid
for proposal in proposals:
start = proposal['start']
end = proposal['end']
round_start = np.floor(start / self.unit_duration) * self.unit_duration
round_end = np.floor(end / self.unit_duration) * self.unit_duration
if round_start == round_end:
samples.append((movie_name, int(round_start), 1))
used.add(int(round_start))
else:
while round_start <= round_end:
samples.append((movie_name, int(round_start), 1))
used.add(int(round_start))
round_start += self.unit_duration
clip_start = 0
clip_end = clip_start + self.unit_duration
while clip_end < info['duration']:
if int(clip_start) not in used:
samples.append((movie_name, int(clip_start), 0))
clip_start += self.unit_duration
clip_end = clip_start + self.unit_duration
test_samples = sorted(samples)
return train_samples, test_samples
def next_train_batch(self):
image_batch = np.zeros([self.batch_size, self.visual_feature_dim])
label_batch = np.zeros([self.batch_size, 2], dtype=np.int32)
index = 0
while index < self.batch_size:
k = self.idx
movie_name = self.training_samples[k][0]
round_gt_start = self.training_samples[k][1]
label = self.training_samples[k][2]
feat = self.dataset.load_feature(movie_name, round_gt_start, round_gt_start + self.unit_duration, l2=True)
image_batch[index, :] = feat
# label_batch[index]=label
if label == 1:
label_batch[index, 0] = 0
label_batch[index, 1] = 1
else:
label_batch[index, 0] = 1
label_batch[index, 1] = 0
index += 1
self.idx += 1
if self.idx >= len(self.training_samples): self.idx = 0
return image_batch, label_batch
def get_train(self):
image_batch = []
label_batch = []
for i in range(len(self.training_samples)):
k = i
movie_name = self.training_samples[k][0]
round_gt_start = self.training_samples[k][1]
label = self.training_samples[k][2]
feat = self.dataset.load_feature(movie_name, round_gt_start, round_gt_start + self.unit_duration)
image_batch.append(feat)
label_batch.append(label)
if i > 2000: break
image_batch = np.array(image_batch)
label_batch = np.array(label_batch)
return image_batch, label_batch
def get_test(self):
image_batch = []
label_batch = []
for i in range(len(self.testing_sample)):
k = i
movie_name = self.testing_sample[k][0]
round_gt_start = self.testing_sample[k][1]
label = self.testing_sample[k][2]
feat = self.dataset.load_feature(movie_name, round_gt_start, round_gt_start + self.unit_duration)
image_batch.append(feat)
label_batch.append(label)
if i > 200: break
image_batch = np.array(image_batch)
label_batch = np.array(label_batch)
return image_batch, label_batch
def model(X, w_h, w_o):
h = tf.nn.sigmoid(tf.matmul(X, w_h))
# h = tf.nn.sigmoid(tf.matmul(X, w_h))
return tf.matmul(h, w_o)
def init_weight(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.001))
def training():
dataset = DataSetUtils()
X = tf.placeholder("float", shape=[None, 4096])
y = tf.placeholder("float", shape=[None, 2])
w_h = init_weight([4096, 1000])
w_o = init_weight([1000, 2])
logit = model(X, w_h, w_o)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=y)) * 100
train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(cost)
saver = tf.train.Saver()
max_iter = 50000
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for i in range(max_iter):
train_X, train_y = dataset.next_train_batch()
loss, _ = sess.run([cost, train_op], feed_dict={X: train_X, y: train_y})
if i % 500 == 0:
print("Step", i, "Loss", loss)
if i % 10000 == 0:
prob, pred, labels = [], [], []
                # Use a separate loop variable so the training-step counter `i` is not shadowed.
                for j in range(len(dataset.testing_sample)):
                    movie_name = dataset.testing_sample[j][0]
                    round_gt_start = dataset.testing_sample[j][1]
                    label = dataset.testing_sample[j][2]
feat = dataset.dataset.load_feature(movie_name, round_gt_start,round_gt_start + dataset.unit_duration, l2=True)
feat = feat.reshape(1, -1)
_logit = sess.run(logit, feed_dict={X: feat.reshape([1, -1])})
if softmax(_logit)[0, 1] > 0.5:
pred.append(1)
else:
pred.append(0)
prob.append(softmax(_logit)[0, 1])
labels.append(label)
print("Accuracy", accuracy_score(y_pred=pred, y_true=labels))
print("Loss", log_loss(y_pred=prob, y_true=labels))
saver.save(sess, os.path.join(THUMOS14.MODEL_DIR, 'action_classifier_' + str(max_iter)))
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=1).reshape(-1, 1)
def testing():
dataset = DataSetUtils()
result = {}
with tf.Graph().as_default():
sess = tf.Session()
X = tf.placeholder("float", shape=[None, 4096])
y = tf.placeholder("float", shape=[None, 2])
w_h = init_weight([4096, 1000])
w_o = init_weight([1000, 2])
logit = model(X, w_h, w_o)
# First let's load meta graph and restore weights
saver = tf.train.Saver()
saver.restore(sess, os.path.join(THUMOS14.MODEL_DIR, 'action_classifier_50000'))
pred, probs, labels = [], [], []
for i in range(len(dataset.testing_sample)):
if i % 1000 == 0: print(i)
movie_name = dataset.testing_sample[i][0]
round_gt_start = dataset.testing_sample[i][1]
label = dataset.testing_sample[i][2]
feat = dataset.dataset.load_feature(movie_name, round_gt_start, round_gt_start + dataset.unit_duration,l2=True)
feat = feat.reshape(1, -1)
_logit = sess.run(logit, feed_dict={X: feat.reshape([1, -1])})
if softmax(_logit)[0, 1] > 0.5:
pred.append(1)
else:
pred.append(0)
prob = softmax(_logit)[0, 1]
probs.append(prob)
labels.append(label)
_list = result.get(movie_name, [])
_list.append((round_gt_start, prob))
result[movie_name] = _list
print("Accuracy", accuracy_score(y_pred=pred, y_true=labels))
print("Loss", log_loss(y_pred=probs, y_true=labels))
        pickle.dump(result, open(os.path.join(THUMOS14.RES_DIR, 'tag_time_probs'), 'wb'))
def postprocess():
    '''
    Post-process the per-clip action probabilities into temporal proposals.
    result: {movie: [(timestamp, action_prob), ...]}, as produced by testing()
    gamma: action-probability threshold swept over when forming candidate segments
    :return: None; the proposals are evaluated and pickled to THUMOS14.RES_DIR
    '''
def compensate(pred):
ret = []
for time, prob in pred:
ret.append((time, 1 - prob))
return sorted(ret, key=lambda x: x[0])
def G(pred, gamma):
candidates = []
pre = 1
if pred[0][1] < gamma:
start = 0
else:
start = -1
end = -1
idx = 0
        probs = []
        while idx < len(pred):
            time, prob = pred[idx]
            if start != -1:
                probs.append(prob)
            if prob < gamma and pre > prob and end == -1:
                start = time
            if prob > gamma and pre < prob and start != -1:
                end = time
                candidates.append((start, end, sum(probs) / len(probs)))
                start = end = -1
                probs = []
            pre = prob
            idx += 1
# print(candidates)
return candidates
def absorb(candidates, tau):
proposals = []
for i in range(len(candidates)):
start = candidates[i][0]
            end0 = end = candidates[i][1]
            prob = candidates[i][2]
            for j in range(i + 1, len(candidates)):
                if candidates[j][0] < end:
                    continue
                # print(candidates[j][1], (end0 - start) / (candidates[j][1] - start), tau)
                if (end0 - start) / (candidates[j][1] - start) < tau:
                    break
end = candidates[j][1]
proposals.append((start, end,prob))
return proposals
def filter(proposals, tiou=0.95):
removed = []
for i in range(len(proposals)):
for j in range(i + 1, len(proposals)):
if j in removed: continue
if cal_iou(proposals[i][:2], proposals[j][:2]) > tiou:
removed.append(i)
break
        return sorted([(x[0], x[1], 1 - x[2]) for idx, x in enumerate(proposals) if idx not in removed],
                      key=lambda x: x[2], reverse=True)
dataset = DataSetUtils()
_, test = dataset.dataset.load_in_info()
    result = pickle.load(open(os.path.join(THUMOS14.RES_DIR, 'tag_time_probs'), 'rb'))
ret = {}
for movie in test:
pred = result[movie]
pred = compensate(pred)
proposals = set()
for gamma in np.arange(0.05, 1, 0.05):
G_set = G(pred, gamma)
for tau in np.arange(0.05, 1, 0.05):
candidates = absorb(G_set, tau)
proposals = proposals.union(set(candidates))
proposals = sorted(list(proposals), key=lambda x: (x[0], x[1]))
proposals = filter(proposals)
# print(sorted(proposals, key=lambda x: (x[0], x[1])))
proposals = [{"start": start, "end": end} for start, end,_ in proposals]
ret[movie] = {'proposals': proposals}
# print(result[movie])
# print(test[movie]['proposals'])
# break
print(cal_average_recall(predicts=ret, groundtruth=test, num_proposals=200))
    pickle.dump(ret, open(os.path.join(THUMOS14.RES_DIR, 'tag_proposal'), 'wb'))
if __name__ == '__main__':
# training()
# testing()
postprocess()
|
# coding: utf-8
from typing import List, Optional
import pydantic
from pydantic import BaseModel
class Tipe(BaseModel):
name: str
returns: str
origin: Optional[str]
origtype: Optional[str]
desc: List[str]
raises_get: Optional[str]
raises_set: Optional[str]
@pydantic.validator('name', 'returns')
def validate_str_len(cls, value: str) -> str:
s = value.strip()
if len(s) > 0:
return s
raise ValueError("must not be null or empty string")
|
name = "raspberrypi-uart-logger"
|
from decouple import config
SITE_ID = config('SITE_ID_PRODUCTION')
EMAIL_HOST = "smtp.gmail.com"
EMAIL_HOST_USER = config('EMAIL')
EMAIL_HOST_PASSWORD = config('PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = config('EMAIL')
EMAIL_BACKEND = "anymail.backends.mailjet.EmailBackend"
ANYMAIL = {
"MAILJET_API_KEY": config('MAILJET_API_KEY'),
"MAILJET_SECRET_KEY": config('MAILJET_SECRET_KEY'),
}
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
SITE_NAME = config('SITE_NAME')
AWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = config('AWS_STORAGE_BUCKET_NAME')
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = None
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=10',
}
AWS_LOCATION = 'media'
AWS_QUERYSTRING_AUTH=True
DEFAULT_FILE_STORAGE = 'guru.storage_back.PublicMediaStorage'
AWS_PRIVATE_MEDIA_LOCATION = 'private'
PRIVATE_FILE_STORAGE = 'guru.storage_back.PrivateMediaStorage'
AWS_PUBLIC_MEDIA_LOCATION ='public'
PUBLIC_FILE_STORAGE = 'guru.storage_back.PublicMediaStorage'
AWS_S3_REGION_NAME = 'ap-south-1'
AWS_S3_SIGNATURE_VERSION = 's3v4'
AWS_S3_ENDPOINT_URL = 'https://fra1.digitaloceanspaces.com'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': config('DBNAME'),
        'USER': config('USER'),
        'PASSWORD': config('DBPASS'),
        'PORT': config('DBPORT'),
        'HOST': config('DBHOST'),
}
}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import count
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: mnmcount <file>", file=sys.stderr)
sys.exit(-1)
spark = (SparkSession
.builder
.appName("PythonMnMCount")
.getOrCreate())
# get the M&M data set file name
mnm_file = sys.argv[1]
# read the file into a Spark DataFrame
mnm_df = (spark.read.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(mnm_file))
mnm_df.show(n=5, truncate=False)
# aggregate count of all colors and groupBy state and color
# orderBy descending order
count_mnm_df = (mnm_df.select("State", "Color", "Count")
.groupBy("State", "Color")
.sum("Count")
.orderBy("sum(Count)", ascending=False))
# show all the resulting aggregation for all the dates and colors
count_mnm_df.show(n=60, truncate=False)
print("Total Rows = %d" % (count_mnm_df.count()))
# find the aggregate count for California by filtering
ca_count_mnm_df = (mnm_df.select("*")
.where(mnm_df.State == 'CA')
.groupBy("State", "Color")
.sum("Count")
.orderBy("sum(Count)", ascending=False))
# show the resulting aggregation for California
ca_count_mnm_df.show(n=10, truncate=False)
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
@ddt.ddt
class ShareTypesAdminTest(base.BaseSharesAdminTest):
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_share_type_create_delete(self):
name = data_utils.rand_name("tempest-manila")
extra_specs = self.add_extra_specs_to_dict()
# Create share type
st_create = self.shares_v2_client.create_share_type(
name, extra_specs=extra_specs)
self.assertEqual(name, st_create['share_type']['name'])
st_id = st_create['share_type']['id']
# Delete share type
self.shares_v2_client.delete_share_type(st_id)
# Verify deletion of share type
self.shares_v2_client.wait_for_resource_deletion(st_id=st_id)
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.get_share_type,
st_id)
def _verify_is_public_key_name(self, share_type, version):
old_key_name = 'os-share-type-access:is_public'
new_key_name = 'share_type_access:is_public'
if utils.is_microversion_gt(version, "2.6"):
self.assertIn(new_key_name, share_type)
self.assertNotIn(old_key_name, share_type)
else:
self.assertIn(old_key_name, share_type)
self.assertNotIn(new_key_name, share_type)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@ddt.data('2.0', '2.6', '2.7')
def test_share_type_create_get(self, version):
self.skip_if_microversion_not_supported(version)
name = data_utils.rand_name("tempest-manila")
extra_specs = self.add_extra_specs_to_dict({"key": "value", })
# Create share type
st_create = self.create_share_type(
name, extra_specs=extra_specs, version=version)
self.assertEqual(name, st_create['share_type']['name'])
self._verify_is_public_key_name(st_create['share_type'], version)
st_id = st_create["share_type"]["id"]
# Get share type
get = self.shares_v2_client.get_share_type(st_id, version=version)
self.assertEqual(name, get["share_type"]["name"])
self.assertEqual(st_id, get["share_type"]["id"])
self.assertEqual(extra_specs, get["share_type"]["extra_specs"])
self._verify_is_public_key_name(get['share_type'], version)
# Check that backwards compatibility didn't break
self.assertDictMatch(get["volume_type"], get["share_type"])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@ddt.data('2.0', '2.6', '2.7')
def test_share_type_create_list(self, version):
self.skip_if_microversion_not_supported(version)
name = data_utils.rand_name("tempest-manila")
extra_specs = self.add_extra_specs_to_dict()
# Create share type
st_create = self.create_share_type(
name, extra_specs=extra_specs, version=version)
self._verify_is_public_key_name(st_create['share_type'], version)
st_id = st_create["share_type"]["id"]
# list share types
st_list = self.shares_v2_client.list_share_types(version=version)
sts = st_list["share_types"]
self.assertGreaterEqual(len(sts), 1)
self.assertTrue(any(st_id in st["id"] for st in sts))
for st in sts:
self._verify_is_public_key_name(st, version)
# Check that backwards compatibility didn't break
vts = st_list["volume_types"]
self.assertEqual(len(sts), len(vts))
for i in range(len(sts)):
self.assertDictMatch(sts[i], vts[i])
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
def test_get_share_with_share_type(self):
# Data
share_name = data_utils.rand_name("share")
shr_type_name = data_utils.rand_name("share-type")
extra_specs = self.add_extra_specs_to_dict({
"storage_protocol": CONF.share.capability_storage_protocol,
})
# Create share type
st_create = self.create_share_type(
shr_type_name, extra_specs=extra_specs)
# Create share with share type
share = self.create_share(
name=share_name, share_type_id=st_create["share_type"]["id"])
self.assertEqual(share["name"], share_name)
self.shares_client.wait_for_share_status(share["id"], "available")
# Verify share info
get = self.shares_v2_client.get_share(share["id"], version="2.5")
self.assertEqual(share_name, get["name"])
self.assertEqual(share["id"], get["id"])
self.assertEqual(shr_type_name, get["share_type"])
get = self.shares_v2_client.get_share(share["id"], version="2.6")
self.assertEqual(st_create["share_type"]["id"], get["share_type"])
self.assertEqual(shr_type_name, get["share_type_name"])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_private_share_type_access(self):
name = data_utils.rand_name("tempest-manila")
extra_specs = self.add_extra_specs_to_dict({"key": "value", })
project_id = self.shares_client.tenant_id
# Create private share type
st_create = self.create_share_type(
name, False, extra_specs=extra_specs)
self.assertEqual(name, st_create['share_type']['name'])
st_id = st_create["share_type"]["id"]
# It should not be listed without access
st_list = self.shares_v2_client.list_share_types()
sts = st_list["share_types"]
self.assertFalse(any(st_id in st["id"] for st in sts))
# List projects that have access for share type - none expected
access = self.shares_v2_client.list_access_to_share_type(st_id)
self.assertEqual([], access)
# Add project access to share type
access = self.shares_v2_client.add_access_to_share_type(
st_id, project_id)
# Now it should be listed
st_list = self.shares_client.list_share_types()
sts = st_list["share_types"]
self.assertTrue(any(st_id in st["id"] for st in sts))
# List projects that have access for share type - one expected
access = self.shares_v2_client.list_access_to_share_type(st_id)
expected = [{'share_type_id': st_id, 'project_id': project_id}, ]
self.assertEqual(expected, access)
# Remove project access from share type
access = self.shares_v2_client.remove_access_from_share_type(
st_id, project_id)
# It should not be listed without access
st_list = self.shares_client.list_share_types()
sts = st_list["share_types"]
self.assertFalse(any(st_id in st["id"] for st in sts))
# List projects that have access for share type - none expected
access = self.shares_v2_client.list_access_to_share_type(st_id)
self.assertEqual([], access)
|
# -*-coding:utf8-*-
import requests
import json
import time
import MySQLdb
from multiprocessing.dummy import Pool as ThreadPool
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
urls = []
head = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36'
}
time1 = time.time()
for i in range(1000, 5000):
url = 'https://coding.net/api/public/all?page=' + str(i) + '&pageSize=1'
urls.append(url)
def getsource(url):
jscontent = requests.get(url, headers=head, verify=True).content
time2 = time.time()
jsDict = json.loads(jscontent)
if jsDict['code'] == 0:
jsList = jsDict['data']
jsData = jsList['list'][0]
id = jsData['id']
name = jsData['name']
url = jsData['https_url']
description = jsData['description']
print "Succeed: " + str(id) + "\t" + str(time2 - time1)
        try:
            conn = MySQLdb.connect(host='localhost', user='root', passwd='', port=3306, charset='utf8')
            cur = conn.cursor()
            conn.select_db('python')
            cur.execute('INSERT INTO coding VALUES (%s,%s,%s,%s)',
                        [int(id), name, url, description])
            conn.commit()  # without an explicit commit the insert is rolled back
            conn.close()
        except MySQLdb.Error, e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
else:
print "Error: " + url
pool = ThreadPool(8)
try:
results = pool.map(getsource, urls)
except Exception:
print 'ConnectionError'
time.sleep(300)
results = pool.map(getsource, urls)
pool.close()
pool.join()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Tools.Logger import logger
from Tools.Config import ROOT_DIR, CONFIG
import json
class ReverseIndex:
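    """In-memory inverted index: maps each token to the y-values (document ids
    or labels) of the samples containing it, and tracks how many distinct
    documents each token appears in."""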
def __init__(self):
self.index = {}
self.count_documents_in_word = {}
self.words = []
self.total = None
def create(self, X, y):
self.index = {}
self.total = len(X)
for index in range(len(X)):
for token in X[index]:
try:
self.index[token].append(y[index])
except KeyError:
self.index[token] = [y[index]]
self.load_envs()
return self
def load_envs(self):
self.words = sorted(list(set(self.index.keys())))
for key in self.words:
self.count_documents_in_word[key] = len(set(self.index[key]))
def persist(self):
with open(ReverseIndex.filename(), 'w+') as f:
f.write(self.to_s())
@classmethod
def filename(cls):
return ROOT_DIR + "/" + CONFIG['reverse_index_path']
@classmethod
def load(cls):
logger.info("Loading Reverse Index")
ri = cls()
with open(ReverseIndex.filename(), 'r') as f:
data = json.loads(f.read())
ri.index = data['index']
ri.count_documents_in_word = data['count_documents_in_word']
ri.words = data['words']
ri.total = data['total']
return ri
def to_s(self):
return json.dumps({
'index': self.index,
'count_documents_in_word': self.count_documents_in_word,
'words': self.words,
'total': self.total
})
|
import ast
import operator
from ast import Constant, Num, Str, Bytes, Ellipsis, NameConstant, copy_location
from typing import Iterable, Optional
from compiler.peephole import safe_multiply, safe_power, safe_mod, safe_lshift
from compiler.visitor import ASTRewriter
def is_const(node):
return isinstance(node, (Constant, Num, Str, Bytes, Ellipsis, NameConstant))
def get_const_value(node):
if isinstance(node, (Constant, NameConstant)):
return node.value
elif isinstance(node, Num):
return node.n
elif isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Ellipsis):
return ...
raise TypeError("Bad constant value")
class Py37Limits:
MAX_INT_SIZE = 128
MAX_COLLECTION_SIZE = 256
MAX_STR_SIZE = 4096
MAX_TOTAL_ITEMS = 1024
UNARY_OPS = {
ast.Invert: operator.invert,
ast.Not: operator.not_,
ast.UAdd: operator.pos,
ast.USub: operator.neg,
}
INVERSE_OPS = {
ast.Is: ast.IsNot,
ast.IsNot: ast.Is,
ast.In: ast.NotIn,
ast.NotIn: ast.In,
}
BIN_OPS = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: lambda l, r: safe_multiply(l, r, Py37Limits),
ast.Div: operator.truediv,
ast.FloorDiv: operator.floordiv,
ast.Mod: lambda l, r: safe_mod(l, r, Py37Limits),
ast.Pow: lambda l, r: safe_power(l, r, Py37Limits),
ast.LShift: lambda l, r: safe_lshift(l, r, Py37Limits),
ast.RShift: operator.rshift,
ast.BitOr: operator.or_,
ast.BitXor: operator.xor,
ast.BitAnd: operator.and_,
}
class AstOptimizer(ASTRewriter):
def __init__(self, optimize = False):
super().__init__()
self.optimize = optimize
def visitUnaryOp(self, node: ast.UnaryOp) -> ast.expr:
op = self.visit(node.operand)
if is_const(op):
conv = UNARY_OPS[type(node.op)]
val = get_const_value(op)
try:
return copy_location(Constant(conv(val)), node)
            except Exception:
                pass
elif (
isinstance(node.op, ast.Not)
and isinstance(node.operand, ast.Compare)
and len(node.operand.ops) == 1
):
cmp_op = node.operand.ops[0]
new_op = INVERSE_OPS.get(type(cmp_op))
if new_op is not None:
return self.update_node(node.operand, ops=[new_op()])
return self.update_node(node, operand=op)
def visitBinOp(self, node: ast.BinOp) -> ast.expr:
l = self.visit(node.left)
r = self.visit(node.right)
if is_const(l) and is_const(r):
handler = BIN_OPS.get(type(node.op))
if handler is not None:
lval = get_const_value(l)
rval = get_const_value(r)
try:
return copy_location(Constant(handler(lval, rval)), node)
                except Exception:
                    pass
return self.update_node(node, left=l, right=r)
def makeConstTuple(self, elts: Iterable[ast.expr]) -> Optional[Constant]:
if all(is_const(elt) for elt in elts):
return Constant(tuple(get_const_value(elt) for elt in elts))
return None
def visitTuple(self, node: ast.Tuple) -> ast.expr:
elts = self.walk_list(node.elts)
if isinstance(node.ctx, ast.Load):
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(res, node)
return self.update_node(node, elts=elts)
def visitSubscript(self, node: ast.Subscript) -> ast.expr:
value = self.visit(node.value)
slice = self.visit(node.slice)
if (
isinstance(node.ctx, ast.Load)
and is_const(value)
and isinstance(slice, ast.Index)
and is_const(slice.value)
):
try:
return copy_location(
Constant(get_const_value(value)[get_const_value(slice.value)]), node
)
            except Exception:
                pass
return self.update_node(node, value=value, slice=slice)
def _visitIter(self, node: ast.expr) -> ast.expr:
if isinstance(node, ast.List):
elts = self.visit(node.elts)
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(res, node)
return self.update_node(node, elts=elts)
elif isinstance(node, ast.Set):
elts = self.visit(node.elts)
res = self.makeConstTuple(elts)
if res is not None:
return copy_location(Constant(frozenset(res.value)), node)
return self.update_node(node, elts=elts)
return self.generic_visit(node)
def visitcomprehension(self, node: ast.comprehension) -> ast.expr:
target = self.visit(node.target)
iter = self.visit(node.iter)
ifs = self.visit(node.ifs)
iter = self._visitIter(iter)
return self.update_node(node, target=target, iter=iter, ifs=ifs)
def visitFor(self, node: ast.For) -> ast.expr:
target = self.visit(node.target)
iter = self.visit(node.iter)
body = self.visit(node.body)
orelse = self.visit(node.orelse)
iter = self._visitIter(iter)
return self.update_node(
node, target=target, iter=iter, body=body, orelse=orelse
)
def visitCompare(self, node: ast.Compare) -> ast.expr:
left = self.visit(node.left)
comparators = self.visit(node.comparators)
if isinstance(node.ops[-1], (ast.In, ast.NotIn)):
new_iter = self._visitIter(comparators[-1])
if new_iter is not None and new_iter is not comparators[-1]:
comparators = list(comparators)
comparators[-1] = new_iter
return self.update_node(node, left=left, comparators=comparators)
def visitName(self, node: ast.Name):
if node.id == "__debug__":
return copy_location(Constant(not self.optimize), node)
return self.generic_visit(node)
|
import logging
import yagmail
from dredge_logger import generateImages
from dredge_logger.config import config
_logger = logging.getLogger(__name__)
def backup_files(filename, extra_csv=False):
"""This function when called will use the filename of the file to send the files to a list of emails"""
_logger.debug("Backing up files with name: " + filename)
if extra_csv:
try:
files = [
config.vars["csv_path"] + "\\" + filename + ".csv",
]
_logger.debug("Sending Email(s) to " + str(config.vars["email_list"]).strip("[]"))
subject = f"{config.vars['dredge_name']} - {filename} - Log Files - CSV_0600"
body = f"The files with the logged information from {config.vars['dredge_name']} on {filename.strip('_0600')}"
send_email(
config.vars["email_list"],
subject,
body,
files,
)
except Exception as e:
_logger.debug("Error sending email")
_logger.debug(e, exc_info=True)
else:
sendImage = True
try:
filenames = generateImages.generateGraph(filename + ".csv")
except Exception as e:
_logger.debug("Error generating graph: " + str(e), exc_info=True)
sendImage = False
finally:
try:
# Email the files to list of receivers
files = [
config.vars["json_path"] + "\\" + filename + ".json",
config.vars["csv_path"] + "\\" + filename + ".csv",
]
if sendImage:
for fn in filenames:
files.append(fn)
_logger.debug("Sending Email(s) to " + str(config.vars["email_list"]).strip("[]"))
subject = f"{config.vars['dredge_name']} - {filename} - Log Files"
body = f"The files with the logged information from {config.vars['dredge_name']} on {filename}"
send_email(
config.vars["email_list"],
subject,
body,
files,
)
except Exception as e:
_logger.debug("Error sending email")
_logger.debug(e, exc_info=True)
def send_email(receivers, subject, body, files):
"""Sends an email with the above parameters"""
yag = yagmail.SMTP(config.vars["email"])
yag.send(
to=receivers,
subject=subject,
contents=body,
attachments=files,
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
import model_utils.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('count', models.PositiveIntegerField(default=0)),
('total', models.PositiveIntegerField(default=0)),
('average', models.DecimalField(decimal_places=3, max_digits=6, default=Decimal('0'))),
('object_id', models.PositiveIntegerField(blank=True, null=True)),
('content_type', models.ForeignKey(blank=True, null=True, to='contenttypes.ContentType', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserRating',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('created', model_utils.fields.AutoCreatedField(verbose_name='created', editable=False, default=django.utils.timezone.now)),
('modified', model_utils.fields.AutoLastModifiedField(verbose_name='modified', editable=False, default=django.utils.timezone.now)),
('ip', models.GenericIPAddressField(blank=True, null=True)),
('score', models.PositiveSmallIntegerField()),
('rating', models.ForeignKey(related_name='user_ratings', to='star_ratings.Rating', on_delete=models.CASCADE)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='userrating',
unique_together=set([('user', 'rating')]),
),
migrations.AlterUniqueTogether(
name='rating',
unique_together=set([('content_type', 'object_id')]),
),
]
|
"""This problem was asked by Facebook.
Given a list of integers L, find the maximum length of a sequence of consecutive
numbers that can be formed using elements from L.
For example, given L = [5, 2, 99, 3, 4, 1, 100], return 5 as we
can build a sequence [1, 2, 3, 4, 5] which has length 5.
"""
|
import util
def my_range(start, stop):
if start < stop:
return range(start+1, stop)
else:
return range(start-1, stop, -1)
class matrix():
def __init__(self, filename : str):
self.lines = util.load_str_lines_list(filename)
self.heigth = len(self.lines)
self.width = len(self.lines[0])
def visible(self, x0 : int, y0 : int, x1 : int, y1 : int) -> bool:
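        # True when no other asteroid ('#') lies exactly on a grid point of
        # the segment between (x0, y0) and (x1, y1).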
if x1 == x0:
for y in my_range (y0, y1):
if self.lines[y][x0] == '#':
return False
return True
k = (y1-y0) / (x1-x0)
for x in my_range(x0, x1):
y = y0 + (x - x0) * k
if y < 0 or y >= self.heigth:
return False
inty = int(round(y, 4))
if inty == round(y, 4):
if self.lines[inty][x] == '#':
return False
return True
def calc(self, i0: int, j0 : int) -> int:
res = 0
for j in range(self.heigth):
line = self.lines[j]
for i in range(self.width):
if line[i] != '#':
continue
if i == i0 and j == j0:
continue
if self.visible(i0, j0, i, j):
res += 1
return res
def findbest(self) -> dict:
maxi = 0
maxj = 0
maxa = 0 # asteroids
for j in range(self.heigth):
line = self.lines[j]
for i in range(self.width):
if line[i] == '#':
res = self.calc(i, j)
if res > maxa:
maxi = i
maxj = j
maxa = res
print(f'Best pos ({maxi}, {maxj}), Asteroids = {maxa}')
return {'x':maxi, 'y': maxj, 'asteroids': maxa}
def test(input_file : str, expected:dict):
m = matrix(input_file)
result = m.findbest()
if result != expected:
print(f"Error, expected={expected}, actual={result}")
else:
print("OK")
test('input_test2.txt', {'x': 11, 'y': 13, 'asteroids': 210})
print("Part 1.")
test('input.txt', {'x': 29, 'y': 28, 'asteroids': 256})
|
from __future__ import print_function
from typing import Sequence, Any, IO
from serversim import *
def print_results(num_users=None, weight1=None, weight2=None, server_range1=None,
server_range2=None, servers=None, grp=None, fi=None):
# type: (int, float, float, Sequence[int], Sequence[int], Sequence[Server], UserGroup, IO[str]) -> None
if fi is None:
import sys
fi = sys.stdout
print("\n\n***** Start Simulation --", num_users, ",", weight1, ",", weight2, ", [", server_range1[0], ",", server_range1[-1] + 1,
") , [", server_range2[0], ",", server_range2[-1] + 1, ") *****", file=fi)
print("Simulation: num_users =", num_users, file=fi)
print("<< ServerExample >>\n", file=fi)
indent = " " * 4
print("\n" + "Servers:", file=fi)
for svr in servers:
print(indent*1 + "Server:", svr.name, file=fi)
print(indent * 2 + "max_concurrency =", svr.max_concurrency, file=fi)
print(indent * 2 + "num_threads =", svr.num_threads, file=fi)
print(indent*2 + "speed =", svr.speed, file=fi)
print(indent * 2 + "avg_process_time =", svr.avg_process_time, file=fi)
print(indent * 2 + "avg_hw_queue_time =", svr.avg_hw_queue_time, file=fi)
print(indent * 2 + "avg_thread_queue_time =", svr.avg_thread_queue_time, file=fi)
print(indent * 2 + "avg_service_time =", svr.avg_service_time, file=fi)
print(indent * 2 + "avg_hw_queue_length =", svr.avg_hw_queue_length, file=fi)
print(indent * 2 + "avg_thread_queue_length =", svr.avg_thread_queue_length, file=fi)
print(indent * 2 + "hw_queue_length =", svr.hw_queue_length, file=fi)
print(indent * 2 + "hw_in_process_count =", svr.hw_in_process_count, file=fi)
print(indent * 2 + "thread_queue_length =", svr.thread_queue_length, file=fi)
print(indent * 2 + "thread_in_use_count =", svr.thread_in_use_count, file=fi)
print(indent*2 + "utilization =", svr.utilization, file=fi)
print(indent*2 + "throughput =", svr.throughput, file=fi)
print(indent*1 + "Group:", grp.name, file=fi)
print(indent*2 + "num_users =", grp.num_users, file=fi)
print(indent*2 + "min_think_time =", grp.min_think_time, file=fi)
print(indent*2 + "max_think_time =", grp.max_think_time, file=fi)
print(indent * 2 + "responded_request_count =", grp.responded_request_count(None), file=fi)
print(indent * 2 + "unresponded_request_count =", grp.unresponded_request_count(None), file=fi)
print(indent * 2 + "avg_response_time =", grp.avg_response_time(), file=fi)
print(indent * 2 + "std_dev_response_time =", grp.std_dev_response_time(None), file=fi)
print(indent*2 + "throughput =", grp.throughput(None), file=fi)
for svc in grp.svcs:
print(indent*2 + svc.svc_name + ":", file=fi)
print(indent * 3 + "responded_request_count =", grp.responded_request_count(svc), file=fi)
print(indent * 3 + "unresponded_request_count =", grp.unresponded_request_count(svc), file=fi)
print(indent * 3 + "avg_response_time =", grp.avg_response_time(svc), file=fi)
print(indent * 3 + "std_dev_response_time =", grp.std_dev_response_time(svc), file=fi)
print(indent*3 + "throughput =", grp.throughput(svc), file=fi)
|
import requests
from bs4 import BeautifulSoup
# pull individual items of websites
URL="https://www.amazon.in/Grand-Theft-Auto-V-PC/dp/B00LSBDSYA/ref=sr_1_5?keywords=pc+games&qid=1562758220&s=gateway&sr=8-5"
# product to check
headers= {"User-Agent": " Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36"}
page=requests.get(URL,headers=headers)
# gets data from website
soup = BeautifulSoup(page.content, "html.parser")
print(soup.prettify())
|
import os
import csv
from flask import flash
def batch_select(csvfile):
# def batch_select(csvfile, user_id, user_name, genome, technology):
"""Opens bed file to load into variants table."""
    to_db = None  # ensure a defined return value if the file is missing, empty, or unreadable
    if os.path.exists(csvfile) and os.path.getsize(csvfile) > 0:
try:
with open(csvfile, 'r') as f:
dr = csv.reader(f, delimiter=',')
# See if csv file has 7 columns:
correct_number_columns = [False]
for row in dr:
if len(row) < 7:
correct_number_columns.append(True)
if True in correct_number_columns:
to_db = "Not 7 columns"
flash(".csv file requires 7 columns: Genome,Alignment Bam, Bai, Ref fasta, Ref build (e.g., hg38), Bed, and Technology (e.g., Illumina).")
else:
with open(csvfile, 'r') as f:
dr = csv.reader(f, delimiter=',')
to_db = [line for line in dr]
# list of dicts:
# to_db = [{
# 'genome':i[0],
# 'alignment':i[1],
# 'alignment_index':i[2],
# 'reference':i[3],
# 'reference_build':i[4],
# 'variants':i[5],
# 'technology':i[6]
# } for i in dr]
except IOError:
flash(".csv file does not exists or is not properly setup.")
return to_db
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model to handle all operations related to PayBC Account data."""
from sqlalchemy import ForeignKey
from .base_model import BaseModel
from .db import db, ma
from .payment_account import PaymentAccount
class InternalPaymentAccount(BaseModel):
"""This class manages all of the base data about Internal Account."""
__tablename__ = 'internal_payment_account'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
corp_number = db.Column(db.String(20), nullable=True)
corp_type_code = db.Column(db.String(10), ForeignKey('corp_type.code'), nullable=True)
account_id = db.Column(db.Integer, ForeignKey('payment_account.id'), nullable=True, index=True)
@classmethod
def find_by_corp_number_and_corp_type_and_account_id(cls, corp_number: str, corp_type: str, account_id):
"""Given a corp_number and corp_type, this will return payment account."""
account = None
if corp_number and corp_type:
query = cls.query.filter_by(corp_number=corp_number). \
filter_by(corp_type_code=corp_type). \
join(PaymentAccount).filter(PaymentAccount.auth_account_id == account_id)
account = query.one_or_none()
return account
class InternalPaymentAccountSchema(ma.ModelSchema): # pylint: disable=too-many-ancestors
"""Main schema used to serialize the Internal Payment System Account."""
class Meta: # pylint: disable=too-few-public-methods
"""Returns all the fields from the SQLAlchemy class."""
model = InternalPaymentAccount
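# Usage sketch (the identifiers below are illustrative assumptions; requires an
# active application/database context):
# account = InternalPaymentAccount.find_by_corp_number_and_corp_type_and_account_id('CP0001234', 'CP', 10)
# data = InternalPaymentAccountSchema().dump(account)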
|
import os
import sys
sys.path.append(os.path.abspath(r"C:\Users\power\Desktop\Project\Dev\UserDict4Papago"))
import re
from pprint import pprint
import cProfile
import MeCab
from lib.util import *
from lib.convert_dict import ConvertDictionary
from lib.papagopy.papagopy import Papagopy
def main():
    # Need a separator character that Papago does not strip out.. -> seperator.txt
    sep = '▒▒▒'
    sep_nl = '∮'  # stands in for '\n', which the morphological analyzer would otherwise drop
p = Papagopy()
c = ConvertDictionary()
dictList = c.convert()
t = MeCab.Tagger()
rf = ReadFile('./example_text/t.txt')
s = rf.replace('\r', '').replace('\n', sep_nl).replace(' ', '')
a = t.parse(s).split()[:-1]
surface = a[0::2]
pos = a[1::2]
b = [(surface[i], i) for i, p in enumerate(pos) if ('固有名詞' in p) and (surface[i] in dictList)]
for sur, idx in b:
surface[idx] = f'{sep}{sur}{sep}'
pre = ''.join(surface).replace(f'{sep}{sep}', f'{sep} {sep}').replace(sep_nl, '\n KDR ')
trans = p.translate(pre, 'ko', 'ja')
post = trans.replace(f'{sep} ', f'{sep}').replace(f'{sep}{sep}', f'{sep} {sep}')
c = 0
for i, j in zip(re.findall(f'{sep}.*?{sep}', post), b):
print(f'{c} :::', i, ' -> ', dictList[j[0]])
post = post.replace(i, f'{dictList[j[0]]}')
c+=1
    WriteFile(pre, 't-pre.txt')      # preprocessed text
    WriteFile(trans, 't-trans.txt')  # Papago translation
    WriteFile(post, 't-post.txt')    # postprocessed text
if __name__ == '__main__':
main()
# import cProfile
# cProfile.run(fr"main()")
|
import numpy as np
import matplotlib.pyplot as plt


def mostrar_semillas(imagenC, im_O, im_B):
    """Overlay the object seeds (red channel) and background seeds (blue channel) on a color image."""
imagenS = np.zeros(imagenC.shape)
imagenS[:,:,0] = np.maximum(imagenC[:,:,0], im_O*255)
imagenS[:,:,1] = imagenC[:,:,1]
imagenS[:,:,2] = np.maximum(imagenC[:,:,2], im_B*255)
plt.figure(figsize=(7,7))
plt.imshow(imagenS.astype(int))
plt.show()
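# Minimal usage sketch with synthetic data (shapes and seed positions are assumptions):
# a random color image plus binary object/background seed masks.
if __name__ == '__main__':
    imagenC = np.random.randint(0, 256, (64, 64, 3))
    im_O = np.zeros((64, 64))
    im_O[10:20, 10:20] = 1
    im_B = np.zeros((64, 64))
    im_B[40:50, 40:50] = 1
    mostrar_semillas(imagenC, im_O, im_B)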
|
from tkinter import *
from tkinter.ttk import *
class FrameView(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.init_ui()
def init_ui(self):
self.parent.title("Modifying rows")
self.pack(fill = BOTH, expand = True)
self.rowconfigure(1, weight = 1)
self.columnconfigure(1, weight = 1)
self.columnconfigure(3, weight = 1)
label1 = Label(self, text = "Item:")
label1.grid(row = 0, column = 0, padx = 5, sticky = W)
item_entry = Entry(self)
item_entry.grid(row=0, column=1, sticky=W+E, padx=5)
label1 = Label(self, text="Quantity:")
label1.grid(row=0, column=2)
quant_entry = Entry(self)
quant_entry.grid(row=0, column=3, sticky=W+E, padx=5)
insert_button = Button(self, text="Insert",
command=lambda: self.insert_item(item_entry , quant_entry))
insert_button.grid(row=0, column=4, pady=5, padx=5)
self.tree = Treeview(self, columns=("quant"), selectmode=EXTENDED)
self.tree.heading("#0", text="Item")
self.tree.heading("#1", text="Quantity")
self.tree.column(0, anchor=E)
self.tree.grid(row=1, column=0, columnspan=5, padx=5, sticky=E+W+N+S)
remove_button = Button(self, text="Remove", command=self.remove_item)
remove_button.grid(row=2, column=2, padx=5, pady=5, sticky=W)
self.pack()
def remove_item(self):
iids = self.tree.selection()
for iid in iids:
self.tree.delete(iid)
def insert_item(self, item_entry , quant_entry):
val1 = item_entry.get()
val2 = quant_entry.get()
        if (len(val1.strip()) == 0):
            return
        if (len(val2.strip()) == 0):
            return
item_entry.delete(0, END)
quant_entry.delete(0, END)
self.tree.insert("", index=END, text=val1, values=(val2 ,))
def main():
root = Tk()
fv = FrameView(root)
root.geometry("+400+400")
root.mainloop()
if __name__ == '__main__':
main()
|
from unittest import TestCase
from fathomnet import models
from fathomnet.api import geoimages
class TestGeoImagesAPI(TestCase):
def test_find_all(self):
n_images = 5
pageable = models.Pageable(size=n_images)
results = geoimages.find_all(pageable)
self.assertIsNotNone(results)
self.assertEqual(len(results), n_images)
def test_count(self):
geo_image_constraints = models.GeoImageConstraints(
concept='Bathochordaeus',
limit=10
)
count = geoimages.count(geo_image_constraints)
self.assertIsNotNone(count)
self.assertGreater(count.count, 0)
def test_find(self):
geo_image_constraints = models.GeoImageConstraints(
concept='Bathochordaeus',
limit=10
)
results = geoimages.find(geo_image_constraints)
self.assertIsNotNone(results)
self.assertGreater(len(results), 0)
def test_find_by_image_set_upload_uuid(self):
image_set_upload_uuid = '9c891f7a-976b-4376-acf9-31681e1b3a15'
results = geoimages.find_by_image_set_upload_uuid(image_set_upload_uuid)
self.assertIsNotNone(results)
self.assertGreater(len(results), 0)
|
import os.path
import pickle
import typing as tp
from satella.coding import Monitor
from satella.coding.typing import Number
from smok.exceptions import NotReadedError, OperationFailedError
from smok.pathpoint import PathpointValueType, ValueOrExcept
from .in_memory import InMemoryPathpointDatabase
class PicklingPathpointDatabase(InMemoryPathpointDatabase):
"""
    An example pathpoint database that persists its data on disk.
    It persists its store on every :meth:`~smokclient.extras.BasePathpointDatabase.checkpoint` call.
:param path: path to file containing pickled data.
"""
def __init__(self, path: str):
super().__init__()
self.__path = path
self.last_pathpoint_value = {}
if os.path.exists(path):
try:
with open(path, 'rb') as f_in:
self.__pathpoints, self.last_pathpoint_value = pickle.load(f_in)
except pickle.PickleError:
pass
def on_new_data(self, pathpoint: str, timestamp: Number,
value_or_exception: ValueOrExcept) -> None:
self.last_pathpoint_value[pathpoint] = timestamp, value_or_exception
super().on_new_data(pathpoint, timestamp, value_or_exception)
def get_current_value(self, pathpoint: str) -> tp.Tuple[Number, PathpointValueType]:
if pathpoint not in self.last_pathpoint_value:
raise NotReadedError()
val = self.last_pathpoint_value[pathpoint]
if isinstance(val[1], OperationFailedError):
raise val[1]
return val
@Monitor.synchronized
def checkpoint(self) -> None:
with open(self.__path, 'wb') as f_out:
pickle.dump((self.__pathpoints, self.last_pathpoint_value), f_out)
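# Usage sketch (the path, pathpoint name and values are illustrative assumptions):
# db = PicklingPathpointDatabase('/tmp/pathpoints.pkl')
# db.on_new_data('W1', 1000.0, 21.5)
# db.checkpoint()                     # persists the store to disk
# print(db.get_current_value('W1'))   # -> (1000.0, 21.5)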
|
import os
import requests
from pymongo import MongoClient
from coin.models import Coin
from multiprocessing import Pool
from coinds.cassandra.coins import Coin as CassandraCoin
BINAN_API_KEY=os.environ.get("BINAN_API_KEY")
BINAN_SECRET_KEY=os.environ.get("BINAN_SECRET_KEY")
BINAN_BASE_URL=os.environ.get("BINAN_BASE_URL")
MONGO_URL=os.environ.get("MONGO_URL")
mongo = MongoClient(MONGO_URL)
from datetime import datetime
class BinancePoll(Coin):
btc_price = 0
maincoins=dict()
day = datetime.now().strftime('%m-%d-%Y')
time = datetime.now()
def write_prices(self):
return self.update_all_coins()
def find_btc(self, all_coins):
for coin in all_coins:
if coin['symbol'] == 'BTCUSDT':
return float(coin['price'])
raise ValueError('Bitcoin USD pair not found.')
def write_basepairs(self, coin_obj):
if 'USDT' in coin_obj['symbol']:
try:
basepair=Coin.objects.get(pair=coin_obj['symbol'])
except:
basepair=Coin(pair=coin_obj['symbol'])
print("NEW COIN: {0}".format(coin_obj['symbol']))
print("@" * 100)
ticker = coin_obj['symbol'].split('USDT')[0]
price = float(coin_obj['price'])
btc_price = round((price / self.btc_price), 8)
basepair.ticker = ticker
basepair.name = ticker
basepair.price = price
basepair.btc_price = btc_price
basepair.save()
            # This Cassandra call doesn't belong here; it's a temporary hack to get more than one row in so the frontend graph can be tested.
CassandraCoin.create(day=self.day, name=ticker, ticker=ticker, pair=coin_obj['symbol'], icon_url="None", price=price, btc_price=btc_price, source="binance", created_at=self.time)
return ticker, price
else:
return False
def write_tradepairs(self, coin_obj):
pair = coin_obj['symbol']
if 'USDT' in pair:
return False
else:
for maincoin in self.maincoins.keys():
if maincoin in pair and pair.index(maincoin) > 0:
cutoff = pair.index(maincoin)
ticker = pair[0:cutoff]
in_terms_of = pair[cutoff:]
price = float(self.maincoins[in_terms_of]) * float(coin_obj['price'])
btc_price = round((price / self.btc_price), 8)
try:
cryptopair = Coin.objects.get(pair=pair)
except:
cryptopair = Coin(pair=pair)
cryptopair.ticker = ticker
cryptopair.name = ticker
cryptopair.price = price
cryptopair.btc_price = btc_price
cryptopair.save()
                    # This Cassandra call doesn't belong here; it's a temporary hack to get more than one row in so the frontend graph can be tested.
CassandraCoin.create(day=self.day, name=ticker, ticker=ticker, pair=pair, icon_url="None", price=price, btc_price=btc_price, source="binance", created_at=self.time)
print(price, ticker, coin_obj['price'], in_terms_of)
print("Coin {0} saved successfully.".format(ticker))
print("*" * 100)
else:
continue
def update_all_coins(self):
all_coins = requests.get(BINAN_BASE_URL + "/ticker/allPrices").json()
self.btc_price = self.find_btc(all_coins)
#BASE COINS: BTC, ETH, LTC, NEO, BNB
for coin in all_coins:
            result = self.write_basepairs(coin)
            if result:
                self.maincoins[result[0]] = result[1]
##TRADING pairs
for coin in all_coins:
self.write_tradepairs(coin)
return all_coins, len(all_coins)
def update_coins(self):
r = requests.get(BINAN_BASE_URL + "/ticker/allPrices")
all_coins = r.json()
for coin in all_coins:
if 'USDT' in coin['symbol']:
# Coin.objects.find_or_create()
print (coin['symbol'].split('USDT')[0], float(coin['price']))
else:
print (coin['symbol'], coin['price'])
continue
return "finished"
|
class WikiCache(object):
def __init__(self):
self.cache = {}
def __contains__(self, key):
return key in self.cache
def __getitem__(self, key):
return self.cache[key]
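# Minimal usage sketch (assumption: entries are written straight into .cache,
# since the class only defines read access):
if __name__ == '__main__':
    wiki = WikiCache()
    wiki.cache['Python'] = 'https://en.wikipedia.org/wiki/Python_(programming_language)'
    print('Python' in wiki, wiki['Python'])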
|
import asyncio
import statistics
import Battle_Utils
import Config
import math
import discord
import datetime
from discord.ext import commands
import Utils
import random
import time
def get_numbers(number_string):
final_string = ""
for character in number_string:
try:
int(character)
final_string += character
except:
continue
return int(final_string)
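# e.g. get_numbers('<:chest:671574326364995595>') -> 671574326364995595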
class Bosses(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.battling_users = []
self.waiting_users = []
self.active_channels = []
self.bosses = 0
async def drops(self, message):
type = random.choice(['ruby', 'coin', 'chest'])
if type == 'ruby':
amount = random.randint(1, 3)
embed = discord.Embed(color=Config.MAINCOLOR, title="Rubies",
description="There are " + str(amount) + " " + Config.EMOJI[
'ruby'] + " on the ground. React first to pick them up!")
ruby = self.bot.get_emoji(676177832963211284)
msg = await message.channel.send(embed=embed)
def check(reaction, user):
return reaction.message.id == msg.id and reaction.me and Utils.get_account(user.id) is not None
await msg.add_reaction(ruby)
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=120, check=check)
except asyncio.TimeoutError:
await msg.delete()
return
Config.USERS.update_one({'user_id': user.id}, {'$inc': {'rubies': amount}})
await msg.edit(embed=discord.Embed(color=Config.MAINCOLOR, title="Rubies picked up by " + user.name,
description=user.name + " has picked up the " + str(amount) + " " +
Config.EMOJI['ruby'] + " rubies"))
elif type == 'coin':
amount = random.randint(5, 10)
embed = discord.Embed(color=Config.MAINCOLOR, title="Coins",
description="There are " + str(amount) + " " + Config.EMOJI[
'coin'] + " on the ground. React first to pick them up!")
emoji = self.bot.get_emoji(676181520062349322)
msg = await message.channel.send(embed=embed)
def check(reaction, user):
return reaction.message.id == msg.id and reaction.me and Utils.get_account(user.id) is not None
await msg.add_reaction(emoji)
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=120, check=check)
except asyncio.TimeoutError:
await msg.delete()
return
Config.USERS.update_one({'user_id': user.id}, {'$inc': {'coins': amount}})
await msg.edit(embed=discord.Embed(color=Config.MAINCOLOR, title="Coins picked up by " + user.name,
description=user.name + " has picked up the " + str(amount) + " " +
Config.EMOJI['coin'] + " coins"))
elif type == 'xp':
amount = random.randint(20, 50)
embed = discord.Embed(color=Config.MAINCOLOR, title="XP",
description="There is " + str(amount) + " " + Config.EMOJI[
'xp'] + " on the ground. React first to pick it up!")
emoji = self.bot.get_emoji(730357877310488636)
msg = await message.channel.send(embed=embed)
def check(reaction, user):
return reaction.message.id == msg.id and reaction.me and Utils.get_account(user.id) is not None
await msg.add_reaction(emoji)
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=120, check=check)
except asyncio.TimeoutError:
await msg.delete()
return
Config.USERS.update_one({'user_id': user.id}, {'$inc': {'xp': amount}})
await msg.edit(embed=discord.Embed(color=Config.MAINCOLOR, title="XP picked up by " + user.name,
description=user.name + " has picked up " + str(amount) + " " +
Config.EMOJI['xp'] + " XP"))
elif type == 'chest':
amount = 1
embed = discord.Embed(color=Config.MAINCOLOR, title="Chest", description="There is a " + Config.EMOJI[
'chest'] + " on the ground. React first to pick it up!")
emoji = self.bot.get_emoji(671574326364995595)
msg = await message.channel.send(embed=embed)
def check(reaction, user):
return reaction.message.id == msg.id and reaction.me and Utils.get_account(user.id) is not None
await msg.add_reaction(emoji)
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=120, check=check)
except asyncio.TimeoutError:
await msg.delete()
return
Config.USERS.update_one({'user_id': user.id}, {'$inc': {'chests': amount}})
await msg.edit(embed=discord.Embed(color=Config.MAINCOLOR, title="Chest picked up by " + user.name,
description=user.name + " has picked up the " + Config.EMOJI[
'chest'] + " Chest"))
elif type == 'item':
item = random.choice(list(Config.ITEMS.find({'cost': {'$lt': 6000}})))
item['level'] = 1
embed = discord.Embed(color=Config.MAINCOLOR, title="Item",
description="There is a " + item['emoji'] + " **" + item[
'name'] + "** on the ground. React first to pick it up!")
emoji = self.bot.get_emoji(get_numbers(item['emoji']))
msg = await message.channel.send(embed=embed)
def check(reaction, user):
return reaction.message.id == msg.id and reaction.me and Utils.get_account(user.id) is not None
did_pickup = False
await msg.add_reaction(emoji)
while not did_pickup:
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=120, check=check)
except asyncio.TimeoutError:
await msg.delete()
return
user_account = Utils.get_account(user.id)
if user_account is not None:
for i in user_account['inventory']:
if i['name'] == item['name']:
await reaction.remove(user)
a_msg = await message.channel.send(user.mention + " You cannot collect an item you already have!")
await a_msg.delete(delay=20)
continue
Config.USERS.update_one({'user_id': user.id}, {'$push': {'inventory': item}})
await msg.edit(embed=discord.Embed(color=Config.MAINCOLOR,
title=item['emoji'] + " " + item['name'] + " picked up by " + user.name,
description=user.name + " has picked up the " + item['emoji'] + " **" +
item['name'] + "**"))
return
@commands.command()
async def force_drop(self, ctx):
if ctx.author.id not in Config.OWNERIDS:
return
await self.drops(ctx.message)
def change_turn(self, turn, max, monster):
if monster["turn"]:
turn += 1
if turn >= max:
turn = 0
monster["turn"] = False
else:
monster["turn"] = True
return turn
async def construct_embeds(self, match, turn, message, monster):
title = "Boss fight against " + monster['name']
embed = Battle_Utils.construct_boss_embed(match, turn, monster, title)
await message.edit(embed=embed)
async def construct_embeds_with_message(self, message, monster, turn, match, text):
title = "Boss fight against " + monster['name']
embed = Battle_Utils.construct_boss_embed_with_message(match, turn, monster, title, text)
await message.edit(embed=embed)
async def boss_thread(self, match, message, monster):
Config.LOGGING.info("Boss thread started: Current threads: " + str(self.bosses))
match_cache = match.copy()
await message.clear_reactions()
monster['health'] = monster['stats']['health']
monster['mana'] = monster['stats']['endurance']
embed = discord.Embed(title="Boss found", color=Config.MAINCOLOR,
description="[jump](" + message.jump_url + ")")
one_message = await message.channel.send(", ".join(x['user'].mention for x in match), embed=embed)
await one_message.delete(delay=10)
monster['effects'] = []
monster["turn"] = False
for user in match:
self.battling_users.append({"id": user['user'].id, "time": time.time()})
user['health'] = user['account']['stats']['health']
user['mana'] = user['account']['stats']['endurance']
user['effects'] = []
user['afk'] = 0
turn = random.randint(0, len(match) - 1)
if len(match) == 1:
if match[0]['account']['slots'][0] is not None:
await message.add_reaction("1️⃣")
if match[0]['account']['slots'][1] is not None:
await message.add_reaction("2️⃣")
if match[0]['account']['slots'][2] is not None:
await message.add_reaction("3️⃣")
if match[0]['account']['slots'][3] is not None:
await message.add_reaction("4️⃣")
if len(match[0]['account']['slots']) >= 5:
if match[0]['account']['slots'][4] is not None:
await message.add_reaction("🔆")
else:
await message.add_reaction("1️⃣")
await message.add_reaction("2️⃣")
await message.add_reaction("3️⃣")
await message.add_reaction("4️⃣")
await message.add_reaction("🔆")
await message.add_reaction("💤")
await message.add_reaction("🏳️")
a_turn = False
while len(match) > 0 and monster['health'] > 0 and monster['mana'] > 0:
restart = False
for user in match:
if user['health'] <= 0 or user['mana'] <= 0 or user['afk'] > 2:
match.remove(user)
turn -= 1
restart = True
if turn < 0:
turn = 0
if restart:
continue
# calculate effects for beginning of round
for _ in match:
effects_remove = []
for effect in _['effects']:
_[effect['type']] -= effect['amount']
_[effect['type']] = round(_[effect['type']], 1)
effect['turns'] -= 1
if effect['turns'] < 1:
effects_remove.append(effect)
for effect in effects_remove:
_['effects'].remove(effect)
# restart if needed after effects applied
restart = False
for user in match:
if user['health'] <= 0 or user['mana'] <= 0 or user['afk'] > 2:
if turn >= match.index(user):
turn -= 1
match.remove(user)
restart = True
if restart:
continue
# effects for monster
effects_remove = []
for effect in monster['effects']:
monster[effect['type']] -= effect['amount']
monster[effect['type']] = round(monster[effect['type']], 1)
effect['turns'] -= 1
if effect['turns'] < 1:
effects_remove.append(effect)
for effect in effects_remove:
monster['effects'].remove(effect)
if not monster["turn"]:
resource = 'mana'
resource_number = 3
if match[turn]['ability'] is not None:
if match[turn]['ability'] == "Healing Blood":
resource = 'health'
resource_number = 5
elif match[turn]['ability'] == "Inner Light":
resource_number = 6
if a_turn is True:
resource_number = 0
a_turn = False
# Check if the user is stunned
elif match[turn]['stunned']:
resource_number = 0
match[turn][resource] += resource_number
else:
abilities = []
for user in match:
abilities.append(user["ability"])
if "Stagnation" not in abilities and not monster['stunned']:
monster['mana'] += 8
elif "Stagnation" in abilities and not monster['stunned']:
monster['mana'] += 4
if monster['health'] <= 0 or monster['mana'] <= 0:
break
# Make sure player/boss stats are all fine
Battle_Utils.match_check(match, monster)
await self.construct_embeds(match, turn, message, monster)
if monster['stunned']:
monster['stunned'] = False
await asyncio.sleep(3)
turn = self.change_turn(turn, len(match_cache), monster)
continue
# check if monster's turn
if monster["turn"]: # turn == len(match):
# simulate monster thinking lol
if len(match_cache) == 1:
if match_cache[0]["account"]["battles"]["bosses"] < 3:
await asyncio.sleep(5)
else:
await asyncio.sleep(3)
else:
await asyncio.sleep(3)
spell = Battle_Utils.pick_spell(monster)
if spell is not None:
victim = random.randint(0, len(match) - 1)
monster, match[victim], text = Battle_Utils.spell_effect(spell, monster, match[victim], True)
await self.construct_embeds_with_message(message, monster, turn, match, text)
if match[victim]['ability'] is not None:
if match[victim]["ability"] == "Glass Armor":
ability = Utils.get_ability(match[victim]['account']['slots'][4])
match[victim]["ability"] = "Glass Armor Done"
match[victim]['account']['stats']['defense'] -= ability['effect']
match, monster = Battle_Utils.match_check(match, monster)
turn = self.change_turn(turn, len(match), monster)
if len(match_cache) == 1:
if match_cache[0]["account"]["battles"]["bosses"] < 3:
await asyncio.sleep(5)
else:
await asyncio.sleep(3)
else:
await asyncio.sleep(3)
for user in match:
if user['health'] <= 0 or user['mana'] <= 0 or user['afk'] > 2:
match.remove(user)
turn -= 1
continue
try:
# Check if the user is stunned
if match[turn]['stunned']:
match[turn]['stunned'] = False
await asyncio.sleep(3)
turn = self.change_turn(turn, len(match_cache), monster)
continue
reaction_dict = {'1️⃣': 0, '2️⃣': 1, '3️⃣': 2, '4️⃣': 3, '🔆': 4}
def check(payload):
if payload.user_id == match[turn]['user'].id and payload.message_id == message.id:
if str(payload.emoji) in reaction_dict.keys():
return match[turn]['account']['slots'][reaction_dict[str(payload.emoji)]] is not None
else:
return True
else:
return False
temp_msg = await message.channel.fetch_message(message.id)
reaction = None
for temp_reaction in temp_msg.reactions:
users = await temp_reaction.users().flatten()
if match[turn]['user'].id in [x.id for x in users] and temp_reaction.me:
reaction = temp_reaction
try:
await temp_reaction.remove(match[turn]['user'])
except:
await Config.LOGGING.error("Cannot remove emoji (not big deal)")
if reaction is None:
payload = await self.bot.wait_for('raw_reaction_add', timeout=30.0, check=check)
reaction = payload.emoji
try:
await message.remove_reaction(payload.emoji, match[turn]['user'])
except:
await Config.LOGGING.error("Cannot remove emoji (not big deal)")
if str(reaction) == "💤":
turn = self.change_turn(turn, len(match), monster)
continue
elif str(reaction) == "🏳️":
match[turn]['health'] = 0
match[turn]['mana'] = 0
turn = self.change_turn(turn, len(match), monster)
for user in match:
if user['health'] <= 0 or user['mana'] <= 0 or user['afk'] > 2:
match.remove(user)
turn -= 1
continue
elif str(reaction) == "🔆" and match[turn]["ability"] is not None:
a_turn = True
elif str(reaction) == "🔆" and match[turn]["ability"] is None:
ability = Utils.get_ability(match[turn]['account']['slots'][4])
match, text, monster = Battle_Utils.ability_effect(ability, match, turn, monster)
await self.construct_embeds_with_message(message, monster, turn, match, text)
# Only change turn if it's supposed to
if ability["name"] not in ["Amplify"]:
turn = self.change_turn(turn, len(match), monster)
if len(match_cache) == 1:
if match_cache[0]["account"]["battles"]["bosses"] < 3:
await asyncio.sleep(5)
else:
await asyncio.sleep(3)
else:
await asyncio.sleep(3)
continue
elif str(reaction) in ['1️⃣', '2️⃣', '3️⃣', '4️⃣']:
spell = Utils.get_spell(match[turn]['account']['class'], match[turn]['account']['slots'][reaction_dict[str(reaction)]])
match[turn], monster, text = Battle_Utils.spell_effect(spell, match[turn], monster, True)
await self.construct_embeds_with_message(message, monster, turn, match, text)
# Remove amplify effect
if match[turn]["ability"] == "Amplify":
ability = Utils.get_ability(match[turn]['account']['slots'][4])
match[turn]["ability"] = "Amplify Done"
match[turn]['account']['stats']['strength'] -= ability['effect']
match = Battle_Utils.match_check(match)
turn = self.change_turn(turn, len(match), monster)
if len(match_cache) == 1:
if match_cache[0]["account"]["battles"]["bosses"] < 3:
await asyncio.sleep(5)
else:
await asyncio.sleep(3)
else:
await asyncio.sleep(3)
for user in match:
if user['health'] <= 0 or user['mana'] <= 0 or user['afk'] > 2:
match.remove(user)
turn -= 1
continue
except Exception as e:
if isinstance(e, asyncio.TimeoutError) and turn != len(match):
embed = discord.Embed(title="AFK WARNING", color=Config.MAINCOLOR,
description="Your boss fight is still going! You lost this turn because you took over 30 seconds to choose a spell.\n\n[Click to go to fight](" + message.jump_url + ")")
timeout_msg = await message.channel.send(match[turn]['user'].mention, embed=embed)
await timeout_msg.delete(delay=20)
match[turn]['afk'] += 1
for user in match:
if user['health'] <= 0 or user['mana'] <= 0 or user['afk'] > 2:
match.remove(user)
turn -= 1
turn = self.change_turn(turn, len(match), monster)
continue
elif isinstance(e, discord.errors.NotFound):
return
else:
match[turn]['mana'] -= 3
try:
await message.clear_reactions()
except:
await Config.LOGGING.error("Cannot remove emoji (not big deal)")
for player in match_cache:
broken_items = Utils.decrease_durability(player['account']['user_id'])
if len(broken_items) > 0:
embed = discord.Embed(title="Broken Tools", description=player['user'].mention + "! Your " + " and ".join([x['name'] for x in broken_items]) + " broke!", color=Config.MAINCOLOR)
await message.channel.send(content=player['user'].mention, embed=embed)
if monster['health'] > 0 and monster['mana'] > 0:
embed = discord.Embed(color = Config.MAINCOLOR, description="**"+monster['name']+" Has bested the group...**")
await message.edit(embed=embed)
else:
if len(match_cache) == 1:
if match_cache[0]["account"]["battles"]["bosses"] < 3:
if match_cache[0]["account"]["battles"]["bosses"] == 0:
desc = "<:1_:786197490860818432><:0_:786197490466160641><:0_:786197490466160641>\nNext boss is going to be even stronger, but you'll get more rewards!"
amount = 10
coins_amount = 20
elif match_cache[0]["account"]["battles"]["bosses"] == 1:
desc = "<:1_:786197490860818432><:1_:786197490860818432><:0_:786197490466160641>\nNext boss is going to be even stronger, but you'll get more rewards!"
amount = 20
coins_amount = 40
else:
desc = "<:1_:786197490860818432><:1_:786197490860818432><:1_:786197490860818432>\nGood job, now it's time for the big boi leagues. From now on you can summon bosses where others can join as well. This is the end of the dummy bosses but you can always get more info on the wiki, tutorial or help command. Or join our Discord server for more guidance! **Good luck out there champion!**\n\n"
amount = 50
coins_amount = 50
embed = discord.Embed(
title="Dummy bot defeat!",
description="**GOOD JOB!** "
+ "You did it, you beat one of your first bosses! "
+ "<:E:730362458547421256> Now it's time to get your loot, you got:\n"
+ "+" + str(amount) + " "+ Config.EMOJI['key'] + "\n+" + str(coins_amount) + " " + Config.EMOJI['coin'] + "\n\n"
+ "You're getting the hang of it <:L:730356470905831434>\n"
+ "__Training bosses:__ " + desc
+ "<:E:730362457541050478><:E:730362455716397127> If you want to continue, you can summon another boss or if you want more info you can check the wiki command!",
color = Config.OK
)
embed.set_thumbnail(url="https://media.discordapp.net/attachments/736320244649295894/786213274386694194/SPELL_Damage.png?width=450&height=430")
await message.edit(embed=embed)
for user in match_cache:
user['account'] = Utils.get_account(user['user'].id)
user['account']['keys'] += amount
while user['account']['keys'] > 9:
user['account']['keys'] -= 10
user['account']['chests'] += 1
Config.USERS.update_one({'user_id': user['user'].id}, {'$set': {'chests': user['account']['chests'], 'keys': user['account']['keys']}, '$inc': {'coins': coins_amount, "battles.bosses": 1}})
users = []
for user in match_cache:
users.append(user["user"].id)
i = 0
while i != len(self.battling_users):
if self.battling_users[i]["id"] in users:
self.battling_users.pop(i)
else:
i += 1
if message.channel.id in self.active_channels:
self.active_channels.remove(message.channel.id)
self.bosses -= 1
return
if not monster['titan']:
amount = random.randint(math.floor(0.3 * len(match_cache)) * 2 + 3, math.floor(0.3 * len(match_cache)) * 2 + 6)
coins_amount = random.randint(len(match_cache) * 3, (len(match_cache) * 4) + 1)
else:
amount = random.randint(math.floor(0.5 * len(match_cache)) * 2 + 5, math.floor(0.5 * len(match_cache)) * 2 + 9)
coins_amount = random.randint(len(match_cache) * 4, (len(match_cache) * 5) + 1)
mystring = str(amount) + " "+ Config.EMOJI['key'] + "\n+" + str(coins_amount) + " " + Config.EMOJI['coin']
for user in match_cache:
user['account'] = Utils.get_account(user['user'].id)
user['account']['keys'] += amount
while user['account']['keys'] > 9:
user['account']['keys'] -= 10
user['account']['chests'] += 1
Config.USERS.update_one({'user_id': user['user'].id}, {'$set': {'chests': user['account']['chests'], 'keys': user['account']['keys']}, '$inc': {'coins': coins_amount}})
if monster['health'] <= 0:
embed = discord.Embed(color = Config.MAINCOLOR, description="**Congratulations! "+monster['name']+" has been killed!**\n\nEveryone gets:\n\n+" + mystring)
elif monster['mana'] <= 0:
embed = discord.Embed(color = Config.MAINCOLOR, description="**Congratulations! "+monster['name']+" has fainted!**\n\nEveryone gets:\n\n+" + mystring)
else:
embed = discord.Embed(color=Config.MAINCOLOR, description="**Congratulations! " + monster['name'] + " has been destroyed completely!**\n\nEveryone gets:\n\n+ " + mystring)
await message.edit(embed=embed)
users = []
for user in match_cache:
users.append(user["user"].id)
i = 0
while i != len(self.battling_users):
if self.battling_users[i]["id"] in users:
self.battling_users.pop(i)
else:
i += 1
if message.channel.id in self.active_channels:
self.active_channels.remove(message.channel.id)
self.bosses -= 1
@commands.command()
@commands.bot_has_permissions(add_reactions=True, manage_messages=True, send_messages=True, external_emojis=True)
async def boss(self, ctx):
msg, account = await Utils.get_account_lazy(self.bot, ctx, ctx.author.id)
if account is None:
return
if not Config.OPEN_QUEUES:
embed = discord.Embed(color=Config.ERRORCOLOR, title="Enchanted Maintenance",
description="Queuing is disabled at the moment. Enchanted is under Maintenance.")
if msg is None:
msg = await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
if ctx.author.id in self.waiting_users:
embed=discord.Embed(color=Config.MAINCOLOR, title="Error Finding Boss", description="You are already searching for a boss. Please finish that battle first.")
if msg is None:
await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
if ctx.channel.id in self.active_channels:
embed=discord.Embed(color=Config.MAINCOLOR, title="Error Finding Boss", description="You are already battling a boss in this channel. Please finish that battle first.")
if msg is None:
await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
i = 0
while i != len(self.battling_users):
if self.battling_users[i]["id"] == ctx.author.id:
if (self.battling_users[i]["time"]+600) > time.time():
embed=discord.Embed(color=Config.MAINCOLOR, title="Error entering Queue", description="You are already battling a boss. Please finish that battle first.")
if msg is None:
msg = await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
return
else:
self.battling_users.pop(i)
continue
else:
i += 1
if account["battles"]["bosses"] > 2:
user_names = []
user_ids = []
quote = "*\""+Battle_Utils.quotes()+"\"*"
self.waiting_users.append(ctx.author.id)
self.active_channels.append(ctx.channel.id)
user_ids.append(ctx.author.id)
user_names.append(ctx.author.name)
users_names = ""
for user_n in user_names:
users_names += user_n+"\n"
embed=discord.Embed(color=Config.MAINCOLOR, title=ctx.author.name + " Is searching for a boss<a:dots:715134569355018284>", description=f"The battle will begin in 1 minute. React to join.\n⚔️ **Players ({str(len(user_ids))}/10):**\n{users_names}\n{quote}", timestamp=datetime.datetime.utcnow() + datetime.timedelta(minutes=1))
embed.set_footer(text='React with the ✔️ to join | starting at ')
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/736320366116470815/779302235427438602/fire_1f525.png")
if msg is None:
msg = await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
await msg.add_reaction("✔️")
await msg.add_reaction("❌")
await msg.add_reaction("⏩")
countdown = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
def check(reaction, user):
return user.id != self.bot.user.id and reaction.message.id == msg.id
while datetime.datetime.utcnow() < countdown:
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=10, check=check)
if str(reaction) == "⏩" and user.id == ctx.author.id:
break
elif str(reaction) == "❌" and user.id == ctx.author.id:
await msg.clear_reactions()
await msg.edit(embed=discord.Embed(title="Boss Search canceled", color = Config.MAINCOLOR, description=ctx.author.name + " has disbanded the search..."))
if ctx.channel.id in self.active_channels:
self.active_channels.remove(ctx.channel.id)
for u in user_ids:
if u in self.waiting_users:
self.waiting_users.remove(u)
return
elif Utils.get_account(user.id) is None:
await reaction.remove(user)
error_msg = await ctx.send(embed=discord.Embed(title="You don't have an account", color = Config.MAINCOLOR, description="Type `]profile` to choose a class and react again to join the battle!"))
await error_msg.delete(delay=20)
continue
elif user.id in self.waiting_users and user.id != ctx.author.id:
if user.id not in user_ids:
error_msg = await ctx.send(content=user.mention, embed=discord.Embed(title="Already searching", color = Config.MAINCOLOR, description="You are already searching for a boss"))
await error_msg.delete(delay=20)
await reaction.remove(user)
continue
elif user.id in self.battling_users and user.id != ctx.author.id:
error_msg = await ctx.send(content=user.mention, embed=discord.Embed(title="Already battling", color = Config.MAINCOLOR, description="You are already battling a boss"))
await error_msg.delete(delay=20)
await reaction.remove(user)
continue
elif reaction.message.id != msg.id or not reaction.me:
continue
if str(reaction) != "✔️":
await reaction.remove(user)
continue
await reaction.remove(user)
if user.id in user_ids:
user_ids.remove(user.id)
user_names.remove(user.name)
self.waiting_users.remove(user.id)
else:
if len(user_ids) > 9:
error_msg = await ctx.send(content=user.mention, embed=discord.Embed(title="Already full", color = Config.MAINCOLOR, description="The party is full. Only 10 people can fight a single boss."))
continue
user_ids.append(user.id)
user_names.append(user.name)
self.waiting_users.append(user.id)
users_names = ""
for user in user_names:
users_names += user+"\n"
embed=discord.Embed(color=Config.MAINCOLOR, title=ctx.author.name + " Is searching for a boss<a:dots:715134569355018284>", description=f"The battle will begin in 1 minute. React to join.\n⚔️ **Players ({str(len(user_ids))}/10):**\n{users_names}\n{quote}", timestamp=datetime.datetime.utcnow() + datetime.timedelta(minutes=1))
embed.set_footer(text='React with the ✔️ to join | starting at ')
embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/736320366116470815/779302235427438602/fire_1f525.png")
if msg is None:
msg = await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
except asyncio.TimeoutError:
continue
# temp_msg = await ctx.channel.fetch_message(msg.id)
# users = []
# for temp_reaction in temp_msg.reactions:
# if str(temp_reaction) == "✔️":
# users = await temp_reaction.users().flatten()
# if ctx.author.id not in [x.id for x in users]:
# users.append(ctx.author)
if len(user_ids) == 0:
await msg.clear_reactions()
await msg.edit(embed=discord.Embed(title="Boss Search canceled", color=Config.MAINCOLOR, description="No one was brave enough to challenge a boss..."))
if ctx.channel.id in self.active_channels:
self.active_channels.remove(ctx.channel.id)
for u in user_ids:
if u in self.waiting_users:
self.waiting_users.remove(u)
return
match = []
for user in user_ids:
user = await self.bot.fetch_user(user)
if user.id != self.bot.user.id:
account = Utils.get_account(user.id)
armor = None
weapon = None
if account["weapon"] is not None:
weapon = Utils.get_item(account['weapon']["name"])
if account["armor"] is not None:
armor = Utils.get_item(account['armor']["name"])
match.append({'ability': None, 'weapon': weapon, 'armor': armor, 'user': user, 'stunned': False,
'account': account})
monster_class = random.choice(list(Config.CLASSES.find({})))
spells = list(Config.SPELLS.find({'class': monster_class['name'], 'type': {'$nin': ['STUN']}}).limit(6))
for i in range(len(match)):
if match[i]['account']['armor'] is not None:
match[i]['account']['stats']['defense'] += Utils.calc_item_effect(match[i]["account"]["armor"], match[i]['armor'])
if match[i]['account']['weapon'] is not None:
match[i]['account']['stats']['strength'] += Utils.calc_item_effect(match[i]["account"]["weapon"], match[i]['weapon'])
if random.randint(0, 7) == 0:
strength = round(3 + (statistics.mean(x['account']['stats']['defense'] for x in match)
* round(random.uniform(1.1, 1.4), 2)))
defense = round(2 * (statistics.mean(x['account']['stats']['strength'] for x in match)
* round(random.uniform(0.6, 0.7), 2)))
monster = {'name': Battle_Utils.make_monster_name(True),
'titan': True, 'spells': spells,
'armor': {'name': "Titan's Breastplate", 'effect': random.randint(3, 9),
'emoji': "<:helmet:675820506284556306>"},
'weapon': {'name': "Aged Sword", 'effect': random.randint(3, 9),
'emoji': "<:battle:670882198450339855>"},
'stats': {'health': round(80 + (math.sqrt(len(match)) * 15)), 'strength': strength,
'defense': defense, 'endurance': random.randint(140, 170) + len(match) * 12},
'stunned': False}
else:
strength = round(3 + (statistics.mean(x['account']['stats']['defense'] for x in match)
* round(random.uniform(1, 1.2), 2)))
defense = round(2 * (statistics.mean(x['account']['stats']['strength'] for x in match)
* round(random.uniform(0.5, 0.65), 2)))
monster = {'name': Battle_Utils.make_monster_name(),
'titan': False, 'spells': spells,
'stats': {'health': round(60 + (math.sqrt(len(match)) * 10)), 'strength': strength,
'defense': defense, 'endurance': random.randint(90, 140) + len(match) * 10},
'stunned': False}
for user in match:
if user["account"]["user_id"] in self.waiting_users:
self.waiting_users.remove(user["account"]["user_id"])
Config.USERS.update_one({'user_id': user["account"]["user_id"]}, {'$inc': {'battles.bosses': 1}})
else:
if account["battles"]["bosses"] == 0:
desc = "<:0_:786197490466160641><:0_:786197490466160641><:0_:786197490466160641>"
spells = [{'name': 'Witchers Wind', 'class': 'Paladin', 'id': 0, 'damage': 10, 'scaling': 1.4, 'emoji': '<:D:761582483434242118>', 'cost': 20, 'type': 'DAMAGE'}, {'name': 'Ceremony of Absorption', 'class': 'Paladin', 'id': 1, 'damage': 5, 'scaling': 1.0, 'emoji': '<:H:761582482809159693>', 'cost': 15, 'type': 'HEAL'}]
monster = {'name': Battle_Utils.make_monster_name(), 'titan': False, 'spells': spells, 'stats': {'health': 40, 'strength': 1, 'defense': 1, 'endurance': 60}, 'stunned': False}
if account["battles"]["bosses"] == 1:
desc = "<:1_:786197490860818432><:0_:786197490466160641><:0_:786197490466160641>\nWATCH OUT FOR MANA, you can die if it goes below 0"
spells = [{'name': 'Hymn of Rage', 'class': 'Druid', 'id': 0, 'damage': 6, 'scaling': 1.3, 'emoji': '<:D:761582483434242118>', 'cost': 12, 'type': 'DAMAGE'}, {'name': 'Silence', 'class': 'Druid', 'id': 1, 'damage': 3, 'scaling': 1, 'emoji': '<:P:761582482708889631>', 'cost': 10, 'type': 'PEN'}, {'name': 'Flood', 'class': 'Druid', 'id': 4, 'damage': 10, 'scaling': 1, 'emoji': '<:H:761582482809159693>', 'cost': 40, 'type': 'HEAL'}, {'name': 'Mystic Burn', 'class': 'Druid', 'id': 6, 'damage': 10, 'scaling': 1.3, 'emoji': '<:D:761582483434242118>', 'cost': 23, 'type': 'DAMAGE'}]
monster = {'name': Battle_Utils.make_monster_name(), 'titan': False, 'spells': spells, 'stats': {'health': 50, 'strength': 2, 'defense': 2, 'endurance': 80}, 'stunned': False}
if account["battles"]["bosses"] == 2:
desc = "<:1_:786197490860818432><:1_:786197490860818432><:0_:786197490466160641>\nWATCH OUT FOR MANA, you can die if it goes below 0"
spells = [{'name': 'Revitalize', 'class': 'Arcane', 'id': 7, 'damage': 4, 'scaling': 1, 'emoji': '<:D:761582483237371914>', 'cost': 0, 'type': 'DRAIN'}, {'name': 'Seism', 'class': 'Arcane', 'id': 1, 'damage': 15, 'scaling': 1, 'emoji': '<:D:761582483237371914>', 'cost': 10, 'type': 'DRAIN'}, {'name': 'Upbringing', 'class': 'Arcane', 'id': 3, 'damage': 3, 'scaling': 1, 'emoji': '<:P:761582482708889631>', 'cost': 20, 'type': 'PEN'}, {'name': 'Void', 'class': 'Arcane', 'id': 5, 'damage': 6, 'scaling': 1.6, 'emoji': '<:D:761582483434242118>', 'cost': 30, 'type': 'DAMAGE'}, {'name': 'Depths', 'class': 'Arcane', 'id': 0, 'damage': 4, 'scaling': 1.5, 'emoji': '<:D:761582483434242118>', 'cost': 16, 'type': 'DAMAGE'}, {'name': 'Mirage', 'class': 'Arcane', 'id': 4, 'damage': 1, 'scaling': 1, 'emoji': '<:A:761582483153354752>', 'cost': 30, 'type': 'ARMOR'}]
monster = {'name': Battle_Utils.make_monster_name(), 'titan': True, 'spells': spells, 'stats': {'health': 70, 'strength': 3, 'defense': 1, 'endurance': 80}, 'stunned': False}
embed = discord.Embed(
title="Time to show you the ropes!",
description="**You're about to start one of your first bosses!** "
+ "How exciting! <:E:730362458547421256> The first three solo bosses you do are againt our training dummies, "
+ "designed to show you how to beat the bigger monsters \:)\n\n"
+ "__Training bosses:__ " + desc + "\n"
+ "If you're not sure how certain things work yet, don't worry. You'll figure it out! <:L:730356470905831434>\n"
+ "<:E:730362457541050478><:E:730362455716397127> If you're ready to rumble, click on the checkmark ✔",
color = 0xffcd00
)
embed.set_thumbnail(url="https://media.discordapp.net/attachments/736320244649295894/786213274386694194/SPELL_Damage.png?width=450&height=430")
if msg is None:
msg = await ctx.send(embed=embed)
else:
await msg.edit(embed=embed)
await msg.add_reaction("✔️")
await msg.add_reaction("❌")
countdown = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
def check(reaction, user):
return user.id != self.bot.user.id and reaction.message.id == msg.id
while datetime.datetime.utcnow() < countdown:
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=10, check=check)
if str(reaction) == "❌" and user.id == ctx.author.id:
await msg.clear_reactions()
await msg.edit(embed=discord.Embed(title="Boss Search canceled", color = Config.MAINCOLOR, description=ctx.author.name + " has disbanded the search..."))
# if ctx.channel.id in self.active_channels:
# self.active_channels.remove(ctx.channel.id)
# for u in user_ids:
# if u in self.waiting_users:
# self.waiting_users.remove(u)
return
elif user.id in self.waiting_users and user.id != ctx.author.id:
if user.id not in self.battling_users:
error_msg = await ctx.send(content=user.mention, embed=discord.Embed(title="Already searching", color = Config.MAINCOLOR, description="You are already searching for a boss"))
await error_msg.delete(delay=20)
await reaction.remove(user)
continue
elif user.id in self.battling_users and user.id != ctx.author.id:
error_msg = await ctx.send(content=user.mention, embed=discord.Embed(title="Already battling", color = Config.MAINCOLOR, description="You are already battling a boss"))
await error_msg.delete(delay=20)
await reaction.remove(user)
continue
elif reaction.message.id != msg.id or not reaction.me:
continue
elif str(reaction) == "✔️" and user.id == ctx.author.id:
await msg.clear_reactions()
break
except asyncio.TimeoutError:
continue
match = []
if ctx.author.id != self.bot.user.id:
account = Utils.get_account(ctx.author.id)
armor = None
weapon = None
if account["weapon"] is not None:
weapon = Utils.get_item(account['weapon']["name"])
if account["armor"] is not None:
armor = Utils.get_item(account['armor']["name"])
match.append({'ability': None, 'weapon': weapon, 'armor': armor, 'user': user, 'account': account,
'stunned': False})
if 'weapon' in monster.keys():
monster['stats']['strength'] += monster['weapon']['effect']
if 'armor' in monster.keys():
monster['stats']['defense'] += monster['armor']['effect']
monster['stats']['strength'] = round(monster['stats']['strength'])
monster['stats']['defense'] = round(monster['stats']['defense'])
monster['stats']['health'] = round(monster['stats']['health'])
self.bosses += 1
match_copy = match.copy()
try:
await self.boss_thread(match, msg, monster)
except Exception as e:
users = []
for user in match_copy:
users.append(user["user"].id)
i = 0
while i != len(self.battling_users):
if self.battling_users[i]["id"] in users:
self.battling_users.pop(i)
else:
i += 1
if ctx.channel.id in self.active_channels:
self.active_channels.remove(ctx.channel.id)
self.bosses -= 1
raise e
@boss.error
    async def boss_error(self, ctx, error):
        if isinstance(error, commands.BotMissingPermissions):
            await ctx.send(embed=discord.Embed(title="Uh oh..", description="I'm missing some permissions, please make sure I have the following:\n\nadd_reactions, manage_messages, send_messages, external_emojis", color=Config.ERRORCOLOR))
def setup(bot):
bot.add_cog(Bosses(bot))
|
"""Test your system from the command line."""
import getpass
import logging
import sys
from total_connect_client.client import TotalConnectClient
logging.basicConfig(filename="test.log", level=logging.DEBUG)
if len(sys.argv) != 3:
print("usage: username location1=usercode1,location2=usercode2 \n")
sys.exit()
USERNAME = sys.argv[1]
USERCODES = dict(x.split("=") for x in sys.argv[2].split(","))
PASSWORD = getpass.getpass()
TC = TotalConnectClient(USERNAME, PASSWORD, USERCODES)
for location_id in TC.locations:
TC.locations[location_id].disarm()
print("Disarmed.")
sys.exit()
|
"""chapter 1 quiz 4 options table
Revision ID: cb9d3f9cc88c
Revises: ecd8e8e98a9b
Create Date: 2022-01-12 10:15:20.368945
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cb9d3f9cc88c'
down_revision = 'ecd8e8e98a9b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('web_dev_chapter_1_quiz_4_options',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('option_1', sa.Boolean(), nullable=True),
sa.Column('option_2', sa.Boolean(), nullable=True),
sa.Column('option_3', sa.Boolean(), nullable=True),
sa.Column('option_4', sa.Boolean(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('student_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['student_id'], ['student.id'], name=op.f('fk_web_dev_chapter_1_quiz_4_options_student_id_student')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_web_dev_chapter_1_quiz_4_options'))
)
with op.batch_alter_table('web_dev_chapter_1_quiz_4_options', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_web_dev_chapter_1_quiz_4_options_timestamp'), ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('web_dev_chapter_1_quiz_4_options', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_web_dev_chapter_1_quiz_4_options_timestamp'))
op.drop_table('web_dev_chapter_1_quiz_4_options')
# ### end Alembic commands ###
|
from Socket import Socket
from MiddlewareModule import MiddlewareModule
import threading
class AsyncServer(Socket):
def __init__(self,Port = 25565):
self.Clients = {}
self.Events = [[],[],[],[],[],[]]
self.Thread_Loop = None
        self.port = Port  # attribute assignment through super() is not supported; set it on the instance instead
    def Use(self, Module=None, OnEvent=0, Priority=None):
        # Create a fresh module per call to avoid mutating a shared default instance
        if Module is None:
            Module = MiddlewareModule()
        if Priority:
            Module.priority = Priority
        self.Events[OnEvent].append(Module)
def Start(self,datach = True):
        super().Bind()
        super().Listen()
if datach:
self.Thread_Loop = threading.Thread(target = AsyncServer.__Accept_Loop__ , args=(self,))
self.Thread_Loop.start()
else:
AsyncServer.__Accept_Loop__(self)
@staticmethod
def __Accept_Loop__(obj):
pass
pass
|
from azureml.core import Workspace
from azureml.core.webservice import Webservice
# Requires the config to be downloaded first to the current working directory
ws = Workspace.from_config()
# Set with the deployment name
name = "bank-marketing-endpoint"
# Load existing web service
service = Webservice(name=name, workspace=ws)
# Enable Application Insights
service.update(enable_app_insights=True)
# Get logs and print
logs = service.get_logs()
for line in logs.split('\n'):
print(line)
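# Application Insights can be switched back off later with the same call:
# service.update(enable_app_insights=False)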
|
__all__ = ["const", "eos", "geom", "potentials", "sitemix"]
|
import pytest
import os.path
import yaml
from xxx import Klass
from funcy import silent
from mock_api import mock_function, mock_method, track_function, track_method, same_url, aggregate_same_urls, schema_difference_coefficent, group_equal, parametrize_urls
urlmap = {
'/': 'Root: Str',
'/ciao/': """
Root:
x: Int
y: Str
""",
'/ciao/{}/': """
Root:
r: Int
"""
}
def test_0():
with mock_function('yaml.load', urlmap, arg=0) as m:
res = yaml.load('http://instagram.com/')
print(res)
with pytest.raises(Exception):
res = yaml.load('http://instagram.com/ciao/8/8')
res = yaml.load('http://instagram.com/ciao/8')
assert 'r' in res
assert yaml.load('9') == 9
def test_1():
with mock_function('yaml.load', urlmap, arg=0) as m:
res = yaml.load('http://instagram.com/ciao/')
print(res)
assert 'x' in res
assert 'y' in res
assert yaml.load('9') == 9
def test_2():
m = mock_function('yaml.load', urlmap, arg=0)
m.start()
with pytest.raises(Exception):
res = yaml.load('http://instagram.com/xxx/')
m.stop()
assert yaml.load('9') == 9
def test_3():
path = 'urls_.yml'
silent(os.remove)(path)
with track_function('yaml.load', path, ):
yaml.load('a/5/b')
yaml.load('a/9/b')
yaml.load('a/3/b')
assert os.path.exists(path)
with open(path) as f:
print()
print(f.read())
silent(os.remove)(path)
def test_4():
path = 'urls_.yml'
silent(os.remove)(path)
with track_function('yaml.load', path, ):
yaml.load('xx/5/b')
yaml.load('a/9/b')
yaml.load('a/3/b')
yaml.load('a/3/b')
yaml.load('a/89/8')
assert os.path.exists(path)
with open(path) as f:
print()
print(f.read())
silent(os.remove)(path)
def test_track_class():
path = 'urls_.yml'
silent(os.remove)(path)
with track_method('xxx.Klass', 'ciao', path,):
x = Klass()
x.ciao('a/1/b')
x.ciao('a/2/b')
x.ciao('a/8/b')
assert os.path.exists(path)
with open(path) as f:
print()
print(f.read())
silent(os.remove)(path)
def test_mock_method():
with mock_method('xxx.Klass', 'ciao', urlmap, arg=1):
k = Klass()
res = k.ciao('http://instagram.com/ciao/')
print(res)
with pytest.raises(Exception):
res = k.ciao('http://instagram.com/xxx/')
@pytest.mark.parametrize(
'a, b, expected',
[
('/ciao/x', 'ciao/x', True),
('/ciao/34', 'ciao/12', True),
('/ciao/34/xxx', '/ciao/4/xxx', True),
('http://instagram.com/ciao/34/xxx', 'http://instagram.com/ciao/4/xxx', True),
('a/b/x/s', 'a/b/1/k', False)
]
)
def test_same_url(a, b, expected):
res = same_url(a, b)
print(res)
assert res == expected
def test_aggregate_same_urls():
data = {
'/xxx/1': [0, ],
'/xxx/2': [1, ],
'/xxx/3': [2, ],
}
aggregate_same_urls(data)
def test_schema_difference_coefficent():
a = {
'properties': {
'x': 1,
'y': 1,
'a': 1,
}
}
b = {
'properties': {
'x': 1,
'y': 1,
'a': 9
}
}
y = schema_difference_coefficent(a, b)
print(y)
def test_group_by():
equal = lambda a, b: a+b == 3
groups = group_equal([1, 2, 3, 2, 3, 4, 1, 2 ], equal=equal)
print(groups)
def test_parametrize_urls():
x = parametrize_urls([
'xxx/ciao/8/x',
'xxx/ciao/9/x',
'xxx/ciao/2/x',
])
print(x)
assert x == 'xxx/ciao/{}/x'
|
import hashlib
import hmac
import bcrypt
# pycryptodome
from Crypto import Random
from Crypto.Cipher import AES
# You can change this salt to whatever you want - it is recommended to generate one with: salt = bcrypt.gensalt(30)
# BEWARE: if you change this, you will need your exact version of Inventus, because the salt is not stored anywhere
# but this Python file. If you lose this file, you lose the salt and, effectively, all of your encrypted files.
salt = b'$2b$30$bDaVUZfLWpXIyoR70wkI5u' # randomly generated salt
class AESCipher(object):
def __init__(self, password):
# define block size
self.block_size = AES.block_size
# Hash password with 750 rounds of Bcrypt KDF followed by SHA256
self.key = hashlib.sha256(bcrypt.kdf(
password=password.encode(),
salt=salt,
desired_key_bytes=32,
rounds=750) + password.encode() + salt).digest()
def encrypt(self, plain_text):
# Pad the plaintext with PKCS#7
plain_text = self.__pad(plain_text)
# Generate a new random IV for each file
iv = Random.new().read(self.block_size)
        # Create an AES object
cipher = AES.new(self.key, AES.MODE_CBC, iv)
# Encrypted text is the IV + ciphertext
encrypted_text = iv + cipher.encrypt(plain_text)
# Calculate a 64 byte HMAC authentication code and append it to the encrypted text
# you can use sha256 instead of sha512, but change 64 to 32 in the decrypt section
hmac_auth = hmac.new(self.key, encrypted_text, hashlib.sha512).digest()
return encrypted_text + hmac_auth
def decrypt(self, encrypted_text):
# last 64 bytes of the encrypted text is the hmac value (if using sha512 for authentication)
hmac_auth = encrypted_text[-64:]
encrypted_text = encrypted_text[:-64]
# HMAC authentication value from bytes
hmac_auth1 = hmac.new(self.key, encrypted_text, hashlib.sha512).digest()
# check if the message hasn't been tampered with, return False for hmac_successful and don't decrypt
if hmac_auth != hmac_auth1:
return None, False
# if hmac was authenticated
# the IV is the size of the block
iv = encrypted_text[:self.block_size]
# create an AES object
cipher = AES.new(self.key, AES.MODE_CBC, iv)
# decrypt the encrypted text without the iv
plain_text = cipher.decrypt(encrypted_text[self.block_size:])
# Un-pad and return plaintext, and True for hmac_successful
return self.__unpad(plain_text), True
def __pad(self, plain_text):
# PKCS#7 padding
bs = self.block_size
pad = lambda s: s + (bs - len(s) % bs) * (bs - len(s) % bs).to_bytes(1, 'big')
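        # e.g. with a 16-byte block, b'abc' (3 bytes) gets 13 bytes of value 0x0d appended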
return pad(plain_text)
@staticmethod
def __unpad(plain_text):
# Incorrect padding check
try:
# Un-pad PKCS#7
un_pad = lambda s: s[:-int.from_bytes(s[-1:], 'big')]
return un_pad(plain_text)
except:
raise Exception("Un-padding failed")
|
from pextant.mesh.triangularmesh import TriPolyMesh
from pextant.solvers.astarMesh import MeshSearchElement
ames_em = TriPolyMesh('../../data/maps/Ames/Ames.tif').loadSubSection()
start = MeshSearchElement(ames_em._getMeshElement(10))
#end = MeshSearchElement(ames_em._getMeshElement(10))
|
"""!/usr/bin/env python3"""
import sys
from PyQt5 import QtWidgets
from source.mainWindow import MazeGenApp
def main():
app = QtWidgets.QApplication(sys.argv)
app.setStyle("fusion")
window = MazeGenApp()
window.show()
app.exec_()
if __name__ == "__main__":
main()
|
import torch.nn as nn
import torch
import torch.nn.functional as F
import torchvision.models
import os
import utils.network_utils
from utils.pointnet2_utils import PointNetSetAbstraction,PointNetFeaturePropagation
from models.graphx import PointCloudGraphXDecoder
from losses.earth_mover_distance import EMD
# Set the cache path for the pretrained weights
os.environ['TORCH_HOME'] = '/media/caig/FECA2C89CA2C406F/sketch3D/pretrain_models'
Conv = nn.Conv2d
def wrapper(func, *args, **kwargs):
class Wrapper(nn.Module):
def __init__(self):
super().__init__()
self.func = func
def forward(self, input):
return self.func(input, *args, **kwargs)
return Wrapper()
class CNN18Encoder(nn.Module):
"""
Image multi-scale encoder
Input:
input: input images
Output:
feats: Multi-scale image features
"""
def __init__(self, in_channels, activation=nn.ReLU()):
super().__init__()
self.block1 = nn.Sequential()
self.block1.conv1 = Conv(in_channels, 16, 3, padding=1)
self.block1.relu1 = activation
self.block1.conv2 = Conv(16, 16, 3, padding=1)
self.block1.relu2 = activation
self.block1.conv3 = Conv(16, 32, 3, stride=2, padding=1)
self.block1.relu3 = activation
self.block1.conv4 = Conv(32, 32, 3, padding=1)
self.block1.relu4 = activation
self.block1.conv5 = Conv(32, 32, 3, padding=1)
self.block1.relu5 = activation
self.block1.conv6 = Conv(32, 64, 3, stride=2, padding=1)
self.block1.relu6 = activation
self.block1.conv7 = Conv(64, 64, 3, padding=1)
self.block1.relu7 = activation
self.block1.conv8 = Conv(64, 64, 3, padding=1)
self.block1.relu8 = activation
self.block3 = nn.Sequential()
self.block3.conv1 = Conv(64, 128, 3, stride=2, padding=1)
self.block3.relu1 = activation
self.block3.conv2 = Conv(128, 128, 3, padding=1)
self.block3.relu2 = activation
self.block3.conv3 = Conv(128, 128, 3, padding=1)
self.block3.relu3 = activation
self.block4 = nn.Sequential()
self.block4.conv1 = Conv(128, 256, 5, stride=2, padding=2)
self.block4.relu1 = activation
self.block4.conv2 = Conv(256, 256, 3, padding=1)
self.block4.relu2 = activation
self.block4.conv3 = Conv(256, 256, 3, padding=1)
self.block4.relu3 = activation
self.block5 = nn.Sequential()
self.block5.conv1 = Conv(256, 512, 5, stride=2, padding=2)
self.block5.relu1 = activation
self.block5.conv2 = Conv(512, 512, 3, padding=1)
self.block5.relu2 = activation
self.block5.conv3 = Conv(512, 512, 3, padding=1)
self.block5.relu3 = activation
self.block5.conv4 = Conv(512, 512, 3, padding=1)
self.block5.relu4 = activation
def forward(self, input):
feats = []
output = input
for block in self.children():
output = block(output)
feats.append(output)
return feats
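# Illustrative sketch only (not part of the original model): run the encoder on a
# dummy 224x224 RGB batch and print the multi-scale feature map shapes.
def _demo_cnn18_encoder():
    encoder = CNN18Encoder(in_channels=3)
    images = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        feats = encoder(images)
    for feat in feats:
        # Expected roughly: (1, 64, 56, 56), (1, 128, 28, 28), (1, 256, 14, 14), (1, 512, 7, 7)
        print(tuple(feat.shape))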
class TransformPC(nn.Module):
"""
Transform point cloud to camera coordinate
Input:
xyz: float tensor, (BS,N_PTS,3); input point cloud
values assumed to be in (-1,1)
az: float tensor, (BS); azimuthal angle of camera in radians
el: float tensor, (BS); elevation of camera in radians
Output:
xyz_out: float tensor, (BS,N_PTS,3); output point cloud in camera
co-ordinates
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.n_pts = cfg.CONST.NUM_POINTS
def forward(self, xyz, az, el):
batch_size = xyz.size(0)
cam_xyz = self.world2cam(xyz, az, el, batch_size, N_PTS=self.n_pts)
return cam_xyz
def world2cam(self, xyz, az, el, batch_size, N_PTS=1024):
# y ---> x
rotmat_az=[
[torch.cos(az),torch.sin(az),torch.zeros_like(az)],
[-torch.sin(az),torch.cos(az),torch.zeros_like(az)],
[torch.zeros_like(az),torch.zeros_like(az), torch.ones_like(az)]
]
rotmat_az = [ torch.stack(x) for x in rotmat_az ]
# z ---> x, in dataloader, az = original az - 90 degree, which means here is actually x ----> -z
rotmat_el=[
[torch.cos(el),torch.zeros_like(az), torch.sin(el)],
[torch.zeros_like(az),torch.ones_like(az),torch.zeros_like(az)],
[-torch.sin(el),torch.zeros_like(az), torch.cos(el)]
]
rotmat_el = [ torch.stack(x) for x in rotmat_el ]
rotmat_az = torch.stack(rotmat_az, 0) # [3,3,B]
rotmat_el = torch.stack(rotmat_el, 0) # [3,3,B]
rotmat_az = rotmat_az.permute(2, 0, 1) # [B,3,3]
rotmat_el = rotmat_el.permute(2, 0, 1) # [B,3,3]
rotmat = torch.matmul(rotmat_el, rotmat_az)
# Transformation(t)
# Distance of object from camera - fixed to 2
d = 2.
# Calculate translation params
tx, ty, tz = [0, 0, d]
tr_mat = torch.unsqueeze(torch.tensor([tx, ty, tz]), 0).repeat(batch_size, 1) # [B,3]
tr_mat = torch.unsqueeze(tr_mat,2) # [B,3,1]
tr_mat = tr_mat.permute(0, 2, 1) # [B,1,3]
tr_mat = tr_mat.repeat(1, N_PTS, 1) # [B,N_PTS,3]
tr_mat = utils.network_utils.var_or_cuda(tr_mat) # [B,N_PTS,3]
xyz_out = torch.matmul(rotmat, xyz.permute(0, 2, 1)) - tr_mat.permute(0, 2, 1)
return xyz_out.permute(0, 2, 1)
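# Illustrative sanity sketch (not part of the original model): the azimuth/elevation
# rotations built in world2cam compose into an orthonormal matrix (R @ R^T == I).
def _demo_world2cam_rotation():
    az = torch.tensor([0.3, 1.2])
    el = torch.tensor([0.1, -0.4])
    rot_az = torch.stack([
        torch.stack([torch.cos(az), torch.sin(az), torch.zeros_like(az)]),
        torch.stack([-torch.sin(az), torch.cos(az), torch.zeros_like(az)]),
        torch.stack([torch.zeros_like(az), torch.zeros_like(az), torch.ones_like(az)]),
    ]).permute(2, 0, 1)                                   # [B, 3, 3]
    rot_el = torch.stack([
        torch.stack([torch.cos(el), torch.zeros_like(el), torch.sin(el)]),
        torch.stack([torch.zeros_like(el), torch.ones_like(el), torch.zeros_like(el)]),
        torch.stack([-torch.sin(el), torch.zeros_like(el), torch.cos(el)]),
    ]).permute(2, 0, 1)                                   # [B, 3, 3]
    rot = torch.matmul(rot_el, rot_az)
    identity = torch.eye(3).expand_as(rot)
    print(torch.allclose(torch.matmul(rot, rot.transpose(1, 2)), identity, atol=1e-5))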
class FeatureProjection(nn.Module):
"""
    Project the point cloud onto the 2D image and gather the corresponding image
    features at the projected locations
Input:
img_feats: multi-scale image features
pc: input point clouds (in camera coordinate) [B, N, 3]
Output:
        pc_feats_trans: pointcloud xyz + multi-view image features (by feature projection)
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.concat = wrapper(torch.cat, dim=-1)
def forward(self, img_feats, pc):
pc_feats = []
pc_feats += [self.get_projection(img_feat, pc) for img_feat in img_feats]
pc_feats_trans = self.concat(pc_feats)
return pc_feats_trans
def _project(self, img_feats, xs, ys):
x, y = xs.flatten(), ys.flatten()
idb = torch.arange(img_feats.shape[0], device=img_feats.device)
idb = idb[None].repeat(xs.shape[1], 1).t().flatten().long()
x1, y1 = torch.floor(x), torch.floor(y)
x2, y2 = torch.ceil(x), torch.ceil(y)
q11 = img_feats[idb, :, x1.long(), y1.long()].to(img_feats.device)
q12 = img_feats[idb, :, x1.long(), y2.long()].to(img_feats.device)
q21 = img_feats[idb, :, x2.long(), y1.long()].to(img_feats.device)
q22 = img_feats[idb, :, x2.long(), y2.long()].to(img_feats.device)
weights = ((x2 - x) * (y2 - y)).unsqueeze(1)
q11 *= weights
weights = ((x - x1) * (y2 - y)).unsqueeze(1)
q21 *= weights
weights = ((x2 - x) * (y - y1)).unsqueeze(1)
q12 *= weights
weights = ((x - x1) * (y - y1)).unsqueeze(1)
q22 *= weights
out = q11 + q12 + q21 + q22
return out.view(img_feats.shape[0], -1, img_feats.shape[1])
def get_projection(self, img_feat, pc):
_, _, h_, w_ = tuple(img_feat.shape)
X, Y, Z = pc[..., 0], pc[..., 1], pc[..., 2]
w = (420.*X/abs(Z) + (111.5))
h = (420.*Y/abs(Z) + (111.5))
w = torch.clamp(w, 0., 223.)
h = torch.clamp(h, 0., 223.)
x = w / (223. / (w_ - 1.))
y = h / (223. / (h_ - 1.))
feats = self._project(img_feat, x, y)
return feats
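# Illustrative sketch only (not part of the original model): project a random point
# cloud onto two dummy feature maps; the per-point features of all scales are
# concatenated, so the output is [B, N, sum of channel counts].
def _demo_feature_projection():
    feat_maps = [torch.rand(2, 8, 56, 56), torch.rand(2, 16, 28, 28)]
    pc = torch.rand(2, 32, 3) * 2 - 1        # xyz roughly in (-1, 1)
    pc[..., 2] = pc[..., 2] + 2.0            # keep points in front of the camera (Z > 0)
    projector = FeatureProjection(cfg=None)  # cfg is stored but not used by this module
    with torch.no_grad():
        feats = projector(feat_maps, pc)
    print(feats.shape)                       # expected: torch.Size([2, 32, 24])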
class PointNet2(nn.Module):
"""
Point cloud segmentation (set abstraction + feature propagation) in pointnet++
Input:
xyz: input points position [B, N, 3]
output:
point_feature: per-point features encode by pointnet [B, 128, N]
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.1, nsample=64, in_channel=3, mlp=[64, 64, 128], group_all=False)
self.sa2 = PointNetSetAbstraction(npoint=384, radius=0.2, nsample=64, in_channel=128 + 3, mlp=[128, 128, 256], group_all=False)
self.sa3 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64, in_channel=256 + 3, mlp=[256, 256, 512], group_all=False)
self.sa4 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[512, 512, 1024], group_all=True)
self.fp4 = PointNetFeaturePropagation(in_channel=512 + 1024, mlp=[512, 512])
self.fp3 = PointNetFeaturePropagation(in_channel=256 + 512 , mlp=[512, 256])
self.fp2 = PointNetFeaturePropagation(in_channel=128 + 256 , mlp=[256, 128])
self.fp1 = PointNetFeaturePropagation(in_channel=0 + 128 , mlp=[128, 128, 128])
def forward(self, xyz):
xyz = xyz.transpose(2, 1) # [B, C, N]
l0_xyz = xyz
l0_points = None
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, l0_points, l1_points)
return l0_points
class EdgeRes(nn.Module):
"""
input:
- inp: b x num_dims x num_points
outputs:
        - out: b x 128 x num_points (per-point features)
"""
def __init__(self, use_SElayer: bool = False):
super(EdgeRes, self).__init__()
self.k = 8
self.conv1 = torch.nn.Conv2d(6, 64, kernel_size=1, bias=False)
self.conv2 = torch.nn.Conv2d(128, 128, kernel_size=1, bias=False)
self.conv3 = torch.nn.Conv2d(256, 1024, kernel_size=1, bias=False)
self.conv4 = torch.nn.Conv2d(2176, 512, kernel_size=1, bias=False)
self.conv5 = torch.nn.Conv2d(1024, 256, kernel_size=1, bias=False)
self.conv6 = torch.nn.Conv2d(512, 128, kernel_size=1, bias=False)
self.use_SElayer = use_SElayer
if use_SElayer:
self.se1 = SELayer(channel=64)
self.se2 = SELayer(channel=128)
self.se4 = SELayer(channel=512)
self.se5 = SELayer(channel=256)
self.se6 = SELayer(channel=128)
self.bn1 = torch.nn.BatchNorm2d(64)
self.bn2 = torch.nn.BatchNorm2d(128)
self.bn3 = torch.nn.BatchNorm2d(1024)
self.bn4 = torch.nn.BatchNorm2d(512)
self.bn5 = torch.nn.BatchNorm2d(256)
self.bn6 = torch.nn.BatchNorm2d(128)
self.th = nn.Tanh()
def forward(self, x):
npoints = x.size()[2]
        # x: [batch_size, 3, num_points] (graph features double this to 6 channels for conv1)
if self.use_SElayer:
            x = get_graph_feature(x, k=self.k)  # [bs, 6, num_points, k]
x = F.relu(self.se1(self.bn1(self.conv1(x)))) # [bs, 64, num_points, k]
x = x.max(dim=-1, keepdim=False)[0] # [bs, 64, num_points]
pointfeat = x # [batch_size, 64, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 128, num_points, k]
x = F.relu(self.se2(self.bn2(self.conv2(x))))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 128, num_points]
else:
            x = get_graph_feature(x, k=self.k)  # [bs, 6, num_points, k]
x = F.relu(self.bn1(self.conv1(x))) # [bs, 64, num_points, k]
x = x.max(dim=-1, keepdim=False)[0] # [bs, 64, num_points]
pointfeat = x # [batch_size, 64, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 128, num_points, k]
x = F.relu(self.bn2(self.conv2(x)))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 128, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 256, num_points, k]
x = self.bn3(self.conv3(x)) # [batch_size, 1024, num_points, k]
x = x.max(dim=-1, keepdim=False)[0] # [bs, 1024, num_points]
x, _ = torch.max(x, 2) # [batch_size, 1024]
x = x.view(-1, 1024) # [batch_size, 1024]
x = x.view(-1, 1024, 1).repeat(1, 1, npoints) # [batch_size, 1024, num_points]
x = torch.cat([x, pointfeat], 1) # [batch_size, 1088, num_points]
if self.use_SElayer:
x = get_graph_feature(x, k=self.k) # [bs, 2176, num_points, k]
x = F.relu(self.se4(self.bn4(self.conv4(x))))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 512, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 1024, num_points, k]
x = F.relu(self.se5(self.bn5(self.conv5(x))))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 256, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 512, num_points, k]
x = F.relu(self.se6(self.bn6(self.conv6(x))))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 128, num_points]
else:
x = get_graph_feature(x, k=self.k) # [bs, 2176, num_points, k]
x = F.relu(self.bn4(self.conv4(x)))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 512, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 1024, num_points, k]
x = F.relu(self.bn5(self.conv5(x)))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 256, num_points]
x = get_graph_feature(x, k=self.k) # [bs, 512, num_points, k]
x = F.relu(self.bn6(self.conv6(x)))
x = x.max(dim=-1, keepdim=False)[0] # [bs, 128, num_points]
return x
class SELayer(nn.Module):
"""
input:
x:(b, c, m, n)
output:
out:(b, c, m', n')
"""
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid(),
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
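# Illustrative sketch only (not part of the original model): squeeze-and-excitation
# re-weights channels, so the output shape matches the input shape.
def _demo_selayer():
    se = SELayer(channel=64, reduction=16)
    x = torch.rand(2, 64, 16, 16)
    with torch.no_grad():
        out = se(x)
    print(out.shape)   # torch.Size([2, 64, 16, 16])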
def knn(x, k: int):
"""
inputs:
- x: b x npoints1 x num_dims (partical_cloud)
- k: int (the number of neighbor)
outputs:
- idx: int (neighbor_idx)
"""
# x : (batch_size, feature_dim, num_points)
# Retrieve nearest neighbor indices
if torch.cuda.is_available():
from knn_cuda import KNN
ref = x.transpose(2, 1).contiguous() # (batch_size, num_points, feature_dim)
query = ref
_, idx = KNN(k=k, transpose_mode=True)(ref, query)
else:
inner = -2 * torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x ** 2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
def get_graph_feature(x, k: int = 20, idx=None):
"""
inputs:
- x: b x npoints1 x num_dims (partical_cloud)
- k: int (the number of neighbor)
- idx: neighbor_idx
outputs:
- feature: b x npoints1 x (num_dims*2)
"""
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
if idx is None:
idx = knn(x, k=k) # (batch_size, num_points, k)
device = idx.device
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous()
feature = x.view(batch_size * num_points, -1)[idx, :]
feature = feature.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous() # edge (neighbor - point)
return feature
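# Illustrative sketch only (not part of the original model): build DGCNN-style edge
# features for a random point set. Assumes the CPU k-NN branch (no CUDA device) or
# that the optional knn_cuda package is installed.
def _demo_get_graph_feature():
    pts = torch.rand(2, 3, 32)                  # [B, num_dims, num_points]
    edge_feats = get_graph_feature(pts, k=4)    # (neighbor - point, point) pairs
    print(edge_feats.shape)                     # torch.Size([2, 6, 32, 4])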
class LinearDisplacementNet(nn.Module):
"""
Predict the displacement from pointcloud features and image features
Input:
        pc_features: pointcloud features from pointnet2 [B, D, N]
proj_features: image features from feature projection [B, N, D']
noises: noises vector [B, N, n_length]
Output:
        displacement: per-point displacement [B, C, N]
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.conv1 = nn.Conv1d(1120, 960, 1)
self.bn1 = nn.BatchNorm1d(960)
self.conv2 = nn.Conv1d(960, 512, 1)
self.bn2 = nn.BatchNorm1d(512)
self.conv3 = nn.Conv1d(512, 256, 1)
self.bn3 = nn.BatchNorm1d(256)
self.conv4 = nn.Conv1d(256, 128, 1)
self.bn4 = nn.BatchNorm1d(128)
self.conv5 = nn.Conv1d(128, 64, 1)
self.bn5 = nn.BatchNorm1d(64)
self.conv6 = nn.Conv1d(64, 3, 1)
def forward(self, transform_xyz, proj_features, pc_features, noises):
noises = noises.transpose(2, 1) # [B, n_length, N]
noises = utils.network_utils.var_or_cuda(noises)
proj_features = proj_features.transpose(2, 1) # [B, D', N]
proj_features = utils.network_utils.var_or_cuda(proj_features)
# concat the img features after each point features
refine_features = torch.cat((pc_features, proj_features, noises), 1) # [B, D+D'+n_length, N]
refine_features = F.relu(self.bn1(self.conv1(refine_features)))
refine_features = F.relu(self.bn2(self.conv2(refine_features)))
refine_features = F.relu(self.bn3(self.conv3(refine_features)))
refine_features = F.relu(self.bn4(self.conv4(refine_features)))
refine_features = F.relu(self.bn5(self.conv5(refine_features)))
displacements = self.conv6(refine_features)
        displacements = torch.sigmoid(displacements) * self.cfg.UPDATER.RANGE_MAX * 2 - self.cfg.UPDATER.RANGE_MAX
return displacements
class GraphxDisplacementNet(nn.Module):
"""
Predict the displacement from pointcloud features and image features
Input:
transform_xyz: pointcloud xyz [B, N, 3]
        pc_features: pointcloud features from pointnet2 [B, D, N]
proj_features: image features from feature projection [B, N, D']
noises: noises vector [B, N, n_length]
Output:
        displacement: per-point displacement [B, C, N]
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
deform_net = PointCloudGraphXDecoder
self.graphx = deform_net(in_features=1123, in_instances=cfg.GRAPHX.NUM_INIT_POINTS, activation=nn.ReLU())
def forward(self, transform_xyz, proj_features, pc_features, noises):
noises = utils.network_utils.var_or_cuda(noises)
proj_features = utils.network_utils.var_or_cuda(proj_features)
pc_features = pc_features.transpose(2, 1) # [B, N, D]
refine_features = torch.cat((transform_xyz, pc_features, proj_features, noises), 2) # [B, N, 3+D+D'+n_length]
displacements = self.graphx(refine_features)
displacements = displacements.transpose(2, 1)
        displacements = torch.sigmoid(displacements) * self.cfg.UPDATER.RANGE_MAX * 2 - self.cfg.UPDATER.RANGE_MAX
return displacements
class Updater(nn.Module):
"""
Refine the point cloud based on the input image
Input:
xyz: point cloud from reconstruction model
    Output:
update_pc: updated point cloud
"""
def __init__(self, cfg, in_channels, activation=nn.ReLU(), optimizer=None):
super().__init__()
self.cfg = cfg
self.img_enc = CNN18Encoder(in_channels, activation)
self.transform_pc = TransformPC(cfg)
self.feature_projection = FeatureProjection(cfg)
if cfg.UPDATER.PC_ENCODE_MODULE == 'Pointnet++':
self.pc_encode = PointNet2(cfg)
elif cfg.UPDATER.PC_ENCODE_MODULE == 'EdgeRes':
self.pc_encode = EdgeRes(use_SElayer=True)
if cfg.UPDATER.PC_DECODE_MODULE == 'Linear':
self.displacement_net = LinearDisplacementNet(cfg)
elif cfg.UPDATER.PC_DECODE_MODULE == 'Graphx':
self.displacement_net = GraphxDisplacementNet(cfg)
self.optimizer = None if optimizer is None else optimizer(self.parameters())
# emd loss
self.emd = EMD()
if torch.cuda.is_available():
self.img_enc = torch.nn.DataParallel(self.img_enc, device_ids=cfg.CONST.DEVICE).cuda()
self.transform_pc = torch.nn.DataParallel(self.transform_pc, device_ids=cfg.CONST.DEVICE).cuda()
self.feature_projection = torch.nn.DataParallel(self.feature_projection, device_ids=cfg.CONST.DEVICE).cuda()
self.pc_encode = torch.nn.DataParallel(self.pc_encode, device_ids=cfg.CONST.DEVICE).cuda()
self.displacement_net = torch.nn.DataParallel(self.displacement_net, device_ids=cfg.CONST.DEVICE).cuda()
self.emd = torch.nn.DataParallel(self.emd, device_ids=cfg.CONST.DEVICE).cuda()
self.cuda()
def forward(self, img, xyz, view_az, view_el):
img_features = self.img_enc(img)
transform_xyz = self.transform_pc(xyz, view_az, view_el)
proj_features = self.feature_projection(img_features, transform_xyz)
pc_features = self.pc_encode(transform_xyz)
noises = torch.normal(mean=0.0, std=1, size=(self.cfg.CONST.BATCH_SIZE, self.cfg.CONST.NUM_POINTS, self.cfg.UPDATER.NOISE_LENGTH))
displacements = self.displacement_net(transform_xyz, proj_features, pc_features, noises)
displacements = displacements.transpose(2, 1)
refine_pc = xyz + displacements
return refine_pc
def loss(self, img, xyz, gt_pc, view_az, view_el):
refine_pc = self(img, xyz, view_az, view_el)
# EMD
loss = torch.mean(self.emd(refine_pc, gt_pc))
return loss, refine_pc
def learn(self, img, xyz, gt_pc, view_az, view_el):
self.train(True)
self.optimizer.zero_grad()
loss, _ = self.loss(img, xyz, gt_pc, view_az, view_el)
loss.backward()
self.optimizer.step()
loss_np = loss.detach().item()
del loss
return loss_np
|
import argparse
import hashlib
import hmac
from datetime import datetime
from urllib.parse import quote_plus
import boto3
import urllib3
from botocore.exceptions import ClientError
def copy_logs_from_rds_to_s3(rds_instance_name, s3_bucket_name, region, log_prefix="", min_size=0):
"""
    Download log files from an RDS instance and upload them to an S3 bucket. Adapted from AWS's RDS support tool
'move-rds-logs-to-s3'.
See: https://github.com/awslabs/rds-support-tools/tree/master/database-logs/move-rds-logs-to-s3
:param rds_instance_name: The RDS instance name to download log files from
:param s3_bucket_name: The S3 bucket to upload log files to
:param region: The region where the S3 bucket and RDS instance are located
:param log_prefix: Filter log files with this prefix
:param min_size: The minimum size of log files to download, in bytes
"""
config_file_name = f"{rds_instance_name}/backup_config"
# Initialize
rds_client = boto3.client('rds', region_name=region)
s3_client = boto3.client('s3', region_name=region)
http = urllib3.PoolManager()
last_written_this_run = 0
last_written_time = 0
backup_start_time = datetime.now()
# Check if the S3 bucket exists and is accessible
try:
s3_client.head_bucket(Bucket=s3_bucket_name)
except ClientError as e:
error_code = int(e.response['ResponseMetadata']['HTTPStatusCode'])
if error_code == 404:
raise RuntimeError(f"Error: Bucket name {s3_bucket_name} not found")
raise RuntimeError(f"Error: Unable to access bucket name, error: {e.response['Error']['Message']}")
# Get the config file, if the config isn't present this is the first run
try:
s3_response = s3_client.get_object(Bucket=s3_bucket_name, Key=config_file_name)
last_written_time = int(s3_response['Body'].read(s3_response['ContentLength']))
print(f"Retrieving files with last written time after {str(last_written_time)} and min size {str(min_size)} B")
except ClientError as e:
error_code = int(e.response['ResponseMetadata']['HTTPStatusCode'])
if error_code == 404:
print("It appears this is the first log import, all files will be retrieved from RDS")
min_size = 0 # We don't want to filter by file size on the first run
else:
raise RuntimeError(f"Error: Unable to access config file, error: {e.response['Error']['Message']}")
# Copy the logs in batches to s3
copied_file_count = 0
log_marker = ""
more_logs_remaining = True
while more_logs_remaining:
db_logs = rds_client.describe_db_log_files(
DBInstanceIdentifier=rds_instance_name,
FilenameContains=log_prefix,
FileLastWritten=last_written_time,
Marker=log_marker,
FileSize=min_size
)
if 'Marker' in db_logs and db_logs['Marker'] != "":
log_marker = db_logs['Marker']
else:
more_logs_remaining = False
# Copy the logs in this batch
for db_log in db_logs['DescribeDBLogFiles']:
print(f"FileNumber: {copied_file_count + 1}")
filename = db_log['LogFileName']
size = int(db_log['Size'])
log_last_written = int(db_log['LastWritten'])
print(f"Downloading file: {filename} found w/ LastWritten value of: {log_last_written} ({size} bytes)")
# Download the log file
try:
log_file_data = get_log_file_via_rest(http, filename, rds_instance_name, region)
except Exception as e:
raise RuntimeError(f"File '{filename}' download failed: {e}")
if log_last_written > last_written_this_run:
last_written_this_run = log_last_written + 1
compressed_size = len(log_file_data)
pct_difference = 100 * (compressed_size - size) // size
print(f"Compressed log file size: {compressed_size} bytes ({pct_difference}% difference)")
# Upload the log file to S3
object_name = f"{rds_instance_name}/log_{backup_start_time.isoformat()}/{filename}.gz"
try:
s3_client.put_object(Bucket=s3_bucket_name, Key=object_name, Body=log_file_data)
copied_file_count += 1
except ClientError as e:
err_msg = f"Error writing object to S3 bucket, S3 ClientError: {e.response['Error']['Message']}"
raise RuntimeError(err_msg)
print(f"Uploaded log file {object_name} to S3 bucket {s3_bucket_name}")
print(f"Copied {copied_file_count} file(s) to S3")
# Update the last written time in the config
if last_written_this_run > 0:
try:
s3_client.put_object(
Bucket=s3_bucket_name,
Key=config_file_name,
Body=str.encode(str(last_written_this_run))
)
except ClientError as e:
err_msg = f"Error writing the config to S3 bucket, S3 ClientError: {e.response['Error']['Message']}"
raise RuntimeError(err_msg)
print(f"Wrote new config to {config_file_name} in S3 bucket {s3_bucket_name} with timestamp {last_written_this_run}")
print("Log file export complete")
def get_log_file_via_rest(http, filename, db_instance_identifier, region):
"""
AWS's web API is a bit esoteric and requires an arduous signing process. In general, the process can
be broken down into the following four steps:
1. Create a canonical request
2. Use the canonical request and additional metadata to create a string for signing.
3. Derive a signing key from your AWS secret access key. Then use the signing key, and the string
from the previous step, to create a signature.
4. Add the resulting signature to the HTTP request in a header or as a query string parameter.
    Ultimately, this entire process is necessary because the RDS SDK is broken when it comes to
downloading log file portions from RDS (ugh).
See:
https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
https://github.com/aws/aws-cli/issues/2268
https://github.com/aws/aws-cli/issues/3079
https://github.com/aws/aws-sdk-net/issues/921
:param http: A urllib3 http client
:param filename: The filename of the log file to download
:param db_instance_identifier: The DB instance to download log files from
:param region: The AWS region where the DB instance is located
:return: The log file data, gzip encoded
"""
method = 'GET'
service = 'rds'
host = f"rds.{region}.amazonaws.com"
endpoint = f"https://{host}"
# Credentials are intended to be implicitly provided and likely come from env vars or IAM roles
credentials = boto3.Session().get_credentials()
access_key = credentials.access_key
secret_key = credentials.secret_key
session_token = credentials.token
if access_key is None or secret_key is None:
raise RuntimeError('No access key is available.')
# Create a date for headers and the credential string
t = datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ') # Format date as YYYYMMDD'T'HHMMSS'Z'
datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
canonical_uri = f"/v13/downloadCompleteLogFile/{db_instance_identifier}/{filename}"
# Create the canonical headers and signed headers. Header names and value must be trimmed
# and lowercase, and sorted in ASCII order. Note trailing \n in canonical_headers. The
# 'signed_headers' variable is the list of headers that are being included as part of the
# signing process. For requests that use query strings, only 'host' is included in the
# signed headers.
canonical_headers = f"host:{host}\n"
signed_headers = 'host'
# Algorithm must match the hashing algorithm used, in this case SHA-256 (recommended)
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = f"{datestamp}/{region}/{service}/aws4_request"
# Build the canonical query string with the elements gathered above
canonical_querystring = build_canonical_query_string(
access_key,
credential_scope,
amz_date,
signed_headers,
session_token
)
# Create payload hash. For GET requests, the payload is an empty string ("").
payload_hash = hashlib.sha256(''.encode("utf-8")).hexdigest()
    # Combine elements to create the canonical API request
canonical_request = \
f"{method}\n{canonical_uri}\n{canonical_querystring}\n{canonical_headers}\n{signed_headers}\n{payload_hash}"
# Hash the request so it can be signed
hashed_request = hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
string_to_sign = f"{algorithm}\n{amz_date}\n{credential_scope}\n{hashed_request}"
# Create the signing key
signing_key = get_signature_key(secret_key, datestamp, region, service)
# Sign the hashed request (string_to_sign) using the signing key
signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
# Add signing information to the request. The auth information can be either in the query
# string value or in a header named Authorization. Here we put everything into the query
# string.
signed_querystring = f"{canonical_querystring}&X-Amz-Signature={signature}"
# Send the API request. The 'host' header must exist as a header in the request. In this case,
    # it is added automatically by the Python urllib3 library.
request_url = f"{endpoint}{canonical_uri}?{signed_querystring}"
print(f"Request URL: {request_url}")
# Setting the encoding to gzip has potential to save ~90% on file size
response = http.request(method, request_url, decode_content=False, headers={"Accept-Encoding": "gzip"})
print(f"Response code: {response.status}")
if response.status > 200:
raise RuntimeError(f"Could not download log file due to HTTP error status {response.status}")
return response.data
def get_signature_key(key, date, region_name, service_name):
"""
AWS key derivation functions.
See: http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
:param key: The signing key
:param date: The current date w/o time, YYYYMMDD
:param region_name: The AWS region
:param service_name: The AWS service name, e.g. RDS, S3, etc.
:return: The signing key
"""
def sign(k, msg): return hmac.new(k, msg.encode('utf-8'), hashlib.sha256).digest()
key_date = sign(('AWS4' + key).encode('utf-8'), date)
key_region = sign(key_date, region_name)
key_service = sign(key_region, service_name)
key_signing = sign(key_service, 'aws4_request')
return key_signing
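def _demo_signature_key():
    """A minimal sketch of the derivation chain above, using the example secret key
    from the AWS documentation (dummy values only, not real credentials)."""
    signing_key = get_signature_key('wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY',
                                    '20150830', 'us-east-1', 'iam')
    print(signing_key.hex())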
def build_canonical_query_string(access_key, credential_scope, amz_date, signed_headers, session_token=None):
"""
Create the canonical query string. In this example, request parameters are in the query string. Query string values
must be URL-encoded (space=%20). The parameters must be sorted by name.
See: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
:param access_key: The AWS access key
:param credential_scope: The AWS credential scope
:param amz_date: The current date, in AWS's specific date format YYYYMMDD'T'HHMMSS'Z'
    :param signed_headers: The headers to be signed in the request
:param session_token: The AWS session token, if it exists (default: None)
:return: The canonical query string, as defined in the AWS documentation
"""
credentials = quote_plus(f"{access_key}/{credential_scope}")
canonical_querystring = ''
canonical_querystring += 'X-Amz-Algorithm=AWS4-HMAC-SHA256'
canonical_querystring += '&X-Amz-Credential=' + credentials
canonical_querystring += '&X-Amz-Date=' + amz_date
canonical_querystring += '&X-Amz-Expires=30'
if session_token is not None:
canonical_querystring += '&X-Amz-Security-Token=' + quote_plus(session_token)
canonical_querystring += '&X-Amz-SignedHeaders=' + signed_headers
return canonical_querystring
def parse_args():
parser = argparse.ArgumentParser(description='Move logs from RDS to S3.')
parser.add_argument('--rds-instance-name', action='store', required=True, help='The RDS instance name')
parser.add_argument('--s3-bucket-name', action='store', required=True, help='The S3 bucket name')
parser.add_argument('--aws-region', action='store', required=True, help='The AWS region')
parser.add_argument('--log-prefix', action='store', required=False,
help='Filter logs with this prefix (default: empty string)', default="")
parser.add_argument('--min-size', action='store', required=False, type=int,
help='Filters logs less than the specified size in bytes (default: 0)', default=0)
return parser.parse_args()
def lambda_handler(event, context):
"""
Invoked by AWS Lambda. Args are expected to be passed as in the trigger event.
See: https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html
:param event: The Lambda event data. Assumed to be a 'dict' with the following keys:
* rds_instance_name: The RDS instance name to download log files from
* s3_bucket_name: The S3 bucket to upload log files to
        * aws_region: The AWS region where the S3 bucket and RDS instance are located
* log_prefix: Filter log files with this prefix
* min_size: The minimum size of log files to download, in bytes
:param context: The context of the Lambda invocation. See:
https://docs.aws.amazon.com/lambda/latest/dg/python-context.html
"""
print("Invoked by Lambda event:", event)
print("Request ID:", context.aws_request_id)
print("Log stream name:", context.log_stream_name)
print("Log group name:", context.log_group_name)
print("Memory limit (MB):", context.memory_limit_in_mb)
copy_logs_from_rds_to_s3(
event['rds_instance_name'],
event['s3_bucket_name'],
event['aws_region'],
event['log_prefix'],
event['min_size']
)
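# Example trigger event for lambda_handler (a sketch with placeholder values; the
# keys mirror what the handler reads above):
EXAMPLE_EVENT = {
    'rds_instance_name': 'my-rds-instance',
    's3_bucket_name': 'my-log-bucket',
    'aws_region': 'us-east-1',
    'log_prefix': 'error/postgresql',
    'min_size': 0,
}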
# Run from the command line
if __name__ == '__main__':
args = parse_args()
copy_logs_from_rds_to_s3(
args.rds_instance_name,
args.s3_bucket_name,
args.aws_region,
args.log_prefix,
args.min_size
)
|
import sys
sys.dont_write_bytecode = True # No '*.pyc' precompiled files
from rich import print
# print('=', )
from lib_local_various import showvar
from lib_main import combine_dicts
from rich.console import Console
from pyfiglet import figlet_format
from lib_main import negative_console
from lib_main import positive_console
from scapy.all import get_windows_if_list
import netifaces
from netifaces import ifaddresses
from netaddr import IPAddress, IPNetwork
from ipaddress import ip_network
from lib_parameters import EMPTIES
class IpSettings:
'''
    Works on MS Windows 10.
    Checks whether there is an active LAN interface providing a valid
    connection to the outside (that is, to the internet in most cases).
    If the PC has multiple network interfaces, only the right one is selected.
    All virtual, vpn, wifi, loopback etc. interfaces will be skipped.
    Uses the "scapy" and "netifaces" libraries.
    It then extracts the complete network settings from the operating system:
    names & descriptions of interfaces, their IP addresses, netmasks etc.
    These cannot be obtained with one library only.
'''
# def __init(self, primary_key='guid'):
# self.primary_key = primary_key
# ID of a network interface used by MS Windows
primary_key = 'guid'
def scapy_interfaces(self):
''' Uses the "scapy" library.
        Extracts partial information about the active interfaces
        (most, but not all, of their settings).
RETURNS:
ifaces (list of dicts):
List of active interfaces with most of their settings.
Each element contains one interface's incomplete settings.
'''
ifaces = []
items = ('name', 'description', 'guid', 'ipv4_metric', 'netmask', 'gw_ip')
for iface_from_win in get_windows_if_list():
dict_to_append = {}
dict_to_append['ip'] = ''
for item in items:
dict_to_append[item] = '' # Initialize
if item in iface_from_win:
if iface_from_win[item] not in EMPTIES:
dict_to_append[item] = iface_from_win[item]
if 'ips' in iface_from_win:
if (
iface_from_win['ips'] not in EMPTIES
and
len(iface_from_win['ips']) > 0
):
dict_to_append['ip'] = iface_from_win['ips'][-1]
ifaces.append(dict_to_append)
return ifaces
def netifaces_interfaces(self):
'''
Uses the "netifaces" library.
Extracts the remaining part of information about active interfaces.
RETURNS:
ifaces (list of dicts):
List of active interfaces and their netmasks.
Each element contains one interface's incomplete settings.
'''
ifaces = []
for iface in netifaces.interfaces():
if netifaces.AF_INET in ifaddresses(iface):
iface_data = ifaddresses(iface)[netifaces.AF_INET]
dict_to_append = {}
dict_to_append['netmask'] = ''
dict_to_append['guid'] = iface
if 'netmask' in iface_data[0]:
dict_to_append['netmask'] = iface_data[0]['netmask']
ifaces.append(dict_to_append)
return ifaces
def netifaces_gateways(self):
'''
Uses "netifaces" library .
Extracts the gateway related information about the interfaces.
RETURNS:
gateways (list of dicts):
Each element contains the gateway setting for one
active interface.
'''
gateways = []
netifs_gws = netifaces.gateways()
def_gw_ip = None # IP address of the default system gw
def_gw_guid = None # GUID of the i-face having the def. sys. gw.
if netifaces.AF_INET in netifs_gws:
if 'default' in netifs_gws:
if netifaces.AF_INET in netifs_gws['default']:
def_gw_info = netifs_gws['default'][netifaces.AF_INET]
def_gw_ip = def_gw_info[0]
def_gw_guid = def_gw_info[1]
intf_gw_infos = netifs_gws[netifaces.AF_INET]
for intf_gw_info in intf_gw_infos:
dict_to_append = {'gw_ip': ''}
dict_to_append['gw_ip'] = intf_gw_info[0]
dict_to_append['guid'] = intf_gw_info[1]
if intf_gw_info[1] == def_gw_guid and intf_gw_info[0] == def_gw_ip:
dict_to_append['gw_is_def'] = True
else:
dict_to_append['gw_is_def'] = False
gateways.append(dict_to_append)
return gateways
def active_ifaces(self, verbose=True):
''' Uses previous functions to bundle
all information about all active interfaces.
RETURNS:
active_ifaces (list of dicts):
List of all active interfaces on a Windows machine.
Each element represents one active interface.
The element of the list may have the following keys:
'name' - name of the interface
'description' - description of the interface
'guid' - GUID of the interface object in the OS
'ipv4_metric' - metric/cost of the interface
'ip' - IP address of the interface
'netmask' - netmask
'gw_ip' - IP address of the interface's gateway
'gw_is_def' - Warning: This value is not reliable!
Use 'ipv4_metric' in your calculations instead.
Shows if the OS considers it a default gateway.
'''
ifaces_combined = combine_dicts(
self.scapy_interfaces(),
self.netifaces_interfaces(),
self.primary_key
)
active_ifaces = combine_dicts(
ifaces_combined,
self.netifaces_gateways(),
self.primary_key)
if verbose is True:
print('-' * 64)
print('This is the list of all active interfaces on this PC.')
for iface in active_ifaces:
print(iface)
print('-' * 64)
return active_ifaces
def filtered_ifaces(self, verbose=True):
        ''' From the list of active interfaces,
        filters out the ones that cannot be connecting to the outside.
        For a PC with only one LAN connection this is not that relevant,
        but it matters if the PC has more interfaces, e.g.
        loopback, virtual etc. These are filtered out using two exclusion
        lists defined inside this method: iface_excls (substrings that
        disqualify an interface's name or description) and ip_excls
        (prefixes that disqualify an interface's IP address).
        ARGS:
            verbose (bool): Decides if debug messages are printed.
RETURN:
filtered_results (list of dicts):
Validated interfaces,
that can be connecting the host to the outside world.
'''
# An interface to be considered:
# - must not have one of below in its name or description:
iface_excls = (
'loop', 'wifi', 'wireless', 'vmware', 'box', 'vpn', 'tunnel'
)
# - must not have its IP starting with:
ip_excls = ('127', '0', '255', '169.254')
ifaces = self.active_ifaces()
filtered_results = []
if verbose is True:
print('-' * 64)
print('Interfaces that will be selected or rejected as valid for outside connection:')
for iface in ifaces:
reject_reasons = []
if any(iface_excl in iface['name'].lower()
for iface_excl in iface_excls):
reject_reasons.append('Invalid name. '
'It suggests not a regular LAN connection.')
if any(iface_excl in iface['description'].lower()
for iface_excl in iface_excls):
reject_reasons.append('Invalid description. '
'It suggests not a regular LAN connection.')
if iface['ip'].startswith(ip_excls) or len(iface['ip']) == 0:
reject_reasons.append('Invalid IP address. '
'It suggests an unreachable subnet.')
if 'gw_ip' not in iface or len(iface['gw_ip']) == 0:
reject_reasons.append('No gateway IP address.')
if len(iface['netmask']) == 0:
                reject_reasons.append('No netmask. The interface may not be fully configured yet.')
if len(reject_reasons) > 0:
if verbose is True:
print('-' * 4)
negative_console.print('Interface ', iface['name'],
'is not valid. Reasons:')
for reason in reject_reasons:
negative_console.print('\t', reason)
print('-' * 64)
else: # Only if iface was not rejected for any reasons.
if verbose is True:
positive_console.print('Interface ', iface['name'],
'is valid.')
filtered_results.append(iface)
filtered_results.sort(reverse=False, key=lambda item: item['ipv4_metric'])
if verbose is True:
if len(filtered_results) == 0:
negative_console.print(
'No active interfaces connecting your PC were found!')
return filtered_results
def final_iface(self, verbose=True):
''' This is the function returning the final result.
From the list with validated interfaces,
selects one that has the best metric.
Return:
final_dict (dict):
That is the network interface that is considered,
to be the one connecting the PC to Internet.
The keys are:
                'name'
                'description'
                'ip'
                'netmask'
                'gateway'
                'subnet'
'''
final_dict = {}
filtered_ifaces = self.filtered_ifaces()
if len(filtered_ifaces) > 0:
name = filtered_ifaces[0]['name']
description = filtered_ifaces[0]['description']
            ip = filtered_ifaces[0]['ip']
            netmask = filtered_ifaces[0]['netmask']
            gateway = filtered_ifaces[0]['gw_ip']
            # Derive the network (subnet) address from the interface IP and netmask
subnet = str(ip_network(
filtered_ifaces[0]['ip'] + '/' + filtered_ifaces[0]['netmask'],
strict=False
)
).split('/')[0]
final_dict = {
'name': name,
'description': description,
'ip': ip,
'netmask': netmask,
'gateway': gateway,
'subnet': subnet
}
positive_console.print('=' * 96)
positive_console.print(
figlet_format(
'Final IP and active interface settings', font="cybermedium"
))
if verbose is True:
positive_console.print('Interf. name:\t', name)
positive_console.print('Interf. descr.:\t', description)
positive_console.print('ip:\t\t', ip)
positive_console.print('netmask:\t', netmask)
positive_console.print('gateway:\t', gateway)
positive_console.print('=' * 96)
return final_dict
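def example_usage():
    '''A minimal usage sketch (assumes a Windows host and that the local lib_main /
    lib_parameters helper modules are importable).'''
    settings = IpSettings().final_iface(verbose=False)
    if settings:
        print(settings['ip'], settings['netmask'], settings['gateway'], settings['subnet'])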
def main():
print('Library for interfaces of your local PC.')
if __name__ == '__main__':
main()
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
"""
This module defines the behaviors of ``RooCurve``.
"""
from __future__ import absolute_import
import numpy
import uproot
import uproot.behaviors.TGraph
# '@fUniqueID', '@fBits', 'fName', 'fTitle', 'fLineColor', 'fLineStyle', 'fLineWidth',
# 'fFillColor', 'fFillStyle', 'fMarkerColor', 'fMarkerStyle', 'fMarkerSize', 'fNpoints',
# 'fX', 'fY', 'fFunctions', 'fHistogram', 'fMinimum', 'fMaximum', '_yAxisLabel',
# '_ymin', '_ymax', '_normValue'
def _parse_errs(xvalues, errs):
xvals, yvals = errs.values()
# Index of one-past right edge
right_ind = numpy.argmax(xvals) + 2
up_x = xvals[:right_ind]
up_y = yvals[:right_ind]
down_x = numpy.flip(xvals[right_ind:])
down_y = numpy.flip(yvals[right_ind:])
if (not numpy.all(numpy.diff(up_x) >= 0)) or (
not numpy.all(numpy.diff(down_x) >= 0)
):
raise ValueError("RooCurve x values are not increasing")
up = numpy.interp(xvalues, up_x, up_y)
down = numpy.interp(xvalues, down_x, down_y)
return (up, down)
def _centers(edges):
return (edges[1:] + edges[:-1]) / 2
class RooCurve(uproot.behaviors.TGraph.TGraph):
"""Behaviors for RooCurve.
Beyond the behavior of a ``TGraph`` this also provides functionality to
interpolate the graph at provided points, or extract a stored histogram
(given bin edges).
"""
@property
def name(self):
"""
The name of the histogram.
"""
return self.member("fName")
@property
def title(self):
"""
The title of the histogram.
"""
return self.member("fTitle")
@property
def curve_type(self):
"""
Determines whether curve represents values or errors by checking if it is open or closed.
Returns "VALUES" or "ERRORS".
"""
xvals = self.values(axis="x")
if numpy.isclose(xvals[0], xvals[-1]):
return "ERRORS"
else:
return "VALUES"
def interpolate(self, xvalues):
"""
Args:
xvalues (array_like): xvalues to interpolate at.
Returns y values when RooCurve is interpolated at the given x values.
"""
if self.curve_type != "VALUES":
raise ValueError(
"interpolate can only be called on a value (open) curve. "
"Try interpolate_errors."
)
xvals, yvals = self.values()
return numpy.interp(xvalues, xvals, yvals)
def interpolate_asymm_errors(self, xvalues):
"""
Args:
xvalues (array_like): xvalues to interpolate at.
Returns:
up (array_like): Upper boundary of uncertainty band.
down (array_like): Lower boundary of uncertainty band.
Returns asymmetric y errors when RooCurve is interpolated at the given x values.
"""
if self.curve_type != "ERRORS":
raise ValueError(
"interpolate_errors can only be called on an error (closed) curve. "
"Try interpolate."
)
up, down = _parse_errs(xvalues, self)
return (up, down)
def interpolate_errors(self, xvalues):
"""
Args:
xvalues (array_like): xvalues to interpolate at.
Returns y errors when RooCurve is interpolated at the given x values.
"""
if self.curve_type != "ERRORS":
raise ValueError(
"interpolate_errors can only be called on an error (closed) curve. "
"Try interpolate."
)
up, down = _parse_errs(xvalues, self)
return numpy.abs((up - down) / 2)
def to_boost(self, bin_edges, error_curve=None):
"""
Args:
bin_edges (array_like): Bin edges for histogram.
error_curve (RooCurve): RooCurve visualizing errors.
Returns ``boost-histogram`` object by interpolating ``RooCurve``.
"""
if self.curve_type != "VALUES":
raise ValueError(
"to_boost should be called on the value curve. The error curve is passed using the"
"error_curve parameter."
)
boost_histogram = uproot.extras.boost_histogram()
axis = boost_histogram.axis.Variable(bin_edges, underflow=False, overflow=False)
axis.name = self.name
axis.title = self.title
centers = _centers(bin_edges)
values = self.interpolate(centers)
if error_curve is not None:
errs = error_curve.interpolate_errors(centers)
variances = numpy.square(errs)
hist = boost_histogram.Histogram(
axis, storage=boost_histogram.storage.Weight()
)
hist.name = self.name
hist.title = self.title
view = hist.view()
view.value = values
view.variance = variances
return hist
else:
hist = boost_histogram.Histogram(
axis, storage=boost_histogram.storage.Double()
)
hist.name = self.name
hist.title = self.title
view = hist.view()
view[...] = values
return hist
def to_hist(self, bin_edges, error_curve=None):
"""
Args:
bin_edges (array_like): Bin edges for histogram.
error_curve (RooCurve): RooCurve visualizing errors.
Returns ``hist`` object by interpolating ``RooCurve``.
"""
return uproot.extras.hist().Hist(self.to_boost(bin_edges, error_curve))
|
from audiomate import annotations
import numpy as np
from evalmate import alignment
from evalmate import evaluator
import pytest
class TestKWSEvaluator:
def test_evaluate_with_two_label_lists(self, kws_ref_and_hyp_label_list):
ll_ref, ll_hyp = kws_ref_and_hyp_label_list
result = evaluator.KWSEvaluator().evaluate(ll_ref, ll_hyp)
expected_matches = [
alignment.LabelPair(annotations.Label('up', start=5.28, end=5.99),
annotations.Label('up', start=5.20, end=5.88)),
alignment.LabelPair(annotations.Label('down', start=10.35, end=11.12),
annotations.Label('right', start=10.30, end=11.08)),
alignment.LabelPair(annotations.Label('right', start=20.87, end=22.01), None),
alignment.LabelPair(annotations.Label('up', start=33.00, end=33.4), None),
alignment.LabelPair(annotations.Label('up', start=33.4, end=33.8), None),
alignment.LabelPair(annotations.Label('down', start=39.28, end=40.0),
annotations.Label('down', start=39.27, end=40.01)),
alignment.LabelPair(None, annotations.Label('up', start=32.00, end=32.5)),
alignment.LabelPair(None, annotations.Label('up', start=34.2, end=34.8)),
alignment.LabelPair(None, annotations.Label('left', start=39.3, end=39.9))
]
assert isinstance(result, evaluator.KWSEvaluation)
assert sorted(expected_matches) == sorted(result.label_pairs)
def test_evaluate_corpus_with_hyp_labels(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
assert isinstance(result, evaluator.KWSEvaluation)
assert result.confusion.total == 24
assert result.confusion.correct == 17
assert result.confusion.substitutions == 4
assert result.confusion.deletions == 3
assert result.confusion.insertions == 6
def test_evaluate_with_empty_hyp(self):
ref = evaluator.Outcome(label_lists={
'a': annotations.LabelList(labels=[
annotations.Label('one', 2.5, 4.5),
annotations.Label('two', 10.5, 11.5),
]),
'b': annotations.LabelList(labels=[
annotations.Label('one', 1.5, 1.9),
]),
'c': annotations.LabelList(labels=[
annotations.Label('two', 4.5, 4.9),
annotations.Label('two', 10.5, 11.5),
]),
})
hyp = evaluator.Outcome(label_lists={
'a': annotations.LabelList(labels=[
annotations.Label('one', 2.5, 4.5),
annotations.Label('two', 10.5, 11.5),
]),
'b': annotations.LabelList(labels=[
]),
'c': annotations.LabelList(labels=[
annotations.Label('two', 4.5, 4.9),
annotations.Label('two', 10.5, 11.5),
]),
})
result = evaluator.KWSEvaluator().evaluate(ref, hyp)
assert result.confusion.total == 5
class TestKWSEvaluation:
def test_false_rejection_rate(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
per_keyword = [2 / 7, 1 / 4, 2 / 6, 1 / 3, 1 / 4]
assert result.false_rejection_rate() == pytest.approx(np.mean(per_keyword))
def test_false_rejection_rate_for_subgroup_of_keywords(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(
kws_ref_corpus_and_hyp_labels[0],
kws_ref_corpus_and_hyp_labels[1]
)
per_keyword = [2 / 7, 1 / 4]
assert result.false_rejection_rate(['one', 'four']) == pytest.approx(np.mean(per_keyword))
def test_false_rejection_rate_with_no_occurences_returns_zero(self):
result = evaluator.KWSEvaluator().evaluate(
annotations.LabelList(labels=[
]),
annotations.LabelList(labels=[
annotations.Label('four', 2.5, 3.0)
])
)
assert result.false_rejection_rate() == 0.0
def test_false_rejection_rate_for_single_keyword(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
assert result.false_rejection_rate(keywords='four') == pytest.approx(1.0 / 4.0)
def test_false_alarm_rate(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
per_keyword = np.array([0 / 143.4, 4 / 146.4, 2 / 144.4, 1 / 147.4, 3 / 146.4])
assert result.false_alarm_rate() == pytest.approx(np.mean(per_keyword))
def test_false_alarm_rate_for_subgroup_of_keywords(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(
kws_ref_corpus_and_hyp_labels[0],
kws_ref_corpus_and_hyp_labels[1]
)
per_keyword = np.array([0 / 143.4, 3 / 146.4])
assert result.false_alarm_rate(['one', 'four']) == pytest.approx(np.mean(per_keyword))
def test_false_alarm_rate_for_single_keyword(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
assert result.false_alarm_rate(keywords='four') == pytest.approx(3 / (150.4 - 4))
def test_term_weighted_value(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
p_miss = np.array([2 / 7, 1 / 4, 2 / 6, 1 / 3, 1 / 4]).mean()
p_fa = np.array([0 / 143.4, 4 / 146.4, 2 / 144.4, 1 / 147.4, 3 / 146.4]).mean()
beta = (0.1 / 1.0) * (((10 ** -4) ** -1) - 1)
twv = 1 - (p_miss + beta * p_fa)
assert result.term_weighted_value() == pytest.approx(twv)
def test_term_weighted_value_for_subgroup_of_keyword(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(
kws_ref_corpus_and_hyp_labels[0],
kws_ref_corpus_and_hyp_labels[1]
)
p_miss = np.array([2 / 7, 1 / 4]).mean()
p_fa = np.array([0 / 143.4, 3 / 146.4]).mean()
beta = (0.1 / 1.0) * (((10 ** -4) ** -1) - 1)
twv = 1 - (p_miss + beta * p_fa)
assert result.term_weighted_value(['one', 'four']) == pytest.approx(twv)
def test_term_weighted_value_for_single_keyword(self, kws_ref_corpus_and_hyp_labels):
result = evaluator.KWSEvaluator().evaluate(kws_ref_corpus_and_hyp_labels[0], kws_ref_corpus_and_hyp_labels[1])
p_miss = 1.0 / 4.0
p_fa = 3 / (150.4 - 4)
beta = (0.1 / 1.0) * (((10 ** -4) ** -1) - 1)
twv = 1 - (p_miss + beta * p_fa)
assert result.term_weighted_value(keywords='four') == pytest.approx(twv)
|
import matplotlib.pyplot as plt
import numpy as np
import scipy
import cvxpy as cp
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pandas as pd
def preprocess():
data = pd.read_csv('weatherAUS.csv')
    # Drop certain features and any data with null values
data = data.drop(['Sunshine', 'Evaporation', 'Cloud3pm', 'Cloud9am',
'Location', 'RISK_MM','Date'], axis=1)
data = data.dropna(how='any')
# Change labels
data['RainToday'].replace({'No': 0, 'Yes': 1}, inplace = True)
data['RainTomorrow'].replace({'No': -1, 'Yes': 1}, inplace = True)
# Change categorical data to integers
categorical_columns = ['WindGustDir', 'WindDir3pm', 'WindDir9am']
data = pd.get_dummies(data, columns=categorical_columns)
# standardize data set
scaler = preprocessing.MinMaxScaler()
scaler.fit(data)
data = pd.DataFrame(scaler.transform(data),
index=data.index, columns=data.columns)
y = data.pop('RainTomorrow')
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2)
return X_train, X_test, y_train, y_test
class LinearSVM(object):
"""A support vector machine with linear kernel that trains using
the primal convex minimization problem"""
def __init__(self, C=1.0):
self.C = C
self.w = None
self.b = None
def train(self, X, y):
"""Use training arrays to set the values of self.w and self.b"""
if isinstance(X, pd.DataFrame):
X = X.values
if isinstance(y, pd.DataFrame):
y = y.values
y = np.array([-1 if x == 0 else 1 for x in y])
nrows, ncols = np.shape(X)
ζ = cp.Variable(nrows)
        # Weight vector (one entry per feature) and bias
        w = cp.Variable(ncols)
        b = cp.Variable()
        # Soft-margin primal objective: 0.5 * ||w||^2 + C * sum of slacks
        objective = cp.Minimize(0.5 * cp.sum_squares(w) + self.C * cp.sum(ζ))
        # Margin constraints with slack variables (use @ for the matrix-vector product)
        constraints = [cp.multiply(y, X @ w + b) >= 1 - ζ,
                       ζ >= 0]
prob = cp.Problem(objective, constraints)
prob.solve()
self.w = w.value
self.b = b.value
def predict(self, X_test):
"""Return a numpy array of prediction labels"""
if isinstance(X_test, pd.DataFrame):
X_test = X_test.values
predict = np.dot(X_test, self.w) + self.b
predict = [1 if x >= 0 else 0 for x in predict]
return np.array(predict)
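def _demo_linear_svm():
    """A minimal sketch (not part of the assignment data): train the primal SVM on a
    tiny, linearly separable toy set; assumes cvxpy's default solver is available."""
    X_toy = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0],
                      [3.0, 3.0], [3.0, 4.0], [4.0, 3.0]])
    y_toy = np.array([0, 0, 0, 1, 1, 1])
    svm = LinearSVM(C=1.0)
    svm.train(X_toy, y_toy)
    print(svm.predict(X_toy))   # expected: [0 0 0 1 1 1]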
def linear_kernel(a, b):
"""Return the data converted by linear kernel"""
return np.dot(a, b.T)
def polynomial_kernel(a, b):
"""Return the data converted by polynomial kernel"""
return (np.dot(a, b.T) + 1) ** 2
def rbf_kernel(a, b):
    """Return the RBF (Gaussian) kernel matrix between the rows of a and b"""
    a, b = np.atleast_2d(a), np.atleast_2d(b)
    sq_dists = np.sum(a**2, axis=1)[:, None] + np.sum(b**2, axis=1)[None, :] - 2 * np.dot(a, b.T)
    return np.exp(-sq_dists)
class SVM(object):
def __init__(self, kernel=rbf_kernel, C=1.0):
self.kernel = kernel
self.C = C
self.X = None
self.y = None
self.α = None
self.b = None
def train(self, X, y):
"""Use training arrays X and y to set the values of
self.α and self.b"""
if isinstance(X, pd.DataFrame):
X = X.values
if isinstance(y, pd.DataFrame):
y = y.values
y = np.array([-1 if x == 0 else 1 for x in y])
nrows, ncols = np.shape(X)
α = cp.Variable(nrows)
        # Kernel (Gram) matrix as a numpy array
        K = self.kernel(X, X)
        # Dual soft-margin objective: (1/2) (α∘y)ᵀ K (α∘y) − Σ α
        objective = cp.Minimize(1/2 * cp.quad_form(cp.multiply(α, y), K)
                                - cp.sum(α))
        # Dual constraints: 0 <= α <= C and Σ α_i y_i = 0
        constraints = [α >= 0,
                       α <= self.C,
                       cp.sum(cp.multiply(α, y)) == 0]
prob = cp.Problem(objective, constraints)
prob.solve()
self.X = X
self.y = y
# fill in the value of α
self.α = α.value
        # Bias: average of y_i − Σ_j α_j y_j K(x_j, x_i) over the training points
        self.b = np.mean(y - np.dot(K, self.α * self.y))
def predict(self, X_test):
"""Return a numpy array of prediction labels"""
if isinstance(X_test, pd.DataFrame):
X_test = X_test.values
        predict = np.dot(self.kernel(X_test, self.X), self.α * self.y) + self.b
predict = [1 if x >= 0 else 0 for x in predict]
return np.array(predict)
|
# Generates random data in specified shape - Lucas kort (Jun. 23, 2021)
import numpy as np
import pandas as pd
import random as rand
import math
import tkinter as tk #hide tk window
from tkinter import filedialog #get a file dialog window
# Output data configuration --------------------------------------
record_size = 5         # length of each data vector
record_total_n = 1000   # number of vectors per dataset
a = 1                   # lower bound of the log(theta) interval
b = 3.5                 # upper bound of the log(theta) interval
gamma = 0.5             # initial gamma parameter (minimum gamma)
gamma_plus = 0.5        # gamma increment
gamma_each = 150        # increment gamma every x data sets
#----------------------------------------------------------------
# Weibull time generation (inverse-transform sampling from uniform x)
def weibull(theta,gamma,x,x_size):
weibull_x = np.zeros(x_size)
for i in range(x_size):
weibull_x[i] = theta*(-math.log(x[i]))**(1/gamma)
return weibull_x
# Save the results to an Excel file
def export_xlsx(parameters,time_weibull,record_size,index_col):
    index = range(index_col)                   # index in ascending order
    rand_index = np.random.permutation(index)  # randomize the index order
    # First random, then ascending, so the spreadsheet ends up in ascending order
df_parameters=pd.DataFrame(
parameters,
index=rand_index,
columns=['Theta','Log(Theta)','Gamma']
)
header = []
    for i in range(record_size):  # header for the time columns
header.append('T' + str(i+1))
df_time_weibull=pd.DataFrame(
time_weibull,
index=rand_index,
columns=header
)
    new_index = np.random.permutation(index)  # randomize the data order
root = tk.Tk()
root.withdraw()
    export_file_path = filedialog.asksaveasfilename(defaultextension='.xlsx')  # save location + .xlsx extension
    with pd.ExcelWriter(export_file_path) as writer:  # write to more than one sheet at the same time
df_parameters.reindex(index).to_excel(writer,sheet_name = 'Parâmetros')
df_time_weibull.reindex(index).to_excel(writer,sheet_name = 'Tempos Weibull')
# Variable initialization
parameters = np.zeros((record_total_n,3))
time_weibull = np.zeros((record_total_n,record_size))
x = np.random.default_rng().uniform(0,1,record_size)
# Automatic generation of Weibull data
for i in range(record_total_n):
log_theta = a+(b-a)*rand.random()
theta = 10**log_theta
if (i%gamma_each==0):
gamma += gamma_plus
time_weibull[i,:] = np.sort(weibull(theta,gamma,x,record_size).copy())
parameters[i,:] = [theta,log_theta,gamma]
# Export the data to an Excel file
export_xlsx(parameters,time_weibull,record_size,record_total_n)
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for WebSocket tunnelling with Cloud IAP."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import os
import struct
import sys
from googlecloudsdk.core import context_aware
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import http_proxy_types
import httplib2
import six
from six.moves.urllib import parse
import socks
URL_SCHEME = 'wss'
URL_HOST = 'tunnel.cloudproxy.app'
MTLS_URL_HOST = 'mtls.tunnel.cloudproxy.app'
URL_PATH_ROOT = '/v4'
CONNECT_ENDPOINT = 'connect'
RECONNECT_ENDPOINT = 'reconnect'
SUBPROTOCOL_NAME = 'relay.tunnel.cloudproxy.app'
SUBPROTOCOL_TAG_LEN = 2
SUBPROTOCOL_HEADER_LEN = SUBPROTOCOL_TAG_LEN + 4
SUBPROTOCOL_MAX_DATA_FRAME_SIZE = 16384
SUBPROTOCOL_TAG_CONNECT_SUCCESS_SID = 0x0001
SUBPROTOCOL_TAG_RECONNECT_SUCCESS_ACK = 0x0002
SUBPROTOCOL_TAG_DATA = 0x0004
SUBPROTOCOL_TAG_ACK = 0x0007
# The proxy_info field should be either None or type httplib2.ProxyInfo
IapTunnelTargetInfo = collections.namedtuple(
'IapTunnelTarget',
['project', 'zone', 'instance', 'interface', 'port', 'url_override',
'proxy_info', 'network', 'region', 'host'])
class CACertsFileUnavailable(exceptions.Error):
pass
class IncompleteData(exceptions.Error):
pass
class InvalidWebSocketSubprotocolData(exceptions.Error):
pass
class MissingTunnelParameter(exceptions.Error):
pass
class UnexpectedTunnelParameter(exceptions.Error):
pass
class PythonVersionMissingSNI(exceptions.Error):
pass
class UnsupportedProxyType(exceptions.Error):
pass
def ValidateParameters(tunnel_target):
"""Validate the parameters.
Inspects the parameters to ensure that they are valid for either a VM
instance-based connection, or a host-based connection.
Args:
tunnel_target: The argument container.
Raises:
MissingTunnelParameter: A required argument is missing.
UnexpectedTunnelParameter: An unexpected argument was found.
UnsupportedProxyType: A non-http proxy was specified.
"""
for field_name, field_value in tunnel_target._asdict().items():
if not field_value and field_name in ('project', 'port'):
raise MissingTunnelParameter('Missing required tunnel argument: ' +
field_name)
if tunnel_target.region or tunnel_target.network or tunnel_target.host:
for field_name, field_value in tunnel_target._asdict().items():
if not field_value and field_name in ('region', 'network', 'host'):
raise MissingTunnelParameter('Missing required tunnel argument: ' +
field_name)
if field_value and field_name in ('instance', 'interface', 'zone'):
raise UnexpectedTunnelParameter('Unexpected tunnel argument: ' +
field_name)
else:
for field_name, field_value in tunnel_target._asdict().items():
if not field_value and field_name in ('zone', 'instance', 'interface'):
raise MissingTunnelParameter('Missing required tunnel argument: ' +
field_name)
if tunnel_target.proxy_info:
proxy_type = tunnel_target.proxy_info.proxy_type
if (proxy_type and proxy_type != socks.PROXY_TYPE_HTTP):
raise UnsupportedProxyType(
'Unsupported proxy type: ' +
http_proxy_types.REVERSE_PROXY_TYPE_MAP[proxy_type])
def CheckCACertsFile(ignore_certs):
"""Get and check that CA cert file exists."""
ca_certs = httplib2.CA_CERTS
custom_ca_certs = properties.VALUES.core.custom_ca_certs_file.Get()
if custom_ca_certs:
ca_certs = custom_ca_certs
if not os.path.exists(ca_certs):
error_msg = 'Unable to locate CA certificates file.'
log.warning(error_msg)
error_msg += ' [%s]' % ca_certs
if ignore_certs:
log.info(error_msg)
else:
raise CACertsFileUnavailable(error_msg)
return ca_certs
def CheckPythonVersion(ignore_certs):
if (not ignore_certs and
(six.PY2 and sys.version_info < (2, 7, 9) or
six.PY3 and sys.version_info < (3, 2, 0))):
raise PythonVersionMissingSNI(
'Python version %d.%d.%d does not support SSL/TLS SNI needed for '
'certificate verification on WebSocket connection.' %
(sys.version_info.major, sys.version_info.minor,
sys.version_info.micro))
def CreateWebSocketConnectUrl(tunnel_target):
"""Create Connect URL for WebSocket connection."""
if tunnel_target.host:
return _CreateWebSocketUrl(CONNECT_ENDPOINT,
{'project': tunnel_target.project,
'region': tunnel_target.region,
'network': tunnel_target.network,
'host': tunnel_target.host,
'port': tunnel_target.port},
tunnel_target.url_override)
else:
return _CreateWebSocketUrl(CONNECT_ENDPOINT,
{'project': tunnel_target.project,
'zone': tunnel_target.zone,
'instance': tunnel_target.instance,
'interface': tunnel_target.interface,
'port': tunnel_target.port},
tunnel_target.url_override)
def CreateWebSocketReconnectUrl(tunnel_target, sid, ack_bytes):
"""Create Reconnect URL for WebSocket connection."""
url_query_pieces = {'sid': sid, 'ack': ack_bytes}
if tunnel_target.host:
url_query_pieces['region'] = tunnel_target.region
else:
url_query_pieces['zone'] = tunnel_target.zone
return _CreateWebSocketUrl(RECONNECT_ENDPOINT, url_query_pieces,
tunnel_target.url_override)
def _CreateWebSocketUrl(endpoint, url_query_pieces, url_override):
"""Create URL for WebSocket connection."""
scheme = URL_SCHEME
use_mtls = bool(context_aware.Config())
hostname = MTLS_URL_HOST if use_mtls else URL_HOST
path_root = URL_PATH_ROOT
if url_override:
url_override_parts = parse.urlparse(url_override)
scheme, hostname, path_override = url_override_parts[:3]
if path_override and path_override != '/':
path_root = path_override
qs = parse.urlencode(url_query_pieces)
path = ('%s%s' % (path_root, endpoint) if path_root.endswith('/')
else '%s/%s' % (path_root, endpoint))
return parse.urlunparse((scheme, hostname, path, '', qs, ''))
def CreateSubprotocolAckFrame(ack_bytes):
try:
# TODO(b/139055137) Remove str(...)
return struct.pack(str('>HQ'), SUBPROTOCOL_TAG_ACK, ack_bytes)
except struct.error:
raise InvalidWebSocketSubprotocolData('Invalid Ack [%r]' % ack_bytes)
def CreateSubprotocolDataFrame(bytes_to_send):
# TODO(b/139055137) Remove str(...)
return struct.pack(str('>HI%ds' % len(bytes_to_send)),
SUBPROTOCOL_TAG_DATA, len(bytes_to_send), bytes_to_send)
def ExtractSubprotocolAck(binary_data):
return _ExtractUnsignedInt64(binary_data)
def ExtractSubprotocolConnectSuccessSid(binary_data):
data_len, binary_data = _ExtractUnsignedInt32(binary_data)
return _ExtractBinaryArray(binary_data, data_len)
def ExtractSubprotocolData(binary_data):
data_len, binary_data = _ExtractUnsignedInt32(binary_data)
return _ExtractBinaryArray(binary_data, data_len)
def ExtractSubprotocolReconnectSuccessAck(binary_data):
return _ExtractUnsignedInt64(binary_data)
def ExtractSubprotocolTag(binary_data):
return _ExtractUnsignedInt16(binary_data)
def _ExtractUnsignedInt16(binary_data):
if len(binary_data) < 2:
raise IncompleteData()
# TODO(b/139055137) Remove str(...)
return (struct.unpack(str('>H'), binary_data[:2])[0],
binary_data[2:])
def _ExtractUnsignedInt32(binary_data):
if len(binary_data) < 4:
raise IncompleteData()
# TODO(b/139055137) Remove str(...)
return (struct.unpack(str('>I'), binary_data[:4])[0],
binary_data[4:])
def _ExtractUnsignedInt64(binary_data):
if len(binary_data) < 8:
raise IncompleteData()
# TODO(b/139055137) Remove str(...)
return (struct.unpack(str('>Q'), binary_data[:8])[0],
binary_data[8:])
def _ExtractBinaryArray(binary_data, data_len):
if len(binary_data) < data_len:
raise IncompleteData()
# TODO(b/139055137) Remove str(...)
return (struct.unpack(str('%ds' % data_len), binary_data[:data_len])[0],
binary_data[data_len:])
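# Illustrative sketch (not part of the SDK): round-trip a DATA payload through
# the subprotocol framing helpers defined above.
def _ExampleSubprotocolRoundtrip():
  frame = CreateSubprotocolDataFrame(b'hello')
  tag, remainder = ExtractSubprotocolTag(frame)
  assert tag == SUBPROTOCOL_TAG_DATA
  data, remainder = ExtractSubprotocolData(remainder)
  assert data == b'hello' and not remainder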
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Donfig Developers
# Copyright (c) 2014-2018, Anaconda, Inc. and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tempfile
import shutil
from contextlib import contextmanager
try:
from contextlib import AbstractContextManager
except ImportError:
AbstractContextManager = object
try:
from contextlib import suppress
except ImportError:
# Python <3.4
@contextmanager
def suppress(*exceptions):
try:
yield
except exceptions:
pass
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with suppress(OSError):
os.remove(filename)
# copied from cpython 3.7 source
class nullcontext(AbstractContextManager):
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager::
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
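# Illustrative sketch (not part of the original module): tmpfile() yields a
# temporary path that is cleaned up on exit, and nullcontext() stands in for a
# real context manager when none is needed.
def _example_usage():
    with tmpfile(extension='yaml') as fn:
        with open(fn, 'w') as f:
            f.write('a: 1\n')
    with nullcontext('unchanged') as value:
        assert value == 'unchanged'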
|
import logging
import typing
import arrow
import pymongo
import pymongo.errors
from pymongo import MongoClient
class ReceiveDailyAbstract:
DATABASE_NAME = 'ChineseFuturesRaw'
COLLECTION_NAME = None
def __init__(self):
db = MongoClient()[self.DATABASE_NAME]
self.mongo_coll = db[self.COLLECTION_NAME]
if self.COLLECTION_NAME not in db.collection_names():
self.mongo_coll.create_index([(
'TradingDay', pymongo.ASCENDING
)], unique=True)
# whether to replace all conflicted data
self.replace_all: bool = False
def fetchRaw(self, _tradingday: str) -> typing.Any:
"""
fetch raw data of _tradingday and return
:param _tradingday: which day to fetch
:return: the raw data
"""
raise NotImplementedError
def storeRaw(self, _tradingday: str, _raw_data: typing.Any):
"""
store raw data into mongodb
:param _tradingday: which day to store
:param _raw_data: raw data from fetchRaw(...)
:return: None
"""
logging.info('{} storeRaw: {}'.format(
self.COLLECTION_NAME, _tradingday
))
try:
self.mongo_coll.insert_one({
'TradingDay': _tradingday,
'Raw': _raw_data,
})
except pymongo.errors.DuplicateKeyError as e:
logging.warning(e)
if self.replace_all:
self.mongo_coll.replace_one(
{'TradingDay': _tradingday},
{'TradingDay': _tradingday, 'Raw': _raw_data}
)
else:
tmp = input('Replace existing data?(y/n/a): ')
if tmp == 'y' or tmp == 'a':
self.mongo_coll.replace_one(
{'TradingDay': _tradingday},
{'TradingDay': _tradingday, 'Raw': _raw_data}
)
if tmp == 'a':
self.replace_all = True
def loadRaw(self, _tradingday: str) -> typing.Any:
"""
load raw from mongodb
:param _tradingday: which day to load
:return: the raw data of _tradingday
"""
ret = self.mongo_coll.find_one({
'TradingDay': _tradingday
})
if ret is None:
return None
return ret['Raw']
@staticmethod
def rawToDicts(
_tradingday: str, _raw_data: typing.Any
) -> typing.Tuple[dict, dict, dict]:
"""
turn raw data into dicts
:param _tradingday: which tradingday
:param _raw_data: raw data turned
:return: return data_dict, instrument_dict, product_dict
"""
raise NotImplementedError
    @staticmethod
    def iterTradingDay(
            _begin_date: str, _end_date: str = None
    ) -> typing.Iterator[str]:
        """
        Iterate over days from _begin_date to _end_date, one day at a time.
        :param _begin_date: the first day (inclusive), as 'YYYYMMDD'
        :param _end_date: the last day (exclusive); defaults to today
        :return: yields each day as a 'YYYYMMDD' string
        """
if _end_date is None:
_end_date = arrow.now().format('YYYYMMDD')
tradingday = _begin_date
while tradingday < _end_date:
yield tradingday
tradingday = arrow.get(
tradingday, 'YYYYMMDD'
).shift(days=1).format('YYYYMMDD')
def lastTradingDay(self) -> typing.Union[None, str]:
"""
get the last tradingday stored in mongodb
:return: str
"""
ret = self.mongo_coll.find_one(
sort=[('TradingDay', pymongo.DESCENDING)]
)
if ret:
return ret['TradingDay']
return None
def iterFetchAndStore(self, _begin_date):
for tradingday in self.iterTradingDay(_begin_date):
self.storeRaw(tradingday, self.fetchRaw(tradingday))
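# Illustrative sketch (not part of the original module): a minimal concrete
# receiver with a made-up data source, showing which pieces a subclass must
# provide before something like
# _ExampleReceiveDaily().iterFetchAndStore('20190101') can be used.
class _ExampleReceiveDaily(ReceiveDailyAbstract):
    COLLECTION_NAME = 'ExampleDaily'  # hypothetical collection name
    def fetchRaw(self, _tradingday: str) -> typing.Any:
        # a real implementation would download the day's data here
        return {'TradingDay': _tradingday, 'rows': []}
    @staticmethod
    def rawToDicts(_tradingday: str, _raw_data: typing.Any):
        # a real implementation would parse _raw_data into the three dicts
        return {}, {}, {}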
|
"""
Last Updated : 30/08/19
Purpose: Socket Programming Assignment for Network Fundamentals.
Authors: Jayden Lee, Vivian Huynh, Albert Ferguson
"""
# imports
import socket as sc
from datetime import datetime as dt
def HTTPServer(Port, *args, **kwargs):
"""
    Take a given port and bind it to an existing IP address. The default IP
    address is LocalHost; an 'IP' kwarg overrides this.
    This creates a simple HTTP web server that sends HTML page files to the
    client as requested.
    -- Parameters --
    Port    the port to bind on the default or the given IP address
    -- Args --
    -D      debug flag; if present in the arg list, verbosity is set to max.
    -- KwArgs --
    IP      overrides the default LocalHost definition with a user-defined
            IP address.
"""
# Creating a socket object and temp binding variable
with sc.socket(sc.AF_INET, sc.SOCK_STREAM, 0) as server_socket:
# this must be a tuple
if 'IP' in kwargs:
_IP = kwargs.get("IP")
else:
_IP = "127.0.0.1" # LocalHost is default
toBind = (_IP, int(Port))
server_socket.bind(toBind)
# bind the server details so clients have a constant address to contact
        # Then enable listening with a defined queue length of clients:
        # any integer > 0 sets a backlog limit of unaccepted connections,
        # and it must be at least 1 to accept any initial connections.
        # Further info: https://docs.python.org/3/library/socket.html
server_socket.listen(1)
while True:
# now that the server is ready....await a connex'
print("\t\t#### Ready to serve! ####\n")
# accept a new handshake connex'
connection_socket, addr = server_socket.accept()
print("Acquired Connection at {}:\t{}\n".format(dt.now(), addr))
            # Create a new connection socket, used to receive/send HTTP data client <-> server.
            # addr is the IP of the host requesting the document.
            # If the client requests a non-existent file, catch the error and
            # send them the 404 error page. But first 'try' to serve the file
            # they requested.
try:
# now redirect client to their own dedicated socket, pipeline further
# comms to this socket port.
GET_req = connection_socket.recv(1024)
                # Receives up to 1024 bytes of the client's GET request. This is broken
                # down with string manipulation to retrieve the requested file name.
if '-D' in args:
print(GET_req,'\n')
# file name is second elem of GET list val's
fn = GET_req.split()[1].decode()
                # set the status line for the response; the blank line ("\r\n\r\n")
                # terminates the headers before the file body is appended
                header = "HTTP/1.1 200 OK\r\n\r\n"
                return_data = header.encode('utf-8')
if '-D' in args:
print(str(fn),'\n', header, '\n')
else:
pass
# now open the file, remove the extraneous '/' with
# string manip, [1:] means all to end from index = 1
# with open performs auto close of file on indentation break
with open(fn[1:], 'rb') as file:
file_data = file.read() # read the file into memory
return_data += file_data
if '-D' in args:
# verbose mode high, print all arg values for debug check
print(GET_req, '\n', file_data, '\n')
else:
pass
connection_socket.send(return_data)
# send EOF vals
connection_socket.send("\r\n".encode())
connection_socket.close()
            # IOError is the standard exception for file-handling errors
            except IOError:
                with open("404.html", 'rb') as file:
                    # the requested file doesn't exist, so serve the 404 page with a 404 status line
                    return_data = "HTTP/1.1 404 Not Found\r\n\r\n".encode('utf-8') + file.read()
connection_socket.send(return_data)
# send EOF vals
connection_socket.send("\r\n".encode())
connection_socket.close()
HTTPServer(80, '-D')
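# Illustrative client-side sketch (not part of the assignment code): send a raw
# GET request to the server above and print whatever comes back. It assumes the
# server is running locally on port 80 and that 'index.html' exists beside it.
def _example_client(host="127.0.0.1", port=80, path="/index.html"):
    with sc.socket(sc.AF_INET, sc.SOCK_STREAM) as client:
        client.connect((host, port))
        client.send("GET {} HTTP/1.1\r\nHost: {}\r\n\r\n".format(path, host).encode())
        print(client.recv(4096).decode(errors="replace"))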
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-22 12:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0005_auto_20151225_1353'),
]
operations = [
migrations.AlterField(
model_name='user',
name='bio',
field=models.TextField(help_text='Describe yourself with 500 characters or less. There will be no formatting.', max_length=500, verbose_name='biography'),
),
]
|
import moeda
preco = float(input('Enter the price: R$ '))
print(f'The price with a 10% increase is R$ {moeda.aumentar(preco, 10)}')
print(f'The price with a 10% discount is R$ {moeda.diminuir(preco, 10)}')
print(f'Double the price is R$ {moeda.dobro(preco)}')
print(f'Half the price is R$ {moeda.metade(preco)}')
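# Illustrative sketch (an assumption, not the real 'moeda' module): the helpers
# used above are commonly implemented along these lines; the real module may
# also format the values as currency strings.
def _aumentar(preco, taxa):
    """Return preco increased by taxa percent."""
    return preco + preco * taxa / 100
def _diminuir(preco, taxa):
    """Return preco decreased by taxa percent."""
    return preco - preco * taxa / 100
def _dobro(preco):
    return preco * 2
def _metade(preco):
    return preco / 2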
|
"""
Copy specific lines of the original csv which contains all data, to
a new csv, which contains only selected classes.
"""
from yaml_util import read_list_yaml
import csv
import os.path
## exp_run_config
# input
original_csv_path = '../data/ucf101.csv'
classes_yaml_path = 'ucf_101_motion_classes.yaml'
key_name = 'classes'
# output
output_dir = 'output_csv'
output_file_name = 'ucf101_motion.csv'
def main():
output_path = os.path.join(output_dir, output_file_name)
    assert not os.path.isfile(output_path), "The output csv already exists. Rename output_file_name."
list_classes = read_list_yaml(classes_yaml_path, key_name)
csv.register_dialect('ucf101_dialect', delimiter=',', skipinitialspace=True)
with open(original_csv_path, 'r') as csv_file, open(output_path, 'w') as f_out:
reader = csv.reader(csv_file, dialect='ucf101_dialect')
writer = csv.writer(f_out, delimiter=',')
for row in reader:
if row[1] in list_classes:
writer.writerow(row)
if __name__ == '__main__':
main()
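# Illustrative sketch (an assumption, not the real yaml_util module):
# read_list_yaml is expected to load the YAML file and return the list stored
# under key_name, roughly like this (requires PyYAML).
def _read_list_yaml_sketch(path, key):
    import yaml
    with open(path) as f:
        return yaml.safe_load(f)[key]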
|
# encoding: utf-8
"""
Tests of io.base
"""
from __future__ import absolute_import, division
try:
import unittest2 as unittest
except ImportError:
import unittest
from ...core import objectlist
from ...io.baseio import BaseIO
import numpy
class TestIOObjects(unittest.TestCase):
def test__raise_error_when_not_readable_or_writable(self):
reader = BaseIO()
for ob in objectlist:
if ob not in BaseIO.readable_objects:
                meth = getattr(reader, 'read_' + ob.__name__.lower())
                self.assertRaises(AssertionError, meth)
            if ob not in BaseIO.writeable_objects:
                meth = getattr(reader, 'write_' + ob.__name__.lower())
                self.assertRaises(AssertionError, meth, ())
if __name__ == "__main__":
unittest.main()
|
from pinto.security import RootACL
class Root(RootACL):
pass
def includeme(config):
config.add_route('category', '/{tag}', factory=Root)
|
# THIS IS THE MAIN CODE
def cal_multiple(a, b):
return int(a) * int(b)
def cal_addition(a, b):
return int(a) + int(b)
if __name__ == '__main__':
    # the original called an undefined cal_score(); use the defined helpers instead
    print(">>>>> " + str(cal_addition(2, 4)))
    print(">>>>> " + str(cal_multiple(2, 4)))
|
from flask import Flask
import os
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello " + os.environ.get("NAME", "you") + "\n"
if __name__ == "__main__":
port = int(os.environ.get("PORT", 3000))
app.run(debug=True,host='0.0.0.0',port=port)
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed).
Args:
seed (`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_tf_available():
tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (`np.ndarray`): Predictions of the model.
label_ids (`np.ndarray`): Targets to be matched.
"""
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Union[np.ndarray, Tuple[np.ndarray]]
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
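# Illustrative sketch (not part of the library): shows what get_last_checkpoint
# returns for a folder containing several "checkpoint-<step>" subdirectories.
def _example_get_last_checkpoint():
    import tempfile
    with tempfile.TemporaryDirectory() as folder:
        for step in (10, 2, 500):
            os.makedirs(os.path.join(folder, f"checkpoint-{step}"))
        # the numerically largest step wins, not the lexicographically largest
        assert get_last_checkpoint(folder).endswith("checkpoint-500")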
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
"""
    The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
    The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second")]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
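# Illustrative sketch (not part of the library): with an eval loss present the
# objective is that loss; otherwise the remaining metrics are summed after
# dropping "epoch" and the *_runtime / *_per_second speed metrics.
def _example_default_compute_objective():
    assert default_compute_objective({"eval_loss": 0.5, "epoch": 3.0}) == 0.5
    obj = default_compute_objective({"eval_accuracy": 0.9, "eval_f1": 0.8, "eval_runtime": 12.3})
    assert abs(obj - 1.7) < 1e-9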
def default_hp_space_optuna(trial) -> Dict[str, float]:
from .integrations import is_optuna_available
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
from .integrations import is_ray_tune_available
assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
from ray import tune
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
def default_hp_space_sigopt(trial):
return [
{"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"},
{"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
{"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
{
"categorical_values": ["4", "8", "16", "32", "64"],
"name": "per_device_train_batch_size",
"type": "categorical",
},
]
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
default_hp_space = {
HPSearchBackend.OPTUNA: default_hp_space_optuna,
HPSearchBackend.RAY: default_hp_space_ray,
HPSearchBackend.SIGOPT: default_hp_space_sigopt,
}
def is_main_process(local_rank):
"""
    Whether or not the current process is the main process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
return dist.get_world_size()
elif local_rank != -1 and is_torch_available():
import torch
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
    - num_samples: number of samples processed
    - num_steps: number of steps processed
    """
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
return result
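# Illustrative sketch (not part of the library): typical use is to snapshot
# time.time() before an operation and call speed_metrics() right after it.
def _example_speed_metrics():
    start = time.time()
    time.sleep(0.01)  # stand-in for the measured operation
    metrics = speed_metrics("eval", start, num_samples=100, num_steps=25)
    # e.g. {"eval_runtime": ..., "eval_samples_per_second": ..., "eval_steps_per_second": ...}
    assert set(metrics) == {"eval_runtime", "eval_samples_per_second", "eval_steps_per_second"}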
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example :
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
def derive_stage(self):
"""derives the stage/caller name automatically"""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_mem_used_peak = -1
while True:
self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def start(self):
"""start tracking for the caller's stage"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
# gpu
if self.torch is not None:
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def stop(self, stage):
"""stop tracking for the passed stage"""
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
self.torch.cuda.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
# gpu
if self.torch is not None:
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
self.gpu[self.cur_stage] = dict(
begin=self.gpu_mem_used_at_start,
end=self.gpu_mem_used_now,
alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
)
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = dict(
begin=self.cpu_mem_used_at_start,
end=self.cpu_mem_used_now,
alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
)
# reset - cycle finished
self.cur_stage = None
def update_metrics(self, stage, metrics):
"""updates the metrics"""
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
# if we need additional debug info, enable the following
# for t in ["begin", "end"]:
# if stage in self.cpu and t in self.cpu[stage]:
# metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
# if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
# metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
# since memory can be allocated before init, and it might be difficult to track overall
# memory usage, in particular for GPU, let's report memory usage at the point init was called
if stages[0] == "init":
metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
if self.torch is not None:
metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
# if we also wanted to report any additional memory allocations in between init and
# whatever the next stage was we could also report this:
# if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
# metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
# if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
# metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]
def stop_and_update_metrics(self, metrics=None):
"""combine stop and metrics update in one call for simpler code"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
"""
    Recursively calls `.item()` on the elements of the dictionary (or list/tuple) passed, so numpy scalars and
    one-element tensors become plain Python numbers.
"""
if isinstance(metrics, (list, tuple)):
return type(metrics)(denumpify_detensorize(m) for m in metrics)
elif isinstance(metrics, dict):
return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
elif isinstance(metrics, np.generic):
return metrics.item()
elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
return metrics.item()
return metrics
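# Illustrative sketch (not part of the library): numpy scalars nested in
# dicts/lists are converted to plain Python numbers; other values pass through.
def _example_denumpify_detensorize():
    metrics = {"acc": np.float32(0.5), "counts": [np.int64(3), 4], "name": "run-1"}
    plain = denumpify_detensorize(metrics)
    assert plain == {"acc": 0.5, "counts": [3, 4], "name": "run-1"}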
def number_of_arguments(func):
"""
Return the number of arguments of the passed function, even if it's a partial function.
"""
if isinstance(func, functools.partial):
total_args = len(inspect.signature(func.func).parameters)
return total_args - len(func.args) - len(func.keywords)
return len(inspect.signature(func).parameters)
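# Illustrative sketch (not part of the library): partials report only the
# arguments that are still unbound.
def _example_number_of_arguments():
    def f(a, b, c):
        return a + b + c
    assert number_of_arguments(f) == 3
    assert number_of_arguments(functools.partial(f, 1)) == 2
    assert number_of_arguments(functools.partial(f, 1, c=3)) == 1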
class ShardedDDPOption(ExplicitEnum):
SIMPLE = "simple"
ZERO_DP_2 = "zero_dp_2"
ZERO_DP_3 = "zero_dp_3"
OFFLOAD = "offload"
AUTO_WRAP = "auto_wrap"
|
import os
import time
import sys
import numpy as np
from glob import glob
import torch
import cv2
from torchvision import transforms
import matplotlib.pyplot as plt
from network_weight import UNet
from network import UNet as HUNet
import argparse
from draw_skeleton import create_colors, draw_skeleton
"""
Height and Weight Information from Unconstrained Images
https://github.com/canaltinigne/DeepHeightWeight
Run the following command to get mask, joint locations
height and weight of a person in an image.
(Image should include a person with full-body visible)
python HWFinder.py -i [IMAGE ADDRESS] -g [GPU NUMBER] -r [RESOLUTION]
"""
if __name__ == "__main__":
# PARSER SETTINGS
np.random.seed(23)
parser = argparse.ArgumentParser(description="Height and Weight Information from Unconstrained Images")
parser.add_argument('-i', '--image', type=str, required=True, help='Image Directory')
parser.add_argument('-g', '--gpu', type=int, default=0, help='GPU selection')
parser.add_argument('-r', '--resolution', type=int, required=True, help='Resolution for Square Image')
args = parser.parse_args()
# Height
model_h = HUNet(128)
pretrained_model_h = torch.load('/content/drive/My Drive/Colab Notebooks/AI_Australia/Models/model_ep_48.pth.tar')
# Weight
model_w = UNet(128, 32, 32)
pretrained_model_w = torch.load('/content/drive/My Drive/Colab Notebooks/AI_Australia/Models/model_ep_37.pth.tar')
model_h.load_state_dict(pretrained_model_h["state_dict"])
model_w.load_state_dict(pretrained_model_w["state_dict"])
if torch.cuda.is_available():
model = model_w.cuda(args.gpu)
else:
model = model_w
# Reading Image
assert ".jpg" in args.image or ".png" in args.image or ".jpeg" in args.image, "Please use .jpg or .png format"
RES = args.resolution
X = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB).astype('float32')
scale = RES / max(X.shape[:2])
X_scaled = cv2.resize(X, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
if X_scaled.shape[1] > X_scaled.shape[0]:
p_a = (RES - X_scaled.shape[0])//2
p_b = (RES - X_scaled.shape[0])-p_a
X = np.pad(X_scaled, [(p_a, p_b), (0, 0), (0,0)], mode='constant')
elif X_scaled.shape[1] <= X_scaled.shape[0]:
p_a = (RES - X_scaled.shape[1])//2
p_b = (RES - X_scaled.shape[1])-p_a
X = np.pad(X_scaled, [(0, 0), (p_a, p_b), (0,0)], mode='constant')
o_img = X.copy()
X /= 255
X = transforms.ToTensor()(X).unsqueeze(0)
if torch.cuda.is_available():
X = X.cuda()
model.eval()
with torch.no_grad():
m_p, j_p, _, w_p = model(X)
del model
if torch.cuda.is_available():
model = model_h.cuda(args.gpu)
else:
model = model_h
model.eval()
with torch.no_grad():
_, _, h_p = model(X)
fformat = '.png'
if '.jpg' in args.image:
fformat = '.jpg'
elif '.jpeg' in args.image:
fformat = '.jpeg'
mask_out = m_p.argmax(1).squeeze().cpu().numpy()
joint_out = j_p.argmax(1).squeeze().cpu().numpy()
pred_2 = j_p.squeeze().cpu().numpy()
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask_out.astype('uint8'))
colors = create_colors(30)
img_sk = np.zeros((128,128,3))
joint_pos = []
    for i in range(1, num_labels):
        p_res = np.expand_dims((labels == i).astype(int), 0) * pred_2
        ct_ = 1
        positions = []
        for j in range(1, 19):  # one channel per joint heatmap
            positions.append(np.unravel_index(p_res[ct_, :, :].argmax(), p_res[ct_, :, :].shape))
            ct_ += 1
joint_pos.append(positions)
mask_out_RGB = np.concatenate([255*mask_out[:, :, np.newaxis],
255*mask_out[:, :, np.newaxis],
mask_out[:, :, np.newaxis],
], axis=-1)
layer = cv2.addWeighted(o_img.astype('uint8'), 0.55, mask_out_RGB.astype('uint8'), 0.45, 0)
img_sk = draw_skeleton(layer/255, joint_pos, colors)
out_name = args.image.split("/")[-1].replace(fformat, '.mask.png')
out_name_j = args.image.split("/")[-1].replace(fformat, '.joint.png')
out_name_sk = args.image.split("/")[-1].replace(fformat, '.skeleton.png')
with open("/content/out/" + args.image.split("/")[-1].replace(fformat, '.info.txt'), 'w') as out_file:
out_file.write("Image: " + args.image)
out_file.write("\nHeight: {:.1f} cm\nWeight: {:.1f} kg".format(100*h_p.item(), 100*w_p.item()))
cv2.imwrite("/content/out" + out_name, (255*mask_out).astype('uint8'))
plt.imsave("/content/out" + out_name_j, joint_out, cmap='jet')
plt.imsave("/content/out" + out_name_sk, img_sk)
print("\nImage: " + args.image)
print("Height: {:.1f} cm\nWeight: {:.1f} kg".format(100*h_p.item(), 100*w_p.item()))
print("Mask and Joints can be found in /out directory")
del model
|
import array
import async
import binascii
import collections
import hashlib
import os
import random
import shutil
import sqlite3
import struct
import subprocess
import sys
import types
import valtool
deque = collections.deque
_bin2hex = binascii.hexlify
_hex2bin = binascii.unhexlify
def _reiterable(xs):
if type(xs) in (tuple, list, set, frozenset, dict):
return xs
else:
return tuple(xs)
def _valhash(x):
return valtool.Hasher().eat(x).digest()
def _seqhash(xs):
return valtool.Hasher().eatseq(xs).digest()
def _ensure_dirs(path):
d = os.path.split(path)[0]
if not os.path.exists(d):
os.makedirs(d)
def _remove_clean(keep, rest):
"""Remove file or tree at 'os.path.join(keep,rest)'. Then remove all empty dirs
up to but not including 'keep'. Does not fail when things don't exist."""
assert not os.path.isabs(rest)
rest = rest.rstrip(os.path.sep)
if rest != '':
p = os.path.join(keep, rest)
if os.path.isdir(p):
shutil.rmtree(p)
elif os.path.isfile(p):
os.remove(p)
while True:
rest = os.path.dirname(rest)
if rest == '': break
p = os.path.join(keep, rest)
try:
os.rmdir(p)
except:
if os.path.isdir(p):
break
def _sql_ensure_table(cxn, name, cols, ixs=()):
cur = cxn.cursor()
cur.execute("select name from sqlite_master where type='table' and name=?", (name,))
if cur.fetchone() is None:
cur.execute("create table " + name + "(" + ",".join(cols) + ")")
i = 0
for ix in ixs:
cur.execute("create index " + ("%s__index_%d"%(name,i)) + " on " + name + "(" + ",".join(ix) + ")")
i += 1
def _flatten(x):
if getattr(x, "__iter__", False):
for it in x:
for y in _flatten(it):
yield y
else:
yield x
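# Illustrative sketch (not part of the original module): _flatten walks nested
# iterables depth-first and yields the leaves in order, e.g.
def _example_flatten():
  assert list(_flatten([1, [2, (3, 4)], 5])) == [1, 2, 3, 4, 5]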
class Cmd(object):
def __init__(me, ctx, cwd=None, env=None, executable=None, tag=None,
pool=None, showout=False, showerr=True):
me._ctx = ctx
me._infs = set()
me._toks = []
me._oxs = {}
me.cwd = cwd
me.env = env or dict(os.environ)
me.executable = executable # if os.name == 'nt' else None
me.pool = pool
me.tag = tag
me.showout = showout
me.showerr = showerr
def lit(me, *toks):
me._toks += _flatten(toks)
return me
def inf(me, path, fmt="%s"):
path = os.path.normpath(path)
me._infs.add(path)
me._toks.append(fmt % path)
return me
def infs(me, paths, fmt="%s"):
for p in paths:
path = os.path.normpath(p)
me._infs.add(path)
me._toks.append(fmt % path)
return me
def outf(me, path, fmt="%s"):
me._oxs[path] = (len(me._toks), fmt)
me._toks.append(None)
return me
def prepare_a(me):
yield async.Sync(me._ctx.infiles_a(me._infs))
for o in me._oxs:
ix, fmt = me._oxs[o]
me._oxs[o] = fmt % (yield async.Sync(me._ctx.outfile_a(o)))
me._toks[ix] = me._oxs[o]
me.shline = subprocess.list2cmdline(me._toks)
me.outs = me._oxs
def exec_a(me):
if not hasattr(me, 'shline'):
yield async.Sync(me.prepare_a())
@async.assign_pool(me.pool)
def go():
pipe = subprocess.PIPE
try:
p = subprocess.Popen(me._toks, cwd=me.cwd, env=me.env, stdin=pipe, stdout=pipe, stderr=pipe)
except OSError, e:
e.filename = getattr(e,'filename',None) or me._toks[0]
raise e
me.stdout, me.stderr = p.communicate()
me.returncode = p.returncode
if me.tag is not None:
tag = me.tag + ': '
else:
tag = ''
if me.showout or me.showerr:
#print>>sys.stderr, '[RUN] ' + tag + me.shline
print>>sys.stderr, tag + me.shline
yield async.Sync(go)
if me.showerr and me.stderr != '':
print>>sys.stderr, '-'*72 + '\n[MSG] ' + tag + me.shline + '\n\n' + \
me.stderr + ('' if me.stderr[-1] == '\n' else '\n') + '-'*72
if me.showout and me.stdout != '':
print>>sys.stderr, '-'*72 + '\n[OUT] ' + tag + me.shline + '\n\n' + \
me.stdout + ('' if me.stdout[-1] == '\n' else '\n') + '-'*72
if me.returncode != 0:
raise subprocess.CalledProcessError(me.returncode, me.shline)
class Host(object):
def canonify(me, x):
return x
def lift_file(me, path):
assert False
def unlift_file(me, x):
assert False
def query_a(me, keys, stash):
assert False
class MemoHost(Host):
"""Given a host, memoize it so that redundant key lookups are cached. This
makes sense when we expect the state of the world to remain frozen for the
lifetime of this host object.
"""
def __init__(me, host):
me.host = host
me.cache = {}
def canonify(me, x):
return me.host.canonify(x)
def lift_file(me, path):
return me.host.lift_file(path)
def unlift_file(me, x):
return me.host.unlift_file(x)
def query_a(me, keys, stash):
host = me.host
cache = me.cache
keys = keys if type(keys) is set else set(keys)
vals = {}
for k in keys:
if k in cache:
vals[k] = cache[k]
if len(vals) != len(keys):
keys1 = tuple(k for k in keys if k not in vals)
vals1 = yield async.Sync(host.query_a(keys1, stash))
vals.update(vals1)
cache.update(vals1)
assert len(vals) == len(keys)
yield async.Result(vals)
class _FileHost(Host):
"""A host whose keys are interpreted as filesystem paths, the returned hashes values are content hashes.
"""
def __call__(*a,**kw):
raise Exception("FileHost is an instance, not a constructor!")
def canonify(me, x):
return os.path.abspath(x)
def lift_file(me, path):
return path
def unlift_file(me, x):
return x
def query_a(me, paths, stash):
def action(path, old):
t0, h0 = old if old is not None else (0, '')
if os.path.exists(path):
t1 = int(os.path.getmtime(path)*10000)
if t0 != t1:
md5 = hashlib.md5()
with open(path, 'rb') as f:
for b in iter(lambda: f.read(8192), ''):
md5.update(b)
h1 = md5.digest()
else:
h1 = h0
else:
t1, h1 = 0, ''
return (t1, h1) if (t1, h1) != (t0, h0) else old
reals = dict((p, os.path.realpath(p)) for p in paths)
ans = yield async.Sync(stash.updates_a(reals.keys(), action))
ans = dict((k,'' if th is None else th[1]) for k,th in ans.iteritems())
yield async.Result(ans)
FileHost = _FileHost()
def TestNo(y):
return MatchNone
class Match(object):
def inputs_a(me, xs, query_a):
"""given tuple of input names 'xs', returns test over tuples of hashes"""
assert False
def args(me, xs):
"""given tuple of arg names 'xs', returns test over tuple of values"""
assert False
def result(me, y):
"""reached function return of value y"""
assert False
class _MatchNone(Match):
def inputs_a(me, xs, query_a):
yield async.Result(TestNo)
def args(me, xs):
return TestNo
def result(me, y):
pass
def __call__(*a,**b):
assert False # MatchNone is not a constructor!
MatchNone = _MatchNone()
class TestEqualAny(object):
def __init__(me, values, next_match):
"""vals: list of values to test equality, next_match: val->Match"""
me.values = values if isinstance(values, tuple) else tuple(values)
me.next_match = next_match
def __call__(me, y):
return me.next_match(y) if y in me.values else MatchNone
class TestNotEqualAll(object):
def __init__(me, values, next_match):
"""vals: list of values to test equality, next_match: val->Match"""
me.values = values if isinstance(values, tuple) else tuple(values)
me.next_match = next_match
def __call__(me, y):
return me.next_match(y) if y not in me.values else MatchNone
class MatchArgs(Match):
Accept = object()
def __init__(me, argstest, collector,
seed={}, merge=lambda old,xys:(lambda d:(d.update(xys),d)[1])(dict(old))):
"""accepts only inputs that match current host hash value, defers to
argstest to generate test lambda for args.
argstest: takes (acc, xs, next_match), returns tester
collector: takes ({x:y}, result) for argument name and values x,y
"""
me._argstest = argstest
me._collector = collector
me._acc = seed
me._merge = merge
def inputs_a(me, xs, query_a):
hs = yield async.Sync(query_a(xs))
hs = tuple(hs[x] for x in xs)
yield async.Result(TestEqualAny((hs,), lambda _: me))
def args(me, xs):
def next_match(ys):
xys = dict((xs[i],ys[i]) for i in xrange(len(xs)))
return MatchArgs(me._argstest, me._collector, me._merge(me._acc, xys), me._merge)
return me._argstest(me._acc, xs, next_match)
def result(me, ans):
return me._collector(me._acc, ans)
class Config(object):
path = '.'
def make_host(me):
return MemoHost(FileHost)
def arg_group(me, x):
return None
def input_group(me, x):
return None
def group_comparer(me):
return cmp
def group_legible(me, grp):
return False
def _key_group(me, key):
return (me.arg_group if key[0]==_keykind_arg else me.input_group)(key[1])
def _keys_group(me, keys):
for key in keys:
return me._key_group(key)
def _keys_groups(me, keys):
return frozenset(me._key_group(key) for key in keys)
class Oven(object):
@classmethod
def new_a(cls, config):
assert isinstance(config, Config)
me = cls()
me._config = config
me._host = config.make_host()
me._path = os.path.abspath(config.path)
me._dbcxn = None
me._dbpath = os.path.join(me._path, "db")
me._dbpool = async.Pool(size=1)
def schema(cxn):
_sql_ensure_table(cxn, 'outdirs', ('path','bump'), [['path']])
yield async.Sync(me._dbjob(schema))
me._stash = yield async.Sync(_Stash.new_a(me))
me._logdb = yield async.Sync(_LogDb.new_a(me))
yield async.Result(me)
def _dbjob(me, job):
@async.assign_pool(me._dbpool)
def wrap():
if me._dbcxn is None:
_ensure_dirs(me._dbpath)
me._dbcxn = sqlite3.connect(me._dbpath, timeout=10*60)
me._dbcxn.execute('pragma synchronous=off')
return job(me._dbcxn)
return wrap
def close_a(me):
@async.assign_pool(me._dbpool)
def close_it():
if me._dbcxn is not None:
me._dbcxn.commit()
me._dbcxn.close()
me._dbcxn = None
yield async.Sync(close_it)
def config(me):
return me._config
def host(me):
return me._host
def query_a(me, keys):
return me._host.query_a(keys, me._stash)
def _outfile_a(me, path):
""" returns a tuple (abs-path,stuff), stuff is only used to delete the file later.
"""
def bump(cxn):
cur = cxn.cursor()
cur.execute('select bump from outdirs where path=?', (path,))
got = cur.fetchone()
if got is None:
got = 0
cur.execute('insert into outdirs(path,bump) values(?,?)', (path,got+1))
else:
got = got[0]
cur.execute('update outdirs set bump=? where path=?', (got+1,path))
cxn.commit()
return got
n = yield async.Sync(me._dbjob(bump))
d,dp = os.path.splitdrive(path)
if os.path.isabs(path):
ovpath = os.path.join('o-abs'+('' if d=='' else '.'+d), str(n), dp[1:])
else:
ovpath = os.path.join('o-rel'+('' if d=='' else '.'+d), str(n), dp)
opath = os.path.join(me._path, ovpath)
assert not os.path.exists(opath)
_ensure_dirs(opath)
yield async.Result((opath,ovpath))
def _is_outfile(me, path):
o = os.path.join(me._path, 'o-')
p = os.path.abspath(path)
return p.startswith(o) # ugly, should use os.path.samefile
def _memo_a(me, fun_a, view):
def calc_a(view1, log):
ctx = _Context(view1, log)
try:
result = yield async.Sync(fun_a(ctx))
except:
e = sys.exc_info()
for ovpath in ctx._outfs:
_remove_clean(me._path, ovpath)
raise e[0], e[1], e[2]
finally:
for f in ctx._subfuts:
yield async.Wait(f)
ctx._bagtip.terminate()
bags = ctx._bagdag.flattened()
log.add_bags(bags)
yield async.Result(result)
funh = _valhash(fun_a)
#sys.stderr.write('fun %r = %r\n' % (fun_a.func_code.co_name, _bin2hex(funh)))
log = yield async.Sync(me._logdb.memo_a(funh, view, calc_a))
yield async.Result(log)
def memo_a(me, fun_a, argroot=None):
argroot = _UserArgView.easy_root(argroot)
view = _View.root(me, argroot)
log = yield async.Sync(me._memo_a(fun_a, view))
yield async.Result(log.result()) # will throw if fun_a did, but thats ok
def search_a(me, funtest):
return me._logdb.search_a(funtest)
def argmap_single(f):
def g(xs, uav):
ys = {}
for x in xs:
uav.scope_push()
ys[x] = f(x, uav)
uav.scope_pop()
return ys
g._bake_argmap = True
return g
def argmap_many(f):
f._bake_argmap = True
return f
class _Stash(object):
@classmethod
def new_a(cls, oven):
me = cls()
me._oven = oven
def schema(cxn):
_sql_ensure_table(cxn, 'stash', ('hk0','hk1','val'), [['hk0']])
yield async.Sync(oven._dbjob(schema))
yield async.Result(me)
def updates_a(me, keys, action):
size_i = struct.calcsize('<i')
def go(cxn):
try:
cur = cxn.cursor()
ans = {}
changed = False
for k in keys:
hk = _valhash(k)
hk0 = struct.unpack_from('<i', hk)[0]
hk1 = buffer(hk, size_i)
row = cur.execute("select val from stash where hk0=? and hk1=?", (hk0,hk1)).fetchone()
old = None if row is None else valtool.unpack(row[0])
new = action(k, old)
ans[k] = new
if row is not None and new is None:
cur.execute('delete from stash where hk0=? and hk1=?', (hk0,hk1))
changed = True
elif row is None and new is not None:
val = valtool.pack(new)
cur.execute('insert into stash(hk0,hk1,val) values(?,?,?)', (hk0,hk1,buffer(val)))
changed = True
elif old is not new:
val = valtool.pack(new)
cur.execute('update stash set val=? where hk0=? and hk1=?', (buffer(val),hk0,hk1))
changed = True
if changed:
cxn.commit()
return ans
except:
cxn.rollback()
raise
ans = yield async.Sync(me._oven._dbjob(go))
yield async.Result(ans)
def gets_a(me, keys):
return me.updates_a(keys, lambda key,old: old)
def puts_a(me, keyvals):
return me.updates_a(keyvals.keys(), lambda key,old: keyvals[key])
class _Context(object):
def __init__(me, view, log):
assert isinstance(view, _View)
assert isinstance(log, _WipLog)
me._view = view
view.spynote = me
me._oven = view._oven
me._config = view._oven._config
me._bagdag = _BagDag(me._config)
me._bagtip = me._bagdag.branch()
me._bagscope = None
me._log = log
me._outfs = []
me._subfuts = []
@staticmethod
def _spy(note, keyvals):
if note is not None:
ctx = note
assert isinstance(ctx, _Context)
ctx._log.add_keys(keyvals)
def _add_bag(me, keys):
if me._bagscope is not None:
me._bagscope.add_bag(keys)
else:
me._bagtip.add_bag(keys)
def arg(me, x):
me._add_bag(((_keykind_arg,x),))
return me._view.args((x,), me._spy)[x]
def __getitem__(me, x):
me._add_bag(((_keykind_arg,x),))
return me._view.args((x,), me._spy)[x]
def args(me, xs):
xs = _reiterable(xs)
me._add_bag((_keykind_arg,x) for x in xs)
return me._view.args(xs, me._spy)
def argseq(me, *xs):
me._add_bag((_keykind_arg,x) for x in xs)
ys = me._view.args(xs, me._spy)
return (ys[x] for x in xs)
def argdict(me, m):
m = m if type(m) is dict else dict(m)
xs = m.values()
me._add_bag((_keykind_arg,x) for x in xs)
ys = me._view.args(xs, me._spy)
return dict((a,ys[m[a]]) for a in m)
def input_a(me, x):
key = (_keykind_input,x)
me._add_bag((key,))
ys = yield async.Sync(me._view.keys_a((key,), me._spy))
yield async.Result(ys[key])
def inputs_a(me, xs):
keys = tuple((_keykind_input,x) for x in xs)
me._add_bag(keys)
ys = yield async.Sync(me._view.keys_a(keys, me._spy))
yield async.Result(dict((x,ys[k,x]) for k,x in ys))
def infile_a(me, path):
host = me._view._oven.host()
return me.input_a(host.lift_file(path))
def infiles_a(me, paths):
host = me._view._oven.host()
return me.inputs_a((host.lift_file(p) for p in paths))
def outfile_a(me, path):
opath, stuff = yield async.Sync(me._view._oven._outfile_a(path))
me._outfs.append(stuff)
yield async.Result(opath)
def __call__(me, *a, **kw):
return me.memo_a(*a, **kw)
def memo_a(me, fun_a, argmap=None):
argmap = _UserArgView.easy_map(argmap)
subbags = me._bagtip.branch()
fut = yield async.Begin(
me._oven._memo_a(fun_a, _View.child(me._view, argmap)),
future_receiver=me._subfuts.append
)
sublog = yield async.Wait(fut)
bags = sublog.bags()
bags = _lift_bags(me._config, me._view, argmap, bags)
for bag in bags:
subbags.add_bag(bag)
subbags.terminate()
yield async.Result(sublog.result()) # ok if raises
def scope_push(me):
if me._bagscope is None:
me._bagscope = _BagScope()
else:
me._bagscope.push()
def scope_pop(me):
if 0 == me._bagscope.pop():
me._bagscope.submit(me._bagtip)
me._bagscope = None
_keykind_arg = 0
_keykind_input = 1
class _UserArgView(object):
def __init__(me, view, spy):
assert view is None or isinstance(view, _View)
me._view = view
me._spy = spy
def args(me, xs):
return me._view.args(xs, me._spy)
def arg(me, x):
return me.args((x,))[x]
def __getitem__(me, x):
return me.args((x,))[x]
def argseq(me, *xs):
ys = me.args(xs)
return (ys[x] for x in xs)
def argdict(me, m):
m = m if type(m) is dict else dict(m)
ys = me.args(m.values())
return dict((a,ys[m[a]]) for a in m)
def scope_push(me):
pass
def scope_pop(me):
pass
@staticmethod
def easy_root(f):
if f is None:
return lambda x: None
elif type(f) is dict:
return f.get
else:
return f
@staticmethod
def easy_map(f):
if f is None:
@argmap_many
def g(xs, uav):
return uav.args(xs)
return g
elif type(f) is dict:
@argmap_many
def f1(xs, uav):
up = uav.args(set(xs) - set(f))
return dict((x,f[x] if x in f else up[x]) for x in xs)
return f1
else:
return f
class _View(object):
@classmethod
def clone(cls, that):
assert isinstance(that, _View)
me = cls()
me._oven = that._oven
me._parent = that._parent
me._argmap = that._argmap
me._memo = dict(that._memo)
me.spynote = that.spynote
return me
@classmethod
def fresh(cls, that):
assert isinstance(that, _View)
me = cls()
me._oven = that._oven
me._parent = that._parent
me._argmap = that._argmap
me._memo = {}
me.spynote = that.spynote
return me
@classmethod
def root(cls, oven, argroot):
assert isinstance(oven, Oven)
me = cls()
me._oven = oven
me._parent = None
me._argmap = lambda xs,uav: dict((x,argroot(x)) for x in xs)
me._memo = {}
me.spynote = None
return me
@classmethod
def child(cls, parent, argmap):
assert isinstance(parent, _View)
assert hasattr(argmap, '_bake_argmap')
me = cls()
me._oven = parent._oven
me._parent = parent
me._argmap = argmap
me._memo = {}
me.spynote = None
return me
def args(me, xs, spy=None):
xs = _reiterable(xs)
xs1 = set(x for x in xs if (_keykind_arg,x) not in me._memo)
ys = me._argmap(xs1, _UserArgView(me._parent, spy))
if spy is not None:
spy(me.spynote, dict(((_keykind_arg,x),ys[x]) for x in ys))
for x in xs:
if x not in xs1:
ys[x] = me._memo[_keykind_arg,x]
else:
me._memo[_keykind_arg,x] = ys[x]
return ys
def keys_a(me, keys, spy=None):
keys = _reiterable(keys)
xs = {}
for k,x in keys:
if k not in me._memo:
if k not in xs: xs[k] = []
xs[k].append(x)
ys = {}
if _keykind_arg in xs:
ys1 = me._argmap(xs[_keykind_arg], _UserArgView(me._parent, spy))
ys.update(((_keykind_arg,x),y) for x,y in ys1.items())
if _keykind_input in xs:
ys1 = yield async.Sync(me._oven.query_a(xs[_keykind_input]))
ys.update(((_keykind_input,x),y) for x,y in ys1.items())
inps = set((_keykind_input,x) for x in xs[_keykind_input])
p = me._parent
while p is not None:
inps.difference_update(p._memo)
if len(inps) == 0: break
ys1 = dict((key,ys[key]) for key in inps)
p._memo.update(ys1)
if spy is not None:
spy(p.spynote, ys1)
p = p._parent
if spy is not None:
spy(me.spynote, ys)
for key in keys:
if key in me._memo:
ys[key] = me._memo[key]
else:
me._memo[key] = ys[key]
yield async.Result(ys)
def keys_memoed(me, keys):
return dict((key,me._memo[key]) for key in keys)
class _BagScope(object):
def __init__(me):
me._root = []
me._stk = [me._root]
def push(me):
me._stk.append([])
def pop(me):
s = me._stk.pop()
if len(me._stk) == 0:
me._stk = None
return 0
if len(s) > 0:
if len(me._stk[-1]) == 0 or me._stk[-1][-1][0] != 'kids':
me._stk[-1].append(('kids',[]))
me._stk[-1][-1][1].append(s)
return len(me._stk)
def add_bag(me, keys):
me._stk[-1].append(('bag',_reiterable(keys)))
def submit(me, tip):
assert isinstance(tip, _BagDag._Tip)
def f(p, tip):
for t,x in p:
if t == 'bag':
tip.add_bag(x)
elif t == 'kids':
ktips = [tip.branch() for k in x]
for i in xrange(len(x)):
f(x[i], ktips[i])
ktips[i].terminate()
f(me._root, tip)
class _BagDag(object):
class _Tip(object):
def __init__(me, dag, parent):
me._dag = dag
dag._open_n += 1
me._parent = parent
me._kids = []
if parent is None or parent._prevs is not None:
me._prevs = set(() if parent is None else parent._prevs)
me._keyset = set(() if parent is None else parent._keyset)
else:
me._prevs = None
me._keyset = None
def branch(me):
kid = _BagDag._Tip(me._dag, me)
me._kids.append(kid)
return kid
def add_bag(me, keys):
if me._prevs is None:
return # adding to a terminated tip does nothing
keys = tuple(keys)
config = me._dag._config
gkeys = {}
for key in keys:
if key not in me._keyset:
me._keyset.add(key)
g = config._key_group(key)
if g not in gkeys: gkeys[g] = set()
gkeys[g].add(key)
if len(gkeys) > 0:
nd = _BagDag._Node(len(me._prevs), gkeys)
if len(me._prevs) == 0:
me._dag._roots.append(nd)
for prev in me._prevs:
prev.nexts.append(nd)
me._prevs.clear()
me._prevs.add(nd)
def terminate(me):
for k in tuple(me._kids):
k.terminate()
if me._parent is not None:
me._parent._prevs |= me._prevs
me._parent._keyset.update(me._keyset)
me._parent._kids.remove(me)
else:
me._dag._keyset.update(me._keyset)
me._dag._open_n -= 1
me._kids = None
me._prevs = None
me._keyset = None
class _Node(object):
__slots__ = ('prev_n','gkeys','nexts')
def __init__(me, prev_n, gkeys):
me.prev_n = prev_n
me.gkeys = gkeys
me.nexts = []
def __init__(me, config):
assert isinstance(config, Config)
me._open_n = 0
me._config = config
me._roots = []
me._keyset = set()
me._kids = []
def branch(me):
return me._Tip(me, None)
def flattened(me):
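        # Flatten the bag DAG into an ordered list of disjoint key sets: a group
        # ordering is chosen greedily (single-key groups are taken first) over a
        # handful of random restarts, the shortest ordering found wins, and that
        # ordering is replayed to collect the keys released at each step.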
assert me._open_n == 0
config = me._config
card = {}
for key in me._keyset:
g = config._key_group(key)
card[g] = 1 + card.get(g, 0)
def runner():
nd_gs = dict((nd,set(nd.gkeys)) for nd in me._roots)
nd_n = dict((nd,nd.prev_n) for nd in me._roots)
def advance(g):
for nd in tuple(nd_gs):
nd_gs[nd].discard(g)
if len(nd_gs[nd]) == 0:
del nd_gs[nd]
for nd1 in nd.nexts:
if nd1 not in nd_n:
nd_n[nd1] = nd1.prev_n
if nd_n[nd1] > 100:
sys.stderr.write('prev_n {} {}\n'.format(nd_n[nd1], nd1))
nd_n[nd1] -= 1
if nd_n[nd1] == 0:
nd_gs[nd1] = set(nd1.gkeys)
return nd_gs
return nd_gs, advance
best_path = None
gcmp = config.group_comparer()
nil = object()
for rand_seed in xrange(10):
rand = random.Random(rand_seed)
path = deque()
nd_gs, advance = runner()
while len(nd_gs) > 0:
gn = []
g1 = nil
for nd in nd_gs:
for g in nd_gs[nd]:
if card[g] == 1:
g1 = g if g1 is nil or gcmp(g, g1) < 0 else g1
else:
gn.append(g)
if g1 is not nil:
g = g1
else:
gn.sort(cmp=gcmp)
g = gn[0 if rand_seed==0 else rand.randint(0,len(gn)-1)]
path.append(g)
nd_gs = advance(g)
if best_path is None or len(path) < len(best_path):
best_path = path
path = best_path
flat = []
flatset = set()
nd_gs, advance = runner()
while len(nd_gs) > 0:
g = path.popleft()
keys = set()
for nd in nd_gs:
keys |= nd.gkeys.get(g, frozenset())
keys -= flatset
flatset |= keys
if len(keys) > 0:
flat.append(keys)
nd_gs = advance(g)
return flat
def _lift_bags(config, view, kid_argmap, keyss):
view = _View.fresh(view) # no memo
view.spynote = view
bags = []
for keys in keyss:
xs = {}
for k,x in keys:
if k not in xs: xs[k] = []
xs[k].append(x)
dag = _BagDag(config)
scope = _BagScope()
if _keykind_arg in xs:
scope.push()
def spy(tag, ys):
if tag is view:
scope.add_bag(ys)
class UAV(_UserArgView):
def scope_push(me):
scope.push()
def scope_pop(me):
scope.pop()
kid_argmap(xs[_keykind_arg], UAV(view, spy))
scope.pop()
if _keykind_input in xs:
scope.push()
bag = ((_keykind_input,x) for x in xs[_keykind_input])
scope.add_bag(bag)
scope.pop()
tip = dag.branch()
scope.submit(tip)
tip.terminate()
bags.extend(dag.flattened())
return bags
class _Log(object):
def __init__(me, funh):
me._funh = funh
me._bags = []
me._done = False
me._result = None
me._err = None
def funh(me):
return me._funh
def add_bags(me, keyss):
assert not me._done
for keys in keyss:
me._bags.append(set(keys))
def finish(me, result):
assert not me._done
me._done = True
me._result = result
me._err = None
me._upon_done()
def explode(me, ex, tb):
assert not me._done
me._done = True
me._err = (ex, tb)
me._upon_done()
def _upon_done(me):
pass
def bags(me):
assert me._done
return me._bags
def result(me):
assert me._done
if me._err is not None:
raise type(me._err[0]), me._err[0], me._err[1]
else:
return me._result
def error(me):
assert me._done
return me._err
def shift_bag(me, ix, keys):
assert me._done
keys = set(keys)
missing = set(keys)
bags = me._bags
n = len(keys)
i = ix
while True:
if n == 0 or i == len(bags):
bags.insert(ix, keys)
return missing
n0 = len(bags[i])
missing -= bags[i]
bags[i] -= keys
n1 = len(bags[i])
n -= n0 - n1
if n1 == 0:
del bags[i]
else:
i += 1
class _WipLog(_Log):
def __init__(me, funh):
super(_WipLog, me).__init__(funh)
me._seen = [] # [([(kind,x)],hash)]
me._bar = async.Barrier()
def add_keys(me, keyvals):
ks = {}
for key in keyvals:
k,x = key
if k not in ks: ks[k] = []
ks[k].append(key)
for keys in ks.values():
me._seen.append((keys, _seqhash((key,keyvals[key]) for key in keys)))
me._bar.fireall()
def _upon_done(me):
me._bar.fireall()
class _LogDb(object):
@classmethod
def new_a(cls, oven):
assert isinstance(oven, Oven)
me = cls()
me._oven = oven
me._lock = async.Lock()
me._signew = async.Signal(False, lambda a,b: a or b)
me._wips = {} # {funh: set(_WipLog)} -- work-in-progress
me._valenc = {}
me._valdec = {}
def schema(cxn):
_sql_ensure_table(cxn, 'logtrie', ('parent','ans_a','ans_b','query'), [['parent','ans_a']])
_sql_ensure_table(cxn, 'valbag', ('val','hash'), [['hash']])
yield async.Sync(oven._dbjob(schema))
yield async.Result(me)
_wraptag_ptr = 0
_chunk_tys = (tuple,list,set,frozenset,dict)
_wraptag_chunk_tag = dict(zip(_chunk_tys, range(1,1+len(_chunk_tys))))
_wraptag_chunk_ty = dict(zip(range(1,1+len(_chunk_tys)), _chunk_tys))
_items = {
tuple: lambda x:x,
list: lambda x:x,
set: sorted,
frozenset: sorted,
dict: lambda x: sorted(x.iteritems())
}
if False: # with chunking
def _pack_control(me, cxn):
def control(x, cata, wrap):
if type(x) in (tuple,list,set,frozenset,dict):
chks = me._chunkify(me._items[type(x)](x))
if len(chks) > 1:
wrap(control_chunk, me._wraptag_chunk_tag[type(x)], chks)
else:
cata(x, control)
else:
cata(x, control)
def control_chunk(x, cata, wrap):
n = cata(x, control)
if n >= 64:
wrap(control, me._wraptag_ptr, me._encode_val(cxn, x))
return control
else: # without chunking
def _pack_control(me, cxn):
def control(x, cata, wrap):
cata(x, control)
return control
def _unpack_unwrap(me, cxn):
def unwrap(tag, getcomps):
if tag == me._wraptag_ptr:
(row,) = getcomps(1)
return me._decode_val(cxn, row)
else:
(chks,) = getcomps(1)
items = []
for chk in chks:
items.extend(chk)
return me._wraptag_chunk_ty[tag](items)
return unwrap
def _encode_val(me, cxn, val):
v = buffer(valtool.pack(val, me._pack_control(cxn)))
h = valtool.Hasher().raw(v).digest()
if h not in me._valenc:
h1 = struct.unpack_from("<i", h)[0]
cur = cxn.cursor()
cur.execute('select rowid,val from valbag where hash=?', (h1,))
row = None
for got in cur.fetchall():
if got[1] == v:
row = got[0]
break
if row is None:
cur.execute('insert into valbag(val,hash) values(?,?)', (v,h1))
row = cur.lastrowid
me._valenc[h] = row
me._valdec[row] = val
cur.close()
return me._valenc[h]
def _decode_val(me, cxn, row):
if row not in me._valdec:
cur = cxn.cursor()
cur.execute('select val from valbag where rowid=?', (row,))
val = valtool.unpack(cur.fetchone()[0], me._unpack_unwrap(cxn))
me._valdec[row] = val
me._valenc[_valhash(val)] = row
cur.close()
return me._valdec[row]
@staticmethod
def _chunkify(xs):
szavg = 16
while 8*szavg < len(xs):
szavg *= 2
def test(x):
c = struct.unpack_from("<i", valtool.Hasher().eat(x).digest())[0]
            return (c & (szavg - 1)) == 0
chks = [[]]
for x in xs:
if len(chks[-1]) > 0 and test(x):
chks.append([])
chks[-1].append(x)
return chks
_sizeof_int = struct.calcsize("<i")
def _encode_query(me, cxn, done, qarg, legible):
if done:
return buffer(valtool.pack((0, me._encode_val(cxn, qarg))))
else:
ks = {_keykind_arg:[], _keykind_input:[]}
for k,x in qarg:
ks[k].append(x)
for k in sorted(ks):
ks[k].sort()
t = (
1 if legible else 2,
me._encode_val(cxn, ks[_keykind_arg]),
me._encode_val(cxn, ks[_keykind_input])
)
return buffer(valtool.pack(t))
def _decode_query(me, cxn, s):
s = valtool.unpack(s)
if s[0] == 0:
return (True, me._decode_val(cxn, s[1]), None)
else:
legible = s[0] == 1
keys = set()
for x in me._decode_val(cxn, s[1]):
keys.add((_keykind_arg, x))
for x in me._decode_val(cxn, s[2]):
keys.add((_keykind_input, x))
return (False, keys, legible)
@staticmethod
def _form_ans(legible, keyvals):
ks = keyvals.keys()
ks.sort()
return (tuple if legible else _seqhash)(keyvals[k] for k in ks)
def _split_ans(me, legible, ans):
if legible:
ans = valtool.pack(ans)
a = struct.unpack_from("<i", valtool.Hasher().raw(ans).digest())[0]
b = buffer(ans)
else:
a = struct.unpack_from("<i", ans)[0]
b = buffer(ans, me._sizeof_int)
return a, b
@staticmethod
def _merge_ans(legible, a, b):
if legible:
return valtool.unpack(b)
else:
return struct.pack("<i", a) + str(b)
def memo_a(me, funh, view, calc_a):
# return: _Log
# calc_a: view,_Log -> result
config = me._oven._config
# check that we aren't subsumed by any wip
disjoint = set() # set(_Log)
if funh not in me._wips:
me._wips[funh] = set()
wips = me._wips[funh]
while True:
disjoint &= wips
if len(disjoint) == len(wips): # disjoint with all wips
acq = me._lock.acquire()
new_wip = me._signew.begin_frame()
yield async.Wait(acq)
if not new_wip.aggregate():
break
else:
me._lock.release()
else: # test disjointness with wip
wip = (wips - disjoint).pop()
ix = 0
while True:
while ix == len(wip._seen):
if wip._done:
yield async.Result(wip)
else:
yield async.Wait(wip._bar.enlist())
keys, h0 = wip._seen[ix]
ys = yield async.Sync(view.keys_a(keys))
ix += 1
h1 = _seqhash((key,ys[key]) for key in keys)
if h0 != h1:
disjoint.add(wip)
break
# we have the lock, test the memo cache
parent, keys, legible, ans = -1, None, False, funh
log = _Log(funh) # build as we traverse trie
while True:
def look(cxn, parent, legible, ans):
cur = cxn.cursor()
ans_a, ans_b = me._split_ans(legible, ans)
cur.execute(
"select rowid, query " +
"from logtrie " +
"where parent=? and ans_a=? and ans_b=?",
(parent, ans_a, ans_b))
r = cur.fetchone()
if r is None:
return None
cur.close()
return (r[0],) + me._decode_query(cxn, r[1])
got = yield async.Sync(me._oven._dbjob(
lambda cxn: look(cxn, parent, legible, ans)
))
if got is None:
log = None
break
parent, done, qarg, legible = got
if done:
log.finish(qarg)
break
else:
keys = qarg
grp = config._keys_group(keys)
log.add_bags((keys,))
ys = yield async.Sync(view.keys_a(keys))
ans = me._form_ans(legible, ys)
if log is not None:
me._lock.release()
else: # must compute
view = _View.fresh(view)
log = _WipLog(funh)
wips.add(log)
me._signew.pulse(True) # signal new wip created
me._lock.release()
try:
result = yield async.Sync(calc_a(view, log))
log.finish(result)
except Exception, e:
log.explode(e, sys.exc_traceback)
# done computing, put in cache
class Thunk(object):
def __init__(me, **kw):
for x in kw:
setattr(me, x, kw[x])
box = Thunk(
ix = 0,
parent = -1,
legible = False,
ans = funh,
redundant = None,
missing = None
)
yield async.Wait(me._lock.acquire())
wips.discard(log)
def prefix(cxn):
cur = cxn.cursor()
box.redundant = False
box.missing = frozenset()
while True:
ans_a, ans_b = me._split_ans(box.legible, box.ans)
cur.execute(
"select rowid, query " +
"from logtrie " +
"where parent=? and ans_a=? and ans_b=?",
(box.parent, ans_a, ans_b))
got = cur.fetchone()
if got is None:
return
box.parent = got[0]
done, keys, box.legible = me._decode_query(cxn, got[1])
if done or box.ix == len(log._bags):
box.redundant = True
return
box.missing = log.shift_bag(box.ix, keys)
if len(box.missing) > 0:
return
box.ans = me._form_ans(box.legible, view.keys_memoed(keys))
box.ix += 1
while True:
yield async.Sync(me._oven._dbjob(prefix))
if len(box.missing) > 0:
yield async.Sync(view.keys_a(box.missing))
else:
break
if box.redundant:
sys.stderr.write('WARNING: redundant computation detected!\n')
else:
def insert(cxn):
cur = cxn.cursor()
ok = False
try:
while box.ix <= len(log._bags):
ans_a, ans_b = me._split_ans(box.legible, box.ans)
if box.ix == len(log._bags):
done = True
qarg = log._result
else:
done = False
qarg = log._bags[box.ix]
qstr = me._encode_query(cxn, done, qarg, box.legible)
cur.execute(
"insert into logtrie(parent,ans_a,ans_b,query) " +
"values(?,?,?,?)",
(box.parent, ans_a, ans_b, qstr))
box.parent = cur.lastrowid
if not done:
box.legible = config.group_legible(config._keys_group(qarg))
box.ans = me._form_ans(box.legible, view.keys_memoed(qarg))
box.ix += 1
ok = True
finally:
if ok:
cxn.commit()
else:
cxn.rollback()
if log.error() is None:
yield async.Sync(me._oven._dbjob(insert))
me._lock.release()
yield async.Result(log)
if False: # search is totally out-of-date
def search_a(me, funtest):
def find(cxn, par, legible, test):
cur = cxn.cursor()
if test is TestNo:
rows = ()
elif isinstance(test, TestEqualAny):
rows = []
for ans in test.values:
ans_a, ans_b = me._split_ans(legible, ans)
cur.execute(
"select rowid,ans_a,ans_b,query " +
"from logtrie " +
"where par=? and ans_a=? and ans_b=?",
                            (par, ans_a, ans_b))
rows += cur.fetchall()
else:
cur.execute(
"select rowid,ans_a,ans_b,query " +
"from logtrie " +
"where par=?",
(par,))
rows = cur.fetchall()
ans = []
for row,ans_a,ans_b,query in rows:
ans = me._merge_ans(legible, ans_a, ans_b)
m = test(ans)
if m is not MatchNone:
done, qarg, legible = me._decode_query(cxn, query)
ans.append((row, done, qarg, legible, m))
return ans
oven = me._oven
query_a = oven.query_a
def hashed_test(test):
if type(test) is TestEqualAny:
h2v = dict((_seqhash(v),v) for v in test.values)
return TestEqualAny(h2v.keys(), lambda h: test(h2v[h]))
elif type(test) is TestNotEqualAll:
hs = tuple(_seqhash(v) for v in test.values)
class T(tuple):
def __getitem__(me, i):
raise Exception("you should't be looking at this")
return TestNotEqualAll(hs, lambda h: test(T()))
else:
assert False
def cont_found_a(fut):
# depth-first
assert fut is find_futs[-1]
find_futs.pop()
if len(find_futs) > 0:
futs[find_futs[0]] = (cont_found_a, find_futs[0])
for row,done,qarg,legible,m in fut.result():
if done:
m.result(qarg)
else:
keys = qarg
xs = {}
for k,x in keys:
if k not in xs: xs[k] = set()
xs[k].add(x)
if _keykind_arg in xs:
xs[_keykind_arg] = sorted(xs[_keykind_arg])
arg_test = m.args(xs[_keykind_arg])
if not legible:
arg_test = hashed_test(test)
else:
arg_test = None
if _keykind_input in xs:
xs[_keykind_input] = sorted(xs[_keykind_input])
inp_test_fut = yield async.Begin(m.inputs_a(xs[_keykind_input], query_a))
else:
inp_test_fut = async.Future()
inp_test_fut.finish(None)
futs[fut] = (cont_test_a, arg_test, inp_test_fut, row, legible, keys)
def cont_test_a(arg_test, inp_test_fut, row, legible, keys):
inp_test = inp_test_fut.result()
# here, combine tests arg & input
# HARD! need to combine Matches
fut = yield async.Begin(oven._dbjob(
lambda cxn: find(cxn, row, legible, test)
))
if len(find_futs) == 0:
futs[fut] = (cont_found_a, fut)
find_futs.append(fut)
fut = yield async.Begin(oven._dbjob(
lambda cxn: find(cxn, -1, False, hashed_test(funtest))
))
find_futs = deque([fut])
futs = {fut: (cont_found_a, fut)}
while len(futs) > 0:
fut = yield async.WaitAny(futs)
cont = futs.pop(fut)
yield async.Sync(cont[0](*cont[1:]))
|
def copy(source, destination):
    # Copy the contents of the source file into the destination file, closing
    # both file handles via the context managers.
    with open(source) as f1, open(destination, 'w') as f2:
        f2.write(f1.read())
|
import os.path
import re
import uuid
from django.conf import settings
from django.db.models import Q, Max
from django.urls import reverse
from django.utils import timezone
import jwt
def some(args):
return any(args) and not all(args)
def get_version_hash() -> str:
return str(uuid.uuid4()).replace("-", "")
def generate_slug(model) -> str:
if not model:
return uuid.uuid4().hex[:settings.SLUG_LENGTH]
while True:
slug = uuid.uuid4().hex[:settings.SLUG_LENGTH]
if not model.objects.filter(slug=slug).exists():
return slug
def generate_slugs_batch(model, count:int) -> set:
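    # Build `count` candidate slugs up front, then repeatedly drop any that already
    # exist in the database and top the set back up until every slug is unique.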
if count <= 0:
return set()
slugs = set(uuid.uuid4().hex[:settings.SLUG_LENGTH] for i in range(count))
while len(slugs) < count:
slugs.add(uuid.uuid4().hex[:settings.SLUG_LENGTH])
while True:
duplicate_slugs = model.objects.filter(slug__in=slugs).values_list("slug", flat=True)
if not duplicate_slugs.exists():
break
slugs.difference_update(duplicate_slugs)
while len(slugs) < count:
slugs.add(uuid.uuid4().hex[:settings.SLUG_LENGTH])
return slugs
VALID_SLUG_PATT = re.compile(r'^[a-zA-Z0-9]{10,}$')
def is_valid_slug(slug:str) -> bool:
return bool(VALID_SLUG_PATT.match(slug))
def get_date_conflict_Q(start, end):
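    # Two date ranges conflict when the existing record overlaps [start, end] in any
    # way: it straddles the start, it begins inside the range, or it lies fully
    # within it.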
return (
Q(start__lte=start, end__gte=start)
| Q(start__gte=start, start__lte=end)
| Q(start__gte=start, end__lte=end)
| Q(start__lte=start, end__gte=end))
def get_next_journal_entry_display_id_for_company(company) -> int:
from api.models import JournalEntry
last_id = JournalEntry.objects.filter(period__company=company).aggregate(m=Max("display_id"))['m'] or 0
return last_id + 1
def get_report_page_breadcrumbs(period, report_name:str) -> list:
company = period.company
date_format = "%b %-d, %Y"
return [
{
'value':company.name,
'href':reverse("app-company", kwargs={'slug':company.slug}),
}, {
'value':'periods',
'href':reverse("app-period", kwargs={'slug':company.slug})
}, {
'value':f'{period.start.strftime(date_format)} -> {period.end.strftime(date_format)}',
'href':reverse("app-period-details", kwargs={'slug':period.slug})
}, {
'value':report_name,
}
]
def get_company_periods_up_to_and_excluding(period):
""" Get all periods for a company leading up to the given period, including the given period
"""
company = period.company
return (company.period_set
.exclude(id=period.id)
.filter(company=company, end__lte=period.start))
def get_company_periods_up_to(period):
""" Get all periods for a company leading up to the given period, including the given period
"""
company = period.company
return (company.period_set
.filter(company=company, end__lte=period.end))
def get_dr_cr_balance(dr_total:int, cr_total:int):
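    # Returns the absolute difference between the debit and credit totals,
    # or zero when neither total is positive.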
balance = 0
if dr_total > 0 and dr_total > cr_total:
balance = dr_total - cr_total
elif dr_total > 0 and dr_total < cr_total:
balance = cr_total - dr_total
elif cr_total > 0 and cr_total > dr_total:
balance = cr_total - dr_total
elif cr_total > 0 and cr_total < dr_total:
balance = dr_total - cr_total
return balance
def force_negative(val):
return abs(val) * -1
def get_photo_gallery_images():
return [
'Apple2.jpg',
'Excel.png',
'Lotus123.jpg',
'Pacioli.jpg',
'Visicalc.jpg',
'Abacus.jpg',
'Ledger.jpg',
'Calculator.png',
]
def get_account_activation_token(slug:str) -> str:
claims = {
'created_at':timezone.now().strftime("%s"),
'slug':slug,
}
return jwt.encode(claims, settings.OBJECT_SERIALIZATION_KEY, algorithm=settings.JWT_ALGORITHM).decode()
def get_account_activation_url(slug:str, token:str) -> str:
return os.path.join(settings.BASE_ABSOLUTE_URL, 'activate', slug) + "?token=" + token
def get_slug_from_account_activation_token(token:str) -> str:
data = jwt.decode(
token,
settings.OBJECT_SERIALIZATION_KEY,
algorithms=[settings.JWT_ALGORITHM])
return data['slug']
|
from .loader import read_sparse_matrix
from .util import load_gensim_word2vec
import torch
import numpy as np
import scipy.sparse as sparse
import os
from os.path import isfile, join
import pickle
from transformers import BertTokenizer
import time
class Dataset(object):
"""docstring for Dataset"""
def __init__(self, config, svd=False, train=True):
# generate ppmi matrix for co-occurence
pattern_filename = config.get("data", "pattern_filename")
bert_dir = config.get("data", "bert_path")
self.context_num = int(config.getfloat("hyperparameters", "context_num"))
self.context_len = int(config.getfloat("hyperparameters", "context_len"))
self.max_seq_length = int(config.getfloat("hyperparameters", "max_seq_length"))
k = int(config.getfloat("hyperparameters", "svd_dimension"))
self.batch_size = int(config.getfloat("hyperparameters", "batch_size"))
self.negative_num = int(config.getfloat("hyperparameters", "negative_num"))
self.tokenizer = BertTokenizer.from_pretrained(bert_dir)
csr_m, self.id2word, self.vocab, _ = read_sparse_matrix(
pattern_filename, same_vocab=True)
self.word2id = {}
for i in range(len(self.id2word)):
self.word2id[self.id2word[i]] = i
self.matrix = csr_m.todok()
self.p_w = csr_m.sum(axis=1).A[:,0]
self.p_c = csr_m.sum(axis=0).A[0,:]
self.N = self.p_w.sum()
# for w2v
# self.wordvecs = load_gensim_word2vec("/home/shared/acl-data/embedding/ukwac.model",
# self.vocab)
# self.wordvec_weights = self.build_emb()
#print(self.matrix.shape)
print('SVD matrix...')
tr_matrix = sparse.dok_matrix(self.matrix.shape)
self.left_has = {}
self.right_has = {}
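        # Positive PMI: PPMI(l, r) = max(0, log(N * count(l, r) / (p_w[l] * p_c[r]))),
        # where N is the total count and p_w / p_c are the row / column marginals;
        # the np.clip below implements the max(0, .) part.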
for (l,r) in self.matrix.keys():
pmi_lr = (np.log(self.N) + np.log(self.matrix[(l,r)])
- np.log(self.p_w[l]) - np.log(self.p_c[r]))
ppmi_lr = np.clip(pmi_lr, 0.0, 1e12)
tr_matrix[(l,r)] = ppmi_lr
if l not in self.left_has:
self.left_has[l] = []
self.left_has[l].append(r)
if r not in self.right_has:
self.right_has[r] = []
self.right_has[r].append(l)
self.ppmi_matrix = tr_matrix
U, S, V = sparse.linalg.svds(self.ppmi_matrix.tocsr(), k=k)
self.U = U.dot(np.diag(S))
self.V = V.T
# for context
w2v_dir = "/home/shared/acl-data/embedding/"
vocab_path = "/home/shared/acl-data/corpus/"
print('Loading vocab...')
self.load_vocab(w2v_dir, vocab_path)
print('Loading context...')
if train:
self.context_dir = config.get("data", "context")
has_context_word_id_list = self.load_target_word(self.context_dir)
self.context_dict = {}
for matrix_id in range(len(self.vocab)):
word = self.id2word[matrix_id]
if word in self.context_w2i:
context_id = self.context_w2i[word]
if context_id in has_context_word_id_list:
self.context_dict[matrix_id] = self.load_word_context_for_bert(context_id)
# self.positive_data, self.positive_label = self.generate_positive()
self.get_avail_vocab()
else:
self.context_dict = {}
self.context_dir = config.get("data", "context_oov")
has_context_word_id_list = self.load_target_word(self.context_dir)
for context_id in has_context_word_id_list:
self.context_dict[context_id] = self.load_word_context_for_bert(context_id)
def load_target_word(self, data_dir):
target_word_list = [int(f.split('.')[0]) for f in os.listdir(data_dir) if isfile(join(data_dir, f))]
return np.asarray(target_word_list)
def get_avail_vocab(self):
avail_vocab = []
for idx in range(len(self.vocab)):
if idx in self.context_dict:
avail_vocab.append(idx)
self.avail_vocab = np.asarray(avail_vocab)
print('Available word num: {}'.format(len(avail_vocab)))
shuffle_indices_left = np.random.permutation(len(self.avail_vocab))[:2000]
shuffle_indices_right = np.random.permutation(len(self.avail_vocab))[:2000]
dev_data = []
dev_label = []
self.dev_dict = {}
for id_case in range(2000):
id_left = self.avail_vocab[shuffle_indices_left[id_case]]
id_right = self.avail_vocab[shuffle_indices_right[id_case]]
dev_data.append([id_left,id_right])
dev_label.append(self.U[id_left].dot(self.V[id_right]))
self.dev_dict[(id_left, id_right)] = 1
self.dev_data = np.asarray(dev_data)
self.dev_label = np.asarray(dev_label)
def build_emb(self):
self.word2id = {}
for i in range(len(self.id2word)):
self.word2id[self.id2word[i]] = i
tensors = []
ivocab = []
self.w2embid = {}
self.embid2w = {}
for word in self.wordvecs:
vec = torch.from_numpy(self.wordvecs[word])
self.w2embid[self.word2id[word]] = len(ivocab)
self.embid2w[len(ivocab)] = self.word2id[word]
ivocab.append(word)
tensors.append(vec)
assert len(tensors) == len(ivocab)
tensors = torch.cat(tensors).view(len(ivocab), 300)
return tensors
def load_vocab(self, w2v_dir, data_dir):
i2w_path = os.path.join(data_dir, 'ukwac_id2word.pkl')
w2i_path = os.path.join(data_dir, 'ukwac_word2id.pkl')
with open(i2w_path, 'rb') as fr:
self.context_i2w = pickle.load(fr)
with open(w2i_path, 'rb') as fr:
self.context_w2i = pickle.load(fr)
self.PAD = 0
self.UNK = 1
# w2v_model = Word2Vec.load(w2v_path)
# emb = w2v_model.wv
# oi2ni = {}
# new_embedding = []
# new_embedding.append(np.zeros(300))
# new_embedding.append(np.zeros(300))
# cnt_ni = 2
# for _id, word in i2w.items():
# if word in emb:
# oi2ni[_id] = cnt_ni
# cnt_ni += 1
# new_embedding.append(emb[word])
# else:
# oi2ni[_id] = self.UNK
oi2ni_path = os.path.join(w2v_dir, 'context_word_oi2ni.pkl')
w2v_path = os.path.join(w2v_dir, 'context_word_w2v.model.npy')
with open(oi2ni_path, 'rb') as fr:
self.context_i2embid = pickle.load(fr)
self.context_word_emb = np.load(w2v_path)
def load_word_context(self, word_idx):
context_path = os.path.join(self.context_dir, '{}.txt'.format(word_idx))
context_list = []
with open(context_path, 'r') as fr:
flag_right = False
cnt_line = 0
for line in fr:
line = line.strip()
if len(line) != 0:
context = [int(num) for num in line.split(' ')]
else:
context = []
context = [self.context_i2embid[num] for num in context]
if not flag_right:
left_context = [self.PAD] * self.context_len
if len(context) >= self.context_len:
left_context = context[(len(context) - self.context_len):]
else:
left_context[(self.context_len-len(context)):] = context
flag_right = True
else:
right_context = [self.PAD] * self.context_len
if len(context) >= self.context_len:
right_context = list(reversed(context[:self.context_len]))
else:
right_context[self.context_len-len(context):] = list(reversed(context))
context_list.append([left_context, right_context])
flag_right = False
cnt_line += 1
if cnt_line == 2* self.context_num:
break
if len(context_list) <= self.context_num:
for i in range(self.context_num - len(context_list)):
context_list.append([[self.PAD]*self.context_len, [self.PAD]*self.context_len])
return context_list
def load_word_context_for_bert(self, word_idx):
context_path = os.path.join(self.context_dir, '{}.txt'.format(word_idx))
context_list = []
with open(context_path, 'r') as fr:
flag_right = False
cnt_line = 0
for line in fr:
line = line.strip()
if len(line) != 0:
context = [int(num) for num in line.split(' ')]
else:
context = []
context = [self.context_i2w[num] for num in context]
context = [each for each in context if each != "@card@"]
context = [each for each in context if "http" not in each]
context = [each for each in context if "JavaScript" not in each]
if not flag_right:
if len(context) >= self.context_len:
left_context = context[(len(context) - self.context_len):]
else:
left_context = context
flag_right = True
left_context.append(self.context_i2w[word_idx])
else:
if len(context) >= self.context_len:
right_context = context[:self.context_len]
else:
right_context = context
full_context = '[CLS] ' + " ".join(left_context + right_context) + ' [SEP]'
context_list.append(full_context)
flag_right = False
cnt_line += 1
if cnt_line == 2 * self.context_num:
break
if word_idx == self.context_w2i['kinetoscope'] or word_idx == self.context_w2i['device']:
print(context_list)
if len(context_list) <= self.context_num:
for i in range(self.context_num - len(context_list)):
context_list.append(" ".join(['[PAD]']*(2 *self.context_len + 1)))
        batched_output = self.tokenizer.batch_encode_plus(
            context_list, add_special_tokens=False, padding='max_length',
            truncation=True, max_length=self.max_seq_length,
            return_attention_mask=True)
if np.asarray(batched_output['input_ids']).dtype == "object":
print("something is wrong on word :" + str(word_idx))
print(context_list)
print(len(context_list))
print("--------------------------------------------")
outputs = dict({'ids': batched_output['input_ids'], "mask": batched_output['attention_mask']})
return outputs
def generate_positive(self):
positive = []
label = []
key_list = list(self.ppmi_matrix.keys())
shuffle_indices = np.random.permutation(len(key_list))
for shuffle_id in shuffle_indices:
(l, r) = key_list[shuffle_id]
# if self.id2word[l] in self.wordvecs and self.id2word[r] in self.wordvecs:
# positive.append([self.w2embid[l],self.w2embid[r]])
if l in self.context_dict and r in self.context_dict:
positive.append([l, r])
score = self.U[l].dot(self.V[r])
label.append(score)
# label.append(self.ppmi_matrix[(l,r)])
# 119448 positive score
positive_train = np.asarray(positive)[:-2000]
self.dev_data = np.asarray(positive)[-2000:]
label_train = np.asarray(label)[:-2000]
self.dev_label = np.asarray(label)[-2000:]
return positive_train, label_train
def generate_negative(self, batch_data, negative_num):
negative = []
label = []
batch_size = batch_data.shape[0]
for i in range(batch_size):
# random_idx = np.random.choice(len(self.vocab), 150 , replace=False)
l = batch_data[i][0]
# l_w = self.embid2w[l]
r = batch_data[i][1]
# r_w = self.embid2w[r]
l_neg = l
r_neg = r
num = 0
for j in range(negative_num):
left_prob = np.random.binomial(1, 0.5)
while True:
if left_prob:
l_neg = np.random.choice(len(self.vocab), 1)[0]
else:
r_neg = np.random.choice(len(self.vocab), 1)[0]
# if (l_neg, r_neg) not in self.matrix.keys() and self.id2word[l_neg] in self.wordvecs and self.id2word[r_neg] in self.wordvecs:
if (l_neg, r_neg) not in self.matrix.keys() and l_neg in self.context_dict and r_neg in self.context_dict:
break
# negative.append([self.w2embid[l_neg], self.w2embid[r_neg]])
negative.append([self.context_dict[l_neg], self.context_dict[r_neg]])
score = self.U[l_neg].dot(self.V[r_neg])
# score = 0
label.append(score)
negative = np.asarray(negative)
label = np.asarray(label)
return negative, label
def get_batch(self):
num_positive = len(self.positive_data)
batch_size = self.batch_size
if num_positive% batch_size == 0:
batch_num = num_positive // batch_size
else:
batch_num = num_positive // batch_size + 1
shuffle_indices = np.random.permutation(num_positive)
for batch in range(batch_num):
start_index = batch * batch_size
end_index = min((batch+1) * batch_size, num_positive)
batch_idx = shuffle_indices[start_index:end_index]
batch_positive_data = self.positive_data[batch_idx]
batch_positive_label = self.positive_label[batch_idx]
batch_negative_data, batch_negative_label = self.generate_negative(batch_positive_data, self.negative_num)
batch_positive_context = []
for [l, r] in batch_positive_data:
batch_positive_context.append([self.context_dict[l], self.context_dict[r]])
# [batch, 2, doc, 2, seq]
batch_input = np.concatenate((batch_positive_context, batch_negative_data), axis=0)
batch_label = np.concatenate((batch_positive_label,batch_negative_label), axis=0)
yield batch_input, batch_label
def sample_batch(self):
num_data = len(self.avail_vocab)
batch_size = self.batch_size
if num_data % batch_size == 0:
batch_num = num_data // batch_size
else:
batch_num = num_data // batch_size + 1
shuffle_indices = np.random.permutation(num_data)
for batch in range(batch_num):
start_index = batch * batch_size
end_index = min((batch+1) * batch_size, num_data)
batch_idx = shuffle_indices[start_index:end_index]
batch_data_context = []
batch_data_mask = []
batch_data_score = []
batch_data = self.avail_vocab[batch_idx]
for idx_i in batch_data:
for j in range(self.negative_num):
left_prob = np.random.binomial(1, 0.5)
if left_prob:
while True:
idx_j = np.random.choice(self.avail_vocab, 1)[0]
if (idx_i, idx_j) not in self.dev_dict:
break
batch_data_context.append([self.context_dict[idx_i]['ids'], self.context_dict[idx_j]['ids']])
batch_data_mask.append([self.context_dict[idx_i]['mask'], self.context_dict[idx_j]['mask']])
score = self.U[idx_i].dot(self.V[idx_j])
else:
while True:
idx_j = np.random.choice(self.avail_vocab, 1)[0]
if (idx_j, idx_i) not in self.dev_dict:
break
batch_data_context.append([self.context_dict[idx_j]['ids'], self.context_dict[idx_i]['ids']])
batch_data_mask.append([self.context_dict[idx_j]['mask'], self.context_dict[idx_i]['mask']])
score = self.U[idx_j].dot(self.V[idx_i])
batch_data_score.append(score)
yield np.asarray(batch_data_context), np.asarray(batch_data_mask), np.asarray(batch_data_score)
def sample_batch_dev(self):
num_data = len(self.dev_data)
batch_size = self.batch_size
if num_data % batch_size == 0:
batch_num = num_data // batch_size
else:
batch_num = num_data // batch_size + 1
# shuffle_indices = np.random.permutation(num_data)
for batch in range(batch_num):
start_index = batch * batch_size
end_index = min((batch+1) * batch_size, num_data)
batch_data = self.dev_data[start_index:end_index]
# batch_data_score = self.dev_label[start_index:end_index]
batch_data_context = []
batch_data_mask = []
for pair in batch_data:
idx_i, idx_j = pair
batch_data_context.append([self.context_dict[idx_i]['ids'], self.context_dict[idx_j]['ids']])
batch_data_mask.append([self.context_dict[idx_i]['mask'], self.context_dict[idx_j]['mask']])
yield np.asarray(batch_data_context), np.asarray(batch_data_mask)
def sample_pos_neg_batch(self):
num_data = len(self.avail_vocab)
batch_size = self.batch_size
if num_data % batch_size == 0:
batch_num = num_data // batch_size
else:
batch_num = num_data // batch_size + 1
shuffle_indices = np.random.permutation(num_data)
for batch in range(batch_num):
start_index = batch * batch_size
end_index = min((batch+1) * batch_size, num_data)
batch_idx = shuffle_indices[start_index:end_index]
batch_data_context = []
batch_data_score = []
batch_data = self.avail_vocab[batch_idx]
for idx_i in batch_data:
if idx_i in self.left_has:
idx_j_list = np.random.permutation(self.left_has[idx_i])
for idx_j in idx_j_list:
if idx_j in self.avail_vocab:
# batch_data_pair.append([self.w2embid[idx_i], self.w2embid[idx_j]])
batch_data_context.append([self.context_dict[idx_i], self.context_dict[idx_j]])
score = self.U[idx_i].dot(self.V[idx_j])
batch_data_score.append(score)
break
if idx_i in self.right_has:
idx_j_list = np.random.permutation(self.right_has[idx_i])
for idx_j in idx_j_list:
if idx_j in self.avail_vocab:
# batch_data_pair.append([self.w2embid[idx_j], self.w2embid[idx_i]])
batch_data_context.append([self.context_dict[idx_j], self.context_dict[idx_i]])
score = self.U[idx_j].dot(self.V[idx_i])
batch_data_score.append(score)
break
for j in range(self.negative_num):
# left_prob = np.random.binomial(1, 0.5)
# if left_prob:
while True:
idx_j = np.random.choice(self.avail_vocab, 1)[0]
if (idx_i, idx_j) not in self.dev_dict:
break
# batch_data_pair.append([self.w2embid[idx_i], self.w2embid[idx_j]])
batch_data_context.append([self.context_dict[idx_i], self.context_dict[idx_j]])
score = self.U[idx_i].dot(self.V[idx_j])
batch_data_score.append(score)
# else:
while True:
idx_j = np.random.choice(self.avail_vocab, 1)[0]
if (idx_j, idx_i) not in self.dev_dict:
break
# batch_data_pair.append([self.w2embid[idx_j], self.w2embid[idx_i]])
batch_data_context.append([self.context_dict[idx_j], self.context_dict[idx_i]])
score = self.U[idx_j].dot(self.V[idx_i])
batch_data_score.append(score)
yield np.asarray(batch_data_context), np.asarray(batch_data_score)
|
# Licensed under a MIT licence - see file `license`
import numpy as np
import astropy.units as u
from .. import fitters
from .. import regions
from ..utils import bool_indarray
def test_image_at_once(example3_3):
psfarray, image = example3_3
class psf(fitters.BasePSFFitter):
regions = regions.image_at_once
f = psf(psfarray, image)
regs = f.regions()
assert len(list(regs)) == 1
assert regs[0].shape == (9, )
assert np.all(regs[0] == ~image.mask.flatten())
def test_image_unmasked(example3_3):
psfarray, image = example3_3
class psf(fitters.BasePSFFitter):
regions = regions.image_unmasked
# image with mask
f = psf(psfarray, image)
regs = list(f.regions())
assert len(regs) == 1
assert np.all(regs[0] == ~image.mask.flatten())
# image without mask
f = psf(psfarray, np.ones((3, 3)))
regs = list(f.regions())
assert len(regs) == 1
assert regs[0].shape == (9, )
assert np.all(regs[0])
def test_group_by_basis(example3_3):
psfarray, image = example3_3
class psf(fitters.BasePSFFitter):
regions = regions.group_by_basis
f = psf(psfarray, image)
regs = list(f.regions())
assert len(regs) == 3
# order is given by implementation, but does not matter at all.
r1 = np.array([True, True, False, True, True, False, True, False, False])
r2 = np.array([False, False, False, False, False, True, False, False, False])
r3 = np.array([False, False, True, False, False, False, False, False, False])
# We don't know the order of regs, so check if any of the three matches
# and keep a list of which one matches which.
regfound = []
for r in regs:
for i, ri in enumerate([r1, r2, r3]):
if np.all(bool_indarray(9, r) == ri):
regfound.append(i)
break
assert set(regfound) == set([0, 1, 2])
f.min_number_of_bases = 2
assert len(list(f.regions())) == 1
def test_sector_regions():
im = np.arange(1200).reshape((30, 40))
psfs = np.ones((30, 40, 15))
for center in [(1, 7), None]:
for r, phi in zip([np.arange(55), np.array([0, 1, 5, 55])],
[5, np.linspace(0., 360., 5) * u.degree]):
class PSF(fitters.BasePSFFitter):
regions = regions.sectors
sector_radius = r
sector_phi = phi
sector_center = center
f = PSF(psfs, im)
regs = np.dstack(list(f.regions()))
regs = regs.reshape((1200, -1))
# Test that each pixel is part of one and only one region
assert np.all(regs.sum(axis=1) == 1)
# test a region that has a hole in the middle
class PSF2(PSF):
sector_radius = [5, 10, 50]
f = PSF2(psfs, im)
regs = np.dstack(list(f.regions()))
regs = regs.reshape((1200, -1))
    # Because of the hole in the middle, not every pixel is covered by a region
assert regs.sum() < 1200
def test_wrapper_min_size(example3_3):
class MinSizeFitter(fitters.BasePSFFitter):
regions = regions.wrapper_min_size(regions.group_by_basis)
psfarray, image = example3_3
f = MinSizeFitter(psfarray, image)
regs = list(f.regions())
assert len(regs) == 3
f.region_min_size = 5
regs = list(f.regions())
assert len(regs) == 1
f.region_min_size = 6
regs = list(f.regions())
assert len(regs) == 0
|
import copy
from typing import List
from typing import Tuple
from data_labeling.labeling_utils import xy_on_interpolated_image_to_raw_xy, xy_on_raw_image_to_xy_on_interpolated_image
class FrameAnnotation:
def __init__(self):
self.accepted = False # whether frame was marked as annotated successfully
self.discarded = False # whether frame was marked as discarded (ignored)
self.centre_points = [] # type: List[tuple] # x, y
self.rectangles = [] # type: List[Tuple[tuple, tuple]] # (x_left, y_top), (x_right, y_bottom)
self.raw_frame_data = None # Not an annotation, but write it to the result file, just in case
def as_dict(self):
data_dict = copy.copy(self.__dict__)
data_dict['centre_points'] = []
data_dict['rectangles'] = []
for i, point in enumerate(self.centre_points):
data_dict['centre_points'].append(xy_on_interpolated_image_to_raw_xy(point))
for i, rectangle in enumerate(self.rectangles):
data_dict['rectangles'].append((xy_on_interpolated_image_to_raw_xy(rectangle[0]),
xy_on_interpolated_image_to_raw_xy(rectangle[1])))
return data_dict
@classmethod
def from_dict(cls, data_dict, do_not_scale_and_reverse=False):
item = cls()
item.__dict__.update(data_dict)
for i, point in enumerate(item.centre_points):
item.centre_points[i] = xy_on_raw_image_to_xy_on_interpolated_image(point, do_not_scale_and_reverse)
for i, rectangle in enumerate(item.rectangles):
item.rectangles[i] = (xy_on_raw_image_to_xy_on_interpolated_image(rectangle[0], do_not_scale_and_reverse),
xy_on_raw_image_to_xy_on_interpolated_image(rectangle[1], do_not_scale_and_reverse))
return item
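# A minimal round-trip sketch (the coordinates are hypothetical): as_dict() maps the
# annotation back to raw-image coordinates for storage, and from_dict() restores it
# onto the interpolated image.
#
#   ann = FrameAnnotation()
#   ann.accepted = True
#   ann.centre_points.append((120, 80))
#   saved = ann.as_dict()
#   restored = FrameAnnotation.from_dict(saved)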
|
"""
Same as monge_array.py but avoiding copying odd and even indices rows into 2 new arrays.
So, instead of passing a list, we will be passing the indices of the initial array in subsequent
recursions using a a range object. This gets rid of additional memory overhead
"""
from monge_array import find_min
def odd_from_even(A, odd_indices, f_even):
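    # Given the leftmost-minimum column indices of the even-indexed rows (f_even),
    # each odd-indexed row only needs to be searched between the minima of its two
    # neighbouring even rows; the Monge property guarantees the minima are monotone.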
f = []
n_odd = len(odd_indices)
for i, index in enumerate(odd_indices):
f.append(f_even[i])
row = A[index]
left = f_even[i]
if i == len(f_even) - 1 and len(f_even) == n_odd:
right = len(row) - 1
else:
right = f_even[i + 1]
minimum_pos = find_min(row, left, right)
f.append(minimum_pos)
if n_odd < len(f_even):
f.append(f_even[-1])
return f
def leftmost_minimum_index(A, indices, power):
if len(indices) == 1:
row0 = A[indices[0]]
return [find_min(row0, 0, len(row0) - 1)]
even_indices = range(0, len(A), 2 ** power)
odd_indices = range(2 ** (power - 1), len(A), 2 ** power)
f_even = leftmost_minimum_index(A, even_indices, power + 1)
indices = odd_from_even(A, odd_indices, f_even)
return indices
if __name__ == "__main__":
A = [
[10, 17, 13, 28, 23],
[17, 22, 16, 29, 23],
[24, 28, 22, 34, 24],
[11, 13, 6, 17, 7],
[45, 44, 32, 37, 23],
[36, 33, 19, 21, 6],
[75, 66, 51, 53, 34],
]
print(leftmost_minimum_index(A, range(len(A)), power=1))
|
import torch.multiprocessing as mp
import pt_rpc_client
import pt_rpc_server
import grpc_client
import grpc_server
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--master_addr", type=str, default="127.0.0.1")
parser.add_argument("--master_port", type=str, default="29501")
parser.add_argument("--role", type=str, default="client")
parser.add_argument("--comm", type=str, default="ptrpc")
args = parser.parse_args()
if args.role == "client":
if args.comm == "ptrpc":
pt_rpc_client.run(addr=args.master_addr, port=args.master_port)
elif args.comm == "grpc":
grpc_client.run(addr=args.master_addr, port=args.master_port)
else:
raise ValueError(f"Unexpected role {args.comm}")
elif args.role == "server":
if args.comm == "ptrpc":
pt_rpc_server.run(addr=args.master_addr, port=args.master_port)
elif args.comm == "grpc":
grpc_server.run(addr=args.master_addr, port=args.master_port)
else:
raise ValueError(f"Unexpected role {args.comm}")
else:
raise ValueError(f"Unexpected role {args.role}")
if __name__ == "__main__":
main()
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic exec utility that allows us to set the
execute and root_helper attributes for putils.
Some projects need their own execute wrapper
and root_helper settings, so this provides that hook.
"""
import threading
from oslo_concurrency import processutils as putils
from oslo_context import context as context_utils
from oslo_utils import encodeutils
from os_brick.privileged import rootwrap as priv_rootwrap
class Executor(object):
def __init__(self, root_helper, execute=None,
*args, **kwargs):
if execute is None:
execute = priv_rootwrap.execute
self.set_execute(execute)
self.set_root_helper(root_helper)
@staticmethod
def safe_decode(string):
return string and encodeutils.safe_decode(string, errors='ignore')
@classmethod
def make_putils_error_safe(cls, exc):
"""Converts ProcessExecutionError string attributes to unicode."""
for field in ('stderr', 'stdout', 'cmd', 'description'):
value = getattr(exc, field, None)
if value:
setattr(exc, field, cls.safe_decode(value))
def _execute(self, *args, **kwargs):
try:
result = self.__execute(*args, **kwargs)
if result:
result = (self.safe_decode(result[0]),
self.safe_decode(result[1]))
return result
except putils.ProcessExecutionError as e:
self.make_putils_error_safe(e)
raise
def set_execute(self, execute):
self.__execute = execute
def set_root_helper(self, helper):
self._root_helper = helper
class Thread(threading.Thread):
"""Thread class that inherits the parent's context.
This is useful when you are spawning a thread and want LOG entries to
display the right context information, such as the request.
"""
def __init__(self, *args, **kwargs):
# Store the caller's context as a private variable shared among threads
self.__context__ = context_utils.get_current()
super(Thread, self).__init__(*args, **kwargs)
def run(self):
# Store the context in the current thread's request store
if self.__context__:
self.__context__.update_store()
super(Thread, self).run()
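# A minimal sketch of the hook described in the module docstring (the wrapper below
# is hypothetical): a project supplies its own execute callable and root_helper
# instead of relying on the defaults.
#
#   def my_execute(*cmd, **kwargs):
#       return priv_rootwrap.execute(*cmd, **kwargs)
#
#   executor = Executor(root_helper='sudo', execute=my_execute)
#   out, err = executor._execute('cat', '/etc/hostname')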
|
# -*- coding: UTF-8 -*-
from flask import Blueprint, request, jsonify, current_app
from main import db
from main.models.stock import Overview
from crawler.eastmoney.stock.data import get_code_hxtc
bp = Blueprint('stock_overview', __name__)
def get_pagination(model, page, page_size):
pagination = None
data = {}
pagination = model.query.order_by(model.code).paginate(
page=page, per_page=page_size, error_out=False)
if pagination:
        # Pagination object attributes: https://flask-sqlalchemy.palletsprojects.com/en/2.x/api/#flask_sqlalchemy.Pagination
data['items'] = [{'code': i.code, 'name': i.name, 'plate': i.plate, 'business_scope': i.business_scope}
for i in pagination.items]
data['page'] = pagination.page
data['pages'] = pagination.pages
data['per_page'] = pagination.per_page
data['total'] = pagination.total
return data
@bp.route('/', methods=['GET'])
def get_overview_list():
page = request.args.get('page', 1, type=int)
page_size = request.args.get('page_size', 15, type=int)
data = get_pagination(Overview, page, page_size)
return jsonify(data)
@bp.route('/<code>', methods=['GET'])
def get_overview(code):
overview = Overview.query.filter(Overview.code == code).all()
return jsonify(overview)
def upsert_overview(data):
code = data['code']
name = data.get('name')
plate = data.get('plate')
business_scope = data.get('business_scope')
    current_app.logger.info(f'Upsert core themes for <{name}({code})>')
overview = Overview.query.filter_by(code=code).first()
if overview:
overview.name = name
overview.plate = plate
overview.business_scope = business_scope
else:
overview = Overview(code=code, name=name, plate=plate, business_scope=business_scope)
db.session.add(overview)
db.session.commit()
@bp.route('/<code>', methods=['POST', 'PUT'])
def post_or_put_overview(code):
data = get_code_hxtc(current_app.config['CHROME_DRIVER'], code)
if data:
upsert_overview(data)
return jsonify({'msg': 'Synchronization succeeded', 'status_code': 201}), 201
else:
return jsonify({'msg': 'Synchronization failed', 'status_code': 500}), 500
|
import contextlib
from typing import Any, List, Optional, Sequence
import click
from valohai_cli.api import request
from valohai_cli.commands.project.create import create_project
from valohai_cli.consts import yes_option
from valohai_cli.ctx import get_project, set_project_link
from valohai_cli.messages import warn
from valohai_cli.utils import get_project_directory
from valohai_cli.utils.cli_utils import prompt_from_list
class NewProjectInstead(Exception):
pass
def filter_projects(projects: Sequence[dict], spec: str) -> List[dict]:
spec = str(spec).lower()
return [
project
for project in projects
if project['id'].lower() == spec or project['name'].lower() == spec
]
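# For example, filter_projects(projects, "My-Project") keeps the entries whose id or
# name equals "my-project" after lowercasing both sides.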
def choose_project(dir: str, spec: Optional[str] = None) -> Optional[dict]:
"""
Choose a project, possibly interactively.
:param dir: Directory (only used for prompts)
:param spec: An optional search string
:return: project object or None
"""
projects: List[dict] = request('get', '/api/v0/projects/', params={'limit': '1000'}).json()['results']
if not projects:
if click.confirm('You don\'t have any projects. Create one instead?'):
raise NewProjectInstead()
return None
if spec:
projects = filter_projects(projects, spec)
if not projects:
warn(f'No projects match {spec}')
return None
if len(projects) == 1:
return projects[0]
def nonlist_validator(answer: str) -> Any:
if answer.startswith('c'):
raise NewProjectInstead()
prompt = 'Which project would you like to link with {dir}?\nEnter [c] to create a new project.'.format(
dir=click.style(dir, bold=True),
)
has_multiple_owners = (len({p.get('owner', {}).get('id') for p in projects}) > 1)
def project_name_formatter(project: dict) -> str:
name: str = project['name']
with contextlib.suppress(Exception):
if has_multiple_owners:
dim_owner = click.style(project['owner']['username'] + '/', dim=True)
return f'{dim_owner}{name}'
return name
projects.sort(key=lambda project: project_name_formatter(project).lower())
return prompt_from_list(projects, prompt, nonlist_validator, name_formatter=project_name_formatter)
@click.command()
@click.argument('project', default=None, required=False)
@yes_option
def link(project: Optional[str], yes: bool) -> Any:
"""
Link a directory with a Valohai project.
"""
dir = get_project_directory()
current_project = get_project(dir)
if current_project and not yes:
click.confirm(
text='{dir} is already linked to project {name}; continue?'.format(
dir=click.style(current_project.directory, bold=True),
name=click.style(current_project.name, bold=True),
),
abort=True,
)
try:
project_obj = choose_project(dir, spec=project)
if not project_obj:
return 1
set_project_link(dir, project_obj, inform=True)
except NewProjectInstead:
name = click.prompt('Name the new project')
if name:
create_project(dir, name, yes=yes)
|
import models
import tensorflow as tf
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# TODO: rework this so that training results can be written out
def gen_image(model, noise, filename="test"):
image = model(noise).numpy()
for i in range(image.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(image[i, :, :])
plt.axis("off")
plt.savefig(filename)
if __name__ == "__main__":
g = models.Generator()
g.build((None, 100))
g.load_weights("g_w.h5")
z = tf.random.normal((16, 100))
y = g(z).numpy()
fig = plt.figure(figsize=(4, 4))
for i in range(y.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(y[i, :, :], cmap="gray")
plt.axis("off")
plt.savefig("results\\" + str(i))
plt.show()
|
import unittest
from talipp.indicators import ATR
from TalippTest import TalippTest
class TestATR(TalippTest):
def setUp(self) -> None:
self.input_values = list(TalippTest.OHLCV_TMPL)
def test_init(self):
ind = ATR(5, self.input_values)
print(ind)
self.assertAlmostEqual(ind[-3], 0.676426, places = 5)
self.assertAlmostEqual(ind[-2], 0.665141, places = 5)
self.assertAlmostEqual(ind[-1], 0.686113, places = 5)
def test_update(self):
self.assertIndicatorUpdate(ATR(5, self.input_values))
def test_delete(self):
self.assertIndicatorDelete(ATR(5, self.input_values))
def test_purge_oldest(self):
self.assertIndicatorPurgeOldest(ATR(5, self.input_values))
if __name__ == '__main__':
unittest.main()
|
from typing import Any
from django.db import models
from django.db.models.base import ModelBase
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.core.validators import MinValueValidator
from django.dispatch import receiver
class DirectReward(models.Model):
name = models.CharField(max_length=30)
description = models.TextField(blank=True, null=True)
user = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='direct_rewards'
)
def __str__(self) -> str:
return f'{self.id} - {self.name} for user "{self.user}"'
class PointReward(models.Model):
name = models.CharField(max_length=30)
description = models.TextField(blank=True, null=True)
points = models.IntegerField(validators=[MinValueValidator(1)])
user = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='point_rewards'
)
def __str__(self) -> str:
return f'{self.id} - {self.name} ({self.points} pts)' \
f' for user "{self.user}"'
class Goal(models.Model):
name = models.CharField(max_length=30)
description = models.TextField(blank=True, null=True)
finish_date = models.DateField(blank=True, null=True)
reward_points = models.IntegerField(
blank=True, null=True, validators=[MinValueValidator(1)]
)
direct_reward = models.OneToOneField(
DirectReward, blank=True, null=True, on_delete=models.CASCADE,
related_name='goal'
)
user = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='goals'
)
def __str__(self) -> str:
return f'{self.id} - {self.name} for user "{self.user}"'
class UserPoint(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
points = models.IntegerField(default=0, validators=[MinValueValidator(0)])
def __str__(self) -> str:
return f'User "{self.user.username}" has {self.points} points'
@receiver(post_save, sender=User)
def create_user_point(sender: ModelBase, instance: User,
created: bool, **kwargs: Any) -> None:
if created:
UserPoint.objects.create(user=instance)
|
for _ in range(int(input())):
count = 0
n = input()
for i in n:
if i == '4':
count += 1
print(count)
|
from ..engine import Box2DEngine
class Box:
""" Represents a box shape for a fixture """
def __init__(self, width=0, height=0):
""" Initialize the Box """
self.width = width
self.height = height
def apply(self, body, fixtureDef):
""" Apply the shape and fixture definition to the body """
return Box2DEngine.Body_AddBoxFixture(body.id, fixtureDef, self.width, self.height)
|
# q4. Write a program to check whether the input alphabet is a vowel or not using if-else.
inp = input("enter an alphabet: ").lower()
if inp == 'a' or inp == 'e' or inp == 'i' or inp == 'o' or inp == 'u':
    print('alphabet is a vowel')
else:
    print("alphabet is a consonant")
|
from django.db import models
import datetime
FRIDAY = 4
class Todo(models.Model):
done = models.BooleanField(default=False)
description = models.TextField()
def markCompleted(self):
if datetime.datetime.now().weekday() != FRIDAY:
self.done = True
|
import os
from datamanager.project import Project
from datamanager.filemanager import FileManager
from properties import dataFolderPath, always_write_to_disk
class DBManager(FileManager):
"""
Class that implements a DB manager. To use this class, you must first call the method
initialize_write_to_disk, then optionally call any other method for writing data to
disk, and finally call the method finalize_write_to_disk.
"""
def __init__(self):
"""
Initializes this DB manager.
"""
self.create_folder_if_it_does_not_exist(dataFolderPath)
def initialize_write_to_disk(self, project_name):
"""
Initializes the writing of a project to disk. Creates all the necessary directories.
:param project_name: the name of the project to be written to disk.
"""
rootfolder = os.path.join(dataFolderPath, project_name)
self.create_folder_if_it_does_not_exist(rootfolder)
self.create_folder_if_it_does_not_exist(os.path.join(rootfolder, "issues"))
self.create_folder_if_it_does_not_exist(os.path.join(rootfolder, "users"))
self.create_folder_if_it_does_not_exist(os.path.join(rootfolder, "events"))
self.create_folder_if_it_does_not_exist(os.path.join(rootfolder, "comments"))
def read_project_from_disk(self, project_name):
"""
Reads a project from disk given the name of the project that is also the folder
of the project.
:param project_name: the name of the project to be read from disk.
:returns: an object of type Project.
"""
project = Project()
rootfolder = os.path.join(dataFolderPath, project_name)
project["info"] = self.read_json_from_file_if_it_exists(os.path.join(rootfolder, "info.json"))
project["issues"] = self.read_jsons_from_folder(os.path.join(rootfolder, "issues"), "id")
project["users"] = self.read_jsons_from_folder(os.path.join(rootfolder, "users"), "id")
project["events"] = self.read_jsons_from_folder(os.path.join(rootfolder, "events"), "id")
project["comments"] = self.read_jsons_from_folder(os.path.join(rootfolder, "comments"), "id")
return project
def project_exists(self, project_name):
"""
Check if a project exists in the disk given the name of the project that is also the folder
of the project. The existence of the project is determined by whether it has an info.json file.
:param project_name: the name of the project to be read from disk.
:returns: True if the project exists, or False otherwise.
"""
return os.path.exists(os.path.join(dataFolderPath, project_name, "info.json"))
def finalize_write_to_disk(self, project_name, project, crawldatetime, lastcrawlcomplete):
"""
Finalizes the writing of a project to disk. Closes any open buffers.
:param project_name: the name of the project to be written to disk.
:param project: the project data to be written to disk.
:param crawldatetime: the time that this crawl started.
:param lastcrawlcomplete: the status of the last crawl, either True for complete of False otherwise.
"""
if not always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "info.json"), project["info"])
for issue in project["issues"].values():
self.write_json_to_file(os.path.join(rootfolder, "issues", str(issue["id"]) + ".json"), issue)
for user in project["users"].values():
self.write_json_to_file(os.path.join(rootfolder, "users", str(user["key"]) + ".json"), user)
for event in project["events"].values():
self.write_json_to_file(os.path.join(rootfolder, "events", str(event["id"]) + ".json"), event)
for comment in project["comments"].values():
self.write_json_to_file(os.path.join(rootfolder, "comments", str(comment["id"]) + ".json"), comment)
project["info"]["lastcrawlcomplete"] = lastcrawlcomplete
project["info"]["lastcrawled"] = crawldatetime
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "info.json"), project["info"])
def write_project_info_to_disk(self, project_name, info):
"""
Writes the info of a project to disk.
:param project_name: the name of the project.
:param info: the info to be written to disk.
"""
if always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "info.json"), info)
def write_project_issue_to_disk(self, project_name, issue):
"""
Writes an issue of a project to disk.
:param project_name: the name of the project.
:param issue: the issue to be written to disk.
"""
if always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "issues", str(issue["id"]) + ".json"), issue)
def write_project_user_to_disk(self, project_name, user):
"""
Writes a user of a project to disk.
:param project_name: the name of the project.
:param user: the user to be written to disk.
"""
if always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "users", str(user["key"]) + ".json"), user)
def write_project_event_to_disk(self, project_name, event):
"""
Writes an event of a project to disk.
:param project_name: the name of the project.
:param event: the event to be written to disk.
"""
if always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "events", str(event["id"]) + ".json"), event)
def write_project_comment_to_disk(self, project_name, comment):
"""
Writes a comment of a project to disk.
:param project_name: the name of the project.
:param comment: the comment to be written to disk.
"""
if always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "comments", str(comment["id"]) + ".json"), comment)
def write_project_worklog_to_disk(self, project_name, worklog):
"""
Writes a worklog of a project to disk.
:param project_name: the name of the project.
:param worklog: the worklog to be written to disk.
"""
if always_write_to_disk:
rootfolder = os.path.join(dataFolderPath, project_name)
self.write_json_to_file(os.path.join(rootfolder, "worklogs", str(worklog["id"]) + ".json"), worklog)
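# --- Hedged usage sketch (illustration only, not part of the original class) ---
# It assumes the datamanager/properties modules imported above resolve, and that
# crawldatetime may be passed as an ISO-8601 string; the project name is hypothetical.
if __name__ == "__main__":
    from datetime import datetime
    manager = DBManager()
    manager.initialize_write_to_disk("example_project")
    if manager.project_exists("example_project"):
        project = manager.read_project_from_disk("example_project")
    else:
        project = Project()
        for key in ("info", "issues", "users", "events", "comments"):
            project[key] = {}
    manager.finalize_write_to_disk("example_project", project, datetime.now().isoformat(), True)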
|
from flask_restful import Resource, fields, marshal_with, reqparse, request, marshal
from flask_socketio import emit
from core.models.ticket_item import TicketItemModel
from core.models.ticket import TicketModel
from core.views.table import TableDetailById
from core.views.menu import MenuItemById
from core.models.menu import Menu
from core.views.ticket import ticket_resource_fields, TicketById
from core.views.menu import menu_resource_fields
from core import db, socketio
ticket_item_resource_fields = {
'order_item_id' : fields.Integer,
'ticket_id': fields.Integer,
'menu_id': fields.Integer,
'ingredients_added': fields.String,
'ingredients_removed': fields.String,
'remark': fields.String,
'item_status': fields.String,
'quantity': fields.Integer
}
parser = reqparse.RequestParser()
parser.add_argument('ticket_id')
parser.add_argument('menu_id')
parser.add_argument('ingredients_added')
parser.add_argument('ingredients_removed')
parser.add_argument('remark')
parser.add_argument('item_status')
parser.add_argument('quantity')
parser.add_argument('order_item_ids')
class TicketItemByTicket(Resource):
@marshal_with(ticket_item_resource_fields)
def get(self, ticket_id):
"""get ticket items for a ticket id"""
ticket_item = TicketItemModel.query.filter(TicketItemModel.ticket_id == ticket_id).all()
return ticket_item, 200
class TicketItem(Resource):
@marshal_with(ticket_item_resource_fields)
def post(self):
args = parser.parse_args()
new_item = TicketItemModel(ticket_id=args.get('ticket_id'), menu_id=args.get('menu_id'),
ingredients_added=args.get('ingredients_added'),ingredients_removed=args.get('ingredients_removed'),
remark=args.get('remark'),quantity=args.get('quantity'))
db.session.add(new_item)
db.session.commit()
return new_item, 200
class UpdateTicketItems(Resource):
#@marshal_with(ticket_item_resource_fields)
def patch(self):
print(request.json)
if 'item_status' in request.json:
if 'order_item_ids' in request.json:
for order_item_id in request.json['order_item_ids']:
print(order_item_id)
ticket_item = TicketItemModel.query.get_or_404(order_item_id)
ticket_item.item_status = request.json['item_status']
db.session.commit()
all_ticket_items = TicketItemByTicket().get(ticket_item.ticket_id)[0]
if all(item['item_status'] == 'Complete' for item in all_ticket_items):
ticket = TicketModel.query.get_or_404(ticket_item.ticket_id)
ticket.ticket_status = 'Complete'
db.session.commit()
# socketio.emit('ticketsUpdated', broadcast=True)
return {}, 200
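# Hedged example request body for UpdateTicketItems.patch above (field names are
# taken from the request.json keys it reads; the route itself is registered
# elsewhere and is not shown here):
#   {"item_status": "Complete", "order_item_ids": [4, 5, 6]}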
"""Gets tickets for a given table session, including menu database entry"""
class TicketItemsBySession(Resource):
#@marshal_with({**ticket_resource_fields,**ticket_item_resource_fields})
def get(self, session_id):
"""get all ticket details for a given table session"""
tickets = TicketModel.query.filter(TicketModel.session_id == session_id).all()
ticket_json = []
for t in tickets:
ticket_items = TicketItemModel.query.filter(TicketItemModel.ticket_id == t.ticket_id).all()
ticket_total = []
for ticket_item in ticket_items:
menu_entry = MenuItemById().get_no_marshal(ticket_item.menu_id)[0]
menu_marsh = marshal(menu_entry, menu_resource_fields)
ticket_item_marsh = marshal(ticket_item, ticket_item_resource_fields)
ticket_marsh = marshal(t, ticket_resource_fields)
ticket_total.append({**ticket_marsh,**ticket_item_marsh, **menu_marsh})
ticket_json.append(ticket_total)
return ticket_json, 200
class ActiveTicketMenuItems(Resource):
def get(self):
"""get all ticket details"""
tickets = TicketModel.query.filter(TicketModel.ticket_status == 'Active').all()
ticket_json = []
for t in tickets:
ticket_items = TicketItemModel.query.filter(TicketItemModel.ticket_id == t.ticket_id).all()
ticket_total = []
for ticket_item in ticket_items:
menu_entry = MenuItemById().get_no_marshal(ticket_item.menu_id)[0]
menu_marsh = marshal(menu_entry, menu_resource_fields)
ticket_item_marsh = marshal(ticket_item, ticket_item_resource_fields)
ticket_marsh = marshal(t, ticket_resource_fields)
ticket_total.append({**ticket_marsh,**ticket_item_marsh, **menu_marsh})
ticket_json.append(ticket_total)
return ticket_json, 200
class TicketPriceTotal(Resource):
def get(self, session_id):
ticket_list = TicketItemsBySession().get(session_id)
total_price = 0
for ticket in ticket_list[0]:
total_price += sum(item['price'] for item in ticket)
return [total_price], 200
|
# -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""
"""
from sweetrpg_api_core.data import APIData
from unittest.mock import patch, Mock
from flask_rest_jsonapi.querystring import QueryStringManager
from bson.objectid import ObjectId
import datetime
import json
class TestModel(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def to_dict(self):
return {}
class TestDocument(object):
_id = ObjectId("1234567890abcdef12345678")
created = datetime.datetime.now()
items = ["1", "2"]
ref = ObjectId("234567890abcdef123456789")
refs = [ObjectId("34567890abcdef1234567890")]
def to_json(self):
this = {
'id': str(self._id),
'created': str(self.created),
'items': self.items,
'ref': str(self.ref),
'refs': list(map(str, self.refs)),
}
return json.dumps(this)
model_info = {
"test": {
"model": TestModel,
"document": TestDocument,
"type": "test",
"collection": "tests",
"properties": {},
},
}
@patch('sweetrpg_db.mongodb.repo.MongoDataRepository.create')
def test_create(repo_create):
repo_create.return_value = TestDocument()
api = APIData({'type': 'test', 'db': None, 'model_info': model_info})
model = api.create_object(TestModel(name="new"), {})
assert isinstance(model, TestModel)
@patch('sweetrpg_db.mongodb.repo.MongoDataRepository.get')
def test_get_object(repo_get):
repo_get.return_value = TestDocument()
api = APIData({'type': 'test', 'db': None, 'model_info': model_info})
obj = api.get_object({'id': "1"})
print(obj)
assert isinstance(obj, TestModel)
assert obj.id == "1234567890abcdef12345678"
# assert obj.created == None
assert isinstance(obj.items, list)
assert len(obj.items) == 2
assert obj.items[0] == "1"
assert obj.items[1] == "2"
assert obj.ref == "234567890abcdef123456789"
assert isinstance(obj.refs, list)
assert len(obj.refs) == 1
assert obj.refs[0] == "34567890abcdef1234567890"
# TODO
@patch('sweetrpg_db.mongodb.repo.MongoDataRepository.query')
def test_get_collection(repo_query):
repo_query.return_value = [TestDocument(), TestDocument()]
api = APIData({'type': 'test', 'db': None, 'model_info': model_info})
# TODO: need application context for QSM
# count, objs = api.get_collection(QueryStringManager({'x':1}, None), {})
#
# assert count == 2
# assert isinstance(objs[0], TestModel)
@patch('sweetrpg_db.mongodb.repo.MongoDataRepository.update')
def test_update_object(repo_update):
repo_update.return_value = TestDocument()
api = APIData({'type': 'test', 'db': None, 'model_info': model_info})
is_updated = api.update_object(TestModel(), {"name": "new"}, {"id": "1"})
assert is_updated == True
@patch('sweetrpg_db.mongodb.repo.MongoDataRepository.delete')
def test_delete_object(repo_delete):
repo_delete.return_value = True
api = APIData({'type': 'test', 'db': None, 'model_info': model_info})
is_deleted = api.delete_object(TestModel(), {"id": "1"})
assert is_deleted == True
|
#!/usr/bin/env python3
import os
from subprocess import call
import argparse
import multiprocessing
def run_unit_tests(octopus_build_dir, use_verbose_output):
octopus_test_dir = octopus_build_dir + "/test"
os.chdir(octopus_test_dir)
ctest_options = []
if use_verbose_output:
ctest_options.append("--verbose")
call(["ctest"] + ctest_options)
parser = argparse.ArgumentParser()
parser.add_argument('--type',
                    help='The type of tests to run (unit, valgrind, or regression)',
                    default="unit")
parser.add_argument('--verbose',
help='Output verbose test information',
action='store_true')
parser.add_argument('--compiler',
help='C++ compiler path')
parser.add_argument('--threads',
help='The number of threads to use for building',
type=int)
args = vars(parser.parse_args())
if args["type"] not in ["unit", "valgrind", "regression"]:
    print("Unknown test type " + args["type"])
exit()
# This file is in octopus-dir/test
octopus_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
root_cmake = octopus_dir + "/CMakeLists.txt"
if not os.path.exists(root_cmake):
print("octopus source directory corrupted: root CMakeLists.txt is missing. Please re-download source code.")
exit()
octopus_build_dir = octopus_dir + "/build"
if not os.path.exists(octopus_build_dir):
print("octopus source directory corrupted: build directory is missing. Please re-download source code.")
exit()
os.chdir(octopus_build_dir) # so cmake doesn't pollute root directory
cmake_options = []
if args["type"] == "unit":
cmake_options.extend(["-DBUILD_TESTING=ON", octopus_dir])
elif args["type"] == "valgrind":
cmake_options.append("-DCMAKE_BUILD_TYPE=Debug")
if args["compiler"]:
cmake_options.append("-DCMAKE_CXX_COMPILER=" + args["compiler"])
ret = call(["cmake"] + cmake_options + [".."])
make_options = []
if args["threads"]:
if (args["threads"] > 1):
make_options.append("-j" + str(args["threads"]))
else:
make_options.append("-j" + str(multiprocessing.cpu_count()))
if ret == 0:
ret = call(["make"] + make_options)
if ret == 0:
if args["type"] == "unit":
run_unit_tests(octopus_build_dir, args["verbose"])
elif args["type"] == "valgrind":
call(["make", "install"])
|
from lib.utils.evaluation_metrics.AverageSumErrorEvaluationMetric import AverageSumErrorEvaluationMetric
from lib.utils.evaluation_metrics.SumErrorEvaluationMetric import SumErrorEvaluationMetric
from lib.utils.evaluation_metrics.ClassificationEvaluationMetric import ClassificationEvaluationMetric
from lib.NeuralNetwork import NeuralNetwork
class EvaluationMetricFactory(object):
def __init__(self, nn: NeuralNetwork):
self._nn = nn
def get_evaluation_metric(self, evaluation_metric: str):
if evaluation_metric == "classification_report":
return ClassificationEvaluationMetric(self._nn)
elif evaluation_metric == "average_sum_error_report":
return AverageSumErrorEvaluationMetric(self._nn)
elif evaluation_metric == "sum_error_report":
return SumErrorEvaluationMetric(self._nn)
else:
raise Exception("Invalid report type.")
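# Hedged usage sketch (illustration only): assumes a NeuralNetwork instance,
# here called `network`, has been constructed elsewhere in this codebase.
#   factory = EvaluationMetricFactory(network)
#   metric = factory.get_evaluation_metric("classification_report")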
|
from BlochSolver.SolversManager import solvers_manager
from BlochSolver.Plotter import bloch_plotter as bs
from BlochSolver.Perturbations.filters import Filters
from BlochSolver.QuantumSolvers.rotations import rotation_handler
from BlochSolver.QuantumSolvers.numerics import numerical_methods
from BlochSolver.Utils.utils import Utils
import numpy as np
# INFO: The perturbation algorithms use a filter/refilter step to avoid problems with the rising edge.
# Below is an example of usage; solver options to try:
# 1. default or None - basic GRAPE algorithm
# 2. unitary - GRAPE for pulses without rise time, unitary evolution
# 3. perturbation grape - GRAPE for pulses with rise time, non-unitary evolution
# 4. perturbation unitary - GRAPE for pulses with no rise time, unitary evolution
def main():
bloch_plotter = bs.BlochPlotter()
quantum_solvers = solvers_manager.SolversManager()
angles = [np.pi / 2]
axes = ["x"]
    initial_state = np.array([1, 0])
    granulation = 8
    cut_off_time = 0.4e-9
    initial_pulses = np.ones(32) * 0.002  # np.random.uniform(0.001, 0.006, 32)
ideal_state, pulses = quantum_solvers.get_solver(solver_type="GRAPE",
algorithm_type="perturbation unitary",
penalty=True,
results_path="./",
initial_pulses=initial_pulses,
angles=angles,
axes=axes,
initial_state=initial_state,
cut_off_time=cut_off_time,
granulation=granulation)
bloch_plotter.plot(plot_type="pulses", pulses_final=pulses, pulses_init=initial_pulses)
bloch_plotter.plot(plot_type="evolution", pulses_final=pulses, init_state=initial_state, target_state=ideal_state)
    # Before plotting with the granulation option, pass the filtered signals; the plotting function will then calculate the effective pulses
# bloch_plotter.plot(plot_type="evolution", pulses_final=pulses, init_state=ideal_state, granulation=granulation, target_state=ideal_state)
del quantum_solvers
return
if __name__ == '__main__':
main()
|
from datetime import date
print('Age of majority!!!')
print('=-=' * 15)
maior = menor = 0
data = date.today().year
for c in range(1, 8):
    n = int(input(f'Year of birth of person {c}: '))
    if data - n >= 18:
        maior += 1
    else:
        menor += 1
print('=-=' * 15)
print(f'{maior} of the registered people are of legal age.')
print(f'{menor} of the registered people are minors.')
|
__author__ = "Mohammad Dabiri"
__copyright__ = "Free to use, copy and modify"
__credits__ = ["Mohammad Dabiri"]
__license__ = "MIT Licence"
__version__ = "0.0.1"
__maintainer__ = "Mohammad Dabiri"
__email__ = "moddabiri@yahoo.com"
def check_none(**kwargs):
for [argName, value] in kwargs.items():
if (value is None):
raise TypeError("Argument was None: Argument: " + str(argName))
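# Hedged usage sketch (illustration only):
#   check_none(user_id=42, name=None)   # raises TypeError naming the 'name' argument
#   check_none(user_id=42, name="Bob")  # passes silently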
|
from itertools import chain
def foo_yield(x):
i = 1
while i <= x:
yield range(0, i)
i += 1
'''
if you call foo_yield(4)
you will get a generator object
then you can call
for i in chain.from_iterable(foo_yield(4)):
print(i)
and you will get evaluated result below:
0
0
1
0
1
2
0
1
2
3
'''
'''
or you can call it without the chain() method:
for i in foo_yield_from(4):
    print(i)
instead, which implements the "yield from" expression for generator delegation,
available since Python 3.3.
'''
def foo_yield_from(x):
i = 1
while i <= x:
yield from range(0, i)
i += 1
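# Minimal demo (not in the original file) exercising both generators defined above;
# both loops print the same flattened sequence 0, 0, 1, 0, 1, 2, 0, 1, 2, 3.
if __name__ == '__main__':
    for i in chain.from_iterable(foo_yield(4)):
        print(i)
    for i in foo_yield_from(4):
        print(i)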
|
import torch
from torch import nn
import torch.nn.functional as F
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
model_urls = {
'mnist': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/mnist-b07bb66b.pth'
}
class MLP(nn.Module):
def __init__(self, input_dims, n_hiddens, n_class):
super(MLP, self).__init__()
assert isinstance(input_dims, int), 'Please provide int for input_dims'
self.input_dims = input_dims
current_dims = input_dims
layers = OrderedDict()
if isinstance(n_hiddens, int):
n_hiddens = [n_hiddens]
else:
n_hiddens = list(n_hiddens)
for i, n_hidden in enumerate(n_hiddens):
layers['fc{}'.format(i+1)] = nn.Linear(current_dims, n_hidden)
layers['relu{}'.format(i+1)] = nn.ReLU()
layers['drop{}'.format(i+1)] = nn.Dropout(0.2)
current_dims = n_hidden
layers['out'] = nn.Linear(current_dims, n_class)
self.model= nn.Sequential(layers)
print(self.model)
def forward(self, input):
input = input.view(input.size(0), -1)
assert input.size(1) == self.input_dims
return self.model.forward(input)
def load_pretrained_mlp(input_dims=784, n_hiddens=[256, 256], n_class=10, pretrained=None):
model = MLP(input_dims, n_hiddens, n_class)
if pretrained is not None:
m = model_zoo.load_url(model_urls['mnist'], map_location=torch.device('cpu'))
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
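# Hedged usage sketch (illustration only): pushes a random MNIST-shaped batch
# through both models defined above; no pretrained weights are downloaded.
if __name__ == '__main__':
    cnn = Net()
    mlp = load_pretrained_mlp(pretrained=None)
    x = torch.randn(4, 1, 28, 28)  # fake MNIST batch: N=4, 1 channel, 28x28
    print(cnn(x).shape)  # expected: torch.Size([4, 10])
    print(mlp(x).shape)  # expected: torch.Size([4, 10])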
|
from django.core.management.commands.compilemessages import Command as CompileMessagesCommand
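# Wrapper around Django's compilemessages command: it strips '--check-format'
# from the inherited msgfmt program options and re-adds it only when the
# command is invoked with the --check-format flag.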
class Command(CompileMessagesCommand):
program_options = [option for option in CompileMessagesCommand.program_options if option != '--check-format']
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
'--check-format', action='store_true', help='Enable message format checking',
)
def handle(self, **options):
if options['check_format']:
self.program_options = self.program_options + ['--check-format']
return super().handle(**options)
|
"""
Module for helper functions regarding images.
"""
import cv2
import numpy as np
def get_image(file_path):
"""
Reads image and returns array of pixels.
"""
return cv2.imread(file_path)
def get_grayscale_image(image):
"""
Reads image and returns grayscale array of pixels.
"""
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def resize_image(image, width, height):
"""
Resizes image to specified width and height.
"""
return cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
def get_rgb_from_grayscale(image):
"""
Converts grayscale image back to RGB.
"""
return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
def get_image_size(image):
"""
Returns width and height of image
"""
width = int(image.shape[1])
height = int(image.shape[0])
return width, height
def show_image(image, window_title="Title"):
"""
    Displays image in a window with the given title.
"""
cv2.imshow(window_title, image)
def await_key():
"""
Waits for key input.
"""
cv2.waitKey()
def quit():
"""
Destroys all opened windows.
"""
cv2.destroyAllWindows()
def save_image(image_path, image):
"""
    Saves image to the passed path.
"""
cv2.imwrite(image_path, image)
def camera_input():
"""
Example of how to get computers camera frames.
"""
video = cv2.VideoCapture(0)
while True:
ret, frame = video.read()
if ret:
show_image(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
# save_image(RELATIVE_PATH + "jaz.png", frame)
break
video.release()
quit()
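# Hedged usage sketch (illustration only): load -> grayscale -> downscale -> show,
# using only the helpers defined above. The file path below is hypothetical.
if __name__ == "__main__":
    img = get_image("example.png")
    if img is not None:
        gray = get_grayscale_image(img)
        width, height = get_image_size(gray)
        small = resize_image(gray, width // 2, height // 2)
        show_image(small, "resized grayscale")
        await_key()
        quit()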
|
# Python Substrate Interface Library
#
# Copyright 2018-2020 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest.mock import MagicMock
from scalecodec.type_registry import load_type_registry_file
from substrateinterface.exceptions import SubstrateRequestException
from scalecodec.base import ScaleBytes
from substrateinterface import SubstrateInterface
class TestHelperFunctions(unittest.TestCase):
test_metadata_version = 'V13'
@classmethod
def setUpClass(cls):
cls.substrate = SubstrateInterface(url='dummy', ss58_format=42, type_registry_preset='kusama')
cls.metadata_fixture_dict = load_type_registry_file(
os.path.join(os.path.dirname(__file__), 'fixtures', 'metadata_hex.json')
)
metadata_decoder = cls.substrate.runtime_config.create_scale_object('MetadataVersioned')
metadata_decoder.decode(ScaleBytes(cls.metadata_fixture_dict[cls.test_metadata_version]))
cls.substrate.get_block_metadata = MagicMock(return_value=metadata_decoder)
def mocked_request(method, params):
if method == 'chain_getRuntimeVersion':
return {
"jsonrpc": "2.0",
"result": {"specVersion": 2023},
"id": 1
}
elif method == 'chain_getHeader':
return {
"jsonrpc": "2.0",
"result": {
"digest": {
"logs": [
]
},
"extrinsicsRoot": "0xa94148d938c7b7976abf4272dca95724d7a74da2f3649ec0bd53dc3daaedda44",
"number": "0x4abaaa",
"parentHash": "0xe1781813275653a970b4260298b3858b36d38e072256dad674f7c786a0cae236",
"stateRoot": "0xb6aa468385c82d15b343a676b3488d9f141ac100fc548bb8a546f27a7241c44a"
},
"id": 1
}
elif method == 'chain_getHead':
return {
"jsonrpc": "2.0",
"result": "0xe1781813275653a970b4260298b3858b36d38e072256dad674f7c786a0cae236",
"id": 1
}
raise NotImplementedError(method)
cls.substrate.rpc_request = MagicMock(side_effect=mocked_request)
cls.empty_substrate = SubstrateInterface(url='dummy', ss58_format=42, type_registry_preset='kusama')
def mocked_request(method, params):
return {'jsonrpc': '2.0', 'result': None, 'id': 1}
cls.empty_substrate.rpc_request = MagicMock(side_effect=mocked_request)
cls.error_substrate = SubstrateInterface(url='wss://kusama-rpc.polkadot.io', ss58_format=2, type_registry_preset='kusama')
# def mocked_request(method, params):
# return {'jsonrpc': '2.0', 'error': {
# 'code': -32602, 'message': 'Generic error message'
# }, 'id': 1}
#
# cls.error_substrate.rpc_request = MagicMock(side_effect=mocked_request)
def test_decode_scale(self):
self.assertEqual(self.substrate.decode_scale('Compact<u32>', '0x08'), 2)
def test_encode_scale(self):
self.assertEqual(self.substrate.encode_scale('Compact<u32>', 3), ScaleBytes('0x0c'))
def test_create_scale_object(self):
scale_obj = self.substrate.create_scale_object("Bytes")
self.assertEqual(scale_obj.encode("Test"), ScaleBytes("0x1054657374"))
self.assertEqual(scale_obj.decode(ScaleBytes("0x1054657374")), "Test")
def test_get_type_definition(self):
self.assertDictEqual(self.substrate.get_type_definition('Bytes'), {
'decoder_class': 'Bytes',
'is_primitive_core': False,
'is_primitive_runtime': True,
'spec_version': 2023,
'type_string': 'Bytes'}
)
def test_get_metadata_modules(self):
for module in self.substrate.get_metadata_modules():
self.assertIn('module_id', module)
self.assertIn('name', module)
self.assertEqual(module['spec_version'], 2023)
def test_get_metadata_call_function(self):
call_function = self.substrate.get_metadata_call_function("Balances", "transfer")
self.assertEqual("transfer", call_function.name)
self.assertEqual('dest', call_function.args[0].name)
self.assertEqual('value', call_function.args[1].name)
def test_get_metadata_call_functions(self):
call_functions = self.substrate.get_metadata_call_functions()
self.assertGreater(len(call_functions), 0)
def test_get_metadata_event(self):
event = self.substrate.get_metadata_event("Balances", "Transfer")
self.assertEqual("Transfer", event.name)
self.assertEqual('AccountId', event.args[0].type)
self.assertEqual('AccountId', event.args[1].type)
self.assertEqual('Balance', event.args[2].type)
def test_get_metadata_constant(self):
constant = self.substrate.get_metadata_constant("System", "BlockHashCount")
self.assertEqual("BlockHashCount", constant.name)
self.assertEqual("BlockNumber", constant.type)
self.assertEqual("0x60090000", f"0x{constant.constant_value.hex()}")
def test_get_metadata_constants(self):
constants = self.substrate.get_metadata_constants()
self.assertGreater(len(constants), 0)
def test_get_constant(self):
constant = self.substrate.get_constant("System", "BlockHashCount")
self.assertEqual(2400, constant.value)
constant = self.substrate.get_constant("Balances", "ExistentialDeposit")
self.assertEqual(100000000000000, constant.value)
# Also test cache method doesn't mix up results
constant = self.substrate.get_constant("System", "BlockHashCount")
self.assertEqual(2400, constant.value)
def test_get_metadata_storage_function(self):
storage = self.substrate.get_metadata_storage_function("System", "Account")
self.assertEqual("Account", storage.name)
self.assertEqual("AccountId", storage.get_params_type_string()[0])
self.assertEqual("Blake2_128Concat", storage.type['Map']['hasher'])
def test_get_metadata_storage_functions(self):
storages = self.substrate.get_metadata_storage_functions()
self.assertGreater(len(storages), 0)
def test_get_metadata_error(self):
error = self.substrate.get_metadata_error("System", "InvalidSpecName")
self.assertEqual("InvalidSpecName", error.name)
self.assertIsNotNone(error.docs)
def test_get_metadata_errors(self):
errors = self.substrate.get_metadata_errors()
self.assertGreater(len(errors), 0)
def test_helper_functions_should_return_null_not_exists(self):
self.assertIsNone(self.empty_substrate.get_block_number(
block_hash="0x6666666666666666666666666666666666666666666666666666666666666666"
))
self.assertIsNone(self.empty_substrate.get_block_hash(block_id=99999999999999999))
self.assertIsNone(self.empty_substrate.get_block_header(block_hash='0x'))
self.assertIsNone(self.empty_substrate.get_block_metadata(block_hash='0x')['result'])
self.assertIsNone(self.empty_substrate.get_block_runtime_version(block_hash='0x'))
def test_helper_functions_invalid_input(self):
self.assertRaises(SubstrateRequestException, self.error_substrate.get_block_number, "0x6666666666666666")
self.assertRaises(SubstrateRequestException, self.error_substrate.get_block_hash, -1)
self.assertRaises(SubstrateRequestException, self.error_substrate.get_block_header, '0x')
self.assertRaises(SubstrateRequestException, self.error_substrate.get_block_metadata, '0x')
self.assertRaises(SubstrateRequestException, self.error_substrate.get_block_runtime_version, '0x')
self.assertRaises(ValueError, self.error_substrate.query, 'System', 'Account', ['0x'])
self.assertRaises(SubstrateRequestException, self.error_substrate.get_runtime_metadata, '0x')
def test_storage_function_param_info(self):
storage_function = self.substrate.get_metadata_storage_function("System", "Account")
with self.assertRaises(NotImplementedError):
storage_function.get_param_info()
class TestHelperFunctionsV14(TestHelperFunctions):
test_metadata_version = 'V14'
def test_get_metadata_constant(self):
constant = self.substrate.get_metadata_constant("System", "BlockHashCount")
self.assertEqual("BlockHashCount", constant.name)
self.assertEqual("scale_info::4", constant.type)
self.assertEqual("0x60090000", f"0x{constant.constant_value.hex()}")
def test_get_metadata_storage_function(self):
storage = self.substrate.get_metadata_storage_function("System", "Account")
self.assertEqual("Account", storage.name)
self.assertEqual("scale_info::0", storage.get_params_type_string()[0])
self.assertEqual("Blake2_128Concat", storage.type['Map']['hashers'][0])
def test_get_metadata_event(self):
event = self.substrate.get_metadata_event("Balances", "Transfer")
self.assertEqual("Transfer", event.name)
self.assertEqual('scale_info::0', event.args[0].type)
self.assertEqual('scale_info::0', event.args[1].type)
self.assertEqual('scale_info::6', event.args[2].type)
def test_storage_function_param_info(self):
storage_function = self.substrate.get_metadata_storage_function("System", "Account")
info = storage_function.get_param_info()
self.assertEqual(1, len(info))
class TestHelperFunctionsKarura(TestHelperFunctionsV14):
test_metadata_version = 'karura_test'
def test_storage_function_param_info(self):
storage_function = self.substrate.get_metadata_storage_function("Tokens", "TotalIssuance")
info = storage_function.get_param_info()
self.assertEqual(1, len(info))
self.assertEqual('Token', info[0]['variant']['variants'][0]['name'])
self.assertEqual('ACA', info[0]['variant']['variants'][0]['value']['variant']['variants'][0]['name'])
storage_function = self.substrate.get_metadata_storage_function("Rewards", "PoolInfos")
info = storage_function.get_param_info()
self.assertEqual(1, len(info))
self.assertEqual('Loans', info[0]['variant']['variants'][0]['name'])
self.assertEqual('Token', info[0]['variant']['variants'][0]['value']['variant']['variants'][0]['name'])
self.assertEqual('ACA', info[0]['variant']['variants'][0]['value']['variant']['variants'][0]['value']
['variant']['variants'][0]['name'])
storage_function = self.substrate.get_metadata_storage_function("Dex", "TradingPairStatuses")
info = storage_function.get_param_info()
self.assertEqual(1, len(info))
self.assertEqual('Token', info[0]['composite']['fields'][0]['value']['variant']['variants'][0]['name'])
def test_get_type_definition(self):
# TODO refactor get_type_definition
pass
def test_get_metadata_constant(self):
constant = self.substrate.get_metadata_constant("System", "BlockHashCount")
self.assertEqual("BlockHashCount", constant.name)
self.assertEqual("scale_info::4", constant.type)
self.assertEqual("0xb0040000", f"0x{constant.constant_value.hex()}")
def test_get_constant(self):
constant = self.substrate.get_constant("System", "BlockHashCount")
self.assertEqual(1200, constant.value)
constant = self.substrate.get_constant("Balances", "ExistentialDeposit")
self.assertEqual(100000000000, constant.value)
# Also test cache method doesn't mix up results
constant = self.substrate.get_constant("System", "BlockHashCount")
self.assertEqual(1200, constant.value)
if __name__ == '__main__':
unittest.main()
|
from django import forms
from adminarea.models import AppIntegration, APP_INTEGRATIONS
class AppIntegrationForm(forms.ModelForm):
class Meta:
fields = ("platform", "display_name")
model = AppIntegration
|
import asyncio
from pyrocketjoe.celery import Celery
from pyrocketjoe.celery.apps import Worker
app = Celery()
worker = Worker(app)
@app.task()
def hello_world(a: int, b: int) -> int:
print('Hello, World!')
return a + b
result = hello_world.apply_async(40, 2)
async def main() -> None:
await worker
asyncio.run(main())
print(result.get())
|
#!/usr/bin/env python
from __future__ import print_function
import zmq
import sys
import json
def save(filename, contents):
with open(filename + '_versions.json', 'a+') as f:
# Read the old versions.
f.seek(0)
try:
versions = json.load(f)
except ValueError:
versions = []
# Clear the file.
f.seek(0)
f.truncate()
# Add the new version to the file.
versions.append(contents)
json.dump(versions, f)
def patch(filename):
ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('tcp://127.0.0.1:5555')
with open(filename) as f:
string = f.read()
save(filename, string)
sock.send(string.encode('utf-8'))
print(sock.recv().decode('utf-8'))
if __name__ == '__main__':
if len(sys.argv) > 1:
patch(sys.argv[1])
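# Hedged usage sketch (illustration only): assumes a patch server is listening
# on tcp://127.0.0.1:5555 (REQ/REP), e.g.:
#   python patch.py my_module.py
# Each run also appends the file's current contents to my_module.py_versions.json.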
|
# Generated by Django 2.2.16 on 2022-04-24 07:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sales_backend', '0003_auto_20220423_1218'),
]
operations = [
migrations.CreateModel(
name='RatingStar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.SmallIntegerField(default=0, verbose_name='Значение')),
],
options={
'verbose_name': 'Звезда рейтинга',
'verbose_name_plural': 'Звезды рейтинга',
'ordering': ['-value'],
},
),
migrations.AlterField(
model_name='comment',
name='product',
field=models.ForeignKey(help_text='Комментарий поста', on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='sales_backend.Product', verbose_name='Комментарий'),
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=15, verbose_name='IP адрес')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales_backend.Product', verbose_name='продукт')),
('star', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales_backend.RatingStar', verbose_name='звезда')),
],
options={
'verbose_name': 'Рейтинг',
'verbose_name_plural': 'Рейтинги',
},
),
]
|
import _dk_core as core
LinearTransform2 = core.LinearTransform2
AffineTransform2 = core.AffineTransform2
LinearTransform3 = core.LinearTransform3
AffineTransform3 = core.AffineTransform3
TransformUnit = core.TransformUnit
USTransform = core.USTransform
NSTransform = core.NSTransform
|
# coding: utf-8
try:
import os
import sqlite3
except ImportError:
pass
from modules.windows.chromium.chromium_module import ChromiumModule
from internal import data_type
from api.windows import format
class WindowsChromiumDownload(ChromiumModule):
def __init__(self):
ChromiumModule.__init__(
self,
name='WindowsChromiumDownload',
version='0.1.2',
file=__file__,
dependencies=['os', 'sqlite3'],
)
def execute(self) -> bool:
if not super().execute():
return False
for profile in self.get_profiles():
self.cursor_getV2(
path=profile + '/History',
items=[
[data_type.File, ('target_path', 'total_bytes')],
[data_type.Link, 'tab_url'],
[data_type.Time, 'start_time', format.chrome_time],
],
db='downloads',
spe=os.path.split(profile)[1],
)
return True
|
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import bpy
import bmesh
import os
import bpy_extras
import bpy_extras.io_utils
import mathutils
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
path_reference_mode,
)
def f2str(value):
return "%.5f"%value
def writeVector3d(file,vector):
file.write("[");
file.write(f2str(vector.x))
file.write(",")
file.write(f2str(vector.y))
file.write(",")
file.write(f2str(vector.z))
file.write("]")
def writeVector2d(file,vector):
file.write("[");
file.write(f2str(vector.x))
file.write(",")
file.write(f2str(vector.y))
file.write("]")
def writeColor(file,color):
file.write("[");
file.write(f2str(color.r))
file.write(",")
file.write(f2str(color.g))
file.write(",")
file.write(f2str(color.b))
file.write("]")
def sameVectors(na,nb):
coeff = na*nb/(na.length*nb.length)
return coeff > 0.999
# keyVertexList 3 indices in P only list
# uniqueVertexList - uniqueVertexList in PUNT list (unique combination)
def add2AdjancedDictionary(keyVertexList,uniqueVertexList,adjancedDict):
adjKey = (keyVertexList[0],keyVertexList[1])
if adjKey not in adjancedDict:
adjancedDict[adjKey] = uniqueVertexList[2]
adjKey = (keyVertexList[1],keyVertexList[2])
if adjKey not in adjancedDict:
adjancedDict[adjKey] = uniqueVertexList[0]
adjKey = (keyVertexList[2],keyVertexList[0])
if adjKey not in adjancedDict:
adjancedDict[adjKey] = uniqueVertexList[1]
# uniqueFaceVertexList - face "unique vertex" indices
# adjancedDict - dictionary keyed by "position only" vertex indices
# uniqe2ponly - dictionary mapping unique index to position-only index
def getAindAdjancedVertices(uniqueFaceVertexList,adjancedDict,uniqe2ponly):
ponly = []
result = []
for unique in uniqueFaceVertexList:
ponly.append(uniqe2ponly[unique])
adjKey = (ponly[0],ponly[2]) # adjanced traingles have reversed order of indices
if adjKey not in adjancedDict:
result.append(None)
else:
result.append(adjancedDict[adjKey])
adjKey = (ponly[2],ponly[1]) # adjanced traingles have reversed order of indices
if adjKey not in adjancedDict:
result.append(None)
else:
result.append(adjancedDict[adjKey])
adjKey = (ponly[1],ponly[0]) # adjanced traingles have reversed order of indices
if adjKey not in adjancedDict:
result.append(None)
else:
result.append(adjancedDict[adjKey])
return result
class VertexData:
def __init__(self):
self.Position = None
self.Tangents = []
self.Normals = []
self.UVs = []
def addToList(value,list2Mod):
if value in list2Mod:
return list2Mod.index(value)
else:
list2Mod.append(value)
return list2Mod.index(value)
def addTangent(self,value):
return VertexData.addToList(value,self.Tangents)
def addNormal(self,value):
return VertexData.addToList(value,self.Normals)
def addUV(self,value):
return VertexData.addToList(value,self.UVs)
    def formatFloat(self,value):
        return "%.5f" % value
def writeUniqueVertex(self,file,uvIndex,normalIndex,tangentIndex):
file.write("VERTEX_PUNT: ")
writeVector3d(file,self.Position)
writeVector2d(file,self.UVs[uvIndex])
writeVector3d(file,self.Normals[normalIndex])
writeVector3d(file,self.Tangents[tangentIndex])
file.write("\n")
file.write("VERTEX_AVG_N: ")
writeVector3d(file,self.AvgN)
file.write("\n")
def calcAvgVal(self):
self.AvgN = mathutils.Vector()
uniqueNormals = []
for nN in self.Normals:
found = False
for uN in uniqueNormals:
if sameVectors(nN,uN):
found = True
break
if not found:
uniqueNormals.append(nN)
for cN in uniqueNormals:
self.AvgN += cN
self.AvgN /= self.AvgN.length
def writeAIN(file,srcdir,dstdir):
file.write("AIN_FILE: V0.0-0\n")
file.write("# lines beginning with # are comments\n")
file.write("# empty lines are ignored\n")
file.write("# all other begin with line type id token(word) followed by : and space\n")
file.write("# all tokens with _NAME ending mark begin of new context - new image, mesh or texture\n")
file.write("# list of context starting tokens\n")
file.write("# MATER_NAME - material id (referenced in meshes)\n")
file.write("# MESH_NAME - mesh name\n")
file.write("# list of normal data fields\n")
file.write("# IMG_COUNT - number of image paths defined in file\n")
file.write("# MATER_COUNT - number of materials defined in file\n")
file.write("# MESH_COUNT - number of meshes defined in file\n")
file.write("# _COUNT lines always appear before corresponding data blocks\n")
file.write("# IMG_PATH - path to image source\n")
file.write("# MATER_AMBIENT MATER_DIFFUSE_COLOR MATER_DIFFUSE_INTENSITY MATER_SPECULAR_COLOR MATER_SPECULAR_INTENSITY MATER_EMIT - generic material parameters\n")
file.write("# MATER_TEX_AMBIENT - ambient texture image - index of image (index base 0 ) from IMAGES section\n")
file.write("# MATER_TEX_DIFFUSE - diffuse texture image\n")
file.write("# MATER_TEX_SPECULAR_COL - speculat color texture image\n")
file.write("# MATER_TEX_NORMAL - normal texture image\n")
file.write("# MATER_TEX_EMISSION - emission texture\n")
file.write("# MESH_VERTEX_COUNT - number of vertices in current mesh (for easier lodaing)\n")
file.write("# MESH_FACE_COUNT - number of faces in current mesh\n")
file.write("# MESH_MATER_COUNT - number of materials used in current mesh\n")
file.write("# MESH_MATER - material index\n")
file.write("# VERTEX_PUNT - vertex definition in form [position][uv][normal][tangent]\n")
file.write("# VERTEX_AVG_N - additional averaged normal for generation of shadow volume\n")
file.write("# FACE3 - triangular face definioniton in format [index of v0, index of v1, index of v2]\n")
file.write("# ADJANCED3 - indices of vertexes 'adjanced' to face - if 'N' if there is no adjanced vertex\n")
file.write("#====================== IMAGES =====================\n")
img2index = {}
count=0
for img in bpy.data.images:
if img.filepath != "":
count = count + 1
file.write("IMG_COUNT: ")
file.write(str(count))
file.write("\n")
count = 0
for img in bpy.data.images:
if img.filepath != "":
file.write("IMG_PATH: ")
filepath = bpy_extras.io_utils.path_reference(img.filepath, srcdir, dstdir, mode='ABSOLUTE', copy_subdir='', copy_set=None, library=None)
file.write(filepath)
file.write("\n")
img2index[img.name]=count
count=count + 1
file.write("#====================== MATERIALS =====================\n")
file.write("MATER_COUNT: ")
file.write(str(len(bpy.data.materials)))
file.write("\n")
mater2index = {}
count = 0
for mater in bpy.data.materials:
mater2index[mater.name] = count
count = count + 1
file.write("MATER_NAME: ")
file.write(mater.name)
file.write("\n")
file.write("MATER_AMBIENT: ")
file.write(f2str(mater.ambient))
file.write("\n")
file.write("MATER_DIFFUSE_COLOR: ")
writeColor(file,mater.diffuse_color)
file.write("\n")
file.write("MATER_DIFFUSE_INTENSITY: ")
file.write(f2str(mater.diffuse_intensity))
file.write("\n")
file.write("MATER_SPECULAR_COLOR: ")
writeColor(file,mater.specular_color)
file.write("\n")
file.write("MATER_SPECULAR_INTENSITY: ")
file.write(f2str(mater.specular_intensity))
file.write("\n")
file.write("MATER_EMIT: ")
file.write(f2str(mater.emit))
file.write("\n")
for texslot in mater.texture_slots:
if texslot != None:
texture = bpy.data.textures[texslot.name]
if hasattr(texture,'image'):
if texture.image.name in img2index:
imgIdx = str(img2index[texture.image.name])
if texslot.use_map_ambient:
file.write("MATER_TEX_AMBIENT: ")
file.write(imgIdx)
file.write("\n")
if texslot.use_map_emit:
file.write("MATER_TEX_EMISSION: ")
file.write(imgIdx)
file.write("\n")
elif texslot.use_map_color_diffuse: # blender requires that "emission" texture influences both color and emission
file.write("MATER_TEX_DIFFUSE: ")
file.write(imgIdx)
file.write("\n")
if texslot.use_map_color_spec:
file.write("MATER_TEX_SPECULAR_COL: ")
file.write(imgIdx)
file.write("\n")
if texslot.use_map_normal:
file.write("MATER_TEX_NORMAL: ")
file.write(imgIdx)
file.write("\n")
file.write("#====================== MESHES =====================\n")
# "selected only"
#me = bpy.context.active_object.to_mesh(bpy.context.scene,apply_modifiers=True,settings="RENDER")
#me.name = bpy.context.active_object.name
#bm = bmesh.new()
#bm.from_mesh(me)
#bmesh.ops.triangulate(bm,faces=bm.faces)
#bm.to_mesh(me)
#bm.free()
# collect all meshes in project
meshes = []
for obj in bpy.data.objects:
try:
me = obj.to_mesh(bpy.context.scene,apply_modifiers=True,settings="RENDER")
except RuntimeError:
continue # no mesh data in this object
if len(me.uv_layers) == 0:
print("Mesh ",me.name," has no UV coordinates")
continue
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.triangulate(bm,faces=bm.faces)
bm.to_mesh(me)
bm.free()
me.name = obj.name
meshes.append(me)
file.write("MESH_COUNT: ")
file.write(str(len(meshes)))
file.write("\n");
for me in meshes:
uvlist = me.uv_layers[0].data[:] # copy data to separate table - for some reason orginal table will be overwritten with normals?
me.calc_tangents()
vertices = {}
faces = {} # key is material ID
adjancedDict = {} # key id tuple of vertex indices (3 entries from every triangle)
unique2ponly = {}
unique_vertices = []
for face in me.polygons:
face_indices = []
ponly_vertices = []
for loopIdx in face.loop_indices:
vert = me.loops[loopIdx]
uv = uvlist[loopIdx].uv
if vert.vertex_index in vertices:
vdata = vertices[vert.vertex_index]
else:
vdata = VertexData()
vdata.Position = me.vertices[vert.vertex_index].co
vertices[vert.vertex_index] = vdata
ti = vdata.addTangent(vert.tangent)
ni = vdata.addNormal(vert.normal)
uvi = vdata.addUV(uv)
unique_vi = (vert.vertex_index,uvi,ni,ti)
if unique_vi not in unique_vertices:
unique_vertices.append(unique_vi)
face_indices.append(unique_vertices.index(unique_vi))
unique2ponly[unique_vertices.index(unique_vi)] = vert.vertex_index
ponly_vertices.append(vert.vertex_index)
material_name=me.materials[face.material_index].name
global_material_index=mater2index[material_name]
if global_material_index not in faces:
faces[global_material_index] = []
faces[global_material_index].append(face_indices)
add2AdjancedDictionary(ponly_vertices,face_indices,adjancedDict)
for vert in vertices.values():
vert.calcAvgVal()
# save data
file.write("MESH_NAME: ")
file.write(me.name)
file.write("\n")
file.write("MESH_VERTEX_COUNT: ")
file.write(str(len(unique_vertices)))
file.write("\n")
file.write("MESH_FACE_COUNT: ")
file.write(str(len(faces)))
file.write("\n")
file.write("MESH_MATER_COUNT: ")
file.write(str(len(me.materials)))
file.write("\n")
for uvi in unique_vertices:
vdata = vertices[uvi[0]]
vdata.writeUniqueVertex(file,uvi[1],uvi[2],uvi[3])
for material_id in faces.keys():
file.write("MESH_MATER: ")
file.write(str(material_id))
file.write("\n")
for face in faces[material_id]:
file.write("FACE3: ")
file.write("[")
file.write(str(face[0]))
file.write(",")
file.write(str(face[1]))
file.write(",")
file.write(str(face[2]))
file.write("]\n")
file.write("ADJANCED3: ")
adjVert = getAindAdjancedVertices(face,adjancedDict,unique2ponly)
file.write("[")
if adjVert[0] == None:
file.write("N")
else:
file.write(str(adjVert[0]))
file.write(",")
if adjVert[1] == None:
file.write("N")
else:
file.write(str(adjVert[1]))
file.write(",")
if adjVert[2] == None:
file.write("N")
else:
file.write(str(adjVert[2]))
file.write("]\n")
# debug
#for face in faces[material_id]:
# file.write("FACE3PO: ")
# for uvi in face:
# writeVector3d(file,vertices[unique_vertices[uvi][0]].Position)
# file.write("\n")
# file.write("ADJANCEDPO: ")
# adjVert = getAindAdjancedVertices(face,adjancedDict,unique2ponly)
# for uvi in adjVert:
# if uvi == None:
# file.write("[-,-,-]")
# else:
# writeVector3d(file,vertices[unique_vertices[uvi][0]].Position)
# file.write("\n")
file.write("\n")
# remove temporary object
bpy.data.meshes.remove(me)
bl_info = {
"name": "AllINeed AIN format",
"author": "Grzegorz Domagala",
"version": (1, 0, 0),
"blender": (2, 77, 0),
"location": "File > Import-Export",
"description": "Export AIN",
"warning": "",
"wiki_url": "",
"support": 'OFFICIAL',
"category": "Import-Export"}
class ExportAIN(bpy.types.Operator, ExportHelper):
""" Save AIN file """
bl_idname = "export_scene.ain"
bl_label = "Export AIN"
bl_options = {'PRESET'}
filename_ext = ".ain"
filer_glob = StringProperty(
default="*.ain",
options={'HIDDEN'},
)
path_mode = path_reference_mode
check_extension = True
def execute(self,context):
keywords = self.as_keywords()
file = open(keywords["filepath"],"w")
srcdir = os.path.dirname(bpy.data.filepath)
dstdir = os.path.dirname(keywords["filepath"])
writeAIN(file,srcdir,dstdir)
file.close()
return {'FINISHED'}
def menu_func_export_ain(self,context):
self.layout.operator(ExportAIN.bl_idname, text = "All I Need (.ain)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export_ain)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export_ain)
if __name__ == "__main__":
register()
|
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import bpy
import bmesh
import os
import bpy_extras
import bpy_extras.io_utils
import mathutils
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
path_reference_mode,
)
def f2str(value):
return "%.5f"%value
def writeVector3d(file,vector):
file.write("[");
file.write(f2str(vector.x))
file.write(",")
file.write(f2str(vector.y))
file.write(",")
file.write(f2str(vector.z))
file.write("]")
def writeVector2d(file,vector):
file.write("[");
file.write(f2str(vector.x))
file.write(",")
file.write(f2str(vector.y))
file.write("]")
def writeColor(file,color):
file.write("[");
file.write(f2str(color.r))
file.write(",")
file.write(f2str(color.g))
file.write(",")
file.write(f2str(color.b))
file.write("]")
def sameVectors(na,nb):
coeff = na*nb/(na.length*nb.length)
return coeff > 0.999
# keyVertexList 3 indices in P only list
# uniqueVertexList - uniqueVertexList in PUNT list (unique combination)
def add2AdjancedDictionary(keyVertexList,uniqueVertexList,adjancedDict):
adjKey = (keyVertexList[0],keyVertexList[1])
if adjKey not in adjancedDict:
adjancedDict[adjKey] = uniqueVertexList[2]
adjKey = (keyVertexList[1],keyVertexList[2])
if adjKey not in adjancedDict:
adjancedDict[adjKey] = uniqueVertexList[0]
adjKey = (keyVertexList[2],keyVertexList[0])
if adjKey not in adjancedDict:
adjancedDict[adjKey] = uniqueVertexList[1]
# uniqueFaceVertexList - face "unique vertex" indices
# adjancedDict - dictionary keyed by "position only" vertex indices
# uniqe2ponly - dictionary mapping unique index to position-only index
def getAindAdjancedVertices(uniqueFaceVertexList,adjancedDict,uniqe2ponly):
ponly = []
result = []
for unique in uniqueFaceVertexList:
ponly.append(uniqe2ponly[unique])
adjKey = (ponly[0],ponly[2]) # adjanced traingles have reversed order of indices
if adjKey not in adjancedDict:
result.append(None)
else:
result.append(adjancedDict[adjKey])
adjKey = (ponly[2],ponly[1]) # adjanced traingles have reversed order of indices
if adjKey not in adjancedDict:
result.append(None)
else:
result.append(adjancedDict[adjKey])
adjKey = (ponly[1],ponly[0]) # adjanced traingles have reversed order of indices
if adjKey not in adjancedDict:
result.append(None)
else:
result.append(adjancedDict[adjKey])
return result
class VertexData:
def __init__(self):
self.Position = None
self.Tangents = []
self.Normals = []
self.UVs = []
def addToList(value,list2Mod):
if value in list2Mod:
return list2Mod.index(value)
else:
list2Mod.append(value)
return list2Mod.index(value)
def addTangent(self,value):
return VertexData.addToList(value,self.Tangents)
def addNormal(self,value):
return VertexData.addToList(value,self.Normals)
def addUV(self,value):
return VertexData.addToList(value,self.UVs)
    def formatFloat(self,value):
        return "%.5f" % value
def writeUniqueVertex(self,file,uvIndex,normalIndex,tangentIndex):
file.write("VERTEX_PUNT: ")
writeVector3d(file,self.Position)
writeVector2d(file,self.UVs[uvIndex])
writeVector3d(file,self.Normals[normalIndex])
writeVector3d(file,self.Tangents[tangentIndex])
file.write("\n")
file.write("VERTEX_AVG_N: ")
writeVector3d(file,self.AvgN)
file.write("\n")
def calcAvgVal(self):
self.AvgN = mathutils.Vector()
uniqueNormals = []
for nN in self.Normals:
found = False
for uN in uniqueNormals:
if sameVectors(nN,uN):
found = True
break
if not found:
uniqueNormals.append(nN)
for cN in uniqueNormals:
self.AvgN += cN
self.AvgN /= self.AvgN.length
def writeAIN(file,srcdir,dstdir):
file.write("AIN_FILE: V0.0-0\n")
file.write("# lines beginning with # are comments\n")
file.write("# empty lines are ignored\n")
file.write("# all other begin with line type id token(word) followed by : and space\n")
file.write("# all tokens with _NAME ending mark begin of new context - new image, mesh or texture\n")
file.write("# list of context starting tokens\n")
file.write("# MATER_NAME - material id (referenced in meshes)\n")
file.write("# MESH_NAME - mesh name\n")
file.write("# list of normal data fields\n")
file.write("# IMG_COUNT - number of image paths defined in file\n")
file.write("# MATER_COUNT - number of materials defined in file\n")
file.write("# MESH_COUNT - number of meshes defined in file\n")
file.write("# _COUNT lines always appear before corresponding data blocks\n")
file.write("# IMG_PATH - path to image source\n")
file.write("# MATER_AMBIENT MATER_DIFFUSE_COLOR MATER_DIFFUSE_INTENSITY MATER_SPECULAR_COLOR MATER_SPECULAR_INTENSITY MATER_EMIT - generic material parameters\n")
file.write("# MATER_TEX_AMBIENT - ambient texture image - index of image (index base 0 ) from IMAGES section\n")
file.write("# MATER_TEX_DIFFUSE - diffuse texture image\n")
file.write("# MATER_TEX_SPECULAR_COL - speculat color texture image\n")
file.write("# MATER_TEX_NORMAL - normal texture image\n")
file.write("# MESH_VERTEX_COUNT - number of vertices in current mesh (for easier lodaing)\n")
file.write("# MESH_FACE_COUNT - number of faces in current mesh\n")
file.write("# MESH_MATER_COUNT - number of materials used in current mesh\n")
file.write("# MESH_MATER - material index\n")
file.write("# VERTEX_PUNT - vertex definition in form [position][uv][normal][tangent]\n")
file.write("# VERTEX_AVG_N - additional averaged normal for generation of shadow volume\n")
file.write("# FACE3 - triangular face definioniton in format [index of v0, index of v1, index of v2]\n")
file.write("# ADJANCED3 - indices of vertexes 'adjanced' to face - if 'N' if there is no adjanced vertex\n")
file.write("#====================== IMAGES =====================\n")
img2index = {}
count=0
for img in bpy.data.images:
if img.filepath != "":
count = count + 1
file.write("IMG_COUNT: ")
file.write(str(count))
file.write("\n")
count = 0
for img in bpy.data.images:
if img.filepath != "":
file.write("IMG_PATH: ")
filepath = bpy_extras.io_utils.path_reference(img.filepath, srcdir, dstdir, mode='ABSOLUTE', copy_subdir='', copy_set=None, library=None)
file.write(filepath)
file.write("\n")
img2index[img.name]=count
count=count + 1
file.write("#====================== MATERIALS =====================\n")
file.write("MATER_COUNT: ")
file.write(str(len(bpy.data.materials)))
file.write("\n")
mater2index = {}
count = 0
for mater in bpy.data.materials:
mater2index[mater.name] = count
count = count + 1
file.write("MATER_NAME: ")
file.write(mater.name)
file.write("\n")
file.write("MATER_AMBIENT: ")
file.write(f2str(mater.ambient))
file.write("\n")
file.write("MATER_DIFFUSE_COLOR: ")
writeColor(file,mater.diffuse_color)
file.write("\n")
file.write("MATER_DIFFUSE_INTENSITY: ")
file.write(f2str(mater.diffuse_intensity))
file.write("\n")
file.write("MATER_SPECULAR_COLOR: ")
writeColor(file,mater.specular_color)
file.write("\n")
file.write("MATER_SPECULAR_INTENSITY: ")
file.write(f2str(mater.specular_intensity))
file.write("\n")
file.write("MATER_EMIT: ")
file.write(f2str(mater.emit))
file.write("\n")
for texslot in mater.texture_slots:
if texslot != None:
texture = bpy.data.textures[texslot.name]
if hasattr(texture,'image'):
if texture.image.name in img2index:
imgIdx = str(img2index[texture.image.name])
if texslot.use_map_ambient:
file.write("MATER_TEX_AMBIENT: ")
file.write(imgIdx)
file.write("\n")
if texslot.use_map_color_diffuse:
file.write("MATER_TEX_DIFFUSE: ")
file.write(imgIdx)
file.write("\n")
if texslot.use_map_color_spec:
file.write("MATER_TEX_SPECULAR_COL: ")
file.write(imgIdx)
file.write("\n")
if texslot.use_map_normal:
file.write("MATER_TEX_NORMAL: ")
file.write(imgIdx)
file.write("\n")
file.write("#====================== MESHES =====================\n")
# "selected only"
#me = bpy.context.active_object.to_mesh(bpy.context.scene,apply_modifiers=True,settings="RENDER")
#me.name = bpy.context.active_object.name
#bm = bmesh.new()
#bm.from_mesh(me)
#bmesh.ops.triangulate(bm,faces=bm.faces)
#bm.to_mesh(me)
#bm.free()
# collect all meshes in project
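# Each object that can provide mesh data is converted to a temporary,
# modifier-applied copy and triangulated with bmesh; objects without mesh data
# or without a UV layer are skipped.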
meshes = []
for obj in bpy.data.objects:
try:
me = obj.to_mesh(bpy.context.scene,apply_modifiers=True,settings="RENDER")
except RuntimeError:
continue # no mesh data in this object
if len(me.uv_layers) == 0:
print("Mesh ",me.name," has no UV coordinates")
continue
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.triangulate(bm,faces=bm.faces)
bm.to_mesh(me)
bm.free()
me.name = obj.name
meshes.append(me)
file.write("MESH_COUNT: ")
file.write(str(len(meshes)))
file.write("\n");
for me in meshes:
uvlist = me.uv_layers[0].data[:] # copy the UV data to a separate list - otherwise the original data appears to get overwritten once normals/tangents are computed
me.calc_tangents()
vertices = {}
faces = {} # key is the material index
adjancedDict = {} # key is a tuple of vertex indices (3 entries from every triangle)
unique2ponly = {}
unique_vertices = []
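# Vertex de-duplication: every distinct combination of
# (Blender vertex index, uv index, normal index, tangent index) becomes one
# exported vertex. face_indices refer to these unique vertices, while
# unique2ponly maps a unique vertex back to its position-only Blender vertex
# index, which is what the adjacency dictionary is keyed on. Note that
# unique_vertices.index() makes this loop quadratic in the vertex count.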
for face in me.polygons:
face_indices = []
ponly_vertices = []
for loopIdx in face.loop_indices:
vert = me.loops[loopIdx]
uv = uvlist[loopIdx].uv
if vert.vertex_index in vertices:
vdata = vertices[vert.vertex_index]
else:
vdata = VertexData()
vdata.Position = me.vertices[vert.vertex_index].co
vertices[vert.vertex_index] = vdata
ti = vdata.addTangent(vert.tangent)
ni = vdata.addNormal(vert.normal)
uvi = vdata.addUV(uv)
unique_vi = (vert.vertex_index,uvi,ni,ti)
if unique_vi not in unique_vertices:
unique_vertices.append(unique_vi)
face_indices.append(unique_vertices.index(unique_vi))
unique2ponly[unique_vertices.index(unique_vi)] = vert.vertex_index
ponly_vertices.append(vert.vertex_index)
material_name = me.materials[face.material_index].name
global_material_index = mater2index[material_name]
if global_material_index not in faces:
faces[global_material_index] = []
faces[global_material_index].append(face_indices)
add2AdjancedDictionary(ponly_vertices,face_indices,adjancedDict)
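# Once all faces are processed, calcAvgVal averages the per-vertex data
# gathered above; the result ends up in VERTEX_AVG_N, the extra averaged
# normal the header reserves for shadow-volume generation.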
for vert in vertices.values():
vert.calcAvgVal()
# save data
file.write("MESH_NAME: ")
file.write(me.name)
file.write("\n")
file.write("MESH_VERTEX_COUNT: ")
file.write(str(len(unique_vertices)))
file.write("\n")
file.write("MESH_FACE_COUNT: ")
file.write(str(len(faces)))
file.write("\n")
file.write("MESH_MATER_COUNT: ")
file.write(str(len(me.materials)))
file.write("\n")
for uvi in unique_vertices:
vdata = vertices[uvi[0]]
vdata.writeUniqueVertex(file,uvi[1],uvi[2],uvi[3])
for material_id in faces.keys():
file.write("MESH_MATER: ")
file.write(str(material_id))
file.write("\n")
for face in faces[material_id]:
file.write("FACE3: ")
file.write("[")
file.write(str(face[0]))
file.write(",")
file.write(str(face[1]))
file.write(",")
file.write(str(face[2]))
file.write("]\n")
file.write("ADJANCED3: ")
adjVert = getAindAdjancedVertices(face,adjancedDict,unique2ponly)
file.write("[")
if adjVert[0] == None:
file.write("N")
else:
file.write(str(adjVert[0]))
file.write(",")
if adjVert[1] == None:
file.write("N")
else:
file.write(str(adjVert[1]))
file.write(",")
if adjVert[2] == None:
file.write("N")
else:
file.write(str(adjVert[2]))
file.write("]\n")
# debug
#for face in faces[material_id]:
# file.write("FACE3PO: ")
# for uvi in face:
# writeVector3d(file,vertices[unique_vertices[uvi][0]].Position)
# file.write("\n")
# file.write("ADJANCEDPO: ")
# adjVert = getAindAdjancedVertices(face,adjancedDict,unique2ponly)
# for uvi in adjVert:
# if uvi == None:
# file.write("[-,-,-]")
# else:
# writeVector3d(file,vertices[unique_vertices[uvi][0]].Position)
# file.write("\n")
file.write("\n")
# remove temporary object
bpy.data.meshes.remove(me)
bl_info = {
"name": "AllINeed AIN format",
"author": "Grzegorz Domagala",
"version": (1, 0, 0),
"blender": (2, 77, 0),
"location": "File > Import-Export",
"description": "Export AIN",
"warning": "",
"wiki_url": "",
"support": 'OFFICIAL',
"category": "Import-Export"}
class ExportAIN(bpy.types.Operator, ExportHelper):
""" Save AIN file """
bl_idname = "export_scene.ain"
bl_label = "Export AIN"
bl_options = {'PRESET'}
filename_ext = ".ain"
filter_glob = StringProperty(
default="*.ain",
options={'HIDDEN'},
)
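# filter_glob is picked up by ExportHelper's file browser so that only *.ain
# files are listed; the chosen output path reaches execute() via as_keywords().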
path_mode = path_reference_mode
check_extension = True
def execute(self,context):
keywords = self.as_keywords()
file = open(keywords["filepath"],"w")
srcdir = os.path.dirname(bpy.data.filepath)
dstdir = os.path.dirname(keywords["filepath"])
writeAIN(file,srcdir,dstdir)
file.close()
return {'FINISHED'}
def menu_func_export_ain(self,context):
self.layout.operator(ExportAIN.bl_idname, text = "All I Need (.ain)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export_ain)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export_ain)
if __name__ == "__main__":
register()
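# Example invocation from the Blender Python console once the add-on is
# registered (the output path here is only an illustration):
#   bpy.ops.export_scene.ain(filepath="/tmp/scene.ain")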