text stringlengths 8 6.05M |
|---|
# For each of N test cases: read "r s" and print s with every
# character repeated r times.
num_cases = int(input())
for _ in range(num_cases):
    tokens = input().split()
    repeat, text = int(tokens[0]), tokens[1]
    print("".join(ch * repeat for ch in text))
# Done |
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTED BY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
#!/usr/bin/env python
#
# For time management.
import time
import datetime
# For path management.
import os.path
# To create folders.
import os
################################################################################################################
# Helps storing time information in a log. Useful for performance tests.
################################################################################################################
class TimeLog(object):
    """In-memory log of named timestamps. Useful for performance tests.

    All state is class-level, so the log is shared process-wide and the
    methods are static.
    (Fixed: Python 2 print statement was a syntax error under Python 3;
    writeToFile crashed on an empty log; stray semicolons removed.)
    """

    # Folder where log files are written.
    LOG_FOLDER = os.path.join(os.path.abspath(os.getcwd()), "logs")
    # Maps timestamp -> nametag of the event.
    _stamps = {}
    # Stores the last stamp to easily calculate diff with previous stamp.
    _lastStamp = 0

    @staticmethod
    def stamp(tag):
        """Add a new "stamp" to the in-memory log.

        @param tag a string to give a name to this timestamp, usually an event.
        """
        currTime = time.time()
        # Only when initializing, set the last stamp to the current time.
        if TimeLog._lastStamp == 0:
            TimeLog._lastStamp = currTime
        # Log the stamp.
        TimeLog._stamps[currTime] = tag
        print(tag + " (logged)")
        # Update the last time with the current one.
        TimeLog._lastStamp = currTime

    @staticmethod
    def writeToFile(logFilename='vmlog.txt'):
        """Append the whole log to LOG_FOLDER/logFilename.

        @param logFilename the filename for the log.
        """
        if not TimeLog._stamps:
            # Nothing recorded: the original crashed indexing sortedStamps[0].
            return
        # Create the folder if it doesn't exist.
        if not os.path.exists(TimeLog.LOG_FOLDER):
            os.makedirs(TimeLog.LOG_FOLDER)
        localFilePath = os.path.join(TimeLog.LOG_FOLDER, logFilename)
        with open(localFilePath, "a") as completeLocalFile:
            # Write a header to the file.
            completeLocalFile.write("TIMELOG " + logFilename + '\n')
            # Sort timestamps to write events in chronological order.
            sortedStamps = sorted(TimeLog._stamps)
            initialTimestamp = sortedStamps[0]
            previousTimestamp = initialTimestamp
            for currentTimestamp in sortedStamps:
                # Milliseconds since the first event and since the previous one.
                tag = TimeLog._stamps[currentTimestamp]
                diffFromStart = 1000 * (currentTimestamp - initialTimestamp)
                diffFromPrevious = 1000 * (currentTimestamp - previousTimestamp)
                eventTime = datetime.datetime.fromtimestamp(currentTimestamp).strftime("%H:%M:%S.%f")
                completeLocalFile.write("%s (+%d ms): %s (+%d ms)\n"
                                        % (eventTime, diffFromPrevious, tag, diffFromStart))
                previousTimestamp = currentTimestamp

    @staticmethod
    def reset():
        """Remove all timestamps from memory."""
        TimeLog._lastStamp = 0
        TimeLog._stamps.clear()
################################################################################################################
# Command line test
################################################################################################################
def testTimelog():
    # Command-line smoke test: record a few stamps around sleeps,
    # then dump the log to logs/testlog.txt.
    TimeLog.stamp('Starting test...')
    TimeLog.stamp('Waiting for a second...')
    time.sleep(1)
    TimeLog.stamp('Stopped waiting.')
    TimeLog.stamp('Waiting for 2 seconds...')
    time.sleep(2)
    TimeLog.stamp('Stopped waiting.')
    TimeLog.stamp('Writing to file...')
    TimeLog.writeToFile('testlog.txt')
    TimeLog.stamp('Process finished')
|
def tarjan(grafo):
    """Return the strongly-connected-component id of every node of the
    directed graph *grafo*, in O(n + m) time.

    grafo: adjacency structure indexed by node ids 0..n-1 (a list of
    adjacency lists, or a dict keyed 0..n-1).

    componenti[node] semantics during the search:
        0  -- not visited yet
        >0 -- visited, component id assigned
        <0 -- visited, component not yet assigned (negated discovery time)
    """
    def dfs(nodo):
        nonlocal tempo, componente
        tempo += 1
        componenti[nodo] = -tempo  # negative discovery time marks "on stack"
        stack.append(nodo)
        min_nodo = tempo
        for adiacente in grafo[nodo]:
            if componenti[adiacente] == 0:
                # Neighbour not visited yet: recurse.
                min_nodo = min(min_nodo, dfs(adiacente))
            elif componenti[adiacente] < 0:
                # Visited but component not set: it is still on the stack.
                min_nodo = min(min_nodo, -componenti[adiacente])
        if min_nodo == -componenti[nodo]:
            # nodo is a component root: pop the stack down to nodo.
            componente += 1
            nodo_tmp = -1
            while nodo_tmp != nodo:
                nodo_tmp = stack.pop()
                componenti[nodo_tmp] = componente
        return min_nodo

    componenti = [0 for _ in grafo]
    componente = tempo = 0  # component and time counters.
    stack = []
    # Iterate node *indices* (the original iterated `grafo` directly, which
    # yields adjacency lists — not node ids — when grafo is a list, and
    # crashed on componenti[nodo]). The graph may be disconnected, so every
    # unvisited node starts its own DFS.
    for nodo in range(len(grafo)):
        if componenti[nodo] == 0:
            dfs(nodo)
    return componenti
|
# import the necessary packages
from optparse import OptionParser
from scipy.spatial import distance as dist
import matplotlib.pyplot as plt
import numpy as np
import argparse
import glob
import cv2
import sys
import pickle
###########################
def image_match_histogram(all_files, options):
    """Compute 8x8x8 BGR color histograms for every readable image and pickle
    to options.opath: per-image features, the feature matrix, the pairwise
    histogram-intersection affinity matrix, the top-k neighbor lists, and a
    k-means clustering of the features.

    all_files: image filenames (resolved against options.ipath if set).
    (Fixed: Python 2 print statements; crash on `hist` when no image was
    readable; OpenCV 3+ renamed cv2.cv.CV_COMP_INTERSECT.)
    """
    histograms = {}
    image_files = []
    # loop over all images
    for (i, fname) in enumerate(all_files):
        path_fname = options.ipath + '/' + fname if options.ipath else fname
        image = cv2.imread(path_fname)
        if image is None:
            print(path_fname + " : fail to read")
            continue
        image_files.append(fname)
        if image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        print(i, path_fname, image.shape)
        v = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
        v = v.flatten()
        hist = v / v.sum()  # normalize to a probability distribution
        histograms[fname] = hist
    if not image_files:
        # Original referenced the undefined `hist` below in this case.
        print("no readable images; nothing to do")
        return
    pickle.dump(histograms, open(options.opath + "/color_feature.p", "wb"))
    # feature matrix
    feature_matrix = np.zeros((len(histograms), len(hist)))
    for (i, fi) in enumerate(image_files):
        feature_matrix[i, :] = histograms[image_files[i]]
    pickle.dump(feature_matrix, open(options.opath + "/color_matrix.p", "wb"))
    dists = np.zeros((len(image_files), len(image_files)))
    knn = {}
    # Histogram-intersection method id; the constant moved in OpenCV 3+.
    compare_method = getattr(cv2, 'HISTCMP_INTERSECT', None)
    if compare_method is None:
        compare_method = cv2.cv.CV_COMP_INTERSECT  # OpenCV 2.x fallback
    # pairwise comparison (symmetric: only the upper triangle is computed)
    for (i, fi) in enumerate(image_files):
        for (j, fj) in enumerate(image_files):
            if i <= j:
                d = cv2.compareHist(histograms[fi], histograms[fj], compare_method)
                dists[i, j] = d
                dists[j, i] = d
    pickle.dump(dists, open(options.opath + "/color_affinity.p", "wb"))
    # K nearest neighbors (intersection is a similarity: sort descending)
    k = int(options.top)
    print('knn')
    for (i, fi) in enumerate(image_files):
        vec = sorted(zip(dists[i, :], image_files), reverse=True)
        knn[fi] = vec[:k]
        print(knn[fi])
    pickle.dump(knn, open(options.opath + "/color_knn.p", "wb"))
    # Kmeans clustering
    term_crit = (cv2.TERM_CRITERIA_EPS, 100, 0.01)
    print(feature_matrix)
    # NOTE(review): this is the OpenCV 2.x kmeans signature; OpenCV 3+ takes an
    # extra bestLabels argument — confirm against the deployed OpenCV version.
    ret, labels, centers = cv2.kmeans(np.float32(feature_matrix), int(options.cluster_count), term_crit, 10, cv2.KMEANS_RANDOM_CENTERS)
    label_list = []
    for (i, l) in enumerate(labels):
        label_list.append(l[0])
    print(label_list)
    # list() so the pairing prints/pickles as data under Python 3 too.
    image_label = list(zip(image_files, label_list))
    print(image_label)
    pickle.dump(image_label, open(options.opath + "/color_clustering.p", "wb"))
###########################
def main():
    """Parse command-line options and dispatch to the selected feature.

    Expects one positional argument: a file listing one image path per line.
    (Fixed: Python 2 print statement; file handle was never closed.)
    """
    usage = "usage: %prog [options] image_list_file \n"
    usage += "    image match"
    parser = OptionParser(usage=usage)
    parser.add_option("-i", "--input_path", default="",
                      action="store", dest="ipath",
                      help="input path")
    parser.add_option("-o", "--output_path", default=".",
                      action="store", dest="opath",
                      help="output path")
    parser.add_option("-f", "--feature", default="color_histogram",
                      action="store", dest="feature",
                      help="color_histogram; sift_match;dist_info")
    parser.add_option("-m", "--method", default="Intersection",
                      action="store", dest="method",
                      help="Intersection;L1;L2")
    parser.add_option("-t", "--top", default="5",
                      action="store", dest="top",
                      help="Top nearest neighbors")
    parser.add_option("-c", "--cluster_count", default="3",
                      action="store", dest="cluster_count",
                      help="Number of clusters")
    parser.add_option("-d", "--debug", default="0",
                      action="store", dest="debug_mode",
                      help="debug intermediate results")
    (options, args) = parser.parse_args()
    if len(args) < 1:
        print("Need one argument: image_list_file \n")
        sys.exit(1)
    # One image filename per line; close the handle deterministically.
    with open(args[0]) as list_file:
        image_files = [line.strip() for line in list_file]
    if options.feature == "color_histogram":
        image_match_histogram(image_files, options)


if __name__ == "__main__":
    main()
|
import re

# Check if the string starts with "The" and ends with "Spain":
txt = "The rain in Spain"
x = re.search(r"^The.*Spain$", txt)
if x:
    print("YES! We have a match!")
else:
    print("No match")

import re

# Check if the string starts with "The":
# (raw string: "\A" in a plain literal is an invalid escape sequence;
# also renamed the variable so it no longer shadows the builtin `str`.)
text = "The rain in Spain"
x = re.findall(r"\AThe", text)
print(x)
if x:
    print("Yes, there is a match!")
else:
    print("No match")
|
import os
from PIL import Image
from numpy.ma import cos, sin, arccos
from pylab import *
from scipy.constants import pi
def process_image(imagename, resultname, params="--edge-thresh 10 --peak-thresh 5"):
    """ Process an image using sift and save the results in a file. """
    # The sift binary only reads pgm; convert anything else to a temp pgm.
    if imagename[-3:] != 'pgm':
        grayscale = Image.open(imagename).convert('L')
        grayscale.save('tmp.pgm')
        imagename = 'tmp.pgm'
    command = "sift {} --output={} {}".format(imagename, resultname, params)
    os.system(command)
    print('processed', imagename, 'to', resultname)
# each line in the output file represents a interest point
# each line contains coordinates, scale, rotation angle, and then 128 values of the descriptor
def read_features_from_file(filename):
    """ Read feature properties and return as (locations, descriptors). """
    data = loadtxt(filename)
    # First four columns are row, col, scale, orientation; the rest is the descriptor.
    locations, descriptors = data[:, :4], data[:, 4:]
    return locations, descriptors
def write_features_to_file(filename, locs, desc):
    """ Save feature location and descriptor to file, one feature per row. """
    # Each row: [row, col, scale, orientation, descriptor values...]
    combined = hstack((locs, desc))
    savetxt(filename, combined)
def plot_features(im, locs, circle=False):
    """ Show image with features. input: im (image as array),
    locs (row, col, scale, orientation of each feature). """

    def draw_circle(center, radius):
        # Sample the circle in 1% steps around a full turn.
        t = arange(0, 1.01, .01) * 2 * pi
        plot(radius * cos(t) + center[0], radius * sin(t) + center[1], 'b', linewidth=2)

    imshow(im)
    if circle:
        # One circle per feature, radius equal to the feature's scale.
        for p in locs:
            draw_circle(p[:2], p[2])
    else:
        plot(locs[:, 0], locs[:, 1], 'ob')
    axis('off')
def match(desc1, desc2):
    """ For each descriptor in the first image, select its match in the second image.
    input: desc1 (descriptors for the first image), desc2 (same for second image). """
    # Normalize every descriptor to unit length so dot products are cosines.
    desc1 = array([d / linalg.norm(d) for d in desc1])
    desc2 = array([d / linalg.norm(d) for d in desc2])

    dist_ratio = 0.6
    n_features = desc1.shape[0]
    matchscores = zeros(n_features, 'int')
    desc2t = desc2.T  # precompute matrix transpose

    for i in range(n_features):
        # Angles between this descriptor and every descriptor of image two;
        # the 0.9999 factor keeps the cosines strictly inside arccos's domain.
        dotprods = dot(desc1[i, :], desc2t) * 0.9999
        angles = arccos(dotprods)
        indx = argsort(angles)
        # Accept the nearest neighbor only if clearly better than the 2nd.
        if angles[indx[0]] < dist_ratio * angles[indx[1]]:
            matchscores[i] = int(indx[0])
    return matchscores
def match_twosided(desc1, desc2):
    """ Two-sided symmetric version of match(). """
    matches_12 = match(desc1, desc2)
    matches_21 = match(desc2, desc1)
    # Keep only matches that agree in both directions; zero out the rest.
    for n in matches_12.nonzero()[0]:
        if matches_21[int(matches_12[n])] != n:
            matches_12[n] = 0
    return matches_12
def appendimages(im1, im2):
    """ Return a new image that appends the two images side-by-side. """
    # Pad the shorter image with zero rows so both have equal height.
    rows1, rows2 = im1.shape[0], im2.shape[0]
    if rows1 < rows2:
        padding = zeros((rows2 - rows1, im1.shape[1]))
        im1 = concatenate((im1, padding), axis=0)
    elif rows1 > rows2:
        padding = zeros((rows1 - rows2, im2.shape[1]))
        im2 = concatenate((im2, padding), axis=0)
    # Equal heights (possibly after padding): join left-to-right.
    return concatenate((im1, im2), axis=1)
def plot_matches(im1, im2, locs1, locs2, matchscores, show_below=True):
    """ Show a figure with lines joining the accepted matches
    input: im1,im2 (images as arrays), locs1,locs2 (feature locations), matchscores (as output from 'match()'),
    show_below (if images should be shown below matches). """
    im3 = appendimages(im1, im2)
    if show_below:
        im3 = vstack((im3, im3))
    imshow(im3)

    # Features of the second image are shifted right by the first image's width.
    cols1 = im1.shape[1]
    for i, m in enumerate(matchscores):
        if m > 0:
            # Cyan line from feature i in image one to its match m in image two.
            plot([locs1[i][0], locs2[m][0] + cols1], [locs1[i][1], locs2[m][1]], 'c')
    axis('off')
def sample_find_features():
    """Demo: extract SIFT features for one image and plot them with circles."""
    imname = 'continental.jpg'
    grayscale = array(Image.open(imname).convert('L'))
    process_image(imname, 'continental.sift')
    locs, descs = read_features_from_file('continental.sift')
    figure()
    gray()
    plot_features(grayscale, locs, circle=True)
    show()
def sample_match_images():
    """Demo: two-sided SIFT matching between an image and its resized copy."""
    pairs = (('continental.jpg', 'continental.sift'),
             ('continental_resized.jpg', 'continental_resized.sift'))
    images, locations, descriptors = [], [], []
    # Extract features for both images in order.
    for imname, siftname in pairs:
        images.append(array(Image.open(imname).convert('L')))
        process_image(imname, siftname)
        locs, descs = read_features_from_file(siftname)
        locations.append(locs)
        descriptors.append(descs)
    matches = match_twosided(descriptors[0], descriptors[1])
    figure()
    gray()
    plot_matches(images[0], images[1], locations[0], locations[1], matches)
    show()
# sample_find_features()
# sample_match_images()
|
# ###############################
# Michael Vassernis - 319582888
#
#################################
import nn_model as nn_mdl
from helper_functions import load_mnist, accuracy_on_dataset, load_model, save_model
import numpy as np
import matplotlib.pyplot as plt
import sys
import time
def train_classifier(train_set, dev_set, num_iterations, learning_rate, model, regularization, model_file):
    """Train *model* with mini-batch SGD, checkpointing on best dev accuracy.

    train_set / dev_set: arrays whose last column is the integer label.
    Shuffles the training set each epoch, trains in batches of 10, logs
    progress every 5000 examples, and saves the model to *model_file*
    whenever dev accuracy improves.
    (Fixed: Python 2 `xrange` and `print` statements were syntax errors
    under Python 3.)
    """
    best_dev_accuracy = 0.0
    for epoch in range(num_iterations):
        np.random.shuffle(train_set)
        print('\nbegan doing epoch no.', epoch + 1)
        total_loss = 0.0  # total loss in this iteration.
        avg_loss = 0.0
        count = 0.0
        batch_size = 10
        start_time = time.time()
        for batch_index in range(0, len(train_set), batch_size):
            # Features are all columns but the last; labels are the last column.
            loss = model.train_on_mini_batch(train_set[batch_index:batch_index + batch_size, :-1],
                                             train_set[batch_index:batch_index + batch_size, -1].astype(dtype=int),
                                             learning_rate, regularization)
            total_loss += loss
            avg_loss += loss
            count += batch_size
            if count % 5000 == 0:
                took = time.time() - start_time
                start_time = time.time()
                print('\t', (count / len(train_set)) * 100, '% complete.', 'took:', took,
                      'seconds. average loss <-', avg_loss / 5000)
                avg_loss = 0
        train_loss = total_loss / len(train_set)
        train_accuracy = accuracy_on_dataset(train_set, model)
        dev_accuracy = accuracy_on_dataset(dev_set, model)
        print('epoch:', epoch + 1, 'loss:', train_loss, 'train accuracy:', train_accuracy, 'dev accuracy:', dev_accuracy)
        if dev_accuracy > best_dev_accuracy:
            best_dev_accuracy = dev_accuracy
            print('saving model....')
            save_model(model, model_file)
if __name__ == '__main__':
    # Path where the best-so-far model is checkpointed.
    model_file = '../saved_models/nn2classifier.mdl'
    train_set, dev_set = load_mnist('../mnist_data')
    # Any extra CLI argument means "resume from the saved model".
    if len(sys.argv) > 1:
        model = load_model(model_file)
    else:
        # Fresh network: 28x28 inputs, two hidden layers of 128, 10 classes.
        model = nn_mdl.NNModel()
        model.initialize(28*28, 128, 128, 10)
    train_classifier(train_set, dev_set, num_iterations=20, learning_rate=0.001, model=model, regularization=1e-6,
                     model_file=model_file)
|
#!/usr/bin/env python
''' Experimental Python Server backend test '''
import os
import sys
# Make the repository root importable so the `source` package resolves.
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root_dir)
# Keep compiled bytecode out of the source tree.
sys.pycache_prefix = os.path.join(root_dir, 'dist', '__pycache__')
# The netron backend lives in the `source` package.
netron = __import__('source')
test_data_dir = os.path.join(root_dir, 'third_party', 'test')
def _test_onnx():
    """Serve a single sample ONNX model in the browser."""
    onnx = __import__('onnx')
    model_path = os.path.join(test_data_dir, 'onnx', 'candy.onnx')
    model = onnx.load(model_path)
    netron.serve(None, model, browse=True, verbosity='quiet')
def _test_onnx_list():
    """Serve then stop every ONNX model in the test folder, skipping known-slow ones."""
    onnx = __import__('onnx')
    folder = os.path.join(test_data_dir, 'onnx')
    skip = ('super_resolution.onnx', 'arcface-resnet100.onnx')
    for item in os.listdir(folder):
        path = os.path.join(folder, item)
        if not path.endswith('.onnx') or item in skip:
            continue
        print(item)
        model = onnx.load(path)
        address = netron.serve(path, model, verbosity='quiet')
        netron.stop(address)
def _test_torchscript():
    """Trace a torchvision ResNet-34 and serve its TorchScript graph."""
    torch = __import__('torch')
    torchvision = __import__('torchvision')
    weights = torchvision.models.ResNet34_Weights.DEFAULT
    model = torchvision.models.resnet34(weights=weights)
    dummy = torch.zeros([1, 3, 224, 224])
    graph, _ = torch.jit._get_trace_graph(model, dummy)  # pylint: disable=protected-access
    # graph = torch.onnx._optimize_trace(graph, torch.onnx.OperatorExportTypes.ONNX)
    # https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/ir/ir.h
    netron.serve('resnet34', graph, browse=True, verbosity='quiet')
# Manual entry point: only the ONNX folder sweep is currently enabled.
# _test_onnx()
# _test_torchscript()
_test_onnx_list()
|
from panda3d.core import Point3, Vec3, NodePath, LineSegs, Vec4, \
CollisionTraverser, CollisionHandlerQueue, CollisionBox, CollisionNode, \
BitMask32, KeyboardButton
from .BoxTool import BoxAction, ResizeHandle
from .ToolOptions import ToolOptions
from bsp.leveleditor.selection.SelectionType import SelectionModeTransform
from bsp.leveleditor.math.Ray import Ray
from bsp.leveleditor.viewport.ViewportType import VIEWPORT_3D_MASK
from bsp.leveleditor import LEUtils, LEGlobals
from bsp.leveleditor.actions.EditObjectProperties import EditObjectProperties
from bsp.leveleditor.actions.ActionGroup import ActionGroup
from bsp.leveleditor.actions.Create import Create
from bsp.leveleditor.actions.Select import Select
from .SelectTool import SelectTool
from bsp.bspbase import BSPUtils
from PyQt5 import QtWidgets
# Interaction states for a gizmo axis.
Ready = 0
Rollover = 1
Down = 2
# "With respect to" spaces used when orienting the gizmo.
Global = 0
Local = 1
class TransformToolOptions(ToolOptions):
    # Singleton options panel that lets the user choose the
    # "with respect to" space (Global or Local) for the transform gizmo.

    GlobalPtr = None

    @staticmethod
    def getGlobalPtr():
        # Lazily create and return the shared singleton instance.
        self = TransformToolOptions
        if not self.GlobalPtr:
            self.GlobalPtr = TransformToolOptions()
        return self.GlobalPtr

    def __init__(self):
        ToolOptions.__init__(self)
        # Radio-button group: Global vs Local transform space.
        group = QtWidgets.QGroupBox("With Respect To", self)
        group.setLayout(QtWidgets.QFormLayout())
        globalBtn = QtWidgets.QRadioButton("Global", group)
        globalBtn.toggled.connect(self.__toggleGlobal)
        group.layout().addWidget(globalBtn)
        localBtn = QtWidgets.QRadioButton("Local", group)
        localBtn.toggled.connect(self.__toggleLocal)
        group.layout().addWidget(localBtn)
        self.globalBtn = globalBtn
        self.localBtn = localBtn
        self.layout().addWidget(group)

    def setTool(self, tool):
        # Sync the radio buttons with the tool's current wrt mode.
        ToolOptions.setTool(self, tool)
        if self.tool.wrtMode == Global:
            self.globalBtn.setChecked(True)
        elif self.tool.wrtMode == Local:
            self.localBtn.setChecked(True)

    def __toggleGlobal(self):
        self.tool.setWrtMode(Global)

    def __toggleLocal(self):
        self.tool.setWrtMode(Local)
class TransformWidgetAxis(NodePath):
    # One pickable axis (X, Y or Z) of a transform gizmo.

    # Fade the axis out as the camera view lines up with it.
    DotFade = True
    DotRange = [0.95, 0.99]
    OppositeDot = False

    def __init__(self, widget, axis):
        NodePath.__init__(self, "transformWidgetAxis")
        self.reparentTo(widget)
        self.widget = widget
        # Unit vector along this axis; its components double as the axis color.
        vec = Vec3(0)
        vec[axis] = 1.0
        self.direction = vec
        self.defaultColor = Vec4(vec[0], vec[1], vec[2], 1.0)
        self.rolloverColor = Vec4(vec + 0.5, 1.0)
        self.downColor = Vec4(vec - 0.5, 1.0)
        self.lookAt(vec)
        self.setTransparency(1)
        self.axisIdx = axis
        # Collision solid used to pick this axis with the mouse.
        box = CollisionBox(*self.getClickBox())
        cnode = CollisionNode("pickBox")
        cnode.addSolid(box)
        cnode.setIntoCollideMask(LEGlobals.ManipulatorMask)
        cnode.setFromCollideMask(BitMask32.allOff())
        self.pickNp = self.attachNewNode(cnode)
        self.pickNp.setPythonTag("widgetAxis", self)
        self.state = Ready
        self.setState(Ready)

    def cleanup(self):
        # Break references and free the scene-graph nodes.
        self.widget = None
        self.direction = None
        self.defaultColor = None
        self.rolloverColor = None
        self.downColor = None
        self.axisIdx = None
        self.pickNp.removeNode()
        self.pickNp = None
        self.state = None
        self.removeNode()

    def update(self):
        if self.DotFade:
            # Fade based on how closely the camera looks down this axis.
            camToAxis = self.getPos(base.render) - self.widget.vp.cam.getPos(base.render)
            camToAxis.normalize()
            dot = abs(camToAxis.dot(self.direction))
            inRange = dot >= self.DotRange[0] if not self.OppositeDot else dot <= self.DotRange[0]
            if inRange:
                alpha = BSPUtils.remapVal(dot, self.DotRange[0], self.DotRange[1], 1.0, 0.0)
                self.setAlphaScale(alpha)
            else:
                self.setAlphaScale(1)

    def getClickBox(self):
        # Bounds of the pickable collision box; overridable by subclasses.
        return [Vec3(-1), Vec3(1)]

    def setState(self, state):
        # Redraw only on an actual state change, then recolor accordingly.
        if state != self.state:
            self.widget.tool.doc.update3DViews()
        self.state = state
        if state == Ready:
            self.setColorScale(self.defaultColor)
        elif state == Rollover:
            self.setColorScale(self.rolloverColor)
        elif state == Down:
            self.setColorScale(self.downColor)
class TransformWidget(NodePath):
    # Root node of a 3-axis transform gizmo; owns one TransformWidgetAxis
    # per axis and handles mouse picking of the axes.

    def __init__(self, tool):
        self.tool = tool
        NodePath.__init__(self, "transformWidget")
        self.widgetQueue = CollisionHandlerQueue()
        self.widgetTrav = CollisionTraverser()
        # Render fullbright and on top of everything, in 3D viewports only.
        self.setLightOff(1)
        self.setFogOff(1)
        self.setDepthWrite(False, 1)
        self.setDepthTest(False, 1)
        self.setBin("unsorted", 60)
        self.hide(~VIEWPORT_3D_MASK)
        # Find the (first) 3D viewport.
        self.vp = None
        for vp in self.tool.doc.viewportMgr.viewports:
            if vp.is3D():
                self.vp = vp
                break
        self.activeAxis = None
        self.axes = {}
        for axis in (0, 1, 2):
            self.axes[axis] = self.createAxis(axis)

    def cleanup(self):
        # Break references and free all axis nodes.
        self.tool = None
        self.widgetQueue = None
        self.widgetTrav = None
        self.vp = None
        self.activeAxis = None
        for axis in self.axes.values():
            axis.cleanup()
        self.axes = None
        self.removeNode()

    def createAxis(self, axis):
        # Overridden by subclasses to build the actual axis geometry.
        return None

    def setActiveAxis(self, axis):
        # Return the previous axis to Ready, then highlight the new one.
        if self.activeAxis:
            self.activeAxis.setState(Ready)
        if axis is None:
            self.activeAxis = None
        else:
            self.activeAxis = self.axes[axis]
            self.activeAxis.setState(Rollover)

    def update(self):
        # Keep an approximately constant on-screen size.
        distance = self.getPos(self.vp.cam).length()
        self.setScale(distance / 4)
        for _, axis in self.axes.items():
            axis.update()
        # Don't re-pick while dragging or when another viewport is active.
        if self.tool.mouseIsDown or self.tool.doc.viewportMgr.activeViewport != self.vp:
            return
        self.setActiveAxis(None)
        entries = self.vp.click(LEGlobals.ManipulatorMask, self.widgetQueue,
                                self.widgetTrav, self)
        if entries and len(entries) > 0:
            entry = entries[0]
            axisObj = entry.getIntoNodePath().getPythonTag("widgetAxis")
            self.setActiveAxis(axisObj.axisIdx)

    def enable(self):
        self.reparentTo(self.tool.toolRoot)

    def disable(self):
        # Detach from the scene graph without destroying the widget.
        self.reparentTo(NodePath())
# Base class for a tool that transforms objects.
# Inherted by MoveTool, RotateTool, and ScaleTool
class BaseTransformTool(SelectTool):
def __init__(self, mgr):
SelectTool.__init__(self, mgr)
self.hasWidgets = False
self.widget = None
self.toolRoot = self.doc.render.attachNewNode("xformToolRoot")
self.toolVisRoot = self.toolRoot.attachNewNode("xformVisRoot")
self.toolVisRoot.setTransparency(True)
self.toolVisRoot.setColorScale(1, 1, 1, 0.5, 1)
self.axis3DLines = None
self.isTransforming = False
self.xformObjects = []
self.boxOriginOffset = Vec3(0, 0, 0)
# With respect to mode. Gizmo is rotated differently
# based on this mode.
self.wrtMode = Global
self.transformStart = Point3(0)
self.preTransformStart = Point3(0)
self.transformType = SelectionModeTransform.Off
self.createWidget()
self.options = TransformToolOptions.getGlobalPtr()
def cleanup(self):
self.hasWidgets = None
self.widget.cleanup()
self.widget = None
self.toolRoot.removeNode()
self.toolRoot = None
self.toolVisRoot = None
if self.axis3DLines:
self.axis3DLines.removeNode()
self.axis3DLines = None
self.isTransforming = None
self.xformObjects = None
self.boxOriginOffset = None
self.wrtMode = None
self.transformStart = None
self.preTransformStart = None
self.transformType = None
SelectTool.cleanup(self)
def filterHandle(self, handle):
if self.isTransforming:
# Don't show handles if we're scaling in 3D
return False
return True
def onBeginTransform(self, vp):
pass
def resizeBoxDrag(self):
SelectTool.resizeBoxDrag(self)
if self.state.action == BoxAction.Resizing and base.selectionMgr.hasSelectedObjects():
if self.state.handle == ResizeHandle.Center:
boxCenter = (self.state.boxStart + self.state.boxEnd) / 2.0
self.setGizmoOrigin(boxCenter + self.boxOriginOffset)
self.onSelectedBoxResize()
def onSelectedBoxResize(self):
pass
def createWidget(self):
pass
def setBoxToSelection(self):
self.state.boxStart = base.selectionMgr.selectionMins
self.state.boxEnd = base.selectionMgr.selectionMaxs
# Calculate an offset from the center of the box to the gizmo origin
# so we can keep the box and gizmo in sync as they move.
self.boxOriginOffset = self.getGizmoOrigin() - base.selectionMgr.selectionCenter
self.state.action = BoxAction.Drawn
self.resizeBoxDone()
self.showBox()
self.showText()
def setBoxToVisRoot(self):
self.toolVisRoot.calcTightBounds(self.state.boxStart, self.state.boxEnd, self.doc.render)
self.state.action = BoxAction.Drawn
self.resizeBoxDone()
self.showBox()
self.showText()
def setWrtMode(self, mode):
self.wrtMode = mode
self.adjustGizmoAngles()
def adjustGizmoAngles(self):
if self.wrtMode == Global:
# Look forward in world space
self.setGizmoAngles(Vec3(0, 0, 0))
elif self.wrtMode == Local:
# Set the gizmo angles to the angles of the most
# recently selected object.
if base.selectionMgr.hasSelectedObjects():
numSelections = base.selectionMgr.getNumSelectedObjects()
selection = base.selectionMgr.selectedObjects[numSelections - 1]
self.setGizmoAngles(selection.getAbsAngles())
def handleSelectedObjectTransformChanged(self, entity):
# This method unfortunately gets called when we change the transform on
# the selected objects when finishing the move... changing
# the widget point while applying the final move position
# screws it up.
if not self.isTransforming:
self.calcWidgetPoint()
def selectionChanged(self):
if base.selectionMgr.hasSelectedObjects() \
and base.selectionMgr.isTransformAllowed(self.transformType):
if not self.hasWidgets:
self.enableWidget()
else:
self.calcWidgetPoint()
elif self.hasWidgets:
self.disableWidget()
self.maybeCancel()
def calcWidgetPoint(self, updateBox = True):
# Set the gizmo to the average origin of all the selected objects.
avg = Point3(0)
for obj in base.selectionMgr.selectedObjects:
avg += obj.getAbsOrigin()
avg /= len(base.selectionMgr.selectedObjects)
self.setGizmoOrigin(avg)
self.adjustGizmoAngles()
if updateBox:
self.setBoxToSelection()
def mouseDown(self):
SelectTool.mouseDown(self)
if self.widget.activeAxis:
self.widget.activeAxis.setState(Down)
vp = base.viewportMgr.activeViewport
if vp.is2D():
self.transformStart = vp.viewportToWorld(vp.getMouse(), flatten = False)
else:
self.transformStart = self.getPointOnGizmo()
self.preTransformStart = self.getGizmoOrigin()
def mouseMove(self, vp):
if vp and vp.is3D() and self.mouseIsDown and self.widget.activeAxis:
if not self.isTransforming:
self.createMoveVis()
self.isTransforming = True
self.onBeginTransform(vp)
self.onMouseMoveTransforming3D(vp)
self.doc.updateAllViews()
else:
if not self.isTransforming and self.state.action in [BoxAction.DownToResize, BoxAction.Resizing]:
self.createMoveVis()
self.isTransforming = True
self.onBeginTransform(vp)
self.onMouseMoveTransforming2D(vp)
SelectTool.mouseMove(self, vp)
def onMouseMoveTransforming2D(self, vp):
pass
def onMouseMoveTransforming3D(self, vp):
pass
def getGizmoDirection(self, axis):
quat = self.toolRoot.getQuat(NodePath())
if axis == 0:
return quat.getRight()
elif axis == 1:
return quat.getForward()
else:
return quat.getUp()
def getGizmoOrigin(self):
return self.toolRoot.getPos(NodePath())
def setGizmoOrigin(self, origin):
self.toolRoot.setPos(NodePath(), origin)
def setGizmoAngles(self, angles):
self.toolRoot.setHpr(NodePath(), angles)
def getGizmoRay(self, axis):
direction = self.getGizmoDirection(axis)
origin = self.getGizmoOrigin()
return Ray(origin, direction)
def getPointOnGizmo(self):
vp = base.viewportMgr.activeViewport
if not vp or not vp.is3D():
return
axis = self.widget.activeAxis.axisIdx
gray = self.getGizmoRay(axis)
mray = vp.getMouseRay()
# Move into world space
mray.xform(vp.cam.getMat(NodePath()))
distance = LEUtils.closestDistanceBetweenLines(gray, mray)
return gray.origin + (gray.direction * -gray.t)
def createMoveVis(self):
# Instance each selected map object to the vis root
for obj in base.selectionMgr.selectedObjects:
instRoot = NodePath("instRoot")
inst = obj.np.instanceTo(instRoot)
instRoot.wrtReparentTo(self.toolVisRoot)
self.xformObjects.append((obj, instRoot, inst))
# Show an infinite line along the axis we are moving the object
# if we are using the 3D view
if self.widget.activeAxis:
axis = self.widget.activeAxis.axisIdx
segs = LineSegs()
col = Vec4(0, 0, 0, 1)
col[axis] = 1.0
segs.setColor(col)
p = Point3(0)
p[axis] = -1000000
segs.moveTo(p)
p[axis] = 1000000
segs.drawTo(p)
self.axis3DLines = self.toolRoot.attachNewNode(segs.create())
self.axis3DLines.setLightOff(1)
self.axis3DLines.setFogOff(1)
self.widget.stash()
def destroyMoveVis(self):
for obj, instRoot, inst in self.xformObjects:
instRoot.removeNode()
self.xformObjects = []
if self.axis3DLines:
self.axis3DLines.removeNode()
self.axis3DLines = None
self.widget.unstash()
def mouseUp(self):
SelectTool.mouseUp(self)
if self.widget.activeAxis:
self.widget.activeAxis.setState(Rollover)
if self.isTransforming:
vp = base.viewportMgr.activeViewport
if vp.mouseWatcher.isButtonDown(KeyboardButton.shift()):
# Clone when shift is held
self.onFinishTransformingClone()
else:
self.onFinishTransforming()
self.onTransformDone()
self.destroyMoveVis()
base.selectionMgr.updateSelectionBounds()
self.isTransforming = False
def onTransformDone(self):
pass
def onFinishTransformingClone(self):
# Clone the selections and set them to the transform the user
# has chosen.
copies = []
actions = []
for obj, _, inst in self.xformObjects:
copy = obj.copy(base.document.idGenerator)
copy.updateProperties(self.getUpdatedProperties(obj, inst))
actions.append(Create(obj.parent.id, copy))
copies.append(copy)
actions.append(Select(copies, True))
base.actionMgr.performAction("Duplicate %i object(s)" % len(copies),
ActionGroup(actions))
def onFinishTransforming(self):
actions = []
for obj, _, inst in self.xformObjects:
action = EditObjectProperties(obj, self.getUpdatedProperties(obj, inst))
actions.append(action)
base.actionMgr.performAction("%s %i object(s)" % (self.getActionName(), len(self.xformObjects)),
ActionGroup(actions))
def getActionName(self):
return "Transform"
def getUpdatedProperties(self, obj, inst):
return {}
def update(self):
SelectTool.update(self)
if self.hasWidgets:
self.widget.update()
if self.widget.activeAxis or self.state.action in [BoxAction.ReadyToResize, BoxAction.DownToResize]:
self.suppressSelect = True
else:
self.suppressSelect = False
else:
self.suppressSelect = False
def enableWidget(self):
self.calcWidgetPoint()
self.widget.enable()
self.hasWidgets = True
def disableWidget(self):
self.widget.disable()
self.hasWidgets = False
def activate(self):
SelectTool.activate(self)
# The transform may have been changed using the object properties panel.
# Intercept this event to update our gizmo and stuff.
self.accept('selectedObjectTransformChanged', self.handleSelectedObjectTransformChanged)
# Same with bounds
self.accept('selectedObjectBoundsChanged', self.handleSelectedObjectTransformChanged)
def enable(self):
SelectTool.enable(self)
if base.selectionMgr.hasSelectedObjects() \
and base.selectionMgr.isTransformAllowed(self.transformType):
self.enableWidget()
    def disable(self):
        """Tear down the gizmo and abort any in-flight transform."""
        SelectTool.disable(self)
        self.disableWidget()
        if self.isTransforming:
            # Tool switched mid-drag: remove the move visualisation.
            self.destroyMoveVis()
            self.isTransforming = False
|
import time
val = [3, 34, 4, 12, 5, 2]  # item values; here weight == value (subset-sum flavour)
#wt = [10, 20, 30]
summ = 9  # knapsack capacity
n = len(val)  # number of items
# Solved by dynamic programming (memoised recursion).
def knapsack(val, W, n, memo=None):
    """Return the maximum total value of a subset of val[:n] not exceeding W.

    Items weigh exactly their value here (weight == value), so this is the
    classic 0/1 knapsack specialised to subset-sum maximisation.

    Args:
        val: list of item values (also their weights).
        W: remaining capacity.
        n: number of items still under consideration (the prefix val[:n]).
        memo: internal memoisation table; callers should omit it.
    """
    # Create the memo once, at the top-level call, and thread it through the
    # recursion. The original rebuilt the table on every recursive call, so
    # memo[n][W] was always -1 and the "DP" degenerated to plain exponential
    # recursion (plus O(n*W) wasted allocation per call).
    if memo is None:
        memo = [[-1 for _ in range(W + 1)] for _ in range(n + 1)]
    if W == 0 or n == 0:
        return 0
    if memo[n][W] != -1:
        return memo[n][W]
    if val[n - 1] <= W:
        # Either take item n-1 (consuming capacity) or skip it.
        memo[n][W] = max(val[n - 1] + knapsack(val, W - val[n - 1], n - 1, memo),
                         knapsack(val, W, n - 1, memo))
    else:
        # Item does not fit: skip it.
        memo[n][W] = knapsack(val, W, n - 1, memo)
    return memo[n][W]
# Time the solve and report the wall-clock duration.
t0 = time.time()
result = knapsack(val, summ, n)
print(result)
print("time taken", time.time() - t0)
"""
# 哈希表
1. 使用链表解决哈希表的冲突
2. 使用哈希表+链表实现LRU cache查询复杂度为O(1)
哈希表(Hash table,也叫散列表),是根据关键码值(Key value)而直接进行访问的数据结构。
也就是说,它通过把关键码值映射到表中一个位置来访问记录,以加快查找的速度。
这个映射函数叫做散列函数,存放记录的数组叫做散列表。
"""
import os
import logging
logger = logging.getLogger(__name__)
class Dict(object):
    """Hash table with separate chaining -- a minimal dict-like structure.

    Each bucket is a list of (key, value) pairs, so keys whose hashes
    collide chain within the same bucket.
    """
    def __init__(self, num):
        # One bucket per slot; collisions chain inside the bucket list.
        self.__table__ = []
        self.num = num  # number of buckets
        for _ in range(num):
            self.__table__.append([])
    def hash_function(self, key, num):
        """Fold a non-negative integer key, digit by digit, into [0, num).

        BUGFIX: the original left the folding loop commented out, so every
        key hashed to 0 and the entire table degenerated into one chain.
        It also returned None for negative keys, which then crashed in
        put/get with a TypeError -- now it raises a clear ValueError.
        """
        if key < 0:
            logger.info("the key is low")
            raise ValueError("key must be a non-negative integer: %r" % (key,))
        hash_val = 0
        x = key
        while x != 0:
            hash_val = (hash_val << 3) + x % 10
            x //= 10  # integer division; '/' would yield floats on Python 3
        return hash_val % num
    def put(self, key, value):
        """Insert (key, value), or update the value of an existing key."""
        i = self.hash_function(key, self.num) % self.num
        for p, (k, v) in enumerate(self.__table__[i]):
            if k == key:
                self.__table__[i][p] = (key, value)  # overwrite existing pair
                return
        self.__table__[i].append((key, value))  # new key: append to the chain
    def get(self, key):
        """Return the value stored under key; raise KeyError when absent."""
        i = self.hash_function(key, self.num) % self.num
        for k, v in self.__table__[i]:
            if key == k:
                return v
        raise KeyError(key)
    def keys(self):
        """All stored keys, in bucket-then-chain order."""
        ret = []
        for table in self.__table__:
            for k, v in table:
                ret.append(k)
        return ret
    def __getitem__(self, key):
        return self.get(key)
    def __setitem__(self, key, value):
        return self.put(key, value)
class LRUCache(object):
    """LRU cache backed by a dict plus a recency-ordered key list.

    The most recently used key sits at index 0 of ``self.keys``; the least
    recently used sits at the end and is evicted first when the cache fills.
    """
    def __init__(self, size=3):
        self.size = size    # maximum number of cached entries
        self.cache = {}     # key -> value storage
        self.keys = []      # keys ordered most- to least-recently used

    def set(self, key, value):
        """Insert or update *key*, evicting the LRU entry when full."""
        if key in self.cache:
            # Existing key: just refresh its recency.
            self.keys.remove(key)
        elif len(self.keys) == self.size:
            # Cache full: drop the least recently used entry.
            evicted = self.keys.pop()
            del self.cache[evicted]
        self.keys.insert(0, key)
        self.cache[key] = value

    def get(self, key):
        """Return the cached value (refreshing recency), or None on a miss."""
        if key not in self.cache:
            return None
        self.keys.remove(key)
        self.keys.insert(0, key)
        return self.cache[key]
if __name__ == '__main__':
    # Demo: exercise the chained hash table, then show LRU eviction order.
    logging.basicConfig(format="[%(asctime)s %(filename)s: %(lineno)s] %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO,
                        filename=None,
                        filemode="a")
    obj = Dict(3)
    for i in range(5):
        obj[i] = i
    logger.info(obj[1])
    # Default LRU size is 3, so inserting six keys evicts the three oldest.
    test = LRUCache()
    test.set('a', 2)
    test.set('b', 2)
    test.set('c', 2)
    test.set('d', 2)
    test.set('e', 2)
    test.set('f', 2)
    logger.info(test.get('c'))  # None
    logger.info(test.get('b'))  # None
    logger.info(test.get('a'))  # None
    logger.info(test.get('e'))  # 2
|
def func():
    """Announce which version of the surf module was imported."""
    message = "I imported v2 of surf"
    print(message)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from models import GenericTask
import json
import datetime
|
class Seller:
    """Bag of seller-account fields populated externally (e.g. by a login flow).

    NOTE(review): these are class-level annotations only -- no defaults are
    assigned, so attribute access fails until the fields are set. Storing a
    plaintext password here deserves a security review.
    """
    username: str  # login name
    password: str  # account password
    token: str     # session/authentication token
    goodsId: str   # identifier of the listed goods
    orderId:str    # identifier of the current order
"""
У вас есть 2 компании с людьми. Одна из компаний пусть будет это global_logic была поглощена компанией toshiba.
Отобразите это в коде. Учитывайте что люди с одинаковыми именами могут быть в обеих компаниях
"""
global_logic = ['John Dow', 'John Snow', 'Arya Stark']
toshiba = ['Robin Williams', 'John Snow', 'Taylor Swift']
toshiba.extend(global_logic)
global_logic.clear() # all global_logic employees were moved into toshiba, global_logic has no employees anymore
print(f'Employees in Toshiba are: {toshiba}')
print(f'Employees in Global Logic are: {global_logic}') |
import nltk
from jiwer import wer
import sys
# Validation-set prefixes per dataset name (files carry language suffixes).
evaluation_data = {'ted': 'data/translated-ted/valid',
                   'asistent': 'data/asistent-testset/asistent_testset',
                   'general': 'data/translated-general/valid'}
# Translations live under data/translated-<dataset>/.
base_translation_path = "data/translated-"
def evaluate_one(translation_path, valid_path):
    """Score one translation file against its reference with several MT metrics.

    Reads the target-language ('sl') sides of the reference and hypothesis
    files line by line, then prints BLEU, CHRF, GLEU, METEOR, NIST, RIBES
    and WER to stdout.

    NOTE(review): meteor_score is fed untokenised strings here; recent nltk
    versions require pre-tokenised input -- confirm against the installed nltk.
    """
    hypotheses = []
    references = []
    list_of_references = []
    joined_references = []
    joined_hypotheses = []
    joined_list_of_references = []
    lang_pair = 'en-sl'
    with open(valid_path + '.' + lang_pair.split('-')[1], 'r',
              encoding='utf-8') as references_file:
        with open(translation_path + '.' + lang_pair + '.' + lang_pair.split('-')[1], 'r',
                  encoding='utf-8') as hypotheses_file:
            for r, h in zip(references_file, hypotheses_file):
                # r[:-1] / h[:-1] strip the trailing newline.
                list_of_references.append([r[:-1].split()])
                references.append(r[:-1].split())
                hypotheses.append(h[:-1].split())
                joined_references.append(r[:-1])
                joined_hypotheses.append(h[:-1])
                joined_list_of_references.append([r[:-1]])
    print('BLEU:', nltk.translate.bleu_score.corpus_bleu(list_of_references, hypotheses))
    print("CHRF:", nltk.translate.chrf_score.corpus_chrf(references, hypotheses))
    print("GLEU:", nltk.translate.gleu_score.corpus_gleu(list_of_references, hypotheses))
    # Sentence-level METEOR averaged over the corpus.
    meteor_score = sum([nltk.translate.meteor_score.meteor_score(r, jh) for r, jh in
                        zip(joined_list_of_references, joined_hypotheses)]) / len(joined_hypotheses)
    print("METEOR:", meteor_score)
    print("NIST:", nltk.translate.nist_score.corpus_nist(list_of_references, hypotheses))
    try:
        print("RIBES:", nltk.translate.ribes_score.corpus_ribes(list_of_references, hypotheses))
    except ZeroDivisionError:
        # RIBES can divide by zero on degenerate hypothesis sets.
        print("RIBES undefined")
    print("WER:", wer(joined_references, joined_hypotheses))
def evaluate_dataset(model, dataset):
    """Evaluate *model* on *dataset*, iterating epochs or configs as required."""
    print("Evaluating dataset", dataset)
    translation_path = base_translation_path + dataset
    if model == 'pretrained':
        evaluate_one(translation_path + '/translated-pretrained',
                     evaluation_data[dataset])
        return
    if model == 'general':
        # One result directory per fine-tuning epoch (1..8).
        for epoch in range(1, 9):
            print("Evaluating epoch", epoch)
            results_path = translation_path + '/results' + str(epoch)
            evaluate_one(results_path + "/translated-general", results_path + "/valid")
        return
    if model == 'domain':
        # One result directory per domain-adaptation configuration (1..3).
        for conf in range(1, 4):
            print("Evaluating configuration", conf)
            results_path = translation_path + '/results_ted_conf' + str(conf)
            evaluate_one(results_path + "/translated-ted", results_path + "/valid")
if __name__ == '__main__':
    # CLI: argv[1] = model name; optional argv[2] = dataset name (or 'domain').
    model = sys.argv[1]
    print("Evaluating model", model)
    if len(sys.argv) > 2:
        if sys.argv[2]=='domain':
            # 'domain' as the dataset argument forces the TED dataset.
            evaluate_dataset(model, 'ted')
        else:
            evaluate_dataset(model, sys.argv[2])
    else:
        # No dataset given: evaluate each model's default dataset list.
        if model == 'pretrained' or model == 'domain':
            for dataset in ['asistent', 'ted']:
                evaluate_dataset(model, dataset)
        elif model == 'general':
            for dataset in evaluation_data:
                evaluate_dataset(model, dataset)
|
import xml.etree.ElementTree as ET
import os
import math
# Walk the annotation folder and floor every bounding-box coordinate to an
# integer, rewriting each XML file.
cwd = '/media/zs/wisin_linkdata/mk/shadowk/Annotations/'
# newcwd = 'newAnnotations/'
# NOTE(review): newcwd equals cwd, so the annotations are overwritten in place.
newcwd = '/media/zs/wisin_linkdata/mk/shadowk/Annotations/'
for path,d,filelist in os.walk(cwd):
    for xmlname in filelist:
        if xmlname.endswith('xml'):
            oldname = os.path.join(path,xmlname)
            # tree = ET.parse('/home/jianchao/Downloads/rBgODFuQ1s-ARPEFAVZHYGZrMS0490/Annotations/'+str(aaa)+'.xml')
            tree = ET.parse(oldname)
            root = tree.getroot()
            for xmin in root.iter('xmin'):
                xmin.text = str(math.floor(float(xmin.text)))
                a=xmin.text
                print(a)
            for xmax in root.iter('xmax'):
                xmax.text = str(math.floor(float(xmax.text)))
                b=xmax.text
                print(b)
            for ymin in root.iter('ymin'):
                ymin.text = str(math.floor(float(ymin.text)))
                c = ymin.text
                print(c)
            # NOTE(review): `d` below shadows the os.walk directory list above;
            # harmless here because `d` is never reused, but worth renaming.
            for ymax in root.iter('ymax'):
                ymax.text = str(math.floor(float(ymax.text)))
                d = ymax.text
                print(d)
            # for name in root.iter('name'):
            #     name.text = '@'
            #     e=name.text
            #     print(e)
            tree.write(newcwd + xmlname, encoding="utf-8", xml_declaration=True)
|
"""
当父类方法无法满足子类需要时,就可以重写父类方法
如何重写?
就是在子类中重新定义一个和字类同名的方法并且实现它
"""
class Dog:
    """Base class demonstrating a method that subclasses can override."""
    def bark(self):
        # Generic bark (output string kept verbatim).
        print("汪汪。。。")
class Xiaotianquan(Dog):
    """Subclass that overrides bark() and adds its own fly() behaviour."""
    def bark(self):
        # Overrides Dog.bark -- the parent's bark no longer satisfies this subclass.
        print("叫的和神一样")
    def fly(self):
        print("i can fly。。。")
# Demo: the overridden bark() is dispatched, not Dog's version.
xtq = Xiaotianquan()
xtq.bark()
from fractions import Fraction
from math import factorial
import random
import itertools
def cross(A, B):
    """Set of all concatenations of one item of A with one item of B (Cartesian product)."""
    return {a + b for a, b in itertools.product(A, B)}
def combos(items, n):
    """All n-item combinations of *items*, each joined into one space-separated string."""
    return set(map(' '.join, itertools.combinations(items, n)))
def escolha(n, c):
    """Number of ways to choose c items from a list of n items (binomial coefficient)."""
    numerator = factorial(n)
    denominator = factorial(c) * factorial(n - c)
    return numerator // denominator
def P(evento, espaco):
    """Probability of *evento* given a sample space of equiprobable outcomes.

    evento: a collection of outcomes, or a predicate over outcomes.
    espaco: a set of outcomes, or a ProbDist of {outcome: frequency} pairs.
    """
    if callable(evento):
        # A predicate is first materialised into the outcomes it accepts.
        evento = tal_que(evento, espaco)
    if isinstance(espaco, ProbDist):
        return sum(espaco[resultado] for resultado in espaco if resultado in evento)
    return Fraction(len(evento & espaco), len(espaco))
def tal_que(predicado, espaco):
    """Outcomes of *espaco* for which *predicado* is true.

    Returns a subset for a set space, or a ProbDist of the surviving
    {outcome: frequency} entries for a ProbDist space.
    """
    if isinstance(espaco, ProbDist):
        filtrado = {o: espaco[o] for o in espaco if predicado(o)}
        return ProbDist(filtrado)
    return {o for o in espaco if predicado(o)}
class ProbDist(dict):
    """A probability distribution: a mapping {outcome: probability}."""
    def __init__(self, mapping=(), **kwargs):
        self.update(mapping, **kwargs)
        total = sum(self.values())
        if total != 0:
            # Normalise raw frequencies so the probabilities sum to 1.
            for resultado in self:
                self[resultado] = self[resultado] / total
                assert self[resultado] >= 0
def joint(A, B, sep=''):
    """Joint distribution of two independent probability distributions.

    The result contains every entry of the form {a + sep + b: P(a) * P(b)}.
    """
    combined = {a + sep + b: A[a] * B[b]
                for a in A
                for b in B}
    return ProbDist(combined)
## predicados
def soma_eh_primo(r):
    """True if the sum of the outcomes in r is prime."""
    return eh_primo(sum(r))

def eh_primo(n):
    """Trial-division primality test.

    Divisors only need checking up to sqrt(n): any factor above it pairs
    with one below. The original scanned all of range(2, n) -- O(n) per
    call -- with identical results.
    """
    return n > 1 and not any(n % i == 0 for i in range(2, int(n ** 0.5) + 1))

def eh_par(n):
    """True if n is even."""
    return n % 2 == 0
|
from nastran.analysis import AnalysisModel
class BCType:
    """A boundary-condition type: SPC component digits plus a description."""
    def __init__(self, label, ids, desc):
        self.label = label  # one-letter code, e.g. 'S'
        self.ids = ids      # constrained DOF digits, e.g. '123' ('' = free)
        self.desc = desc    # human-readable description
class PanelBC:
    """Boundary conditions applied to the four edges of a panel."""
    def __init__(self, bcs, label):
        self.bcs = bcs      # BCType per edge, in edge order
        self.label = label  # concatenated edge labels, e.g. 'SSCF'
    def get_bc_ids(self):
        """DOF-digit strings of each edge's boundary condition, in edge order."""
        return [edge.ids for edge in self.bcs]
# Registry of supported edge boundary conditions, keyed by one-letter label.
BCTYPES = {
    'S': BCType('S', '123', 'Simply Supported'),
    'C': BCType('C', '123456', 'Clamped'),
    'F': BCType('F', '', 'Free'),
    'V': BCType('V', '3', 'Vertical Constrained'),
}
def generate_bc_cases(labels_lst):
    """Map each index to the PanelBC case built from its label string."""
    return dict(enumerate(map(generate_bc_case, labels_lst)))
def generate_bc_case(labels):
    """Build a PanelBC from a 4-character label string, one BCType per edge.

    labels pattern -> '1234' -> 'SSCF'
    ------------------- ^ y
    V |       F       | |
    -> |      (4)      | --->
    -> |               | x
    -> | S(1)     (2)S |
    -> |               |
       |      (3)      |
       |       C       |
    -------------------
    """
    return PanelBC([BCTYPES[c] for c in labels], labels)
def create_spcs_and_subcases(analysis: AnalysisModel, cases, nodes, subcase_class):
    """Create SPC1 cards for every BC case and a subcase referencing each one."""
    for label, spc in create_spcs(analysis, cases, nodes):
        sub_config = {
            'LABEL': label,
            'SPC': spc.conid,
        }
        # The SPC set id doubles as the subcase id.
        analysis.create_subcase_from_dict(subcase_class, spc.conid, sub_config)
def create_global_case(analysis: AnalysisModel, spc):
    # FIXME(review): subcase_class, i and sub_config are undefined here --
    # calling this raises NameError. The intended global-case wiring needs to
    # be supplied; see create_spcs_and_subcases for the analogous pattern.
    analysis.set_global_case_from_dict(subcase_class, i, sub_config)
def create_spcs(analysis: AnalysisModel, spcs_dict, nodes):
    """Yield (label, SPC1 card) for each BC case, allocating one SID per case.

    Edges whose BC constrains no DOFs (comp == '', i.e. free) are skipped.
    """
    for i, spcs in spcs_dict.items():
        spc_id = analysis.idutil.get_next_sid()  # one SPC set id per case
        for comp, nds in zip(spcs.get_bc_ids(), nodes):
            if comp == '':
                continue
            else:
                yield (spcs.label, analysis.model.add_spc1(spc_id, comp, nds, comment=spcs.label))
def create_springs(analysis, nodes):
    """Attach grounded CBUSH springs along each edge of the panel.

    For every boundary grid a new grid is created, offset by its edge's
    direction vector, and connected to the original grid with a CBUSH
    element referencing the shared PBUSH property.
    """
    nid = analysis.idutil.get_next_node_id()
    eid = analysis.idutil.get_next_element_id()
    pid = 10 # analysis.idutil.get_next_pid()
    # Axial stiffness only (K1 = 1000); zero damping.
    analysis.model.add_pbush(pid, [1000., 0., 0., 0., 0., 0.], [0.]*6, [0.]*6)
    # Outward offset direction per edge: -x, +x, -y, +y.
    dvec = [[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0]]
    for nds, vec in zip(nodes, dvec):
        for grid in nds:
            g = analysis.model.add_grid(nid, analysis.model.nodes[grid].xyz + vec)
            analysis.model.add_cbush(eid, pid, [grid, g.nid], [0., 0., 1.], None)
            nid += 1
            eid += 1
    # NOTE(review): `g` here is only the LAST grid created above, so a single
    # grounded node is SPC'd into each existing SPC set -- confirm whether
    # every new spring grid should be constrained instead.
    for k in analysis.model.spcs.keys():
        analysis.model.add_spc1(k, '123456', g.nid, comment='spring fixed')
    nid += 1
|
import numpy as np
import matplotlib.pyplot as plt
import sys, os
import keras
import tensorflow
from keras.models import Sequential, Model, model_from_json
from keras.layers import Input, Dense, Activation
import h5py
import random
import pprint
import pickle
import sklearn
# Pre-fitted sklearn transformer used to (un)scale image outputs.
scalerfile = 'transformer_frontend_y_imgs.sav'
# SECURITY: pickle.load executes arbitrary code from the file -- only load
# trusted, locally-produced scaler files.
# BUGFIX: the original passed open(...) inline and never closed the handle.
with open(scalerfile, 'rb') as _scaler_fh:
    transformer_y = pickle.load(_scaler_fh)
class SurrogateModel:
    """
    Keras surrogate neural network loaded from a self-describing HDF5 file.

    Example Usage:
    Load model and use a dictionary of inputs to evaluate the NN.
    """
    def __init__(self, model_file = None
                 ):
        # Save init
        self.model_file = model_file
        # Run control
        self.configure()
    def __str__(self):
        # Summary text depends on the 'type' attribute read from the h5 file.
        if self.type == "scalar":
            s = f'''The inputs are: {', '.join(self.input_names)} and the outputs: {', '.join(self.output_names)}'''
        elif self.type == "image":
            s = f'''The inputs are: {', '.join(self.input_names)} and the output is an image of LPS.'''
        return s
    def configure(self):
        """Load architecture, weights and scaling metadata from the h5 file."""
        ## Open the File
        with h5py.File(self.model_file, 'r') as h5:
            attrs = dict(h5.attrs)
            # Every h5 attribute (type, JSON, scales, offsets, ...) becomes
            # an instance attribute.
            self.__dict__.update(attrs)
            self.json_string = self.JSON
            self.model = model_from_json(self.json_string.decode("utf-8"))
            self.model.load_weights(self.model_file)
            ## Set basic values needed for input and output scaling
            self.model_value_max = attrs['upper']
            self.model_value_min = attrs['lower']
            #print(self.output_scales)
            #print(self.output_offsets)
            if self.type == "image":
                # The last scale/offset entry belongs to the image; the
                # remaining entries are the scalar outputs.
                self.image_scale = self.output_scales[-1]
                self.image_offset = self.output_offsets[-1]
                self.output_scales = self.output_scales[:-1]
                self.output_offsets = self.output_offsets[:-1]
    def scale_inputs(self, input_values):
        """Map raw inputs into the model's normalised input range."""
        data_scaled=self.model_value_min+((input_values-self.input_offsets)*
                    (self.model_value_max-self.model_value_min)/self.input_scales)
        return data_scaled
    def scale_outputs(self, output_values):
        """Map raw outputs into the model's normalised output range."""
        data_scaled=self.model_value_min+((output_values-self.output_offsets)*
                    (self.model_value_max-self.model_value_min)/self.output_scales)
        return data_scaled
    def scale_image(self, image_values):
        """Scale image values via the module-level pickled transformer."""
        #data_scaled = 2*((image_values/self.image_scale)-self.image_offset)
        data_scaled=transformer_y.transform(image_values)
        return data_scaled
    def unscale_image(self,image_values):
        """Invert scale_image via the module-level pickled transformer."""
        #data_scaled = (((image_values/2)+self.image_offset)*self.image_scale)
        data_scaled=transformer_y.inverse_transform(image_values)
        return data_scaled
    def predict(self, input_values):
        """Scale inputs, run the NN, and unscale the scalar outputs."""
        inputs_scaled = self.scale_inputs(input_values)
        predicted_outputs = self.model.predict(inputs_scaled)
        predicted_outputs_unscaled = self.unscale_outputs(predicted_outputs)
        return predicted_outputs_unscaled
    def predict_image(self, input_values, plotting = True):
        """Run the NN and split the flat output into image pixels and limits.

        NOTE(review): the first self.ndim[0] outputs are treated as image
        extent/limits, the remainder as flattened pixels -- confirm against
        the training pipeline.
        """
        inputs_scaled = self.scale_inputs(input_values)
        predicted_outputs = self.model.predict(inputs_scaled)
        predicted_outputs_limits = self.unscale_outputs(predicted_outputs[:,:self.ndim[0]])
        predicted_outputs_image = self.unscale_image(predicted_outputs[:,self.ndim[0]:])
        #predicted_outputs_unscaled = np.concatenate((predicted_outputs_limits, predicted_outputs_image), axis = 1)
        #predicted_outputs_unscaled = predicted_outputs
        return predicted_outputs_image, predicted_outputs_limits
    def unscale_inputs(self, input_values):
        """Invert scale_inputs."""
        data_unscaled=(((input_values-self.model_value_min)*
                       (self.input_scales)/(self.model_value_max-self.model_value_min)) + self.input_offsets)
        return data_unscaled
    def unscale_outputs(self, output_values):
        """Invert scale_outputs."""
        data_unscaled=(((output_values-self.model_value_min)*
                       (self.output_scales)/(self.model_value_max-self.model_value_min)) + self.output_offsets)
        return data_unscaled
    def evaluate(self, settings):
        """Evaluate a scalar NN from a {input_name: value} dict.

        NOTE(review): the message mentions '.evaluateImage' but the actual
        method is evaluate_image.
        """
        if self.type == "image":
            print('To evaluate an image NN, please use the method .evaluateImage(settings).')
            output = 0
        else:
            # Order the inputs exactly as the model expects them.
            vec = np.array([[settings[key] for key in self.input_ordering]])
            model_output = self.predict(vec)
            output = dict(zip(self.output_ordering, model_output.T))
        return output
    def evaluate_image(self, settings, position_scale = 10E6):
        """Evaluate an image NN; returns (2-D image array, extent limits)."""
        vec = np.array([[settings[key] for key in self.input_ordering]])
        model_output, extent = self.predict_image(vec)
        output = model_output.reshape((int(self.bins[0]),int(self.bins[1])))
        return output, extent
    def evaluate_image_array(self, settings, position_scale = 10e6):
        """Like evaluate_image but returns the flat (unreshaped) image array."""
        vec = np.array([[settings[key] for key in self.input_ordering]])
        output, extent = self.predict_image(vec)
        return output, extent
    def generate_random_input(self):
        """Draw one random value per input, uniform within its allowed range."""
        values = np.zeros(len(self.input_ordering))
        for i in range(len(self.input_ordering)):
            values[i] = random.uniform(self.input_ranges[i][0], self.input_ranges[i][1])
        return dict(zip(self.input_ordering, values.T))
    def random_evaluate(self):
        """Evaluate the model once on a random input (smoke test)."""
        individual = self.generate_random_input()
        if self.type == "scalar":
            random_eval_output = self.evaluate(individual)
        else:
            random_eval_output, extent = self.evaluate_image(individual)
            print("Output Generated")
            print(extent)
        return random_eval_output
|
#encoding=UTF8
'''
典型的工厂模式,通过函数名直接调用实例
使用函数名调用函数实例用getattr()
'''
import global_setting
from conf import shell_get
def get_cpu():
    """Return CPU info gathered by the shell helper (also echoes it, Python 2)."""
    data=shell_get.get_cpu()
    print data
    return data
def get_load():
    """Return load-average info gathered by the shell helper (also echoes it)."""
    data=shell_get.get_load()
    print data
    return data
|
import pandas as pd

# Sample records for two frames sharing the same columns.
l1 = [{'name': 'John', 'job': "teacher"},
      {'name': 'Nate', 'job': "student"},
      {'name': 'Fred', 'job': "developer"}]
l2 = [{'name': 'Ed', 'job': "dentist"},
      {'name': 'Jack', 'job': "farmer"},
      {'name': 'Ted', 'job': "designer"}]
df1 = pd.DataFrame(l1, columns=['name', 'job'])
df2 = pd.DataFrame(l2, columns=['name', 'job'])
# Row-wise concatenation, discarding the original indexes.
result_row = pd.concat([df1,df2], ignore_index=True)
print(result_row)
print()
# BUGFIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
# pandas 2.0; pd.concat is the supported equivalent and yields the same frame.
result_row2 = pd.concat([df1, df2], ignore_index=True)
print(result_row2)
print()
l3 = [{'name': 'John', 'job': "teacher"},
      {'name': 'Nate', 'job': "student"},
      {'name': 'Jack', 'job': "developer"}]
l4 = [{'age': 25, 'country': "U.S"},
      {'age': 30, 'country': "U.K"},
      {'age': 45, 'country': "Korea"}]
df1 = pd.DataFrame(l3, columns=['name', 'job'])
df2 = pd.DataFrame(l4, columns=['age', 'country'])
# Column-wise concatenation (axis=1); existing labels are kept.
result_column = pd.concat([df1,df2], axis=1, ignore_index=False)
print(result_column)
print()
__author__ = 'apple'
import os
from osgeo import ogr
# Open a shapefile read-only and report its feature and layer counts (Python 2 / GDAL).
daShapefile = r"/Users/apple/PycharmProjects/Shapefile/Shapefile/rice_ne-sim.shp"  # Path Your Shapefile
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(daShapefile, 0)  # 0 means read-only. 1 means writeable.
# Check to see if shapefile is found.
if dataSource is None:
    print 'Could not open %s' % (daShapefile)
else:
    print 'Opened %s' % (daShapefile)
    layer = dataSource.GetLayer()
    featureCount = layer.GetFeatureCount()
    print "Number of features in %s: %d" % (os.path.basename(daShapefile),featureCount)
    # Layer count lives on the data source, not on an individual layer.
    layercount = dataSource.GetLayerCount()
    print "Number of layer in %s: %d" % (os.path.basename(daShapefile), layercount)
from main.activity.activity_login import *
from main.page.people.pe_people import *
from main.page.setting.pe_user import *
from main.page.setting.pe_user_notif import *
from selenium import webdriver
import time
import unittest
class TestEditPeople(unittest.TestCase):
    """Selenium UI tests for the People/settings pages on the beta site.

    NOTE(review): credentials below are hard-coded test accounts; keep them
    out of production configuration.
    """
    _site = "beta"
    # test-account credentials, keyed by role
    dict_user = {
        "email_buyer" : "tkpd.qc+35@gmail.com",
        "password_buyer" : "8delapan",
        "email_seller" : "tkpd.qc+36@gmail.com",
        "password_seller" : "8delapan"
    }
    def setUp(self):
        """Start Firefox and wire up the page objects used by the tests."""
        # self.driver = webdriver.Chrome("C:\driver\chromedriver")
        self.driver = webdriver.Firefox()
        self.peoplePage = peoplePage(self.driver)  # people page object on the Firefox driver
        self.driver.implicitly_wait(10)
        self.login = LoginPage(self.driver)
        self.header = HeaderPage(self.driver)
        self.tabulasi = UserSetting(self.driver)
        self.ceklist= UserNotif(self.driver)
    def test_1_edit_tab_1_personal_profile(self):
        """Log in as the buyer, open the settings notification tab, toggle notifications."""
        print ("TEST #1 : Halaman People")
        driver = self.driver
        self.login.open(self._site)
        self.login.do_login(self.dict_user['email_buyer'], self.dict_user['password_buyer'])
        self.header.mouse_hover_to_user_bar()
        self.header.mouse_hover_to_setting()
        self.tabulasi.goto_notif_tab()
        self.ceklist.set_notification()
        time.sleep(2)
        ####### comment/uncomment the calls below to switch between test cases #######
        #self.peoplePage.edit_personal_profile()
        #self.peoplePage.edit_photo()
        # self.peoplePage.edit_password()
        # self.peoplePage.action_address("add", 2)
        # self.peoplePage.action_address("edit", 2)
        # self.peoplePage.action_address("delete", 2)
        # self.peoplePage.action_address("default", 2)
        # self.peoplePage.search_address()
        # self.peoplePage.choose_sorting()
        # self.peoplePage.action_bank_account("add", 2)
        # self.peoplePage.action_bank_account("edit", 2)
        # self.peoplePage.action_bank_account("delete", 2)
        # self.peoplePage.action_bank_account("default", 2)
        # self.peoplePage.set_notification()
        # self.peoplePage.set_privacy_settings()
        time.sleep(5)
    def tearDown(self):
        """Close the browser after a short grace period."""
        print("Testing akan selesai dalam beberapa saat..")
        time.sleep(5)
        self.driver.close()
if __name__ == '__main__':
    unittest.main()  # run all TestEditPeople tests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
# Function to process the raw data into model-ready form.
def process_data(X_dummy, le):
    """Transform the raw chess-game frame into numeric model features.

    Drops identifier columns, label-encodes 'eco', binarises the engine
    flags, derives the elo difference, splits the time control, and folds
    the clock time into a single number.
    NOTE(review): mutates X_dummy (inplace drops) as well as returning it.
    """
    # features to drop instantly
    features_to_drop = ['id', 'game', 'white', 'black', 'date', 'black_clock']
    X_dummy.drop(features_to_drop, axis=1, inplace=True)
    X_dummy['eco'] = pd.Series(le.transform(list(X_dummy['eco'])), index=X_dummy.index)
    # map whiteiscomp and blackiscomp to binary
    X_dummy['whiteiscomp'] = X_dummy['whiteiscomp'].map({False: 0,
                                                         True:1})
    X_dummy['blackiscomp'] = X_dummy['blackiscomp'].map({False: 0,
                                                         True:1})
    # create new feature elo_difference
    X_dummy['elo_dif'] = abs(X_dummy['white_elo'] - X_dummy['black_elo'])
    # fix the time control feature: "fixed+increment" string -> two int columns
    time_control = X_dummy['timecontrol']
    fixed_time = []
    extended_time = []
    for time in time_control:
        time_tuple = time.split('+')
        fixed_time.append(int(time_tuple[0]))
        extended_time.append(int(time_tuple[1]))
    X_dummy['fixed_time'] = pd.Series(fixed_time, index=X_dummy.index)
    X_dummy['extended_time'] = pd.Series(extended_time, index=X_dummy.index)
    # some more features to drop
    X_dummy.drop(['timecontrol', 'moves', 'white_clock'], axis=1, inplace=True)
    # fix the time feature: "HH:MM:SS" -> HH*100 + MM + SS/100 (ad-hoc encoding)
    time_period = []
    for period in X_dummy['time']:
        time_list = period.split(':')
        time_period.append((100*int(time_list[0])) + (int(time_list[1])) + (int(time_list[2])/100))
    X_dummy['total_time'] = pd.Series(time_period, index=X_dummy.index)
    # Drop the time feature
    X_dummy.drop(['time'], axis=1, inplace=True)
    return X_dummy
# map y values before fitting into model
def map_y_values(y):
    """Encode game-outcome strings as signed integer class labels."""
    outcome_codes = {
        'Black checkmated': -2, 'White resigns': 3, 'White checkmated': 2,
        'Black resigns': -3, 'White forfeits on time': 4,
        'Black forfeits by disconnection': -5,
        'Neither player has mating material': -7, 'Black forfeits on time': -4,
        'Game drawn by repetition': 1, 'Game drawn by the 50 move rule': 0,
        'Game drawn by mutual agreement': -1,
        'White forfeits by disconnection': 5,
        'Black ran out of time and White has no material to mate': 6,
        'White ran out of time and Black has no material to mate': -6,
        'Game drawn by stalemate': 7,
    }
    return y.map(outcome_codes)
# Make changes to model here
def fit_model(X, y):
    """Train and return a random-forest classifier on the prepared features."""
    clf = RandomForestClassifier(n_estimators = 33, max_features=5, min_samples_leaf=2, oob_score=True)
    clf.fit(X,y)
    print('Classifier Ready')
    return clf
# Invert the mapping before submission
def invert_mapping(y):
    """Decode integer class labels back to their game-outcome strings."""
    forward_map = {'Black checkmated':-2, 'White resigns':3, 'White checkmated':2,
                   'Black resigns':-3, 'White forfeits on time':4,
                   'Black forfeits by disconnection':-5,
                   'Neither player has mating material':-7, 'Black forfeits on time':-4,
                   'Game drawn by repetition':1, 'Game drawn by the 50 move rule':0,
                   'Game drawn by mutual agreement':-1,
                   'White forfeits by disconnection':5,
                   'Black ran out of time and White has no material to mate':6,
                   'White ran out of time and Black has no material to mate':-6,
                   'Game drawn by stalemate':7}
    inverse = {}
    for outcome, code in forward_map.items():
        inverse[code] = outcome
    return y.map(inverse)
def make_submission(path, clf, le):
    """Predict outcomes for the official test csv and write latest.csv."""
    official_data = pd.read_csv(path)
    # Work on a copy: process_data mutates its argument in place.
    temp = official_data.copy()
    temp = process_data(temp, le)
    np_results = clf.predict(temp)
    results_official = pd.Series(np_results)
    # Convert integer labels back to outcome strings for the submission file.
    results_official = invert_mapping(results_official)
    pd.DataFrame({"id": official_data['id'],
                  "commentaries": results_official}).set_index("id").to_csv('latest.csv')
if __name__ == "__main__":
df_train = pd.read_csv('raw_data/training_data.csv')
reindexed_list = ['id', 'game', 'white', 'black', 'white_elo', 'black_elo', 'white_rd',
'black_rd', 'whiteiscomp', 'blackiscomp', 'timecontrol', 'date', 'time',
'white_clock', 'black_clock', 'eco', 'plycount', 'moves',
'commentaries']
df_train = df_train.reindex(reindexed_list, axis=1)
le = preprocessing.LabelEncoder()
le.fit(list(df_train['eco']))
# df_train['eco'] = pd.Series(le.transform(list(df_train['eco'])), index=df_train.index)
# Split into testing and training set
X = df_train.iloc[:, 0:18]
y = df_train['commentaries']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)
X_train = process_data(X_train, le)
y_train = map_y_values(y_train)
m_clf = fit_model(X_train, y_train)
# make_submission("raw_data/testing_data.csv", m_clf, le)
X_test = process_data(X_test, le)
y_test = map_y_values(y_test)
results = m_clf.predict(X_test)
print(accuracy_score(y_test, results))
|
from itertools import permutations

# Largest 1-9 pandigital number that is the concatenated product of some
# integer with (1, 2, ..., k), k > 1 (Project Euler 38).
# BUGFIX: the original named its accumulator `max`, shadowing the builtin;
# it was also Python 2 (xrange / print statement) in an otherwise py3 file.
best = 0
trazeni = set('123456789')
# The seed has at most 4 digits (a 5-digit seed times 1 and 2 gives 10+ digits).
for k in range(1, 5):
    for it in permutations('123456789', k):
        s = ''.join(it)
        n, i = int(s), 2
        # Keep appending n*2, n*3, ... until the concatenation reaches 9 digits.
        while len(s) < 9:
            s, i = s + str(n * i), i + 1
        if len(s) == 9 and set(s) == trazeni and int(s) > best:
            best = int(s)
print(best)
|
# reverse an array
# using the minus index
# NOTE(review): a[-i] only produces the reversal because the values of `a`
# happen to equal 1..len(a); with any other contents this is NOT a reversal.
a = [1,2,3,4,5,6]
b = []
for i in a:
    b.append(a[-i])
print b
# string array reversed with an explicit index loop
x = ['Ankit','Jasmeet','Sandip','Ganesan','Balaji']
y = []
for i in range(len(x)):
    y.append(x[len(x) - 1- i])
print y
# string array reversed with a negative-step slice (idiomatic way)
x = ['Ankit','Jasmeet','Sandip','Ganesan','Balaji']
y = x[::-1]
print y
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import SavedBooks
# Create your views here.
class SavedBooksView(ListView):
    """List all SavedBooks records, rendered with the saves.html template."""
    model = SavedBooks
    template_name = 'saves.html'
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class CfeBindings(Package):
    """Spack package exposing clang's Python bindings from an llvm install.

    Copies the `clang` python package and libclang out of the llvm prefix
    into this package's prefix, and writes a SCRAM toolfile for it.
    """
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('llvm@6.0.1~gold+python+shared_libs',
               type='build')
    extends('python')
    def install(self, spec, prefix):
        # NOTE(review): python2.7 paths and libclang.dylib (macOS) are
        # hard-coded -- confirm for other platforms / python versions.
        install_tree( '%s/python2.7/site-packages/clang' %
                      spec['llvm'].prefix.lib,
                      '%s/python2.7/site-packages/clang' %
                      self.prefix.lib)
        install('%s/libclang.dylib' % self.spec['llvm'].prefix.lib,
                '%s/libclang.dylib' % self.prefix.lib)
    def setup_dependent_environment(self, spack_env, run_env, dspec):
        # Dependents locate the llvm install via LLVM_BASE.
        spack_env.set('LLVM_BASE', self.prefix)
    @run_after('install')
    def write_scram_toolfiles(self):
        """Emit the pyclang SCRAM toolfile pointing at this install prefix."""
        pyvers = str(self.spec['python'].version).split('.')
        pyver = pyvers[0] + '.' + pyvers[1]
        values = {}
        values['VER'] = self.spec.version
        values['PFX'] = self.spec.prefix
        values['LIB'] = self.spec.prefix.lib
        values['PYVER'] = pyver
        fname = 'pyclang.xml'
        contents = str("""<tool name="pyclang" version="${VER}">
  <client>
    <environment name="LLVM_BASE" default="${PFX}"/>
    <environment name="PYCLANG_BASE" default="${PFX}"/>
  </client>
  <runtime name="PYTHONPATH" value="${LIB}/python${PYVER}/site-packages" type="path"/>
  <use name="python"/>
</tool>""")
        write_scram_toolfile(contents, values, fname, self.spec.prefix)
|
import pyautogui
import cv2, numpy as np
from PIL import Image
import BoardSolver
# Locate the puzzle on screen via the two corner reference images.
topLeftLocation = pyautogui.locateCenterOnScreen("TopLeft.png")
bottomRightLocation = pyautogui.locateCenterOnScreen("BottomRight.png")
# 9x9 board; 0 marks a cell we have not recognised a digit in.
sudokuGrid = [[0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0],
              [0,0,0,0,0,0,0,0,0]]
print("Puzzle Location: ",topLeftLocation,bottomRightLocation)
size = (300,350)
# Screenshot just the puzzle region, then shrink it to a known size so the
# per-cell pixel offsets below hold.
im = pyautogui.screenshot(imageFilename="my_screenshot.png",region=(topLeftLocation[0],topLeftLocation[1],bottomRightLocation[0]-topLeftLocation[0],bottomRightLocation[1]-topLeftLocation[1] + 10))
im2 = Image.open("my_screenshot.png")
im2.thumbnail(size,Image.ANTIALIAS)
im2.save("my_screenshot2.png","PNG")
imageList = ["1","2","3","4","5","6","7","8","9"]
img_rgb = cv2.imread("my_screenshot2.png")
# Template-match each digit image against the board screenshot.
for index, imageIndex in enumerate(imageList):
    # Reads in the images into CV2 objects
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread(str(imageIndex) + ".png",0)
    # Width and height of the digit template.
    w,h = template.shape[::-1]
    print("Width - Height: " , w,h)
    # finds the template occurrences in the main image
    res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
    threshold = 0.825
    loc = np.where(res >= threshold)
    print(loc)
    print(loc[::-1])
    print(*loc[::-1])
    # Draw a rectangle around each match (debug visualisation).
    for pt in zip(*loc[::-1]):
        cv2.rectangle(img_rgb,pt,(pt[0] + w, pt[1] + h), (0,0,255), 2)
    # Map match pixel coordinates to board cells (cells are ~33 px apart in the thumbnail).
    for i in range(9):
        for j in range(9):
            for k, location in enumerate(loc[1]):
                if((location >= 6 + (33*i) and location <= 37 + (33*i)) and (loc[0][k] >= 1 + (33*j) and loc[0][k] <= 32 + (33*j))):
                    sudokuGrid[j][i] = index + 1
print(sudokuGrid)
cv2.imshow("output",img_rgb)
cv2.waitKey(0)
#Start input onto webpage
solvedBoard = BoardSolver.solveBoard(sudokuGrid)
# Find the first empty cell (row-major) to anchor keyboard navigation.
for i in range(9):
    try:
        firstIndex = [i,sudokuGrid[i].index(0)]
        break
    except:  # NOTE(review): bare except; a board with no empty cell leaves firstIndex unset
        pass
print(firstIndex)
pyautogui.click((topLeftLocation[0] + 25 + (31 * firstIndex[1]),topLeftLocation[1] + 25 + (31 * firstIndex[0])))
# Tab through the grid, typing solved digits into the originally-empty cells.
for rowIndex, row in enumerate(sudokuGrid):
    for columnIndex, column in enumerate(row):
        if column != 0:
            if not(rowIndex == 8 and columnIndex == 8):
                pyautogui.press("tab")
        else:
            pyautogui.press(str(solvedBoard[rowIndex][columnIndex]))
            if not(rowIndex == 8 and columnIndex == 8):
                pyautogui.press("tab")
pyautogui.press("enter")
# Cells on the live page are ~31 px apart, starting at x offset 6...
# ...and y offset 1 (see the click coordinates above).
import pandas as pd
import numpy as np
site_id = 1058
df = pd.read_csv('/Users/coralietouati/PycharmProjects/Project1/' + str(site_id) + '_risk.csv')
# Keep only rows from the first operating year.
df_filtered = df[(df['operating year'] == 1)]
percentile = df_filtered.ess_kW_savings.quantile(0.5)
# NOTE(review): applies over the FULL df but assigns onto the filtered copy;
# pandas aligns on index so only year-1 rows survive, but this also triggers
# SettingWithCopyWarning -- probably meant df_filtered.apply(...).
df_filtered['Delta_percentile'] = df.apply( lambda row: row['ess_kW_savings'] - percentile, axis=1 )
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, 2, 3],
'postTestScore': [25, 94, 57, 62, 70]}
df2 = pd.DataFrame(data, columns = ['name', 'age', 'preTestScore', 'postTestScore'])
# Affichage des info de la df
print('df2.shape')
df.head() # le debut de la base de données
df.tail() # la fin des données
print(df.columns)
# type de chaque colonne #
print(df.dtypes)
print(df.info())
print(df.describe(include='all'))
# Manipulation variable
print(df['ess_kW_savings'])
# autre methodes pas attentions aux espaces!
print(df.ess_kW_savings)
print(df[['iteration','ess_kW_savings']])
print(df['ess_kW_savings'].describe())
print(df['ess_kW_savings'].mean())
print(df['ess_kW_savings'].value_counts())
print(df['ess_kW_savings'][1])
print(df['ess_kW_savings'][1:3])
# Trier les donnees
print(df2.age)
print(df2['age'].sort_values())
print(df2['age'].argsort()) # donne l'index des valeurs triées
print(df2.sort_values(by='age').head())
# Itérations sur les variables
for col in df2.columns:
print(df2[col].dtype)
#Accès indicé aux données d'un DataFrame
# iloc permet d'utiliser les indices
print(df2.iloc[0,0])
print(df2['name'][0])
print(df2)
#valeur située en dernière ligne, première colonne --> utilisation de l'indiçage négatif
print(df2.iloc[-1,0])
print(df2.shape[0]) # nb de ligne
print(df2.shape[1]) # nb de colonne
print(df2.iloc[df.shape[0]-1,0]) # equivalent à la méthode -1
print(df2.iloc[0:5,:]) # ligne 0 à 5 et toutes les colonnes
print(df2.iloc[-2:,:])
print(df2.loc[-2:,:])
print(df2.iloc[0:5,[0,2]])
print(df2.iloc[0:5,0:3:2]) # idem
# difference between loc and iloc:
#loc gets rows (or columns) with particular labels from the index.
print(df2.iloc[0:3,0])
#iloc gets rows (or columns) at particular positions in the index (so it only takes integers)
#FILTRER
print(df2[( df2['age'] > 30 )])
print( df2.loc[ df2['age'] > 30 , :])
print( df2['age'] > 30 )
print( (df2['age'] > 30).value_counts())
colonnes = ['name','age']
print(df2.loc[ ( (df2.age > 30) & (df2.preTestScore >10) ), colonnes])
# filter a partir dune liste
d[d.ID.isin(ID_keep)]
# change value of some rows only
d.loc[ (d['week'] == 52) & (d['month'] == 1 ), 'week'] = 0
#groupby
d[d['week']==0].groupby(['day']).mean()
# fin the max of a column for a given row
min(d.loc[1:200,'day'])
#Calculs récapitulatifs - Croisement des variables
# Tableaux croisé dynamique
print(pd.crosstab( df2.age , df2.preTestScore))
df2['Result'] = np.where( df2.preTestScore > 10 , 'Valid', 'Fail') # voir np.select si plusieurs conditions
# add column: We can use DataFrame.apply to apply a function to all columns axis=0 (the default) or axis=1 rows.
df2['Total'] = df2.preTestScore.values + df2.postTestScore.values
type(df2.preTestScore)
# Ajout colonne
df2.loc[:, 'Tot'] = df2.preTestScore.values + df2.postTestScore.values
#Inverser la valeurs de deux colonnes
df2.loc[:,['Tot', 'preTestScore']] = df2[['preTestScore', 'Tot']].values
df2['Total'] = 0
print(df2)
# Boucle for avec condition
for i in range(0,int(df2.shape[0])):
if (df2.loc[i, 'postTestScore']-df2.loc[i, 'age']) >=0:
df2.loc[i, 'test'] = 1
else:
df2.loc[i, 'test'] = df2.loc[i, 'postTestScore']/df2.loc[i, 'age']
# Sommer une colonne
print(df2['preTestScore'].sum() )
# avec une condition, sum toute les colonnes, on peut choisir a la fin
print(df2[ df2['preTestScore'] > 10 ].sum())
print(df2[df2['preTestScore'] > 10].sum())[2]
print(df2.preTestScore[ df2['preTestScore'] > 10 ].sum())
print(df2['preTestScore'][ df2['preTestScore'] > 10 ].sum())
# Sumprod
sum(np.multiply(df2['age'],df2['age']))
# test
for i in df2.age:
print(i)
# Supprimer les colonnes NAN
d.dropna
#supprimer les lignes qui contiennent une chaine de caractaire
load = load[load['local_dt'].str.contains('2016-02-29') == False]
# open a database from an excel
pv = pd.read_excel("file.xlsx", sheetname = 'Summary of Results', skiprows=107)
# dowload
df.to_csv('excel.csv')
# Renomer un file
# import os
os.rename('solar1.csv', str(site_id) + '_solar.csv')
# dealing with nan values
d.isnull()
# Give value with condition without boucle for
d['Week']= np.where(d.Day>=5,'Weekend','Week')
# better way of giving a vlue bassed on other columns value
d.loc[d.ID == id, 'max'] = 'maxval' |
def get_size(w, h, d):
    """Return [surface_area, volume] of a w x h x d rectangular box."""
    surface_area = 2 * (w * h + w * d + h * d)
    volume = w * h * d
    return [surface_area, volume]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##################################################
## This script uses vaex and dask for fast subsetting of ranges from QTLtools nominal output files.
##################################################
## Author: Heini M. Natri
## Date: Nov 15 2019
## Email: heini.natri@gmail.com
##################################################
# Importing modules
import argparse
import vaex
import pandas as pd
from datetime import datetime
import dask.dataframe as dd
import dask.array as da
from dask.multiprocessing import get
#import warnings
#warnings.filterwarnings("ignore")
colnames = ["targetID", "targetChr", "targetStart", "targetEnd", "strand", "length", "distance", "dummy", "varChr", "varStart", "varEnd", "pval", "slope", "leadSNP"]
def parse_arguments():
    """Parse the command line and return (range_file, nominal_result, out_file)."""
    parser = argparse.ArgumentParser(
        description='This script uses vaex and dask for fast subsetting of ranges '
                    'from QTLtools nominal output files.')

    # One path argument per input/output file.
    parser.add_argument('--range_file', type=str, dest='range_file',
                        action='store',
                        help='A .tsv file with the regions of interest: 250kb '
                             'surrounding each significant eQTL/methylQTL')
    parser.add_argument('--nominal_result', type=str, dest='nominal_result',
                        action='store',
                        help='Nominal summary statistics from QTLtools')
    parser.add_argument('--out_file', type=str, dest='out_file',
                        action='store', help='Output file path')

    parsed = parser.parse_args()
    return (parsed.range_file, parsed.nominal_result, parsed.out_file)
def subset_regions(chunk, start, end):
    """Subset rows of a vaex chunk whose varStart lies strictly in (start, end).

    chunk -- a vaex DataFrame holding one chunk of nominal summary statistics
    start, end -- region boundaries (same units as the varStart column)
    Returns the filtered vaex DataFrame, or the string "Nothing to subset"
    when a ValueError is raised while filtering.
    """
    # This function subsets regions of interest from the nominal summary statistics
    try:
        # NOTE(review): the result of this first filter expression is discarded;
        # it appears to exist only to trigger a ValueError early — confirm it is
        # needed, since the same filter is recomputed on the next line.
        chunk[(chunk.varStart > start)]
        subsetted_regions_df = chunk[(chunk.varStart > start)]
        #subsetted_regions_df
        print(subsetted_regions_df.head())
        try:
            # Same discarded-then-recomputed pattern for the upper bound.
            subsetted_regions_df[(subsetted_regions_df.varStart < end)]
            subsetted_regions_df2 = subsetted_regions_df[(subsetted_regions_df.varStart < end)]
            print(subsetted_regions_df2.head())
        except ValueError:
            print("Nothing to subset")
            return "Nothing to subset"
    except ValueError:
        print ("Nothing to subset")
        return "Nothing to subset"
    return subsetted_regions_df2
def run_subsetting(range_file, nominal_result, out_file):
    """Stream the nominal summary statistics in chunks and keep every row that
    falls inside one of the regions listed in range_file; write the
    de-duplicated result to out_file (no header, no index).
    """
    # Reading data
    print ("Opening ranges")
    ranges = pd.read_csv(range_file, delimiter="\t", names=["chr", "pos", "start", "end"])
    #print(ranges.head())
    # Converting to dask df
    ranges = dd.from_pandas(ranges, npartitions=24)
    #print ("Opening methylQTL summary statistics as chunks")
    #meqtl_nom_chr22 = pd.read_csv("/Volumes/hnatri/Indonesian/result_30peer_3gt/methylQTL_n115_29PEER_5gtpcs_nominal1_22.txt", delimiter=" ", chunksize=100000)
    print ("Opening summary statistics as chunks")
    qtl_nom = pd.read_csv(nominal_result, sep=' ', names=colnames, chunksize=100000)
    # An empty df for all subsetted regions.
    # NOTE(review): get_chunk(1) consumes the first data row of the reader, so
    # that row never passes through the filtering loop below — confirm intent.
    #methyl_all_subsetted_regions_df = pd.DataFrame(columns=meqtl_nom_chr22.columns)
    all_subsetted_regions_pd = pd.DataFrame(columns=qtl_nom.get_chunk(1).columns)
    all_subsetted_regions_dd = dd.from_pandas(all_subsetted_regions_pd, npartitions=24)
    chunkN = 1
    # Iterating over chunks of nominal summary statistics, calling a function to subset regions of interest, appending to the final dataframe
    for chunk in qtl_nom:
        # Converting to a vaex df
        chunk = vaex.from_pandas(chunk)
        print ("Processing chunk " + str(chunkN))
        print (datetime.now(tz=None))
        #print(chunk.head())
        chunkN = chunkN+1
        for i in ranges.index:
            # NOTE(review): .loc[i, "start"].compute()[i] computes a dask
            # selection per range per chunk and re-indexes by i — this looks
            # both slow and fragile; verify it returns the intended scalar.
            start = ranges.loc[i, "start"].compute()[i]
            end = ranges.loc[i, "end"].compute()[i]
            # Small df of the subsetted region
            subsetted_regions_df = subset_regions(chunk, start, end)
            # NOTE(review): comparing a vaex DataFrame to a string with ==
            # relies on vaex not overloading __eq__ elementwise — confirm.
            if subsetted_regions_df == "Nothing to subset":
                continue
            else:
                # Converting to pandas df, converting to dask, concatenating to the large result df
                print ("Converting to pandas df, converting to dask, concatenating to the large result df")
                #print(subsetted_regions_df.head())
                subsetted_regions_pd = subsetted_regions_df.to_pandas_df()
                #print(subsetted_regions_pd.head())
                subsetted_regions_dd = dd.from_pandas(subsetted_regions_pd, npartitions=24)
                #print(subsetted_regions_dd.head())
                all_subsetted_regions_dd = dd.concat([all_subsetted_regions_dd, subsetted_regions_dd])
                print("Printing the concatenated dd")
                print(all_subsetted_regions_dd.head(npartitions=-1))
    print ("Done")
    # Converting the dask dataframe to pandas
    all_subsetted_regions_final_pd = all_subsetted_regions_dd.compute()
    # Saving to a file
    all_subsetted_regions_final_pd_nodup = all_subsetted_regions_final_pd.drop_duplicates()
    all_subsetted_regions_final_pd_nodup.to_csv(out_file, index=False, header=False)
def main():
    """CLI entry point: parse arguments, then run the subsetting pipeline."""
    range_file, nominal_result, out_file = parse_arguments()
    run_subsetting(range_file, nominal_result, out_file)


if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.6 on 2019-12-01 22:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine Routine.day as a plain IntegerField."""

    # Must run after the previous migration of the tasks app.
    dependencies = [
        ('tasks', '0025_auto_20191201_2141'),
    ]

    # Alter the 'day' field of the 'routine' model to a bare IntegerField
    # (drops whatever options the previous field definition carried).
    operations = [
        migrations.AlterField(
            model_name='routine',
            name='day',
            field=models.IntegerField(),
        ),
    ]
|
# -*- coding: utf-8 -*-
class Solution:
    def countPoints(self, rings: str) -> int:
        """Return how many rods carry all three ring colors (R, G and B).

        *rings* is a sequence of two-character tokens: a color letter
        followed by a rod digit, e.g. "B0R0G0".
        """
        rods_by_color = {"R": set(), "G": set(), "B": set()}
        # Walk color/rod pairs in lockstep over even and odd positions.
        for color, rod in zip(rings[::2], rings[1::2]):
            rods_by_color[color].add(int(rod))
        complete = rods_by_color["R"] & rods_by_color["G"] & rods_by_color["B"]
        return len(complete)
if __name__ == "__main__":
    # Sanity checks mirroring the LeetCode examples.
    solution = Solution()
    cases = [
        ("B0B6G0R6R0R6G9", 1),
        ("B0R0G0R9R0B0G0", 1),
        ("G4", 0),
    ]
    for rings, expected in cases:
        assert solution.countPoints(rings) == expected
|
#Name: Eoin Stankard
#Date: 24/04/2019
#Description: Project on the Iris Data Set
#******************************************************************************
#References:
# https://gist.github.com/curran/a08a1080b88344b0c8a7
# https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html
# https://stackoverflow.com/questions/16503560/read-specific-columns-from-a-csv-file-with-csv-module
# http://www.learningaboutelectronics.com/Articles/How-to-plot-a-graph-with-matplotlib-from-data-from-a-CSV-file-using-the-CSV-module-in-Python.php
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html
# https://jakevdp.github.io/PythonDataScienceHandbook/04.06-customizing-legends.html
#
#******************************************************************************
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
Flower = ["setosa","versicolor","virginica"]
#******************************************************************************
#Gets the maximum value for chosen species Sepal/Petals width/length
#
# maxNum(c,f,opt)
# c = Choice of 'sepal' or 'Petal' from "flowerFunct" function
# f = Species of flower
# opt = Data to be retrieved 'Sepal/Petal Width/Length'
#******************************************************************************
def maxNum(c,f,opt):
    """Print the maximum of the chosen measurement for species f.

    c   -- 's' (sepal) or 'p' (petal)
    f   -- species name as it appears in iris.csv
    opt -- menu option (1-3 -> length column, 4-6 -> width column)
    """
    readfile(c,opt,"max",f)
#******************************************************************************
#Gets the minimum value for chosen species Sepal/Petals width/length
#
# minNum(c,f,opt)
# c = Choice of 'sepal' or 'Petal' from "flowerFunct" function
# f = Species of flower
# opt = Data to be retrieved 'Sepal/Petal Width/Length'
#******************************************************************************
def minNum(c,f,opt):
    """Print the minimum of the chosen measurement for species f.

    c   -- 's' (sepal) or 'p' (petal)
    f   -- species name as it appears in iris.csv
    opt -- menu option (1-3 -> length column, 4-6 -> width column)
    """
    readfile(c,opt,"min",f)
#******************************************************************************
#Calculates the average species Sepal/Petal Width/Length
#
# avgNum(c,f,opt)
# c = Choice of 'sepal' or 'Petal' from "flowerFunct" function
# f = Species of flower
# opt = Data to be retrieved 'Sepal/Petal Width/Length'
#******************************************************************************
def avgNum(c,f,opt):
    """Print the average of the chosen measurement for species f.

    c   -- 's' (sepal) or 'p' (petal)
    f   -- species name as it appears in iris.csv
    opt -- menu option (1-3 -> length column, 4-6 -> width column)
    """
    readfile(c,opt,"avg",f)
#******************************************************************************
#This function opens a ready only copy of the irisDataSet.csv file and then
#depending on what options were picked in the previous steps it will get the
#data
#
# readfile(c,opt,x,s)
# c = Choice of 'sepal' or 'Petal'
# opt = Data to be retrieved 'Sepal/Petal Width/Length'
# x = Variable to check for data minimum ,maximum or average
# s = String for species of flower
#******************************************************************************
def readfile(c, opt, x, s):
    """Read iris.csv and print the min/max/avg of one measurement for species s.

    c   -- 's' for sepal data or 'p' for petal data
    opt -- menu option 1-6 (1-3 select the length column, 4-6 the width column)
    x   -- statistic to compute: "min", "max" or "avg"
    s   -- species name as it appears in the CSV (e.g. "setosa")

    BUG FIXES vs. the original:
    - string comparison now uses == instead of `is` (identity comparison on
      interned literals; emits SyntaxWarning on Python 3.8+ and is unreliable)
    - the minimum is no longer seeded at 10, which silently capped results
    - invalid options / empty data print "Error" and return instead of
      crashing later on an undefined column variable (NameError) or on
      division by zero.
    """
    # Map (part, option group) to the CSV column index:
    # sepal length=0, sepal width=1, petal length=2, petal width=3.
    if c == 's':
        col = 0 if opt in (1, 2, 3) else 1 if opt in (4, 5, 6) else None
    elif c == 'p':
        col = 2 if opt in (1, 2, 3) else 3 if opt in (4, 5, 6) else None
    else:
        col = None
    if col is None:
        print("Error")
        return

    names = {0: "sepal length", 1: "sepal width",
             2: "petal length", 3: "petal width"}

    # Collect the requested column for every row of the chosen species.
    values = []
    with open("iris.csv", 'r') as df:
        for row in csv.reader(df, delimiter=','):
            if s in row:
                values.append(float(row[col]))
    if not values:
        print("Error")
        return

    if x == "min":
        print(f"{s.capitalize()} minimum {names[col]} is {min(values)}")
    elif x == "max":
        print(f"{s.capitalize()} maximum {names[col]} is {max(values)}")
    elif x == "avg":
        print(f"{s.capitalize()} average {names[col]} is {sum(values)/len(values)}")
    else:
        print("Error")
#******************************************************************************
#This function is called after the user picks a species of flower,
#This will give the user two new menus
#One to choose if the would live Sepal or Petal Data
#The Second to give data options for what was selected in the first menu
#
# flowerFunct(F)
# F = Chosen species of Flower
#******************************************************************************
def flowerFunct(F):
    """Second-level interactive menu for species F.

    Lets the user pick sepal or petal data, then one of six statistics
    (min/max/avg of length or width); dispatches to maxNum/minNum/avgNum.
    F -- chosen species name ("setosa", "versicolor" or "virginica").
    NOTE(review): non-numeric input raises ValueError here; it is caught by
    the top-level try/except at the bottom of the file.
    """
    os.system('cls')  # clear the console (Windows-specific command)
    print(f"Options for {F.capitalize()}")
    print("1. Sepal Data\n2. Petal Data")
    print("3. Home\n")
    sp = input("Choice: ")
    if int(sp)==1:
        os.system('cls')
        print(f"Options for {F.capitalize()}")
        print("1. Max Sepal Length\n2. Min Sepal Length\n3. Avg Sepal Length")
        print("4. Max Sepal Width\n5. Min Sepal Width\n6. Avg Sepal Width")
        print("7. Home\n")
        option = input("Choice: ")
        # Options 1-3 are length, 4-6 are width; the raw option number is
        # forwarded so readfile() can pick the right CSV column.
        if int(option)==1:maxNum("s",F,int(option))
        elif int(option)==2:minNum("s",F,int(option))
        elif int(option)==3:avgNum("s",F,int(option))
        elif int(option)==4:maxNum("s",F,int(option))
        elif int(option)==5:minNum("s",F,int(option))
        elif int(option)==6:avgNum("s",F,int(option))
        elif int(option)==7:Init()
        else:print("Error")
    elif int(sp)==2:
        os.system('cls')
        print(f"Options for {F.capitalize()}")
        print("1. Max Petal Length\n2. Min Petal Length\n3. Avg Petal Length")
        print("4. Max Petal Width\n5. Min Petal Width\n6. Avg Petal Width")
        print("7. Home\n")
        option = input("Choice: ")
        if int(option)==1:maxNum("p",F,int(option))
        elif int(option)==2:minNum("p",F,int(option))
        elif int(option)==3:avgNum("p",F,int(option))
        elif int(option)==4:maxNum("p",F,int(option))
        elif int(option)==5:minNum("p",F,int(option))
        elif int(option)==6:avgNum("p",F,int(option))
        elif int(option)==7:Init()
        else:print("Error")
    elif int (sp)==3:Init()
    else:print("Incorrect input")
#******************************************************************************
#This function is called and will plot a graph with all species data for
#either the species sepal or data
#******************************************************************************
def plotData():
    """Scatter-plot sepal or petal measurements for all three iris species.

    Asks the user which measurement pair to plot, reads iris.csv and shows
    one colored point cloud per species (red=setosa, blue=virginica,
    green=versicolor).
    """
    os.system('cls')  # clear the console (Windows-specific command)
    x, x1, x2 = [], [], []
    y, y1, y2 = [], [], []
    print("1. Plot Sepal data for all species\n2. Plot Petal Data for all species\n3. Home")
    choice = input("Choice: ")
    if int(choice) == 1:
        plotName = "Sepal"
        rowX = 1
        rowY = 0
    elif int(choice) == 2:
        plotName = "Petal"
        rowX = 3
        rowY = 2
    elif int(choice) == 3:
        Init()
        return  # BUG FIX: previously fell through and crashed on undefined rowX/rowY
    else:
        print("Incorrect input given")
        return  # BUG FIX: same fall-through NameError on invalid input
    # NOTE(review): rowX is the width column and rowY the length column, yet
    # the axis labels below say Length on x and Width on y — confirm intent.
    with open("iris.csv", 'r') as df:
        reader = csv.reader(df, delimiter=',')
        for row in reader:
            if "setosa" in row:
                x.append(float(row[rowX]))
                y.append(float(row[rowY]))
            elif "virginica" in row:
                x1.append(float(row[rowX]))
                y1.append(float(row[rowY]))
            elif "versicolor" in row:
                x2.append(float(row[rowX]))
                y2.append(float(row[rowY]))
    plt.plot(x, y, "or")
    plt.plot(x1, y1, "ob")
    plt.plot(x2, y2, "og")
    plt.legend(('Setosa', 'Virginica', 'Versicolor'))
    plt.title(f'Plotting all species {plotName} data')
    plt.xlabel(f'{plotName} Length')
    plt.ylabel(f'{plotName} Width')
    plt.show()
#******************************************************************************
#On Running the script this is where the program will start.
#It asks the user for choice species that they would like data on
#After the user picks the species of flower that they would like data on
#the flowerFunction will be called giving the user multiple options
#
#******************************************************************************
def Init():
    """Home menu: choose a species (delegates to flowerFunct), plot all
    species (plotData), or exit."""
    os.system('cls')  # clear the console (Windows-specific command)
    print("Please Select Iris Species")
    print("1. Setosa\n2. Versicolor\n3. Virginica\n4. Plot All\n5. Exit")
    choice = int(input("Choice: "))
    if choice in (1, 2, 3):
        flowerFunct(Flower[choice - 1])
    elif choice == 4:
        plotData()
    elif choice == 5:
        print("Exit")
    else:
        print("Incorrect input value")
#******************************************************************************
#
#
#******************************************************************************
# Program entry point. Init() drives an interactive menu; invalid (non-numeric)
# menu input raises ValueError, which is reported here instead of crashing.
try:
    Init()
except Exception:  # BUG FIX: was a bare `except:`; Exception still catches bad
    # input but lets SystemExit/KeyboardInterrupt propagate normally.
    print("Exception, Incorrect input given")
import pylab as pl
import numpy as np
from scipy import ndimage
from scipy.stats import multivariate_normal
import sys

# Harris-style corner detection on an image given on the command line:
#   argv[1] = input image, argv[2] = raw response image,
#   argv[3] = min-max-normalized response image.
#img2 = pl.imread("converse2.jpg")
#img2 = pl.imread("obraz.png")
img2 = pl.imread(sys.argv[1])
s = img2.shape
# NOTE(review): the code indexes img2[i,j] as a scalar, so it assumes a
# single-channel image with float intensities — an RGB or uint8 input
# (e.g. a JPEG) would break the quantization below. Confirm input format.
print ("Min: {}, max: {}".format(np.min(img2),np.max(img2)))

# Quantize intensities to a small number of gray levels (posterization).
for i in range(0, s[0]):
    print("\rRow: {}".format(i), end=' ')
    for j in range(0, s[1]):
        img2[i,j] = 32.0*round(256.0*img2[i,j]/32.0)/256.0
img = img2
pl.imsave("polprodukt.png", img, cmap=pl.gray())
pl.imsave("sobe.png", ndimage.sobel(img), cmap=pl.gray())

# Prewitt-style derivative kernels for the x and y gradients.
kX = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
kY = np.transpose(kX)
imgX = ndimage.convolve(img, kX)
imgY = ndimage.convolve(img, kY)

# Products of gradients: the structure-tensor entries before smoothing.
imgX2 = imgX * imgX
imgY2 = imgY * imgY
imgXY = imgX * imgY

# 9x9 Gaussian window (covariance 2*I) used to smooth the tensor entries.
var = multivariate_normal(mean=[0,0], cov=[[2,0],[0,2]])
g = np.zeros((9,9), dtype=float)
for i in range(-4, 5):
    for j in range(-4, 5):
        g[i+4,j+4] = var.pdf([i,j])
imgX2g = ndimage.convolve(imgX2,g)
imgY2g = ndimage.convolve(imgY2,g)
imgXYg = ndimage.convolve(imgXY,g)

# Corner response: det(M) - k * trace(M)^2 with sensitivity k = 0.05.
k = 0.05
cim = (imgX2g * imgY2g - imgXYg**2) - k*(imgX2g + imgY2g)**2
pl.imsave(sys.argv[2],cim, cmap=pl.gray())

# Min-max normalize the response to [0, 1] (element-wise Python loop).
iMin = np.min(cim)
iMax = np.max(cim)
print("min: {}, max: {}".format(iMin,iMax))
for i in range(0, s[0]):
    for j in range(0, s[1]):
        cim[i,j] = (cim[i,j]-iMin)/(iMax - iMin)
iMin = np.min(cim)
iMax = np.max(cim)
print("min: {}, max: {}".format(iMin,iMax))
pl.imsave(sys.argv[3],cim, cmap=pl.gray())

# Threshold the normalized response and display the resulting corner mask.
result = cim > 0.4
pl.imshow(result, cmap=pl.gray())
pl.show()
#pl.imshow(imgR, cmap=pl.gray())
#pl.show()
#pl.imshow(cim, cmap=pl.gray())
#pl.show()
# pl.imshow(imgXY, cmap=pl.gray())
# pl.show()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import sys
class UnicodePython2e3(object):
    """Mixin giving models a __str__ that works on both Python 2 and 3.

    Subclasses only need to define __unicode__(); __str__ delegates to it,
    encoding the result to UTF-8 bytes on Python 2.
    """
    if sys.version_info[0] >= 3:  # Python 3: str is already unicode
        def __str__(self):
            return self.__unicode__()
    else:  # Python 2: __str__ must return bytes
        def __str__(self):
            return self.__unicode__().encode('utf8')
class Pasta(models.Model, UnicodePython2e3):
    """A folder ("pasta") grouping uploaded files, displayed by its title."""
    titulo = models.CharField(max_length=200)
    subtitulo = models.CharField(max_length=200)
    # Cover image, stored under a date-based upload path.
    imagem = models.FileField(upload_to='uploads/%Y/%m/%d/')

    def __unicode__(self):
        return self.titulo
class Arquivo(models.Model, UnicodePython2e3):
    """An uploaded file ("arquivo") belonging to a Pasta folder."""
    titulo = models.CharField(max_length=200)
    arquivo = models.FileField(upload_to='uploads/%Y/%m/%d/')
    # NOTE(review): Django >= 2.0 requires on_delete=... on ForeignKey;
    # this form only works on Django < 2.0 — confirm the target version.
    pasta = models.ForeignKey(Pasta)

    def __unicode__(self):
        return self.titulo
|
'''
zhuliwen: liwenzhu@pku.edu.cn
October 24,2019
ref: https://github.com/weiaicunzai/pytorch-cifar100
'''
from AI_homework_1 import *
import torch
def test(pth_file):
    """Evaluate a ResNet-18 checkpoint on CIFAR-10 and print top-1/top-5
    accuracy, average loss and the parameter count.

    pth_file -- path to a state_dict saved with torch.save().
    Relies on the module-level `cifar10_test_loader` built under __main__.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2])
    net.load_state_dict(torch.load(pth_file))
    net.cuda()
    print(net)
    net.eval()

    # Hoisted out of the loop: no need to rebuild the criterion per batch.
    loss_function = nn.CrossEntropyLoss()
    correct_1 = 0.0
    correct_5 = 0.0
    test_loss = 0.0

    # BUG FIX: inference wrapped in no_grad() so autograd graphs are not
    # built for every batch (saves memory and time during evaluation).
    with torch.no_grad():
        for n_iter, (image, label) in enumerate(cifar10_test_loader):
            print("iteration: {}\ttotal {} iterations".format(n_iter + 1, len(cifar10_test_loader)))
            # Variable() is deprecated; tensors can be moved to GPU directly.
            image = image.cuda()
            label = label.cuda()

            output = net(image)
            loss = loss_function(output, label)
            test_loss += loss.item()

            # Top-5 class indices per sample, most confident first.
            _, pred = output.topk(5, 1, largest=True, sorted=True)
            expanded_label = label.view(label.size(0), -1).expand_as(pred)
            correct = pred.eq(expanded_label).float()

            # compute top 5: a hit anywhere in the first five predictions
            correct_5 += correct[:, :5].sum()
            # compute top 1: a hit at rank one only
            correct_1 += correct[:, :1].sum()

    print()
    print("Top 1 Accuracy: ", correct_1 / len(cifar10_test_loader.dataset))
    print("Top 5 Accuracy: ", correct_5 / len(cifar10_test_loader.dataset))
    # NOTE(review): this divides the sum of per-batch *mean* losses by the
    # number of samples, not batches — kept as-is to preserve reported values.
    print("Average Loss:", test_loss / len(cifar10_test_loader.dataset))
    print("Parameter numbers: {}".format(sum(p.numel() for p in net.parameters())))
if __name__ == '__main__':
    # Build the CIFAR-10 test loader with the normalization statistics used
    # during training (get_test_dataloader and settings come from the
    # star-import of AI_homework_1 at the top of the file).
    cifar10_test_loader = get_test_dataloader(
        settings.CIFAR10_TEST_MEAN,
        settings.CIFAR10_TEST_STD,
        num_workers=2,
        batch_size=128,
        shuffle=True  # NOTE(review): shuffling the test set is unnecessary
    )
    # Checkpoint to evaluate.
    pth_file = 'resnet18-25-best.pth'
    test(pth_file)
|
#Leap Year
def is_leap_year(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century years,
    which must also be divisible by 400.
    """
    # Idiom fix: return the boolean expression directly instead of
    # `if cond: return True else: return False`.
    return year % 4 == 0 and (year % 400 == 0 or year % 100 != 0)
# Demonstration calls.
# NOTE(review): the boolean results are discarded — wrap in print(...)
# to actually see them when running as a script.
is_leap_year(1996)
is_leap_year(1900)
is_leap_year(2444)
|
import requests
from tqdm import tqdm
# GET /delete {"id":'int', "modifier":'str'}
# GET /update {"note_id":'int', "note":'str', "modifier":'str'}
# GET /insert {"note_id":'int', "note":'str', "modifier":'str'}
# GET /notes {"id":'int'}
# GET /search {"database_search":'str'} [database_search, column_search, table_search, notes_search]
# GET /search page_number | itemsPerPage (values == items)
# Insert -> Update -> search -> delete
|
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for the negotiators table: one STRING column per CSV field,
# in the same order as the dict produced by formatearData below. Only used by
# the (currently commented-out) WriteToBigQuery sink.
TABLE_SCHEMA = (
    #'idkey:STRING, '
    #'fecha:DATETIME, '
    'id_negociador:STRING, '
    'nombre_negociador:STRING, '
    'id_grabador:STRING, '
    'id_team_leader:STRING, '
    'nombre_team_leader:STRING, '
    'id_ejecutivo:STRING, '
    'nombre_ejecutivo:STRING, '
    'id_gerente:STRING, '
    'nombre_gerente:STRING, '
    'suboperacion:STRING, '
    'ciudad:STRING, '
    'operacion:STRING, '
    'nombre_producto:STRING, '
    'meta_gestiones_hora:STRING '
)
# ?
class formatearData(beam.DoFn):
    """Beam DoFn: parse one '|'-delimited input line into a field dict."""

    # Output keys, in the same order as the columns of the source file.
    _FIELDS = (
        'id_negociador', 'nombre_negociador', 'id_grabador', 'id_team_leader',
        'nombre_team_leader', 'id_ejecutivo', 'nombre_ejecutivo', 'id_gerente',
        'nombre_gerente', 'suboperacion', 'ciudad', 'operacion',
        'nombre_producto', 'meta_gestiones_hora',
    )

    def process(self, element):
        """Split *element* on '|' and emit a single dict keyed by _FIELDS."""
        columns = element.split('|')
        # Explicit indexing (rather than zip) preserves the original behavior
        # of raising IndexError when a line has fewer than 14 columns.
        record = {name: columns[i] for i, name in enumerate(self._FIELDS)}
        return [record]
def run():
    """Build and run the Beam pipeline that reformats the negotiators CSV.

    Reads a '|'-delimited local file, reformats each line via formatearData
    and writes the result to a local CSV. Returns a status string.
    """
    gcs_path = "gs://ct-bancolombia"  # bucket root (defined but unused below)
    gcs_project = "contento-bi"  # GCP project, used only by the commented-out BigQuery sink

    pipeline = beam.Pipeline(runner="DirectRunner")

    # lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT")
    # Read the source file, skipping the header row.
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText("archivos/ASIGNACION_NEGOCIADORES_CONTENTO.csv", skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData()))
    # lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
    transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_negociadores", file_name_suffix='.csv',shard_name_template='')
    # transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/prejuridico/info_carga_banco_prej",file_name_suffix='.csv',shard_name_template='')
    # BigQuery sink kept for reference:
    # transformed | 'Escritura a BigQuery Bancolombia' >> beam.io.WriteToBigQuery(
    #     gcs_project + ":bancolombia_admin.prejuridico",
    #     schema=TABLE_SCHEMA,
    #     create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
    #     write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
    # )
    # transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
    # 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])

    jobObject = pipeline.run()
    # NOTE(review): the pipeline result is not waited on; for runners other
    # than DirectRunner consider jobObject.wait_until_finish().
    # jobID = jobObject.job_id()
    return ("Corrio sin problema")
|
"""If you run a 10 kilometer race in 43 minutes 30 seconds, what is your average time per mile? What
is your average speed in miles per hour? (Hint: there are 1.61 kilometers in a mile).
"""
km=float(input("Enter the kilometer: "))
time=float(input("enter the time in decimal: "))
op=((km/1.61)/(time/60))
print("the average speed in mph is: ",op)
|
# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.
import numpy as np
from scipy.io import loadmat
import h5py
import os
import pprint
import pandas as pd
#%%
from lib.pylabyk import zipPickle as zpkl
from lib.pylabyk.matlab2py import unpackarray, structlist2df
from lib.pylabyk.np2 import dict_shapes
#%%
def asc2str(v):
    """Decode a MATLAB-style uint8 character array into an ASCII string.

    v -- array-like of ASCII character codes (any squeezable shape).
    """
    # BUG FIX: ndarray.tostring() is deprecated (removed in NumPy 2.0);
    # tobytes() is the byte-identical replacement.
    return np.squeeze(np.array(v, dtype=np.uint8)).tobytes().decode('ascii')
#%%
def load_eye_all():
    """Load eye-tracking data for all subjects/paradigms from a MATLAB v7.3 file.

    Reads ../Data_2D/sTr/sTr_eye_all_subj_parad_py.mat via h5py and returns a
    dict mapping variable names to numpy arrays; object-dtype entries (HDF5
    references) are expanded into lists of arrays.
    """
    #%% Load data
    print(os.getcwd())
    pth = '../Data_2D/sTr/'
    nam = 'sTr_eye_all_subj_parad_py'
    file_full = pth + nam + '.mat'
    # fit0 = loadmat(file_full, struct_as_record=False)
    # v7.3 .mat files are HDF5 containers, so h5py is used instead of loadmat.
    # NOTE(review): the file handle is never closed; consider a with-statement
    # (the handle must stay open while the object references below are read).
    f = h5py.File(file_full, 'r')
    print('Loaded ' + file_full)
    #%%
    # Materialize every top-level dataset; names starting with '#' are HDF5
    # bookkeeping groups (e.g. '#refs#') and are skipped.
    d = {k:np.squeeze(np.array(f[k])) for k in f.keys() if k[0] != '#'}
    #%%
    pprint.pprint({k:d[k] for k in d.keys()})
    #%%
    # Object-dtype entries hold HDF5 references; dereference each into an array.
    for k in d.keys():
        if d[k].dtype == np.dtype('O'):
            print('Converting ' + k)
            d[k] = [np.squeeze(np.array(f[v])) for v in d[k]] # keep as lists
    print('Conversion done!')
    #%%
    return d
    #%% Saving to zpkl takes a very long time
    # pkl_file = pth + nam + '.zpkl'
    # zpkl.save(d, pkl_file)
    # print('Saved to ' + pkl_file)
    #
    # #%% Test
    # dat = zpkl.load(pkl_file)
    # dict_shapes(dat)
    #%%
|
# -*- coding: utf-8 -*-
"""
Move data from ingestion to production
"""
#################
# IMPORTS
from b2stage.apis.commons.cluster import ClusterContainerEndpoint
# from b2stage.apis.commons.endpoint import EudatEndpoint
from b2stage.apis.commons.b2handle import B2HandleEndpoint
# from restapi.rest.definition import EndpointResource
from b2stage.apis.commons.seadatacloud import \
Metadata as md, ImportManagerAPI as API
from utilities import htmlcodes as hcodes
from restapi import decorators as decorate
from restapi.flask_ext.flask_irods.client import IrodsException
# from restapi.services.detect import detector
from utilities.logs import get_logger
log = get_logger(__name__)
#################
# REST CLASS
# class Approve(EndpointResource):
class MoveToProductionEndpoint(B2HandleEndpoint, ClusterContainerEndpoint):
# def get(self, batch_id):
# log.info("Received a test HTTP request")
# # self.get_input()
# # log.pp(self._args, prefix_line='Parsed args')
# response = 'Dummy method'
# return self.force_response(response)
    def pid_production(self, imain, batch_id, data):
        """Request a PID for one file already copied into the production area.

        imain    -- irods service instance used for existence checks and irules
        batch_id -- ingestion batch id (used here only in comments/older code)
        data     -- metadata dict for one file; data[md.tid] is the temp id,
                    which doubles as the file name in the production path
        Returns the PID string, or None when the production object is missing.
        """
        # TODO: am I using the metadata of the zip file?
        temp_id = data.get(md.tid)

        # ################
        # # copy file from ingestion to production
        # src_path = self.src_paths.get(temp_id)
        # log.warning("TESTING: %s (%s)", temp_id, src_path)

        # The object must already exist in production (copied by the
        # copy_to_production container) before a PID can be requested.
        dest_path = self.complete_path(self.prod_path, temp_id)
        if not imain.is_dataobject(dest_path):
            log.error("Missing: %s", dest_path)
            return None
        # log.info("Production file path: %s", dest_path)
        # imain.icopy(src_path, dest_path)

        ################
        # irule to get PID
        pid = self.pid_request(imain, dest_path)
        log.info("Received PID: %s", pid)

        # ################
        # PID CHCECKING in IRODS METADATA (not wise for performances)
        # # irods metadata to check the PID
        # metadata, _ = imain.get_metadata(dest_path)
        # try:
        #     metadata_pid = metadata.pop('PID').strip()
        # except KeyError:
        #     error = 'Unable to generate PID: %s/%s' % (batch_id, temp_id)
        #     return self.send_errors(error, code=hcodes.HTTP_SERVER_ERROR)
        # else:
        #     if pid == metadata_pid:
        #         log.info("Confirmed PID: %s", pid)
        #     else:
        #         log.warning("PID unconfirmed?\n%s vs %s", pid, metadata_pid)

        # ################
        # # DEBUG extra metadata?
        # for key, value in metadata.items():
        #     if not key.lower().startswith('eudat'):
        #         print("Metadata:", key, value)
        # self.eudat_pid_fields

        # ################
        # Verify PID (b2handle)
        # TODO: re-enable, but use 'retry' python lib:
        # http://tenacity.readthedocs.io/en/latest/#examples
        # b2handle_output = None
        # counter = 0
        # while b2handle_output is None and counter < 5:
        #     counter += 1
        #     log.debug("b2handle pid test: n.%d" % counter)
        #     import time
        #     time.sleep(1)
        #     b2handle_output = self.check_pid_content(pid)
        # if b2handle_output is None:
        #     error = 'PID unverified: %s/%s = %s' % (batch_id, temp_id, pid)
        #     return self.send_errors(error, code=hcodes.HTTP_SERVER_ERROR)
        # else:
        #     log.verbose("PID verified (b2handle): %s", pid)
        #     log.pp(b2handle_output)

        # ################
        # # set metadata (with a prefix?)
        # metadata, _ = imain.get_metadata(dest_path)
        # log.pp(metadata)
        # # setting = False
        # for key in md.keys:
        #     if key not in metadata:
        #         value = data.get(key)
        #         args = {'path': dest_path, key: value}
        #         imain.set_metadata(**args)
        #         # setting = True
        # # if setting:
        # #     log.debug("Some metadata is set")

        # ################
        # # ALL DONE: move file from ingestion to trash
        # imain.remove(src_path)
        # log.info("Source removed: %s", src_path)
        return pid
    def copy_to_production(self, icom, batch_id, files):
        """Copy batch files into the production area via a Rancher container.

        icom     -- icommands handle whose variables carry the iRODS connection
        batch_id -- id of the batch whose files are being promoted
        files    -- list of file names to copy (space-joined into FILES)

        Launches the 'copy_zip' image on the B2HOST environment and waits for
        it to stop; any previous container with the same name is removed first.
        NOTE(review): the run() return value is ignored, so copy failures are
        not surfaced here — see the commented 'errors' lines at the end.
        """
        # Copy files from the B2HOST environment
        rancher = self.get_or_create_handle()
        batch_dir = self.get_ingestion_path()
        # Environment passed to the container: source/destination collections,
        # the file list, and the iRODS connection parameters.
        b2safe_connvar = {
            'BATCH_SRC_PATH': batch_dir,
            'BATCH_DEST_PATH': self.prod_path,
            'FILES': ' '.join(files),
            'IRODS_HOST': icom.variables.get('host'),
            'IRODS_PORT': icom.variables.get('port'),
            'IRODS_ZONE_NAME': icom.variables.get('zone'),
            'IRODS_USER_NAME': icom.variables.get('user'),
            'IRODS_PASSWORD': icom.variables.get('password'),
        }
        # log.pp(b2safe_connvar)

        # Launch a container to copy the data into B2HOST
        cname = 'copy_zip'
        cversion = '0.8'
        image_tag = '%s:%s' % (cname, cversion)
        container_name = self.get_container_name(batch_id, image_tag)
        # print(container_name)
        docker_image_name = self.get_container_image(image_tag, prefix='eudat')
        log.info("Request copy2prod; image: %s" % docker_image_name)

        # remove if exists
        rancher.remove_container_by_name(container_name)
        # launch, blocking until the container stops
        rancher.run(
            container_name=container_name, image_name=docker_image_name,
            private=True,
            wait_stopped=True,
            extras={
                'environment': b2safe_connvar,
                'dataVolumes': [self.mount_batch_volume(batch_id)],
            },
        )
        # errors = rancher.run(
        # log.pp(errors)
@decorate.catch_error(exception=IrodsException, exception_label='B2SAFE')
def post(self, batch_id):
    """Approve a batch: validate metadata, copy files to production,
    then request a PID for each file.

    Expected input JSON: {"parameters": {"pids": [ {metadata dict}, ... ]}}
    where each metadata dict must contain every key in ``md.keys``.

    Returns the input JSON with each file entry enriched with its 'pid',
    or an error response (HTTP 400) on invalid input.
    """
    # TODO: switch to list of approved files
    ################
    # 0. check parameters
    json_input = self.get_input()
    # log.pp(self._args, prefix_line='Parsed args')
    param_key = 'parameters'
    params = json_input.get(param_key, {})
    if len(params) < 1:
        return self.send_errors(
            "'%s' is empty" % param_key, code=hcodes.HTTP_BAD_REQUEST)
    key = 'pids'
    files = params.get(key, {})
    if len(files) < 1:
        return self.send_errors(
            "'%s' parameter is empty list" % key,
            code=hcodes.HTTP_BAD_REQUEST)
    # Validate every entry: must be a dict holding all mandatory metadata
    # keys, each value non-empty and within md.max_size.
    filenames = []
    for data in files:
        if not isinstance(data, dict):
            return self.send_errors(
                "File list contains at least one wrong entry",
                code=hcodes.HTTP_BAD_REQUEST)
        # print("TEST", data)
        for key in md.keys:  # + [md.tid]:
            value = data.get(key)
            error = None
            if value is None:
                error = 'Missing parameter: %s' % key
            else:
                value_len = len(value)
                if value_len > md.max_size:
                    error = "Param '%s': exceeds size %s" % (key, md.max_size)
                if value_len < 1:
                    error = "Param '%s': empty" % key
            if error is not None:
                return self.send_errors(
                    error, code=hcodes.HTTP_BAD_REQUEST)
        filenames.append(data.get(md.tid))
    ################
    # 1. check if irods path exists
    imain = self.get_service_instance(service_name='irods')
    self.batch_path = self.get_batch_path(imain, batch_id)
    log.debug("Batch path: %s", self.batch_path)
    if not imain.is_collection(self.batch_path):
        return self.send_errors(
            "Batch '%s' not enabled (or no permissions)" % batch_id,
            code=hcodes.HTTP_BAD_REQUEST)
    ################
    # 2. make batch_id directory in production if not existing
    self.prod_path = self.get_production_path(imain, batch_id)
    log.debug("Production path: %s", self.prod_path)
    obj = self.init_endpoint()
    imain.create_collection_inheritable(self.prod_path, obj.username)
    ################
    # 3. copy files from containers to production
    self.copy_to_production(obj.icommands, batch_id, filenames)
    # ################
    # # 4. check on list of files
    # self.src_paths = {}
    # for filename in filenames:
    #     src_path = self.complete_path(self.batch_path, filename)
    #     log.info("File path: %s", src_path)
    #     self.src_paths[filename] = src_path
    #     if not imain.is_dataobject(src_path):
    #         return self.send_errors(
    #             "File '%s' not in batch '%s'" % (filename, batch_id),
    #             code=hcodes.HTTP_BAD_REQUEST)
    ################
    # 4. Request PIDs
    out_data = []
    for data in files:
        # pid, error = self.pid_production(imain, batch_id, data)
        pid = self.pid_production(imain, batch_id, data)
        if pid is None:
            # BUG FIX: this branch previously logged the stale 'error'
            # variable left over from the validation loop above (always
            # None here, since the error-returning call is commented out).
            log.error("PID not obtained for: %s", data.get(md.tid))
        else:
            log.info("Obtained: %s", pid)
            data['pid'] = pid
        out_data.append(data)
    # NOTE: I could set here the pids as metadata in prod collection
    ################
    # TODO: set expiration metadata on batch zip file?
    ################
    json_input[param_key] = out_data
    # call Import manager to notify
    api = API()
    api.post(json_input)
    return json_input
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import rospy
import mavros
import time
import math
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
import tf
from sensor_msgs.msg import Joy
from std_msgs.msg import Header, Float64
from geometry_msgs.msg import PoseStamped, TwistStamped, Vector3, Quaternion, Point, PointStamped
from mavros_msgs.msg import OverrideRCIn
from mavros import setpoint as SP
from mavros_msgs.srv import CommandBool
from mavros_msgs.srv import SetMode
from mavros_msgs.srv import CommandTOL
from mavros_msgs.srv import StreamRate, StreamRateRequest
# --- Shared module state, populated by ROS callbacks and services ---
sp_timer = None        # (unused in this file — presumably reserved; verify)
sp_pose = None         # current setpoint PoseStamped, built lazily on first timer tick
mode_service = None    # SetMode service proxy (set up elsewhere — confirm)
arm_service = None     # CommandBool arming service proxy
takeoff_service = None # CommandTOL takeoff service proxy
uav1_pose = None       # latest local pose of UAV1
uav2_pose = None       # latest local pose of UAV2
curr_joy = None        # latest Joy message
joy_timer = None       # rospy.Timer driving joy_timer_callback
# Index of each logical control in Joy.axes
axes_map = {
    'roll': 0,
    'pitch': 1,
    'yaw': 3,
    'throttle': 4
}
# Per-axis gain applied to the raw joystick value
axes_scale = {
    # roll should be reversed for xbox controller
    'roll': -1.5,
    'pitch': 1.5,
    'yaw': 0.1,
    'throttle': 0.2
}
# Index of each named button in Joy.buttons
button_map = {
    'A' : 0,
    'B' : 1,
    'back': 6,
    'start': 7
}
def takeoff():
    """Switch to GUIDED mode, arm the vehicle and command a takeoff to 10 m.

    Relies on the module-level service proxies (mode_service, arm_service,
    takeoff_service) being initialised beforehand — NOTE(review): they are
    None at module load; confirm where they get assigned.
    """
    # Mode Guided (custom_mode "4" — presumably GUIDED; verify for the firmware used)
    print(mode_service(custom_mode="4"))
    # Arm
    print(arm_service(True))
    # Takeoff 10
    print(takeoff_service(altitude=10))
def pose1_callback(pose):
    """Store the newest UAV1 local pose, announcing the first message."""
    global uav1_pose
    is_first_message = uav1_pose is None
    if is_first_message:
        print('[ INFO] uav1/mavros/local_position/pose is subscribed.')
    uav1_pose = pose
def pose2_callback(pose):
    """Store the newest UAV2 local pose, announcing the first message.

    BUG FIX: the first-message notice previously printed the uav1 topic
    name (copy-paste error); it now reports the uav2 topic it subscribes to.
    """
    global uav2_pose
    if uav2_pose is None:
        print('[ INFO] uav2/mavros/local_position/pose is subscribed.')
    uav2_pose = pose
def get_axis(j, n):
    """Return the named joystick axis value, scaled by its configured gain."""
    raw_value = j.axes[axes_map[n]]
    gain = axes_scale[n]
    return raw_value * gain
def get_buttons(j, n):
    """Return the state of the named button from a Joy message."""
    idx = button_map[n]
    return j.buttons[idx]
def joy_timer_callback(event):
    """Timer tick: convert the latest joystick axes into a position/yaw
    setpoint relative to UAV1's current pose and publish it.

    :param event: rospy.TimerEvent (unused)
    """
    global uav1_pose, uav2_pose, curr_joy, sp_pose
    # Scaled stick values (see axes_map / axes_scale)
    roll = get_axis(curr_joy, 'roll')
    pitch = get_axis(curr_joy, 'pitch')
    yaw = get_axis(curr_joy, 'yaw')
    throttle = get_axis(curr_joy, 'throttle')
    # NOTE(review): namespace, ServiceProxy and publisher are re-created on
    # every tick; hoisting them to one-time setup would avoid repeated work.
    mavros.set_namespace("uav1/mavros")
    set_rate = rospy.ServiceProxy(mavros.get_topic('set_stream_rate'), StreamRate)
    rate_arg = 20.0
    try:
        set_rate(stream_id=0, message_rate=rate_arg, on_off=(rate_arg != 0))
    except rospy.ServiceException as ex:
        # NOTE(review): 'fault' is not defined in this file — confirm it is
        # provided by an imported module, otherwise this raises NameError.
        fault(ex)
    pos_pub = SP.get_pub_position_local(queue_size=10)
    if sp_pose is None:
        # First tick: initialise the setpoint to a default hover location
        # (origin at 10 m altitude, zero yaw).
        print('[ INFO] Going to default location.')
        print('[ INFO] You can use the joystick to control the drone now.')
        sp_pose = PoseStamped(header=Header(stamp=rospy.get_rostime(),frame_id="map"))
        sp_pose.header.stamp = rospy.get_rostime()
        sp_pose.pose.position = Point(0, 0, 10)
        eu = euler_from_quaternion((0, 0, 0, 1))  # value unused
        q = quaternion_from_euler(0, 0, 0)
        sp_pose.pose.orientation = Quaternion(*q)
    # When idle stop changing setpoint to prevent idle shift
    # Thresholds per control: [roll, pitch, yaw, throttle]
    idle_threshold = [.7, .7, .05, 0]
    sp_pose.header.stamp = rospy.get_rostime()
    # Current setpoint yaw (theta), used to rotate stick input into the local frame
    eu = euler_from_quaternion((sp_pose.pose.orientation.x, sp_pose.pose.orientation.y, sp_pose.pose.orientation.z, sp_pose.pose.orientation.w))
    theta = eu[2]
    x = roll
    y = pitch
    # REMEMBER THIS IS ONLY FOR LOCAL CONTROLLER
    # ONLY 2D ROTATION TRANSFORMATION FOR SIMPLICITY
    if abs(yaw) > idle_threshold[2]:
        q = quaternion_from_euler(eu[0], eu[1], eu[2] + yaw)
        sp_pose.pose.orientation = Quaternion(*q)
    # 2-D rotation of (x, y) stick input by theta, offset from UAV1's pose
    if abs(x) > idle_threshold[0] and abs(y) > idle_threshold[1]:
        #sp_pose.pose.position.x = uav1_pose.pose.position.x + x
        sp_pose.pose.position.x = uav1_pose.pose.position.x + x * math.cos(theta) - y * math.sin(theta)
        sp_pose.pose.position.y = uav1_pose.pose.position.y + x * math.sin(theta) + y * math.cos(theta)
    elif abs(x) > idle_threshold[0]:
        sp_pose.pose.position.x = uav1_pose.pose.position.x + x * math.cos(theta)
        sp_pose.pose.position.y = uav1_pose.pose.position.y + x * math.sin(theta)
    elif abs(y) > idle_threshold[1]:
        #sp_pose.pose.position.y = uav1_pose.pose.position.y + y
        sp_pose.pose.position.x = uav1_pose.pose.position.x - y * math.sin(theta)
        sp_pose.pose.position.y = uav1_pose.pose.position.y + y * math.cos(theta)
    if abs(throttle) > idle_threshold[3]:
        sp_pose.pose.position.z = uav1_pose.pose.position.z + throttle
    pos_pub.publish(sp_pose)
def joy_callback(joy):
    """Cache the latest Joy message and, once both UAV poses have been
    received, start the periodic setpoint timer."""
    global curr_joy, joy_timer
    curr_joy = joy
    if joy_timer is None and uav1_pose is not None and uav2_pose is not None:
        print('[ INFO] Joystick connected.')
        # 0.5 is the lowest delay to avoid unexpected behavior of the drone
        joy_timer = rospy.Timer(rospy.Duration(0.5), joy_timer_callback)
def main():
    """Initialise the local_controller node, wire up subscribers and spin.

    BUG FIX: removed a stray '|' artifact that had been fused onto the
    final line, which made the file a syntax error.
    """
    rospy.init_node("local_controller")
    pose1_sub = rospy.Subscriber("/uav1/mavros/local_position/pose", PoseStamped, pose1_callback)
    pose2_sub = rospy.Subscriber("/uav2/mavros/local_position/pose", PoseStamped, pose2_callback)
    joy_sub = rospy.Subscriber("joy", Joy, joy_callback)
    rospy.spin()

if __name__ == '__main__':
    main()
import logging
import time
from time import sleep
from Acspy.Clients.SimpleClient import PySimpleClient
# Keep-alive watchdog: repeatedly try to fetch the ArraySupervisor component
# and log a heartbeat every 15 seconds.
client = PySimpleClient()
supervisor = None
logging.basicConfig()
log = logging.getLogger()
# NOTE(review): this loop never terminates and re-fetches the component on
# every iteration even after success — confirm this is the intended
# keep-alive behaviour.
while True:
    try:
        supervisor = client.getComponent("ArraySupervisor")
    except Exception as e:
        log.info(' - could not get supervisor ...' + str(e))
    log.info(' - I"m alive ... ' + str(time.time()))
    sleep(15)
|
import unittest
import sys
import os
sys.path.append(os.path.join('..', 'Src'))
from Tagging import PartOfSpeechTagging
from Tokenization import TextTokenization
class TextTaggingTestCase(unittest.TestCase):
    """Tests the custom part-of-speech tagger against a known sentence pair."""

    def testTagging(self):
        """Tag a fixed sentence; expect the exact tag sequence and >90% accuracy."""
        sents = "Tom thinks John is terrible. John thinks Tom is great."
        speechTagging = PartOfSpeechTagging()
        tokenization = TextTokenization()
        # customTagging returns (accuracy, tagged_tokens) — inferred from the
        # indexing below; confirm against the Tagging module.
        trainedTag = (speechTagging.customTagging(tokenization.wordTokenize(sents)))
        trainedTagEvaluation = trainedTag[0]
        expectedTagging = [('Tom', 'NNP'), ('thinks', 'VBZ'), ('John', 'NNP'), ('is', 'VBZ'), ('terrible', 'JJ'), ('.', '.'), ('John', 'NNP'), ('thinks', 'VBZ'), ('Tom', 'NNP'), ('is', 'VBZ'), ('great', 'JJ'), ('.', '.')]
        print("Accuracy with Treebank corpus: ",trainedTagEvaluation*100)
        print(trainedTag[1])
        # Both the exact tagging and the accuracy threshold must hold.
        self.assertTrue((expectedTagging==trainedTag[1]) and (trainedTagEvaluation>0.90))
# BUG FIX: removed a stray '|' artifact fused onto the last line.
if __name__ == '__main__':
    unittest.main()
"""
A dynamic microsimulation framework";
"""
from __future__ import annotations
import typing
import datetime
import numpy as np
import numpy.typing as npt
import df # type: ignore
import mpi # type: ignore
from . import time
import stats # type: ignore
from .domain import *
date_t = datetime.datetime | datetime.date
FloatArray1d = NPFloatArray | list[float]
NPIntArray = npt.NDArray[np.int64]
IntArray1d = NPIntArray | list[int]
__all__ = [
"CalendarTimeline",
"LinearTimeline",
"Model",
"MonteCarlo",
"NoTimeline",
"NumericTimeline",
"Timeline",
"checked",
"df",
"log",
"mpi",
"run",
"stats",
"time",
"verbose"
]
class Timeline():
    """Abstract base class for all timeline types (stub declaration)."""
    def __init__(self) -> None: ...
    def __repr__(self) -> str:
        """
        Prints a human-readable representation of the timeline object
        """
    @property
    def at_end(self) -> bool:
        """
        Returns True if the current step is the end of the timeline

        :type: bool
        """
    @property
    def dt(self) -> float:
        """
        Returns the step size of the timeline

        :type: float
        """
    @property
    def end(self) -> object:
        """
        Returns the time of the end of the timeline

        :type: object
        """
    @property
    def index(self) -> int:
        """
        Returns the index of the current step in the timeline

        :type: int
        """
    @property
    def nsteps(self) -> int:
        """
        Returns the number of steps in the timeline (or -1 if open-ended)

        :type: int
        """
    @property
    def start(self) -> object:
        """
        Returns the time of the start of the timeline

        :type: object
        """
    @property
    def time(self) -> object:
        """
        Returns the time of the current step in the timeline

        :type: object
        """
    pass
class LinearTimeline(Timeline):
    """
    An equally-spaced non-calendar timeline.
    """
    @typing.overload
    def __init__(self, start: float, end: float, nsteps: int) -> None:
        """
        Constructs a timeline from start to end, with the given number of steps.
        """
    @typing.overload
    def __init__(self, start: float, step: float) -> None:
        """
        Constructs an open-ended timeline given a start value and a step size. NB the model will run until the Model.halt() method is explicitly called
        (from inside the step() method). Note also that nsteps() will return -1 for timelines constructed this way
        """
    pass
class Model():
    """
    The base model class from which all neworder models should be subclassed
    """
    def __init__(self, timeline: Timeline, seeder: typing.Callable[[int], int] = MonteCarlo.deterministic_independent_stream) -> None:
        """
        Constructs a model object with a timeline and (optionally) a seeder function for the random stream(s)
        """
    def check(self) -> bool:
        """
        User-overridable method used to check internal state at each timestep.
        Default behaviour is to simply return True.
        Returning False will halt the model run.
        This function should not be called directly, it is used by the Model.run() function

        Returns:
            True if checks are ok, False otherwise.
        """
    def finalise(self) -> None:
        """
        User-overridable function for custom processing after the final step in the model run.
        Default behaviour does nothing. This function does not need to be called directly, it is called by the Model.run() function
        """
    def halt(self) -> None:
        """
        Signal to the model to stop execution gracefully at the end of the current timestep, e.g. if some convergence criterion has been met,
        or input is required from an upstream model. The model can be subsequently resumed by calling the run() function.
        For trapping exceptional/error conditions, prefer to raise an exception, or return False from the Model.check() function
        """
    def modify(self, r: int) -> None:
        """
        User-overridable method used to modify state in a per-process basis for multiprocess model runs.
        Default behaviour is to do nothing.
        This function should not be called directly, it is used by the Model.run() function
        """
    def step(self) -> None:
        """
        User-implemented method used to advance state of a model.
        Default behaviour raises NotImplementedError.
        This function should not be called directly, it is used by the Model.run() function
        """
    @property
    def mc(self) -> MonteCarlo:
        """
        The model's Monte-Carlo engine

        :type: MonteCarlo
        """
    @property
    def timeline(self) -> Timeline:
        """
        The model's timeline object

        :type: Timeline
        """
    pass
class MonteCarlo():
    """
    The model's Monte-Carlo engine with configurable options for parallel execution
    """
    def __repr__(self) -> str:
        """
        Prints a human-readable representation of the MonteCarlo engine
        """
    def arrivals(self, lambda_: FloatArray1d, dt: float, n: int, mingap: float) -> NPFloatArray:
        """
        Returns an array of n arrays of multiple arrival times from a nonhomogeneous Poisson process (with hazard rate lambda[i], time interval dt),
        with a minimum separation between events of mingap. Sampling uses the Lewis-Shedler "thinning" algorithm
        The final value of lambda must be zero, and thus arrivals don't always occur, indicated by a value of neworder.time.never()
        The inner dimension of the returned 2d array is governed by the the maximum number of arrivals sampled, and will thus vary
        """
    def counts(self, lambda_: FloatArray1d, dt: float) -> NPIntArray:
        """
        Returns an array of simulated arrival counts (within time dt) for each intensity in lambda
        """
    @staticmethod
    def deterministic_identical_stream(r: int) -> int:
        """
        Returns a deterministic seed (19937). Input argument is ignored
        """
    @staticmethod
    def deterministic_independent_stream(r: int) -> int:
        """
        Returns a deterministic seed that is a function of the input (19937+r).
        The model uses the MPI rank as the input argument, allowing for differently seeded streams in each process
        """
    @typing.overload
    def first_arrival(self, lambda_: FloatArray1d, dt: float, n: int) -> NPFloatArray:
        """
        Returns an array of length n of first arrival times from a nonhomogeneous Poisson process (with hazard rate lambda[i], time interval dt),
        with no minimum start time. Sampling uses the Lewis-Shedler "thinning" algorithm
        If the final value of lambda is zero, no arrival is indicated by a value of neworder.time.never()
        """
    @typing.overload
    def first_arrival(self, lambda_: FloatArray1d, dt: float, n: int, minval: float) -> NPFloatArray:
        """
        Returns an array of length n of first arrival times from a nonhomogeneous Poisson process (with hazard rate lambda[i], time interval dt),
        with a minimum start time of minval. Sampling uses the Lewis-Shedler "thinning" algorithm
        If the final value of lambda is zero, no arrival is indicated by a value of neworder.time.never()
        """
    @typing.overload
    def hazard(self, p: float, n: int) -> NPFloatArray:
        """
        Returns an array of ones (with probability p) or zeros of length n
        """
    @typing.overload
    def hazard(self, p: NPFloatArray) -> NPFloatArray:
        """
        Returns an array of ones (with probability p[i]) or zeros for each element in p
        """
    @typing.overload
    def next_arrival(self, startingpoints: FloatArray1d, lambda_: FloatArray1d, dt: float) -> NPFloatArray:
        """
        Returns an array of length n of subsequent arrival times from a nonhomogeneous Poisson process (with hazard rate lambda[i], time interval dt),
        with start times given by startingpoints with a minimum offset of mingap. Sampling uses the Lewis-Shedler "thinning" algorithm.
        If the relative flag is True, then lambda[0] corresponds to start time + mingap, not to absolute time
        If the final value of lambda is zero, no arrival is indicated by a value of neworder.time.never()
        """
    @typing.overload
    def next_arrival(self, startingpoints: FloatArray1d, lambda_: FloatArray1d, dt: float, relative: bool) -> NPFloatArray:
        """
        Returns an array of length n of subsequent arrival times from a nonhomogeneous Poisson process (with hazard rate lambda[i], time interval dt),
        with start times given by startingpoints. Sampling uses the Lewis-Shedler "thinning" algorithm.
        If the relative flag is True, then lambda[0] corresponds to start time, not to absolute time
        If the final value of lambda is zero, no arrival is indicated by a value of neworder.time.never()
        """
    @typing.overload
    def next_arrival(self, startingpoints: FloatArray1d, lambda_: FloatArray1d, dt: float, relative: bool, minsep: float) -> NPFloatArray:
        """
        Returns an array of length n of subsequent arrival times from a nonhomogeneous Poisson process (with hazard rate lambda[i], time interval dt),
        with start times given by startingpoints. Sampling uses the Lewis-Shedler "thinning" algorithm.
        If the final value of lambda is zero, no arrival is indicated by a value of neworder.time.never()
        """
    @staticmethod
    def nondeterministic_stream(r: int) -> int:
        """
        Returns a random seed from the platform's random_device. Input argument is ignored
        """
    def raw(self) -> int:
        """
        Returns a random 64-bit unsigned integer. Useful for seeding other generators.
        """
    def reset(self) -> None:
        """
        Resets the generator using the original seed.
        Use with care, esp in multi-process models with identical streams
        """
    def sample(self, n: int, cat_weights: NPFloatArray) -> NPIntArray:
        """
        Returns an array of length n containing randomly sampled categorical values, weighted according to cat_weights
        """
    def seed(self) -> int:
        """
        Returns the seed used to initialise the random stream
        """
    def state(self) -> int:
        """
        Returns a hash of the internal state of the generator. Avoids the extra complexity of transmitting variable-length strings over MPI.
        """
    @typing.overload
    def stopping(self, lambda_: float, n: int) -> NPFloatArray:
        """
        Returns an array of stopping times (with hazard rate lambda) of length n
        """
    @typing.overload
    def stopping(self, lambda_: NPFloatArray) -> NPFloatArray:
        """
        Returns an array of stopping times (with hazard rate lambda[i]) for each element in lambda
        """
    def ustream(self, n: int) -> NPFloatArray:
        """
        Returns an array of uniform random [0,1) variates of length n
        """
    pass
class NoTimeline(Timeline):
    """
    An arbitrary one step timeline, for continuous-time models with no explicit (discrete) timeline
    """
    def __init__(self) -> None:
        """
        Constructs an arbitrary one step timeline, where the start and end times are undefined and there is a single step of size zero. Useful for continuous-time models
        """
    pass
class NumericTimeline(Timeline):
    """
    A custom non-calendar timeline where the user explicitly specifies the time points, which must be monotonically increasing.
    """
    def __init__(self, times: typing.List[float]) -> None:
        """
        Constructs a timeline from an array of time points.
        """
    pass
class CalendarTimeline(Timeline):
    """
    A calendar-based timeline
    """
    @typing.overload
    def __init__(self, start: date_t, end: date_t, step: int, unit: str) -> None:
        """
        Constructs a calendar-based timeline, given start and end dates, an increment specified as a multiple of days, months or years
        """
    @typing.overload
    def __init__(self, start: date_t, step: int, unit: str) -> None:
        """
        Constructs an open-ended calendar-based timeline, given a start date and an increment specified as a multiple of days, months or years.
        NB the model will run until the Model.halt() method is explicitly called (from inside the step() method). Note also that nsteps() will
        return -1 for timelines constructed this way
        """
    pass
def checked(checked: bool = True) -> None:
    """
    Sets the checked flag, which determines whether the model runs checks during execution
    """
def log(obj: object) -> None:
    """
    The logging function. Prints obj to the console, annotated with process information
    """
def run(model: Model) -> bool:
    """
    Runs the model. If the model has previously run it will resume from the point at which it was given the "halt" instruction. This is useful
    for external processing of model data, and/or feedback from external sources. If the model has already reached the end of the timeline, this
    function will have no effect. To re-run the model from the start, you must construct a new model object.

    Returns:
        True if model succeeded, False otherwise
    """
def verbose(verbose: bool = True) -> None:
    """
    Sets the verbose flag, which toggles detailed runtime logs
    """
def as_np(mc: MonteCarlo) -> np.random.Generator:
    """
    Returns an adapter enabling the MonteCarlo object to be used with numpy random functionality
    """
|
import gevent
from gevent import monkey
import requests
import time
# NOTE(review): ideally monkey.patch_all() should run before 'requests' is
# imported (it is imported above); patching is applied here before any
# network activity, which is usually sufficient.
monkey.patch_all()
# BUG FIX: 'time' is the imported module, so bare time() raised TypeError;
# use time.time() for the wall-clock timestamp.
start_time = time.time()
urls = ['http://www.dictionary.com/browse/sit', 'http://phrasefinder.io/search?corpus=eng-us&query=The cat perched']

def print_head(url):
    """Fetch *url* and print its length and the first 50 characters."""
    print('Starting %s' % url)
    data = requests.get(url).text
    print('%s: %s bytes: %r' % (url, len(data), data[:50]))

# Fetch all URLs concurrently on greenlets and wait for completion.
jobs = [gevent.spawn(print_head, _url) for _url in urls]
gevent.wait(jobs)
# BUG FIX: was 'time() - sart' — NameError ('sart') plus the module-call bug.
end_time = time.time() - start_time
from __future__ import division
from __future__ import print_function
import time
import os
import json
# Train on CPU (hide GPU) due to memory constraints
os.environ['CUDA_VISIBLE_DEVICES'] = ""
import tensorflow as tf
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
from gae.optimizer import OptimizerAE, OptimizerVAE
from gae.input_data import load_data
from gae.model import GCNModelAE, GCNModelVAE
from gae.preprocessing_gae import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges
def draw_gae_training(dataset, epochs, train_loss, train_acc, val_roc, val_ap):
    """Save three SVG training plots for a dataset.

    Writes to result/tables/: loss+accuracy (twin y-axes), validation ROC-AUC,
    and validation average precision — one point per epoch.

    :param dataset: dataset name used in the output file names
    :param epochs: number of epochs (x-axis length; must match list lengths)
    :param train_loss: per-epoch training loss values
    :param train_acc: per-epoch training accuracy values
    :param val_roc: per-epoch validation ROC-AUC values
    :param val_ap: per-epoch validation average-precision values
    """
    # plot the training loss and accuracy
    myfont = {'family': 'Times New Roman',
              'size': 13,
              }
    # Loss and accuracy share one figure via twin y-axes.
    fig = plt.figure(figsize=(4.5, 4.5), dpi=1200)
    ax1 = fig.add_subplot(111)
    ax2 = ax1.twinx()
    l1,= ax1.plot(np.arange(0, epochs), train_loss, label="train_loss")
    l2, =ax2.plot(np.arange(0, epochs), train_acc, label="train_accuracy", color='r')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('train loss')
    ax2.set_ylabel('train accuracy')
    plt.legend([l1, l2],['train_loss', 'train_accuracy'], loc="center right")
    plt.savefig("result/tables/{}_loss_accuracy.svg".format(dataset), format='svg')
    # plt.show()
    # Validation ROC-AUC curve.
    plt.figure(figsize=(4.5, 4.5), dpi=1200)
    plt.plot(np.arange(0, epochs), val_roc, label="val_auc")
    #plt.title("Training Loss and Accuracy on sar classifier")
    # plt.xticks(fontsize=12, fontweight='bold')
    # plt.yticks(fontsize=12, fontweight='bold')
    plt.xlabel("Epoch")
    plt.ylabel("Area under Curve")
    plt.legend(loc="center right")
    plt.savefig("result/tables/{}_val_roc.svg".format(dataset), format='svg')
    # plt.show()
    # Validation average-precision curve.
    plt.figure(figsize=(4.5, 4.5), dpi=1200)
    plt.plot(np.arange(0, epochs), val_ap, label="val_ap")
    # plt.title("Training Loss and Accuracy on sar classifier")
    # plt.xticks(fontsize=12, fontweight='bold')
    # plt.yticks(fontsize=12, fontweight='bold')
    plt.xlabel("Epoch")
    plt.ylabel("Average Accuracy")
    plt.legend(loc="center right")
    plt.savefig("result/tables/{}_val_ap.svg".format(dataset), format='svg')
    # plt.show()
# Settings — TF1-style command-line flags controlling training.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs',250, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 16, 'Number of units in hidden layer 2.')
flags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_string('model', 'gcn_ae', 'Model string.')
flags.DEFINE_string('dataset', 'hamster', 'Dataset string.')
flags.DEFINE_integer('datatype', 1, 'Datatype.')
flags.DEFINE_integer('features', 0, 'Whether to use features (1) or not (0).')
# Datasets to run; the full candidate list is kept for reference.
#datasets = [0, 107, 1684, 1912, 3437, 348, 3980, 414, 686, 698, 'facebook', 'twitter', 'gplus', 'hamster', 'advogato']
datasets = [348]
# Train a (variational) graph auto-encoder per dataset and record
# per-epoch loss/accuracy plus validation ROC-AUC / average precision.
for dataset_str in datasets:
    model_str = FLAGS.model
    dataset_str = str(dataset_str)
    # dataset_str = FLAGS.dataset
    dataset_type = FLAGS.datatype
    # Load data
    adj, features = load_data(dataset_str, dataset_type)
    # Store original adjacency matrix (without diagonal entries) for later
    adj_orig = adj
    adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
    adj_orig.eliminate_zeros()  # drop explicit zero entries
    # Split edges into train/validation/test (+ negative samples)
    adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj)
    adj = adj_train
    if FLAGS.features == 0:
        features = sp.identity(features.shape[0])  # featureless
    # Some preprocessing
    adj_norm = preprocess_graph(adj)
    # Define placeholders
    placeholders = {
        'features': tf.sparse_placeholder(tf.float32),
        'adj': tf.sparse_placeholder(tf.float32),
        'adj_orig': tf.sparse_placeholder(tf.float32),
        'dropout': tf.placeholder_with_default(0., shape=())
    }
    num_nodes = adj.shape[0]
    features = sparse_to_tuple(features.tocoo())
    num_features = features[2][1]
    features_nonzero = features[1].shape[0]
    # Create model
    model = None
    if model_str == 'gcn_ae':
        model = GCNModelAE(placeholders, num_features, features_nonzero)
    elif model_str == 'gcn_vae':
        model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero)
    # Re-weight positive edges (graphs are sparse) and normalise the loss.
    pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
    norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
    # Optimizer
    with tf.name_scope('optimizer'):
        if model_str == 'gcn_ae':
            opt = OptimizerAE(preds=model.reconstructions,
                              labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                          validate_indices=False), [-1]),
                              pos_weight=pos_weight,
                              norm=norm)
        elif model_str == 'gcn_vae':
            opt = OptimizerVAE(preds=model.reconstructions,
                               labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                           validate_indices=False), [-1]),
                               model=model, num_nodes=num_nodes,
                               pos_weight=pos_weight,
                               norm=norm)
    # Initialize session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    cost_val = []
    acc_val = []

    def get_roc_score(edges_pos, edges_neg, emb=None):
        """Score link prediction: ROC-AUC and average precision over the
        given positive/negative edge sets, using embeddings *emb* (computed
        from the current session/feed_dict when not supplied)."""
        if emb is None:
            feed_dict.update({placeholders['dropout']: 0})
            emb = sess.run(model.z_mean, feed_dict=feed_dict)

        def sigmoid(x):
            return 1 / (1 + np.exp(-x))
        # Predict on test set of edges: inner products of embeddings.
        adj_rec = np.dot(emb, emb.T)
        preds = []
        pos = []
        for e in edges_pos:
            preds.append(sigmoid(adj_rec[e[0], e[1]]))
            pos.append(adj_orig[e[0], e[1]])
        preds_neg = []
        neg = []
        for e in edges_neg:
            preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
            neg.append(adj_orig[e[0], e[1]])
        preds_all = np.hstack([preds, preds_neg])
        labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])
        roc_score = roc_auc_score(labels_all, preds_all)
        ap_score = average_precision_score(labels_all, preds_all)
        return roc_score, ap_score

    cost_val = []
    acc_val = []
    val_roc_score = []
    # Label matrix includes self-loops.
    adj_label = adj_train + sp.eye(adj_train.shape[0])
    adj_label = sparse_to_tuple(adj_label)
    filetime = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    filename = model_str + "_" + dataset_str + "_" + filetime
    resultfile = 'result/' + filename + '.txt'
    # resultjson = 'result/'+filename+'.json'
    with open(resultfile, 'w+') as result:
        # Train model
        Epoch = []
        train_loss = []
        train_acc = []
        val_roc = []
        val_ap = []
        times = []
        for epoch in range(FLAGS.epochs):
            t = time.time()
            # Construct feed dictionary
            feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})
            # Run single weight update
            outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)
            # Compute average loss
            avg_cost = outs[1]
            avg_accuracy = outs[2]
            roc_curr, ap_curr = get_roc_score(val_edges, val_edges_false)
            val_roc_score.append(roc_curr)
            # Log to both stdout and the result file.
            print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
                  "train_acc=", "{:.5f}".format(avg_accuracy), "val_roc=", "{:.5f}".format(val_roc_score[-1]),
                  "val_ap=", "{:.5f}".format(ap_curr),
                  "time=", "{:.5f}".format(time.time() - t))
            print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
                  "train_acc=", "{:.5f}".format(avg_accuracy), "val_roc=", "{:.5f}".format(val_roc_score[-1]),
                  "val_ap=", "{:.5f}".format(ap_curr),
                  "time=", "{:.5f}".format(time.time() - t), file=result)
            Epoch.append(epoch + 1)
            train_loss.append(avg_cost)
            train_acc.append(avg_accuracy)
            val_roc.append(val_roc_score[-1])
            val_ap.append(ap_curr)
            times.append(time.time() - t)
        draw_gae_training(dataset_str, FLAGS.epochs, train_loss, train_acc, val_roc, val_ap)
        print("Optimization Finished!")
        # train_log = model.fit_generator(
        #     train_generator,
        #     steps_per_epoch=1, #batch_size,
        #     epochs=FLAGS.epochs,
        #     validation_data=validation_generator,
        #     validation_steps=1 # batch_size
        # )
        # Final evaluation on the held-out test edges.
        roc_score, ap_score = get_roc_score(test_edges, test_edges_false)
        print('Test ROC score: ' + str(roc_score))
        print('Test AP score: ' + str(ap_score))
        # print("Optimization Finished!", file=result)
        # print('Test ROC score: ' + str(roc_score), file=result)
        # print('Test AP score: ' + str(ap_score), file=result)
        gae_result = {}
        gae_result['Epoch'] = Epoch
        gae_result['train_loss'] = train_loss
        gae_result['train_acc'] = train_acc
        gae_result['val_roc'] = val_roc
        gae_result['val_ap'] = val_ap
        # BUG FIX: previously stored the 'time' *module* here instead of the
        # per-epoch timing list, which would also break json serialization.
        gae_result['time'] = times
        # json.dump(gae_result, result, indent=4)
        # NOTE: the redundant result.close() was removed — the 'with' block
        # closes the file on exit.
# with open(resultjson, 'w') as f:
#
# gae_result = json.load(f)
# train_loss = gae_result['train_loss']
# train_acc = gae_result['train_acc']
# val_roc = gae_result['val_roc']
# val_ap = gae_result['val_ap']
# # plot the training loss and accuracy
# plt.style.use("ggplot")
# plt.figure()
# plt.plot(np.arange(0, FLAGS.epochs), train_loss, label="train_loss")
# plt.plot(np.arange(0, FLAGS.epochs), train_acc, label="train_acc")
# plt.plot(np.arange(0, FLAGS.epochs), val_roc, label="val_roc")
# plt.plot(np.arange(0, FLAGS.epochs), val_ap, label="val_ap")
# plt.title("Training Loss and Accuracy on sar classifier")
# plt.xlabel("Epoch #")
# plt.ylabel("Loss/Accuracy")
# plt.legend(loc="upper right")
# plt.savefig("Loss_Accuracy_alexnet_{:d}e.jpg".format(FLAGS.epochs))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import sys
import subprocess
import cgruutils
def processMovie( i_file):
    """Probe a movie file with the 'mediainfo' CLI and return its video info.

    :param i_file: path of the input movie file
    :return: dict with 'infile' and, on success, 'mediainfo' = {'video': {...}}
             (numeric fields converted to int/float); on failure, 'error'
             (and raw 'data' for JSON parse failures).
    """
    out = dict()
    out['infile'] = i_file
    if not os.path.isfile( out['infile']):
        out['error'] = 'Input file does not exist.'
        return out

    # Map output keys to mediainfo field names.
    params = {}
    params['bitdepth'] = 'BitDepth'
    params['chromasubsampling'] = 'ChromaSubsampling'
    params['codec'] = 'Codec'
    params['colorspace'] = 'ColorSpace'
    params['fps'] = 'FrameRate'
    params['frame_count'] = 'FrameCount'
    params['height'] = 'Height'
    params['width'] = 'Width'

    # Build an --inform template so mediainfo emits a JSON object directly.
    inform = ''
    for key in params:
        if len( inform): inform += ','
        inform += '"%s":"%%%s%%"' % ( key, params[key])
    inform = '--inform=Video;{' + inform + '}'

    data = subprocess.check_output(['mediainfo', inform, out['infile']])
    data = cgruutils.toStr( data)
    inform = None
    try:
        inform = json.loads( data)
    # BUG FIX: narrowed from a bare 'except:' — only JSON decoding errors
    # (ValueError / json.JSONDecodeError) are expected here.
    except ValueError:
        inform = None
        out['data'] = data
        out['error'] = 'JSON load error'
        return out

    # Convert numeric-looking string values to int, else try float.
    for key in inform:
        if inform[key].isdigit():
            inform[key] = int(inform[key])
        else:
            try:
                inform[key] = float( inform[key])
            # BUG FIX: narrowed from a bare 'except:' to conversion errors.
            except (TypeError, ValueError):
                pass

    # Return this object:
    out['mediainfo'] = {'video':inform}
    return out
if __name__ == '__main__':
    # CLI entry point: require exactly one input-file argument.
    if len(sys.argv) < 2:
        print('Input file is not specified.')
        sys.exit(1)
    # Probe the file and pretty-print the resulting dict as JSON.
    print( json.dumps( processMovie( sys.argv[1]), indent=4))
|
#!/usr/bin/python
# TODO:
# Send multiple ping requests for each neighbor and then take the average
# Update the return message format
import sys
import os
import subprocess
from neighbors import my_neighbors
from process_topology import ip_prefix2site_prefix
# The interest name encodes the site, e.g. "/<site>/script/...".
interest = sys.argv[1]
site = interest.split("/script")[0]
neighbors = my_neighbors[site]

result = ""
for dest_ip_prefix in neighbors:
    destination = ip_prefix2site_prefix[dest_ip_prefix]
    # universal_newlines=True makes communicate() return text, so the
    # 'Avg' substring test below also works under Python 3 (stdout is
    # bytes otherwise).
    proc = subprocess.Popen("ndnping -c 3 " + destination,
                            stdout=subprocess.PIPE, shell=True,
                            universal_newlines=True)
    (out, err) = proc.communicate()
    if 'Avg' in out:
        # The ndnping summary line ends ".../<avg>/<max> ms"; the
        # second-to-last "/" field is the average RTT.
        result += site + ":" + dest_ip_prefix + "+" + out.split("/")[-2] + "&"

# Write the result file directly instead of shelling out to `echo`,
# which was fragile with quoting and open to shell injection.
with open('/tmp/ndn_result.txt', 'w') as result_file:
    result_file.write(result + " \n")
|
import unittest
from katas.kyu_7.greatest_common_divisor import mygcd
class MyGCDTestCase(unittest.TestCase):
    """Checks mygcd against known greatest-common-divisor values."""

    def test_equals(self):
        # Small inputs with a non-trivial common divisor.
        self.assertEqual(6, mygcd(30, 12))

    def test_equals_2(self):
        # Coprime inputs.
        self.assertEqual(1, mygcd(8, 9))

    def test_equals_3(self):
        # Degenerate case.
        self.assertEqual(1, mygcd(1, 1))

    def test_equals_4(self):
        self.assertEqual(1, mygcd(74634, 73865))

    def test_equals_5(self):
        self.assertEqual(846, mygcd(10927782, 6902514))

    def test_equals_6(self):
        self.assertEqual(4, mygcd(1590771464, 1590771620))
|
from TxtVenture.tools.implementations import *
from TxtVenture.generic.worldspace import WorldSpace
class TestRoom(WorldSpace):
    """A test room to play around with."""

    def __init__(self, id, temp):
        super(TestRoom, self).__init__(id, temp)

    def main(self):
        # Describe the room and its contents to the player.
        respond(self.description)
        respond(self.space_item_descriptions)
        # Keep handling input until query() reports a truthy (exit) state.
        while not self.query():
            pass
        self.temp.current_room = None
|
import sys
import os
import shutil
import subprocess
import functools
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/families')
sys.path.insert(0, 'tools/trees')
import experiments as exp
import fam
import create_random_tree
from ete3 import Tree
import re
def treat_amalgamation(amalgamation, datadir, family, authorized_species):
    """Import one amalgamation file into the family directory structure.

    Reads the first tree of the amalgamation, skips families whose tree is
    unreadable or has fewer than 4 leaves, copies the amalgamation into
    place and writes the species->genes mapping file.

    :param amalgamation: path to the input amalgamation file.
    :param datadir: dataset root directory.
    :param family: family name (used for the output sub-directories).
    :param authorized_species: collection of valid species labels.
    """
    # Close the input deterministically (the original leaked the handle).
    with open(amalgamation) as reader:
        reader.readline()  # skip first comment
        line = reader.readline()
    print(family)
    try:
        tree = Tree(line)
    except Exception:
        # Narrower than a bare except: does not swallow KeyboardInterrupt.
        print("failed to read tree, skipping " + amalgamation)
        return
    if (len(tree.get_leaf_names()) < 4):
        # Too few leaves to be useful.
        return
    fam.init_family_directories(datadir, family)
    exp.mkdir(fam.get_amalgamation_dir(datadir, family))
    output_mapping_file = fam.get_mappings(datadir, family)
    output_amalgamation = fam.get_amalgamation(datadir, family, "true", "true")
    shutil.copy(amalgamation, output_amalgamation)
    # Group gene names by their species prefix ("SPECIES_gene").
    species_to_genes = {}
    for gene in tree.get_leaf_names():
        species = gene.split("_")[0]
        assert(species in authorized_species)
        species_to_genes.setdefault(species, []).append(gene)
    with open(output_mapping_file, "w") as writer:
        for species in species_to_genes:
            genes = species_to_genes[species]
            writer.write(species + ":" + ";".join(genes) + "\n")
def generate(amalgamations_dir, species_tree_path, datadir):
    """Build a dataset directory from a directory of amalgamation files.

    :param amalgamations_dir: directory with one amalgamation per family.
    :param species_tree_path: path to the reference species tree (newick).
    :param datadir: output dataset root.
    """
    fam.init_top_directories(datadir)
    true_species_tree = fam.get_species_tree(datadir)
    # Keep the species in a set for O(1) membership tests inside
    # treat_amalgamation (the original left a dead `set()` assignment
    # and then used a plain list).
    authorized_species = set(Tree(species_tree_path, format=1).get_leaf_names())
    shutil.copy(species_tree_path, true_species_tree)
    for f in os.listdir(amalgamations_dir):
        family = f.split(".")[0]
        amalgamation = os.path.join(amalgamations_dir, f)
        treat_amalgamation(amalgamation, datadir, family, authorized_species)
    fam.postprocess_datadir(datadir)
if (__name__ == "__main__"):
    # Usage: python <script> amalgamations_dir species_tree output_dir
    if (len(sys.argv) != 4):
        print("Syntax: python " + os.path.basename(__file__) + " amalgamations_dir species_tree output_dir")
        exit(1)
    generate(sys.argv[1], sys.argv[2], sys.argv[3])
|
# -*- coding: utf-8 -*-
# Copyright 2013-2020 The Wazo Authors (see the AUTHORS file)
# SPDX-License-Identifier: GPL-3.0-or-later
# Shared plugin globals, populated by the provisioning framework's
# execfile_() helper from common.py (provides the base plugin classes).
common_globals = {}
execfile_('common.py', common_globals)

# Phone model name -> firmware version provisioned by this plugin.
MODEL_VERSIONS = {
    u'CP920': u'78.84.0.125',
    u'CP960': u'73.84.0.25',
    u'T19P_E2': u'53.84.0.125',  # 53.84.0.90 version does not support YDMP and YMCS
    u'T21P_E2': u'52.84.0.125',  # 52.84.0.90 version does not support YDMP and YMCS
    u'T23P': u'44.84.0.140',  # 44.84.0.90 version does not support YDMP and YMCS
    u'T23G': u'44.84.0.140',  # 44.84.0.90 version does not support YDMP and YMCS
    u'T27G': u'69.84.0.125',
    u'T40P': u'54.84.0.125',  # 54.84.0.90 version does not support YDMP and YMCS
    u'T40G': u'76.84.0.125',  # 76.84.0.90 version does not support YDMP and YMCS
    u'T41S': u'66.84.0.125',
    u'T42S': u'66.84.0.125',
    u'T46S': u'66.84.0.125',
    u'T48S': u'66.84.0.125',
    u'T52S': u'70.84.0.70',
    u'T53': u'95.84.0.125',
    u'T53W': u'95.84.0.125',
    u'T54S': u'70.84.0.70',
    u'T54W': u'96.84.0.125',
    u'T57W': u'97.84.0.125',
    u'T58': u'58.84.0.25',
}

# (provisioning config file, firmware image to serve, template) triples;
# the y0000000000NN.cfg names follow Yealink's per-model convention.
COMMON_FILES = [
    ('y000000000044.cfg', u'T23-44.84.0.140.rom', 'model.tpl'),
    ('y000000000069.cfg', u'T27G-69.84.0.125.rom', 'model.tpl'),
    ('y000000000052.cfg', u'T21P_E2-52.84.0.125.rom', 'model.tpl'),
    ('y000000000053.cfg', u'T19P_E2-53.84.0.125.rom', 'model.tpl'),
    ('y000000000054.cfg', u'T40-54.84.0.125.rom', 'model.tpl'),
    ('y000000000076.cfg', u'T40G-76.84.0.125.rom', 'model.tpl'),
    ('y000000000065.cfg', u'T46S(T48S,T42S,T41S)-66.84.0.125.rom', 'model.tpl'),
    ('y000000000066.cfg', u'T46S(T48S,T42S,T41S)-66.84.0.125.rom', 'model.tpl'),
    ('y000000000067.cfg', u'T46S(T48S,T42S,T41S)-66.84.0.125.rom', 'model.tpl'),
    ('y000000000068.cfg', u'T46S(T48S,T42S,T41S)-66.84.0.125.rom', 'model.tpl'),
    ('y000000000095.cfg', u'T53W(T53)-95.84.0.125.rom', 'model.tpl'),
    ('y000000000070.cfg', u'T54S(T52S)-70.84.0.70.rom', 'model.tpl'),
    ('y000000000096.cfg', u'T54W-96.84.0.125.rom', 'model.tpl'),
    ('y000000000097.cfg', u'T57W-97.84.0.125.rom', 'model.tpl'),
    ('y000000000058.cfg', u'T58-58.84.0.25.rom', 'model.tpl'),
    ('y000000000078.cfg', u'CP920-78.84.0.125.rom', 'model.tpl'),
    ('y000000000073.cfg', u'CP960-73.84.0.25.rom', 'model.tpl'),
]
class YealinkPlugin(common_globals['BaseYealinkPlugin']):
    # Marks this class as a loadable provisioning plugin.
    IS_PLUGIN = True

    # Associates phone models with the firmware versions declared above.
    pg_associator = common_globals['BaseYealinkPgAssociator'](MODEL_VERSIONS)

    # Yealink plugin specific stuff
    _COMMON_FILES = COMMON_FILES
|
# -*- coding: utf-8 -*-
import importlib
from django.dispatch import receiver
from django.conf import settings
from django.test.signals import setting_changed
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
# Sentinel marking the helpers module cache as not-yet-imported.
uninitialized = object()
_form_helper_module = uninitialized


class ChloroformHelper(FormHelper):
    """Crispy-forms helper that appends a translated Submit button."""

    def __init__(self, *args, **kw):
        super(ChloroformHelper, self).__init__(*args, **kw)
        submit_button = Submit('submit', _('Submit'))
        self.add_input(submit_button)
class ChloroformTagHelper(ChloroformHelper):
    """Helper used by template tags; currently identical to
    ChloroformHelper but kept separate so it can be overridden
    independently via the helpers module."""
    pass
@receiver(setting_changed)
def on_setting_changed(sender, setting, **kw):
    """Drop the cached helpers module when its setting changes (tests)."""
    global _form_helper_module
    if setting != 'CHLOROFORM_HELPERS_MODULE':
        return
    _form_helper_module = uninitialized
def get_form_helper_module():
    """Return the configured helpers module, importing it on first use.

    While the CHLOROFORM_HELPERS_MODULE setting is unset, the cache keeps
    the sentinel value and the lookup is retried on every call.
    """
    global _form_helper_module
    if _form_helper_module is not uninitialized:
        return _form_helper_module
    module_name = getattr(settings, 'CHLOROFORM_HELPERS_MODULE', None)
    if module_name is not None:
        _form_helper_module = importlib.import_module(module_name)
    return _form_helper_module
class FormHelperGetterMixin(object):
    """Mixin resolving which crispy FormHelper class a view should use."""

    # Subclasses must point this at a helper class; the helpers module may
    # override it with a class of the same name.
    form_helper_class = None

    def get_form_helper_class(self):
        """Return the override from the helpers module, else the default."""
        if self.form_helper_class is None:
            raise ImproperlyConfigured(u'Missing form_helper_class attribute on class {}'.format(self.__class__))
        module = get_form_helper_module()
        try:
            # Same-named attribute on the configured module wins.
            return getattr(module, self.form_helper_class.__name__)
        except AttributeError:
            return self.form_helper_class

    def get_form_helper(self, form=None):
        """Instantiate the resolved helper class, bound to *form*."""
        kwargs = self.get_form_helper_kwargs()
        kwargs.setdefault('form', form)
        return self.get_form_helper_class()(**kwargs)

    def get_form_helper_kwargs(self):
        """Extra constructor kwargs for the helper; override as needed."""
        return {}
class FormHelperMixin(FormHelperGetterMixin):
    """View mixin exposing a 'form_helper' variable in template context."""

    def get_context_data(self, **kw):
        context = super(FormHelperMixin, self).get_context_data(**kw)
        # Bind the helper to the form the view already put in context.
        context['form_helper'] = self.get_form_helper(context.get('form'))
        return context
|
#!/usr/bin/env python
# script for drawing algorithm result
import argparse
from math import ceil, floor
import random
import warnings
# drawing shapes
import turtle as t
# save turtle to file
import Tkinter as tk
# Command-line options for the board-layout drawing script.
arg_parser = argparse.ArgumentParser(
    description='Draw layout of small boards on big board.')
arg_parser.add_argument('-f', '--file', metavar='FILE', type=str, nargs=1,
                        default=['../build/output.txt'],
                        help='name of input file')
arg_parser.add_argument('-s', '--scale', metavar='SC', type=float, nargs=1,
                        default=[0.25], help='scale of printout')
args = arg_parser.parse_args()

# Scale factor applied to every coordinate of the printout.
scale = args.scale[0]
class Board:
    """A small board placed (possibly rotated) on the big board."""

    def __init__(self, idx, width, height, x, y, rotated):
        self.idx = idx
        # Geometry is stored pre-scaled for drawing.
        self.width = floor(width * scale)
        self.height = floor(height * scale)
        self.x = floor(x * scale)
        self.y = floor(y * scale)
        self.rotated = rotated

    def draw(self, turtle, color):
        """Draw this board as a filled rectangle with the given turtle."""
        if self.x < 0 or self.y < 0:
            # A negative position means the solver did not place the board.
            print("Board %d will not be cut" % self.idx)
            return
        print("Cutting board %d" % self.idx)
        turtle.penup()
        turtle.setpos(self.x, self.y)
        turtle.color('black', color)
        turtle.pendown()
        turtle.begin_fill()
        # Swap the edge order when the board is rotated by 90 degrees.
        if self.rotated:
            edges = [self.height, self.width] * 2
        else:
            edges = [self.width, self.height] * 2
        print("Position (%d, %d) width %d height %d" % (self.x/scale, self.y/scale, edges[0]/scale, edges[1]/scale))
        for edge in edges:
            turtle.forward(edge)
            turtle.left(90)
        turtle.end_fill()
def main():
    """Read the layout file and draw the big board plus each placed board."""
    fileName = args.file[0]
    with open(fileName, 'r') as file:
        # First line of the solver output is the total surface of boards.
        total_surface = file.readline()
        print("Total surface of boards is %s" % total_surface)
        # Big-board dimensions, scaled for the printout.
        WIDTH, HEIGHT = ceil((2800)*scale), ceil((2070)*scale)
        turtle = t.Turtle()
        turtle.hideturtle()
        turtle.speed(0)
        turtle.penup()
        turtle_screen = turtle.getscreen()
        turtle_screen.setup(width = WIDTH + 50, height = HEIGHT + 50, startx = None, starty = None)
        # World coordinates flip the y axis so the origin is top-left.
        turtle_screen.setworldcoordinates(llx = 0, lly = HEIGHT + 10, urx = WIDTH + 10, ury = 0)
        # Fill colors picked at random for the small boards.
        color_list = [
            '#c77373',
            '#b1e369',
            '#329f7b',
            '#d6b474',
            '#926a6a'
        ]
        # draw board
        turtle.penup()
        turtle.setpos(0, 0)
        turtle.color('black', '#dbb4a0')
        turtle.pendown()
        turtle.begin_fill()
        edges = [ WIDTH, HEIGHT, WIDTH, HEIGHT]
        for edge in edges:
            turtle.forward(edge)
            turtle.left(90)
        turtle.end_fill()
        idx = 1;
        # Remaining lines: one board per line as "w h x y rotated".
        for line in file.readlines():
            line_split = line.split()
            if len(line_split) == 5:
                [w, h, x, y, r] = map(int, line_split)
                b = Board(idx, w, h, x, y, r)
                color = random.choice(color_list)
                b.draw(turtle, color)
                turtle_screen.update()
                idx = idx + 1
        #turtle_screen.tracer(True)
        # Save the drawing to an EPS file and wait for a click to close.
        turtle_screen.getcanvas().postscript(file="board.eps", colormode='color')
        turtle_screen.exitonclick()
        #turtle.done()

if __name__ == '__main__':
    main()
|
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
# Create your views here.
def index(request):
    """Render the personal-site landing page."""
    return render(request, "personal/index.html")
def contact(request):
    """Render the contact page with a greeting and an email address."""
    context = {"content": ["Hi", "vitor@gmail.com"]}
    return render(request, "personal/basic.html", context)
# Copyright (c) 2015 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from oslo_concurrency import lockutils
import six
from neutron.agent.l2 import agent_extension
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron import manager
@six.add_metaclass(abc.ABCMeta)
class QosAgentDriver(object):
    """Defines stable abstract interface for QoS Agent Driver.

    QoS Agent driver defines the interface to be implemented by Agent
    for applying QoS Rules on a port.
    """

    @abc.abstractmethod
    def initialize(self):
        """Perform QoS agent driver initialization."""

    @abc.abstractmethod
    def create(self, port, qos_policy):
        """Apply QoS rules on port for the first time.

        :param port: port object.
        :param qos_policy: the QoS policy to be applied on port.
        """

    #TODO(QoS) we may want to provide default implementations of calling
    #delete and then update
    @abc.abstractmethod
    def update(self, port, qos_policy):
        """Apply QoS rules on port.

        :param port: port object.
        :param qos_policy: the QoS policy to be applied on port.
        """

    @abc.abstractmethod
    def delete(self, port, qos_policy):
        """Remove QoS rules from port.

        :param port: port object.
        :param qos_policy: the QoS policy to be removed from port.
        """
class QosAgentExtension(agent_extension.AgentCoreResourceExtension):
    """Agent core resource extension that applies QoS policies to ports."""

    # RPC resource types this extension consumes.
    SUPPORTED_RESOURCES = [resources.QOS_POLICY]

    def initialize(self, connection, driver_type):
        """Perform Agent Extension initialization.

        :param connection: RPC connection used to register push consumers.
        :param driver_type: provider name of the QoS driver to load from
            the 'neutron.qos.agent_drivers' namespace.
        """
        self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
        self.qos_driver = manager.NeutronManager.load_class_for_provider(
            'neutron.qos.agent_drivers', driver_type)()
        self.qos_driver.initialize()
        # we cannot use a dict of sets here because port dicts are not hashable
        # Maps qos_policy_id -> {port_id: port dict}.
        self.qos_policy_ports = collections.defaultdict(dict)
        # Port ids a policy has already been applied to.
        self.known_ports = set()
        registry.subscribe(self._handle_notification, resources.QOS_POLICY)
        self._register_rpc_consumers(connection)

    def _register_rpc_consumers(self, connection):
        # Subscribe to versioned push notifications for supported resources.
        endpoints = [resources_rpc.ResourcesPushRpcCallback()]
        for resource_type in self.SUPPORTED_RESOURCES:
            # we assume that neutron-server always broadcasts the latest
            # version known to the agent
            topic = resources_rpc.resource_type_versioned_topic(resource_type)
            connection.create_consumer(topic, endpoints, fanout=True)

    @lockutils.synchronized('qos-port')
    def _handle_notification(self, qos_policy, event_type):
        # server does not allow to remove a policy that is attached to any
        # port, so we ignore DELETED events. Also, if we receive a CREATED
        # event for a policy, it means that there are no ports so far that are
        # attached to it. That's why we are interested in UPDATED events only
        if event_type == events.UPDATED:
            self._process_update_policy(qos_policy)

    @lockutils.synchronized('qos-port')
    def handle_port(self, context, port):
        """Handle agent QoS extension for port.

        This method applies a new policy to a port using the QoS driver.
        Update events are handled in _handle_notification.
        """
        port_id = port['port_id']
        qos_policy_id = port.get('qos_policy_id')
        if qos_policy_id is None:
            # Port has no policy (anymore): clear previously applied rules.
            self._process_reset_port(port)
            return
        #Note(moshele) check if we have seen this port
        #and it has the same policy we do nothing.
        if (port_id in self.known_ports and
                port_id in self.qos_policy_ports[qos_policy_id]):
            return
        self.qos_policy_ports[qos_policy_id][port_id] = port
        self.known_ports.add(port_id)
        # Pull the full policy object from the server and apply it.
        qos_policy = self.resource_rpc.pull(
            context, resources.QOS_POLICY, qos_policy_id)
        self.qos_driver.create(port, qos_policy)

    def delete_port(self, context, port):
        # Forget the port and remove its rules.
        self._process_reset_port(port)

    def _process_update_policy(self, qos_policy):
        # Re-apply an updated policy to every port currently using it.
        for port_id, port in self.qos_policy_ports[qos_policy.id].items():
            # TODO(QoS): for now, just reflush the rules on the port. Later, we
            # may want to apply the difference between the rules lists only.
            self.qos_driver.delete(port, None)
            self.qos_driver.update(port, qos_policy)

    def _process_reset_port(self, port):
        # Remove the port from the tracking maps and drop its rules via the
        # driver; a no-op for ports we have never seen.
        port_id = port['port_id']
        if port_id in self.known_ports:
            self.known_ports.remove(port_id)
            for qos_policy_id, port_dict in self.qos_policy_ports.items():
                if port_id in port_dict:
                    del port_dict[port_id]
                    self.qos_driver.delete(port, None)
                    return
|
import werobot
from werobot.session.filestorage import FileStorage
import sys
# WeChat bot that answers every text message with a maintenance notice.
robot = werobot.WeRoBot(token='ZeroAI')

@robot.text
def hello(message):
    # Reply text (Chinese): "Under maintenance; expected back at 23:00
    # Beijing time, 2022-12-11."
    return '维护中,预计北京时间2022-12-11 23点恢复。'

# Listen on all interfaces, standard HTTP port; run() blocks here.
robot.config['HOST'] = '0.0.0.0'
robot.config['PORT'] = 80
robot.run()
import pandas_datareader.data as webdata
from datetime import date
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from scipy import fftpack
from matplotlib.dates import DateFormatter
from matplotlib.dates import DayLocator
from matplotlib.dates import MonthLocator
from dateutil.relativedelta import relativedelta
# Fetch one year of daily QQQ quotes from Yahoo Finance.
today = date.today()
start = today- relativedelta(years=1)
quotes = webdata.get_data_yahoo("QQQ", start, today)
# NOTE(review): get_data_yahoo returns a DataFrame whose *index* holds the
# dates; np.array() drops the index, so column 0 below is likely a price
# column rather than dates --- verify against the DataFrame's columns.
quotes = np.array(quotes)
dates = quotes.T[0]
qqq = quotes.T[4]
# Remove the linear trend from the signal.
y = signal.detrend(qqq)
alldays = DayLocator()
months = MonthLocator()
month_formatter = DateFormatter("%b %Y")
fig = plt.figure()
fig.subplots_adjust(hspace=.3)
ax = fig.add_subplot(211)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(month_formatter)
# Increase the tick-label font size.
ax.tick_params(axis='both', which='major', labelsize='x-large')
# Apply the Fourier transform to obtain the signal's spectrum.
amps = np.abs(fftpack.fftshift(fftpack.rfft(y)))
# Filter out noise: zero any frequency component whose magnitude is below
# 10% of the strongest component.
amps[amps < 0.1*amps.max()] = 0
# Transform the filtered signal back to the time domain and plot it
# together with the detrended signal.
plt.plot(dates, y,'o', label='detrended')
plt.plot(dates, -fftpack.irfft(fftpack.ifftshift(amps)),label="filtered")
# Format the x-axis labels as dates and add an extra-large legend.
fig.autofmt_xdate()
plt.legend(prop={'size':'x-large'})
# Add a second subplot showing the filtered spectrum.
ax2 = fig.add_subplot(212)
N = len(qqq)
plt.plot(np.linspace(-N/2, N/2, N), amps, label="transformed")
# Show the figure and the legend.
plt.legend(prop={'size':'x-large'})
plt.show()
#!/proj/sot/ska3/flight/bin/python
#
#--- this script fix wrong temperature limit
#---
#--- May 10, 2021
#---
import os
import sys
import re
import string
import time
import numpy
import astropy.io.fits as pyfits
from astropy.io.fits import Column
import Chandra.Time
#
#--- reading directory list
#
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]

# Each config line is "<path> : <variable name>"; the exec below creates
# module-level variables such as data_dir, bin_dir and mta_dir from it.
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf       #---- contains other functions commonly used in MTA scripts
import envelope_common_function as ecf   #---- contains other functions commonly used in envelope
import fits_operation as mfo             #---- fits operation collection
import read_limit_table as rlt           #---- read limit table and create msid<--> limit dict
#
#--- other path setting
#
limit_dir = '/data/mta/Script/MSID_limit/Trend_limit_data/'
#
#--- fits generation related lists
#
# Column names of the trend fits tables; index 1 is replaced by the msid
# name when a file is written.
col_names = ['time', 'msid', 'med', 'std', 'min', 'max',
             'ylower', 'yupper', 'rlower', 'rupper', 'dcount',
             'ylimlower', 'ylimupper', 'rlimlower', 'rlimupper', 'state']
# FITS column formats matching col_names one-to-one.
col_format = ['D', '20A', 'D', 'D','D','D','D','D','D','D', 'I', 'D', 'D', 'D', 'D', '10A']
a_month = 86400 * 30    # one month in seconds
#
#--- set a temporary file name
#
import random
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
# Only rows with time >= tstart are updated (time units as stored in the
# fits files --- presumably mission seconds; confirm against the data).
tstart = 733449594
tstart = 733449594
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def run():
    """Re-apply limit values to every *_short and full trend fits file."""
    m_dict = get_limit_data()
    m_list = m_dict.keys()

    # List the per-group data directories via a shell glob.
    cmd = 'ls -d ' + data_dir + '/* >' + zspace
    os.system(cmd)
    tdir_list = mcf.read_data_file(zspace, remove=1)

    for edir in tdir_list:
        os.system('ls ' + edir + "/*_short_data.fits > " + zspace)
        f_list = mcf.read_data_file(zspace, remove=1)
        for ent in f_list:
            # File name is "<msid>_short_data.fits".
            msid = re.split('_short_', re.split('\/', ent)[-1])[0]
            if msid not in m_list:
                continue
            print(msid)
            lim_list = m_dict[msid]
            sfits = ent
            lfits = sfits.replace('_short', '')
            try:
                update_limit_chk(msid, sfits, lim_list)
                update_limit_chk(msid, lfits, lim_list)
            except:
                pass
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def update_limit_chk(msid, mfits, lim_list):
    """Rewrite limit columns and violation flags of one trend fits file.

    input:  msid     --- msid
            mfits    --- fits file to update (rewritten in place)
            lim_list --- [yellow_lower, yellow_upper, red_lower, red_upper]
    output: updated fits file written via create_fits_file
    """
    hout = pyfits.open(mfits)
    data = hout[1].data
    hout.close()
    clen = len(col_names)
    # NOTE(review): cols aliases (and mutates) the module-level col_names
    # list; the column named after the msid appears to hold the measured
    # value used as "med" below --- confirm against the data files.
    cols = col_names
    cols[1] = msid
    tlist = []
    for col in cols:
        tlist.append(data[col])
    tlen = len(tlist[0])
    for k in range(0, tlen):
        # Only rows at or after tstart are corrected.
        if tlist[0][k] < tstart:
            continue
        #tlist[6][k] = 0
        #tlist[7][k] = 0
        #tlist[8][k] = 0
        #tlist[9][k] = 0
        #yl = lim_list[0]
        #yu = lim_list[1]
        #rl = lim_list[2]
        #ru = lim_list[3]
        #tlist[11][k] = yl
        #tlist[12][k] = yu
        #tlist[13][k] = rl
        #tlist[14][k] = ru
        #continue
        med = float(tlist[1][k])
        # New limit values: columns 11-14 are ylimlower/ylimupper/
        # rlimlower/rlimupper.
        yl = lim_list[0]
        yu = lim_list[1]
        rl = lim_list[2]
        ru = lim_list[3]
        tlist[11][k] = yl
        tlist[12][k] = yu
        tlist[13][k] = rl
        tlist[14][k] = ru
        # Violation flags: columns 6-9 are ylower/yupper/rlower/rupper.
        # Exactly one flag is set depending on where med falls relative
        # to the yellow/red bounds; inside the yellow band all are zero.
        if med > yl and med < yu:
            tlist[6][k] = 0
            tlist[7][k] = 0
            tlist[8][k] = 0
            tlist[9][k] = 0
        elif med > rl and med <= yl:
            tlist[6][k] = 1
            tlist[7][k] = 0
            tlist[8][k] = 0
            tlist[9][k] = 0
        elif med >= yu and med < ru:
            tlist[6][k] = 0
            tlist[7][k] = 1
            tlist[8][k] = 0
            tlist[9][k] = 0
        elif med <= rl:
            tlist[6][k] = 0
            tlist[7][k] = 0
            tlist[8][k] = 1
            tlist[9][k] = 0
        elif med >= yu:
            # Reached only when med >= ru (upper red violation).
            tlist[6][k] = 0
            tlist[7][k] = 0
            tlist[8][k] = 0
            tlist[9][k] = 1
        else:
            tlist[6][k] = 0
            tlist[7][k] = 0
            tlist[8][k] = 0
            tlist[9][k] = 0
    atemp = re.split('\/', mfits)
    ffile = atemp[-1]
    create_fits_file(msid, mfits, tlist)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def create_fits_file(msid, fits, data):
    """
    create a fits file
    input:  msid    --- msid
            fits    --- output fits file name
            data    --- a list of list of data (one list per column)
    output: fits
    """
    cols = col_names
    cols[1] = msid
    # Build all 16 columns in a loop instead of 16 hand-written lines;
    # names, formats and arrays line up index-for-index.
    column_list = [Column(name=cols[k], format=col_format[k], array=data[k])
                   for k in range(len(cols))]
    coldefs = pyfits.ColDefs(column_list)
    tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
    # Remove any stale copy before writing the new file.
    mcf.rm_files(fits)
    tbhdu.writeto(fits)
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def get_limit_data():
    """Read op_limits_new.db and map msid -> [y_low, y_up, r_low, r_up].

    Only entries whose unit field is 'K' and whose 7th data field is
    'none' are kept.
    """
    ifile = limit_dir + 'Limit_data/op_limits_new.db'
    m_dict = {}
    for ent in mcf.read_data_file(ifile):
        if ent[0] == '#':
            continue                      # comment line
        atemp = re.split('#', ent)
        unit = atemp[-2].strip()
        if unit != 'K':
            continue
        btemp = re.split('\s+', atemp[0])
        if btemp[6].strip() != 'none':
            continue
        msid = btemp[0].strip()
        m_dict[msid] = [float(btemp[1]), float(btemp[2]),
                        float(btemp[3]), float(btemp[4])]
    return m_dict
#-----------------------------------------------------------------------
#--- script entry point
if __name__ == "__main__":
    run()
|
import json
import marshmallow as ma
from datetime import datetime
from webargs import fields
# Public names re-exported from this module.
# NOTE(review): Float and Url are defined below but missing from __all__
# --- confirm whether that is intended.
__all__ = [
    'Str', 'Int', 'Bool', 'List', 'DelimitedList', 'Nested', 'Timestamp'
]

# Thin aliases over webargs field types so callers import from one place.
Str = fields.Str
Float = fields.Float
Int = fields.Int
Bool = fields.Bool
List = fields.List
DelimitedList = fields.DelimitedList
Nested = fields.Nested
Url = fields.Url
class JSONStringifyList(ma.fields.List):
    """List field whose raw value arrives as a JSON-encoded string.

    Expects the incoming value to be a one-element sequence containing a
    JSON array string (e.g. ``['[1, 2, 3]']``) and delegates per-element
    validation to the wrapped field.
    """

    default_error_messages = {
        'invalid': 'Not a jsonstringify list'
    }

    # The redundant __init__ that only forwarded to super() was removed;
    # the parent signature is inherited unchanged.

    def _deserialize(self, value, attr, data):
        # Keep the try body minimal: only the JSON parse can raise the
        # error we want to report; validation errors from the parent
        # should propagate unchanged (as they did before).
        try:
            res = json.loads(value[0])
        except json.JSONDecodeError:
            return self.fail('invalid')
        return super(JSONStringifyList, self)._deserialize(res, attr, data)
class Timestamp(ma.fields.Field):
    """Field deserializing POSIX timestamps into datetime objects.

    :param is_seconds: True when incoming values are in seconds, False
        when they are in milliseconds.
    """

    default_error_messages = {
        'invalid': 'Not a valid timestamp'
    }

    def __init__(self, is_seconds=True, **kwargs):
        self.is_seconds = is_seconds
        super(Timestamp, self).__init__(**kwargs)

    def _format_timestamp(self, value):
        """Convert a numeric timestamp to datetime (None passes through)."""
        if value is None:
            return None
        try:
            # datetime.fromtimestamp expects SECONDS; a millisecond input
            # must be DIVIDED by 1000 (the original multiplied, producing
            # dates far in the future).
            _v = float(value) / (1000 if not self.is_seconds else 1)
            return datetime.fromtimestamp(_v)
        except (TypeError, ValueError, OverflowError, OSError):
            # Targeted exceptions instead of catch-all + debug print.
            return self.fail('invalid')

    def _serialize(self, value, attr, obj):
        # Datetimes (the usual deserialized form) previously crashed in
        # float(); convert them back to a POSIX timestamp in the
        # configured unit. Plain numbers pass through as before.
        if isinstance(value, datetime):
            ts = value.timestamp()
            return ts if self.is_seconds else ts * 1000
        return float(value)

    def _deserialize(self, value, attr, obj):
        return self._format_timestamp(value)
|
from django.db import models
# Create your models here.
class Category(models.Model):
    """A menu category."""
    name = models.CharField(max_length=64)

    def __str__(self):
        return "{}".format(self.name)
class Regular_pizza(models.Model):
    """A regular pizza with small/large pricing."""
    name = models.CharField(max_length=64)
    small = models.DecimalField(max_digits=4, decimal_places=2)
    large = models.DecimalField(max_digits=4, decimal_places=2)

    def __str__(self):
        return "{} - {} -{}".format(self.name, self.small, self.large)
class Sicilian_pizza(models.Model):
    """A Sicilian pizza with small/large pricing."""
    name = models.CharField(max_length=64)
    small = models.DecimalField(max_digits=4, decimal_places=2)
    large = models.DecimalField(max_digits=4, decimal_places=2)

    def __str__(self):
        return "{} - {} -{}".format(self.name, self.small, self.large)
class Topping(models.Model):
    """A pizza topping."""
    name = models.CharField(max_length=64)

    def __str__(self):
        return "{}".format(self.name)
class Sub(models.Model):
    """A sub sandwich; the small size is optional for some items."""
    name = models.CharField(max_length=64)
    small = models.DecimalField(max_digits=4, decimal_places=2, null=True, blank=True)
    large = models.DecimalField(max_digits=4, decimal_places=2)

    def __str__(self):
        return "{} - {} -{}".format(self.name, self.small, self.large)
class Pasta(models.Model):
    """A single-priced pasta dish."""
    name = models.CharField(max_length=64)
    price = models.DecimalField(max_digits=4, decimal_places=2)

    def __str__(self):
        return "{} - {}".format(self.name, self.price)
class Salad(models.Model):
    """A single-priced salad."""
    name = models.CharField(max_length=64)
    price = models.DecimalField(max_digits=4, decimal_places=2)

    def __str__(self):
        return "{} - {}".format(self.name, self.price)
class Dinner_platter(models.Model):
    """A dinner platter with small/large pricing."""
    name = models.CharField(max_length=64)
    small = models.DecimalField(max_digits=4, decimal_places=2)
    large = models.DecimalField(max_digits=4, decimal_places=2)

    def __str__(self):
        return "{} - {} -{}".format(self.name, self.small, self.large)
from knowledge_graph import app
import json
from json import dumps, load
from flask import request, make_response, abort, Response
from knowledge_graph.Mind import Mind
# Maps survey question ids to the Schwartz personal value they measure.
value_id_map = {
    1 : "conformity",
    2 : "tradition",
    3 : "benevolence",
    4 : "universalism",
    5 : "self-direction",
    6 : "stimulation",
    7 : "hedonism",
    8 : "achievement",
    9 : "power",
    10 : "security"
}
@app.route('/', methods=['GET'])
def home():
    """Landing page for the API."""
    return "<h1>API for climatemind ontology</h1>"
@app.route('/ontology', methods=['GET'])
def query():
    """Search the ontology for every ?query= keyword; return JSON results."""
    keywords = request.args.getlist('query')
    mind = app.config["MIND"]
    results = {}
    try:
        for keyword in keywords:
            results[keyword] = mind.search(keyword)
    except ValueError:
        #todo: currently returns no results at all if 1 keyword in an array isn't found. fix this.
        return make_response("query keyword not found"), 400
    response = Response(dumps(results))
    response.headers['Content-Type'] = 'application/json'
    return response, 200
@app.route('/get_questions', methods=['GET'])
def get_questions():
    """Serve the Schwartz survey questions from the bundled JSON file."""
    try:
        with open('schwartz_questions.json') as json_file:
            questions = load(json_file)
    except FileNotFoundError:
        return make_response("Schwartz Questions not Found"), 400
    response = Response(dumps(questions))
    response.headers['Content-Type'] = 'application/json'
    return response, 200
@app.route('/get_user_scores', methods=['POST'])
def get_user_scores():
    """ Users want to be able to get their score results after submitting
        the survey. This method checks for a POST request from the front-end
        containing a JSON object with the users scores.

        The user can answer 10 or 20 questions. If they answer 20, the scores
        are averaged between the 10 additional and 10 original questions to get
        10 corresponding value scores.

        Then to get a centered score for each value, each score value is
        subtracted from the overall average of all 10 or 20 questions. This is
        returned to the front-end.
    """
    try:
        parameter = request.json
    except Exception:
        # Malformed/absent JSON body.
        return make_response("Invalid User Response"), 400

    value_scores = {}
    overall_sum = 0
    num_of_responses = 10

    for value in parameter["SetOne"]:
        # `value_id` instead of `id` to avoid shadowing the builtin.
        value_id = value["id"]
        score = value["score"]
        overall_sum += score
        value_scores[value_id] = {"name": value_id_map[value_id],
                                  "score": score}

    # The second set is optional; .get avoids a KeyError (and a 500 error)
    # when the front-end submits only the first 10 questions.
    if parameter.get("SetTwo"):
        num_of_responses += 10
        for value in parameter["SetTwo"]:
            value_id = value["id"]
            score = value["score"]
            # Average the duplicate question with the first set's answer.
            avg_score = (value_scores[value_id]["score"] + score) / 2
            overall_sum += score
            value_scores[value_id] = {"name": value_id_map[value_id],
                                      "score": avg_score}

    overall_avg = overall_sum / num_of_responses

    # Center each value score around the overall average (debug print of
    # the average removed).
    for value_id, value in value_scores.items():
        value_scores[value_id] = {"name": value["name"],
                                  "score": value["score"] - overall_avg}

    response = Response(dumps(value_scores))
    return response, 200
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015-2018 Shenzhen Auto-link world Information Technology Co., Ltd.
All Rights Reserved
Name: Config.py
Purpose:
Created By: Clive Lau <liuxusheng@auto-link.com.cn>
Created Date: 2017-01-01
Changelog:
Date Desc
2017-01-01 Created by Clive Lau
"""
# Builtin libraries
import os
import time
# Third-party libraries
from robot.api import logger
# Custom libraries
from TBoxCore import TBoxCore
class TBoxKeyword(object):
def __init__(self):
self._tag = self.__class__.__name__ + ': '
logger.console(self._tag + "__init__ called")
self._tbox = None
def initialize(self, device, server, channel, baudrate):
""" 初始化 TBox 设备
:return:
"""
logger.info(self._tag + "Initialize called")
self._tbox = TBoxCore(device, server, channel, baudrate)
self._tbox.on_create()
def uninitialize(self):
""" 反初始化 TBox 设备
:return:
"""
logger.info(self._tag + "Uninitialize called")
self._tbox.on_destroy()
def log_cleanup(self):
""" 清理TBox设备log
:return:
"""
logger.info(self._tag + 'Cleaning log for TBox')
TBoxCore.on_clean_log()
def log_collection(self):
""" 收集TBox设备log
:return:
"""
logger.info(self._tag + 'Collecting log for TBox')
timestamp = time.strftime('%Y%m%d', time.localtime(time.time()))
path = os.path.expandvars('$HOME') + '/Desktop/' + timestamp + '_Sherlock-TBox/' + 'dev_log'
TBoxCore.on_collect_log(path)
def wait_until_ready(self):
""" 等待 TBox 成功连接 MQTT Broker
:return: True if succeed to connect MQTT Broker or not
"""
logger.info(self._tag + "Wait until ready called")
return self._tbox.wait_until_ready()
def request_remote_ota(self, version, addr, timeout=30):
""" 请求远程升级
:param timeout: 设置超时
:return: True if succeed to configuration or not
"""
logger.info(self._tag + "Request remote control called")
return self._tbox.on_request_remote_ota(version, addr, timeout)
def request_remote_control(self, item, data, timeout=30):
""" 请求远程控制
:param timeout: 设置超时
:return: True if succeed to configuration or not
"""
logger.info(self._tag + "Request remote control called")
return self._tbox.on_request_remote_control(item, data, timeout)
def request_remote_diagnosis(self, timeout=30):
""" 请求远程诊断
:param timeout: 设置超时
:return: True if succeed to configuration or not
"""
logger.info(self._tag + "Request remote diagnosis called")
return self._tbox.on_request_remote_diagnosis(timeout)
def request_remote_config(self, item, data, timeout=30):
""" 请求远程配置
:param item: 配置项
:param data: 配置数据
:param timeout: 设置超时
:return: True if succeed to configuration or not
"""
logger.info(self._tag + "Request remote config called")
return self._tbox.on_request_remote_config(item, data, timeout)
def request_can_config(self, item, data, timeout=10):
    """ Request CAN configuration.

    :param item: configuration item
    :param data: configuration data
    :param timeout: seconds to wait (default 10 — shorter than the remote ops)
    :return: True if succeed to configuration or not
    """
    logger.info(self._tag + "Request CAN config called")
    return self._tbox.on_request_can_config(item, data, timeout)
def request_can_data(self, item, timeout=10):
    """ Request the specified CAN data.

    :param item: item identifying which CAN data to fetch
    :param timeout: seconds to wait (default 10)
    :return: Specified data
    """
    logger.info(self._tag + "Request CAN data called")
    return self._tbox.on_request_can_data(item, timeout)
if __name__ == '__main__':
    # Import-only module: nothing to run standalone.
    pass
|
import cv2
import sys
#import pygame
# pygame experiment kept commented out below for reference.

# Usage: python <script> <face_cascade.xml> <eye_cascade.xml>
cascPath = sys.argv[1]
eyecaspath = sys.argv[2]
faceCascade = cv2.CascadeClassifier(cascPath)
eye_cascade = cv2.CascadeClassifier(eyecaspath)
video_capture = cv2.VideoCapture(0)
i = 0
frameWidth = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
lockeyes = False
morse = []
#(width, height) = (1000, 1000)
#screen = pygame.display.set_mode((width, height))
#pygame.display.flip()
#pygame.draw.line(screen, (255,0,255), (20,20), (70,80), 2)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    if not ret:
        # Camera delivered no frame (unplugged/busy): stop instead of
        # crashing inside cvtColor on a None frame.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    i = i + 1
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=2.1,
        #minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around each face, then search its region for eyes.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=4.2)
        for (ex, ey, ew, eh) in eyes:
            lockeyes = True
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        # BUG FIX: detectMultiScale returns an empty *tuple* when nothing is
        # found, and tuples have no .shape — the original eyes.shape[0]
        # raised AttributeError whenever a face had no detected eyes.
        num_eyes = len(eyes)
        print(num_eyes)
        if lockeyes and num_eyes == 2:
            morse.append(" ")
        elif lockeyes and num_eyes <= 1:
            morse.append(".")
    if lockeyes:
        cv2.putText(img = frame, text = "i can see your dark soul from these eyes and it's delicious",org = (0,50), fontFace = cv2.FONT_HERSHEY_DUPLEX, fontScale = 1,color = (0, 0, 0))
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
print(''.join(map(str, morse)))
video_capture.release()
cv2.destroyAllWindows()
|
# sudo CFLAGS=-stdlib=libc++ python3 maml.py
import argparse
import random
import pandas as pd
import pickle
import numpy as np
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
import learn2learn as l2l
import torchtext
from torchtext.datasets import text_classification
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
class Net(nn.Module):
    """Two-class classifier head on top of a RoBERTa encoder.

    Sentence embeddings are produced by mean-pooling the second-to-last
    hidden layer of RoBERTa; a small MLP (1024 -> 512 -> 2) yields the
    class logits.
    """
    def __init__(self, roberta, finetune=False):
        """
        :param roberta: fairseq RoBERTa hub wrapper; its .model is used directly
        :param finetune: when False (default), freeze all encoder parameters so
            only the classification head receives gradients
        """
        super(Net, self).__init__()
        self.roberta = roberta.model
        if not finetune:
            for param in self.roberta.parameters():
                param.requires_grad = False
        self.fc1 = nn.Linear(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, 2)
    def forward(self, tokens_list):
        """Encode each token tensor and classify the pooled embeddings.

        :param tokens_list: iterable of token-id tensors; 1-D tensors are
            reshaped to [1, seq] before encoding
        :return: [batch, 2] logits tensor
        :raises ValueError: if a sequence exceeds the encoder's max positions
        """
        sentence_embeddings = []
        for tokens in tokens_list:
            if tokens.dim() == 1:
                tokens = tokens.unsqueeze(0)
            if tokens.size(-1) > self.roberta.max_positions():
                raise ValueError('tokens exceeds maximum length: {} > {}'.format(
                    tokens.size(-1), self.roberta.max_positions()
                ))
            x, extra = self.roberta(
                tokens,
                features_only=True,
                return_all_hiddens=True,
            )
            # Mean-pool the second-to-last hidden layer over the sequence
            # dimension into a single 1024-dim sentence vector.
            inner_states = extra['inner_states']
            pooling_layer = inner_states[-2].transpose(0, 1)
            sentence_embeddings.append(pooling_layer.mean(1).view(1024))
        x = torch.stack(sentence_embeddings)
        x = F.relu(self.bn1(self.fc1(x)))
        x = self.fc2(x)
        return x
def accuracy(predictions, targets):
    """Return mean accuracy of argmax(predictions) against integer targets."""
    predicted_classes = predictions.argmax(dim=1)
    correct = (predicted_classes == targets).sum().float()
    return (correct / len(targets)).item()
class MAMLDataset(Dataset):
    """Expose one of several toxicity/abuse corpora as (text, label) pairs.

    __init__ loads the raw corpus named by dataset_name from hard-coded
    paths under ./269_datasets/; __getitem__ normalizes every corpus to a
    (tokens, labels) tuple where labels is a 0/1-style value.
    """
    def __init__(self, dataset_name):
        """
        :param dataset_name: one of the corpus keys handled below
        """
        self.dataset = None
        self.dataset_name = dataset_name
        if dataset_name == 'SST':
            self.dataset = torchtext.datasets.SST("./269_datasets/SST/train.txt", torchtext.data.Field(sequential=False), torchtext.data.Field(sequential=False))
        elif dataset_name == 'toxic_comment':
            self.dataset = pd.read_csv("./269_datasets/jigsaw-toxic-comment-classification-challenge/train.csv")
        elif dataset_name == '4054689':
            # Join comments with their per-revision annotations on rev_id.
            comments = pd.read_csv("./269_datasets/4054689/attack_annotated_comments.tsv", sep='\t')
            annotations = pd.read_csv("./269_datasets/4054689/attack_annotations.tsv", sep='\t')
            self.dataset = comments.merge(annotations, how='inner', on='rev_id')
        elif dataset_name == 'detecting-insults-in-social-commentary':
            self.dataset = pd.read_csv("./269_datasets/detecting-insults-in-social-commentary/train.csv")
        elif dataset_name == 'GermEval-2018-Data-master':
            # NOTE(review): not implemented — dataset stays None, so __len__
            # and the debug print below would fail for this name.
            self.dataset = None
        elif dataset_name == 'hate-speech-and-offensive-language':
            self.dataset = pd.read_csv("./269_datasets/hate-speech-and-offensive-language/labeled_data.csv")
        elif dataset_name == 'hate-speech-dataset-master':
            # The metadata CSV lists file ids; each comment body lives in its
            # own text file, so read them all and join on file_id.
            annotations_metadata = pd.read_csv("./269_datasets/hate-speech-dataset-master/annotations_metadata.csv")
            ids = []
            comments = []
            for index, row in annotations_metadata.iterrows():
                with open("./269_datasets/hate-speech-dataset-master/all_files/" + row['file_id'] + ".txt") as f:
                    ids.append(row['file_id'])
                    comments.append(f.read().strip())
            comments_data = pd.DataFrame.from_dict({'file_id' : ids, 'comment' : comments})
            self.dataset = annotations_metadata.merge(comments_data, how='inner', on='file_id')
        elif dataset_name == 'IWG_hatespeech_public-master':
            self.dataset = pd.read_csv('./269_datasets/IWG_hatespeech_public-master/german hatespeech refugees.csv')
        elif dataset_name == 'quora-insincere-questions-classification':
            self.dataset = pd.read_csv('./269_datasets/quora-insincere-questions-classification/train.csv')
        elif dataset_name == 'twitter-sentiment-analysis-hatred-speech':
            self.dataset = pd.read_csv('./269_datasets/twitter-sentiment-analysis-hatred-speech/train.csv')
        if dataset_name != 'SST':
            # Debug peek at the first row of the loaded frame.
            print(self.dataset.iloc[0])
    def __len__(self):
        # Number of examples in the underlying corpus.
        return len(self.dataset)
    def __getitem__(self, idx):
        """Return (tokens, labels) — raw text and its binary label — for example idx."""
        if self.dataset_name == 'SST':
            tokens = getattr(self.dataset[idx], 'text')
            labels = int(getattr(self.dataset[idx], 'label') == 'negative')
        elif self.dataset_name == 'toxic_comment':
            tokens = self.dataset.iloc[idx]['comment_text']
            labels = self.dataset.iloc[idx]['identity_hate']
        elif self.dataset_name == '4054689':
            tokens = self.dataset.iloc[idx]['comment']
            labels = self.dataset.iloc[idx]['attack']
        elif self.dataset_name == 'detecting-insults-in-social-commentary':
            tokens = self.dataset.iloc[idx]['Comment']
            labels = self.dataset.iloc[idx]['Insult']
        elif self.dataset_name == 'GermEval-2018-Data-master':
            tokens = None
            labels = None
        elif self.dataset_name == 'hate-speech-and-offensive-language':
            # label is 1 exactly when the corpus 'class' column equals 0.
            tokens = self.dataset.iloc[idx]['tweet']
            labels = int(self.dataset.iloc[idx]['class'] == 0)
        elif self.dataset_name == 'hate-speech-dataset-master':
            tokens = self.dataset.iloc[idx]['comment']
            labels = int(self.dataset.iloc[idx]['label'] == 'hate')
        elif self.dataset_name == 'IWG_hatespeech_public-master':
            tokens = self.dataset.iloc[idx]['Tweet']
            labels =int(self.dataset.iloc[idx]['HatespeechOrNot (Expert 1)'] == 'YES')
        elif self.dataset_name == 'quora-insincere-questions-classification':
            tokens = self.dataset.iloc[idx]['question_text']
            labels = self.dataset.iloc[idx]['target']
        elif self.dataset_name == 'twitter-sentiment-analysis-hatred-speech':
            tokens = self.dataset.iloc[idx]['tweet']
            labels = self.dataset.iloc[idx]['label']
        return (tokens, labels)
def maml(lr=0.005, maml_lr=0.01, iterations=5, ways=2, shots=5, tps=5, fas=5, device=torch.device("cpu")):
    """Meta-train the RoBERTa classifier with MAML across several corpora.

    :param lr: outer-loop (meta) learning rate
    :param maml_lr: inner-loop adaptation learning rate
    :param iterations: number of meta-iterations
    :param ways: classes per task
    :param shots: examples per class for adaptation (the same count again is
        held out for evaluation — KShots samples 2 * shots)
    :param tps: tasks sampled per meta-step
    :param fas: fast-adaptation steps per task
    :param device: torch device for learners and batches
    Saves the trained weights to ./models/maml.pt.
    """
    roberta = torch.hub.load('pytorch/fairseq', 'roberta.large')
    datasets = ['SST', 'toxic_comment', '4054689', 'detecting-insults-in-social-commentary', \
                'hate-speech-and-offensive-language', 'hate-speech-dataset-master', \
                'quora-insincere-questions-classification']
    # datasets = ['SST', 'twitter-sentiment-analysis-hatred-speech']
    # tps = 2
    # One TaskDataset per corpus; each samples ways*2*shots examples per task.
    train_tasks_collection = []
    for idx in range(len(datasets)):
        print('\n\n### Dataset: ' + datasets[idx] + '###\n\n')
        train = l2l.data.MetaDataset(MAMLDataset(datasets[idx]))
        train_tasks = l2l.data.TaskDataset(train,
                                           task_transforms=[
                                               l2l.data.transforms.NWays(train, ways),
                                               l2l.data.transforms.KShots(train, 2 * shots),
                                               l2l.data.transforms.LoadData(train),
                                               l2l.data.transforms.RemapLabels(train),
                                               l2l.data.transforms.ConsecutiveLabels(train),
                                           ],
                                           num_tasks=50)
        train_tasks_collection.append(train_tasks)
    model = Net(roberta)
    meta_model = l2l.algorithms.MAML(model, lr=maml_lr)
    opt = optim.Adam(meta_model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, iterations, eta_min=0, last_epoch=-1)
    loss_func = nn.CrossEntropyLoss()
    for iteration in range(iterations):
        iteration_error = 0.0
        iteration_acc = 0.0
        # Draw tps distinct corpora for this meta-step.
        train_tasks_sampled = random.sample(train_tasks_collection, tps)
        for tps_i in range(tps):
            # NOTE(review): iteration_errors / error_weights are re-created for
            # every task, yet the gradient step on error_weights below runs once
            # per iteration using only the last task's tensors — confirm intended.
            iteration_errors = torch.zeros(tps * fas)
            error_weights = torch.rand(tps * fas, requires_grad=True)
            learner = meta_model.clone()
            learner.to(device)
            train_task = train_tasks_sampled[tps_i].sample()
            data, labels = train_task
            # Separate data into adaptation/evaluation sets: even positions
            # adapt, odd positions evaluate.
            adaptation_indices = np.zeros(len(data), dtype=bool)
            adaptation_indices[np.arange(shots*ways) * 2] = True
            evaluation_indices = ~adaptation_indices
            adaptation_indices = adaptation_indices
            data = np.array(data)
            labels = np.array(labels)
            adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
            # BPE-encode each text, zero-padded to the encoder's max length.
            torch_adaptation_data = torch.zeros((len(adaptation_data), roberta.model.max_positions()), dtype=torch.long)
            for i, elem in enumerate(adaptation_data):
                encoding = roberta.encode(elem)[:roberta.model.max_positions()]
                torch_adaptation_data[i, :len(encoding)] = encoding
            adaptation_data = torch_adaptation_data.to(device)
            torch_adaptation_labels = torch.LongTensor(adaptation_labels)
            adaptation_labels = torch_adaptation_labels.to(device)
            evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
            torch_evaluation_data = torch.zeros((len(evaluation_data), roberta.model.max_positions()), dtype=torch.long)
            for i, elem in enumerate(evaluation_data):
                encoding = roberta.encode(elem)[:roberta.model.max_positions()]
                torch_evaluation_data[i, :len(encoding)] = encoding
            evaluation_data = torch_evaluation_data.to(device)
            torch_evaluation_labels = torch.LongTensor(evaluation_labels)
            evaluation_labels = torch_evaluation_labels.to(device)
            # Fast Adaptation: validation loss is recorded after every step
            # (multi-step loss, "MAML MSL").
            for step in range(fas):
                train_error = loss_func(learner(adaptation_data), adaptation_labels)
                learner.adapt(train_error, allow_unused=True, allow_nograd=True)
                # Compute validation loss
                # MAML MSL
                predictions = learner(evaluation_data)
                valid_error = loss_func(predictions, evaluation_labels)
                valid_error /= len(evaluation_data)
                valid_accuracy = accuracy(predictions, evaluation_labels)
                iteration_errors[fas * tps_i + step] = valid_error
                iteration_acc += valid_accuracy
            # Random weights combine the per-step validation losses.
            iteration_error += torch.dot(error_weights, iteration_errors)
            del adaptation_data
            del adaptation_labels
            del evaluation_data
            del evaluation_labels
            del learner
        iteration_error /= tps
        iteration_acc /= (tps * fas)
        print('Loss : {:.3f} Acc : {:.3f}'.format(iteration_error.item(), iteration_acc))
        # Take the meta-learning step
        opt.zero_grad()
        iteration_error.backward()
        opt.step()
        scheduler.step()
        # Manual SGD update on the loss-combination weights themselves.
        error_weights.data = error_weights.data - lr * error_weights.grad.data
    torch.save(model.state_dict(), './models/maml.pt')
def pretrain(lr=0.005, iterations=5, shots=5, fas=5, device=torch.device("cpu")):
    """Baseline pre-training on a single corpus (no meta-learning).

    Each iteration draws one batch and applies fas manual SGD updates to it.
    Saves the weights to ./models/pretrain.pt.

    :param lr: learning rate for the manual parameter update
    :param iterations: number of batches to train on
    :param shots: controls batch size (2*2*shots, mirroring the MAML task size)
    :param fas: gradient steps per batch
    :param device: torch device
    """
    roberta = torch.hub.load('pytorch/fairseq', 'roberta.large')
    pretrain_data = MAMLDataset('hate-speech-dataset-master')
    pretrain_dataset = DataLoader(pretrain_data, batch_size=2*2*shots, shuffle=True)
    iter_pretrain_dataset = iter(pretrain_dataset)
    model = Net(roberta)
    model.to(device)
    opt = optim.Adam(model.parameters(), lr=lr)
    loss_func = nn.CrossEntropyLoss()
    for iteration in range(iterations):
        iteration_error = 0.0
        iteration_acc = 0.0
        data, labels = None, None
        # NOTE(review): bare except — presumably meant to catch StopIteration
        # when the loader is exhausted, but it will hide any other error too.
        try:
            data, labels = next(iter_pretrain_dataset)
        except:
            pretrain_dataset = DataLoader(pretrain_data, batch_size=2*2*shots, shuffle=True)
            iter_pretrain_dataset = iter(pretrain_dataset)
            data, labels = next(iter_pretrain_dataset)
        # BPE-encode the batch, zero-padded to the encoder's max length.
        torch_data = torch.zeros((len(data), roberta.model.max_positions()), dtype=torch.long)
        for i, elem in enumerate(data):
            encoding = roberta.encode(elem)[:roberta.model.max_positions()]
            torch_data[i, :len(encoding)] = encoding
        torch_labels = torch.LongTensor(labels)
        data = torch_data.to(device)
        labels = torch_labels.to(device)
        for step in range(fas):
            predictions = model(data)
            train_error = loss_func(predictions, labels)
            train_acc = accuracy(predictions, labels)
            opt.zero_grad()
            train_error.backward()
            # Manual SGD step (NOTE(review): opt.step() is never called, so the
            # Adam optimizer above is only used for zero_grad).
            for param in model.parameters():
                if param.grad is not None:
                    param.data = param.data - lr * param.grad.data
            iteration_error += train_error / len(data)
            iteration_acc += train_acc
        iteration_error /= fas
        iteration_acc /= fas
        print('Loss : {:.3f} Acc : {:.3f}'.format(iteration_error.item(), iteration_acc))
        del data
        del labels
    torch.save(model.state_dict(), './models/pretrain.pt')
    del model
def train(lr=0.005, iterations=5, shots=5, device=torch.device("cpu"), filepath=None):
    """Fine-tune on the Twitter hatred-speech corpus, optionally starting
    from saved weights, and pickle per-iteration train/test metrics.

    :param lr: learning rate for the manual SGD update
    :param iterations: training iterations (one batch each)
    :param shots: controls batch size (2*2*shots)
    :param device: torch device
    :param filepath: optional state-dict path ('./models/maml.pt' or
        './models/pretrain.pt'); also selects the metrics-file suffix
    """
    roberta = torch.hub.load('pytorch/fairseq', 'roberta.large')
    data = MAMLDataset('twitter-sentiment-analysis-hatred-speech')
    # 60-40 train-test split (NOTE(review): the original comment said 90-10,
    # but train_size is 6/10 of the data).
    train_size = 6 * len(data) // 10
    test_size = len(data) - train_size
    train_data_split, test_data_split = torch.utils.data.random_split(data, [train_size, test_size])
    train_dataset = DataLoader(train_data_split, batch_size=2*2*shots, shuffle=True)
    # train_dataset = DataLoader(train_data_split, batch_size=128, shuffle=True)
    iter_train_dataset = iter(train_dataset)
    test_dataset = DataLoader(test_data_split, batch_size=len(test_data_split))
    model = Net(roberta)
    if filepath is not None:
        model.load_state_dict(torch.load(filepath))
    model.to(device)
    opt = optim.Adam(model.parameters(), lr=lr)
    loss_func = nn.CrossEntropyLoss()
    train_accs = []
    test_accs = []
    train_losses = []
    test_losses = []
    for iteration in range(iterations):
        train_data, train_labels = None, None
        # NOTE(review): next(iter(train_dataset)) builds a fresh iterator every
        # call, so iter_train_dataset above is never advanced — confirm whether
        # sequential batches were intended.
        try:
            train_data, train_labels = next(iter(train_dataset))
        except:
            train_dataset = DataLoader(train_data_split, batch_size=2*2*shots, shuffle=True)
            # train_dataset = DataLoader(train_data_split, batch_size=128, shuffle=True)
            iter_train_dataset = iter(train_dataset)
            train_data, train_labels = next(iter(train_dataset))
        # BPE-encode the batch, zero-padded to the encoder's max length.
        torch_train_data = torch.zeros((len(train_data), roberta.model.max_positions()), dtype=torch.long)
        for i, elem in enumerate(train_data):
            encoding = roberta.encode(elem)[:roberta.model.max_positions()]
            torch_train_data[i, :len(encoding)] = encoding
        torch_train_labels = torch.LongTensor(train_labels)
        train_data = torch_train_data.to(device)
        train_labels = torch_train_labels.to(device)
        train_predictions = model(train_data)
        train_error = loss_func(train_predictions, train_labels)
        train_acc = accuracy(train_predictions, train_labels)
        opt.zero_grad()
        train_error.backward()
        # Manual SGD step (NOTE(review): opt.step() is never called, so the
        # Adam optimizer above is only used for zero_grad).
        for param in model.parameters():
            if param.grad is not None:
                param.data = param.data - lr * param.grad.data
        # train_error /= len(train_data)
        print('Train Loss : {:.3f} Train Acc : {:.3f}'.format(train_error.item(), train_acc))
        train_losses.append(train_error)
        train_accs.append(train_acc)
        del train_data
        del train_labels
        # Evaluate on the whole held-out split every iteration.
        test_data, test_labels = next(iter(test_dataset))
        torch_test_data = torch.zeros((len(test_data), roberta.model.max_positions()), dtype=torch.long)
        for i, elem in enumerate(test_data):
            encoding = roberta.encode(elem)[:roberta.model.max_positions()]
            torch_test_data[i, :len(encoding)] = encoding
        torch_test_labels = torch.LongTensor(test_labels)
        test_data = torch_test_data.to(device)
        test_labels = torch_test_labels.to(device)
        test_predictions = model(test_data)
        test_error = loss_func(test_predictions, test_labels)
        test_acc = accuracy(test_predictions, test_labels)
        test_losses.append(test_error)
        test_accs.append(test_acc)
        # test_error /= len(test_data)
        print('Test Loss : {:.3f} Test Acc : {:.3f}'.format(test_error.item(), test_acc))
        del test_data
        del test_labels
    # Persist per-iteration metrics, suffixed by the initialization source.
    suffix = ''
    if filepath == './models/maml.pt':
        suffix = 'maml'
    elif filepath == './models/pretrain.pt':
        suffix = 'pretrain'
    with open('./models/train_losses_' + suffix + '.pkl', 'wb') as f:
        pickle.dump(train_losses, f)
    with open('./models/train_accs_' + suffix + '.pkl', 'wb') as f:
        pickle.dump(train_accs, f)
    with open('./models/test_losses_' + suffix + '.pkl', 'wb') as f:
        pickle.dump(test_losses, f)
    with open('./models/test_accs_' + suffix + '.pkl', 'wb') as f:
        pickle.dump(test_accs, f)
    del model
if __name__ == '__main__':
    # CLI entry point: parse hyperparameters, seed all RNGs, then run the
    # fine-tuning comparisons (MAML-init vs pretrain-init vs scratch).
    parser = argparse.ArgumentParser(description='Learn2Learn SST Example')
    parser.add_argument('--ways', type=int, default=2, metavar='N',
                        help='number of ways (default: 2)')
    parser.add_argument('--shots', type=int, default=5, metavar='N',
                        help='number of shots (default: 5)')
    parser.add_argument('-tps', '--tasks-per-step', type=int, default=5, metavar='N',
                        help='tasks per step (default: 5)')
    parser.add_argument('-fas', '--fast-adaption-steps', type=int, default=5, metavar='N',
                        help='steps per fast adaption (default: 5)')
    parser.add_argument('--iterations', type=int, default=5, metavar='N',
                        help='number of iterations (default: 5)')
    parser.add_argument('--lr', type=float, default=0.005, metavar='LR',
                        help='learning rate (default: 0.005)')
    parser.add_argument('--maml-lr', type=float, default=0.01, metavar='LR',
                        help='learning rate for MAML (default: 0.01)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    # Seed every RNG in play for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    device = torch.device("cuda" if use_cuda else "cpu")
    # The maml()/pretrain() stages are commented out — this run assumes
    # ./models/maml.pt and ./models/pretrain.pt already exist.
    # print("Training MAML")
    # maml(lr=args.lr,
    #      maml_lr=args.maml_lr,
    #      iterations=args.iterations,
    #      ways=args.ways,
    #      shots=args.shots,
    #      tps=args.tasks_per_step,
    #      fas=args.fast_adaption_steps,
    #      device=device)
    # print("Training pretrain")
    # pretrain(lr=args.lr,
    #          iterations=args.iterations,
    #          shots=args.shots,
    #          fas=args.fast_adaption_steps,
    #          device=device)
    print("Training from MAML")
    train(lr=args.lr,
          iterations=args.iterations,
          shots=args.shots,
          device=device,
          filepath='./models/maml.pt')
    print("Training from pretrain")
    train(lr=args.lr,
          iterations=args.iterations,
          shots=args.shots,
          device=device,
          filepath='./models/pretrain.pt')
    print("Training from scratch")
    train(lr=args.lr,
          iterations=args.iterations,
          shots=args.shots,
          device=device,
          filepath=None)
|
# -*- coding: utf-8 -*-
"""Tests for utmp files."""
import unittest
from dtformats import utmp
from tests import test_lib
class LinuxLibc6UtmpFileTest(test_lib.BaseTestCase):
  """Linux libc6 utmp file tests."""
  # pylint: disable=protected-access
  def testDebugPrintEntry(self):
    """Tests the _DebugPrintEntry function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = utmp.LinuxLibc6UtmpFile(output_writer=output_writer)
    data_type_map = test_file._GetDataTypeMap('linux_libc6_utmp_entry')
    # Build a fully populated entry so every field handled by the debug
    # printer is exercised.
    entry = data_type_map.CreateStructureValues(
        ip_address=test_file._EMPTY_IP_ADDRESS,
        exit_status=5,
        hostname=b'host',
        microseconds=8,
        pid=2,
        session=6,
        terminal=b'vty',
        terminal_identifier=3,
        termination_status=4,
        timestamp=7,
        type=1,
        unknown1=b'unknown',
        username=b'user')
    test_file._DebugPrintEntry(entry)
  def testDecodeString(self):
    """Tests the _DecodeString function."""
    test_file = utmp.LinuxLibc6UtmpFile()
    # The trailing NUL byte must be stripped by the decoder.
    string = test_file._DecodeString(b'test\x00')
    self.assertEqual(string, 'test')
  def testReadEntries(self):
    """Tests the _ReadEntries function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = utmp.LinuxLibc6UtmpFile(output_writer=output_writer)
    test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])
    self._SkipIfPathNotExists(test_file_path)
    with open(test_file_path, 'rb') as file_object:
      test_file._ReadEntries(file_object)
  def testReadFileObject(self):
    """Tests the ReadFileObject."""
    output_writer = test_lib.TestOutputWriter()
    # debug=True also exercises the debug-printing code paths during Open.
    test_file = utmp.LinuxLibc6UtmpFile(debug=True, output_writer=output_writer)
    test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])
    self._SkipIfPathNotExists(test_file_path)
    test_file.Open(test_file_path)
class MacOSXUtmpxFileTest(test_lib.BaseTestCase):
  """Mac OS X 10.5 utmpx file tests."""
  # pylint: disable=protected-access
  def testDebugPrintEntry(self):
    """Tests the _DebugPrintEntry function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = utmp.MacOSXUtmpxFile(output_writer=output_writer)
    data_type_map = test_file._GetDataTypeMap('macosx_utmpx_entry')
    # Build a fully populated entry so every field handled by the debug
    # printer is exercised.
    entry = data_type_map.CreateStructureValues(
        hostname=b'host',
        microseconds=1,
        pid=2,
        terminal=b'vty',
        terminal_identifier=3,
        timestamp=4,
        type=5,
        unknown1=6,
        unknown2=b'unknown',
        username=b'user')
    test_file._DebugPrintEntry(entry)
  def testDecodeString(self):
    """Tests the _DecodeString function."""
    test_file = utmp.MacOSXUtmpxFile()
    # The trailing NUL byte must be stripped by the decoder.
    string = test_file._DecodeString(b'test\x00')
    self.assertEqual(string, 'test')
  def testReadEntries(self):
    """Tests the _ReadEntries function."""
    output_writer = test_lib.TestOutputWriter()
    test_file = utmp.MacOSXUtmpxFile(output_writer=output_writer)
    test_file_path = self._GetTestFilePath(['utmpx-macosx10.5'])
    self._SkipIfPathNotExists(test_file_path)
    with open(test_file_path, 'rb') as file_object:
      test_file._ReadEntries(file_object)
  def testReadFileObject(self):
    """Tests the ReadFileObject."""
    output_writer = test_lib.TestOutputWriter()
    # debug=True also exercises the debug-printing code paths during Open.
    test_file = utmp.MacOSXUtmpxFile(debug=True, output_writer=output_writer)
    test_file_path = self._GetTestFilePath(['utmpx-macosx10.5'])
    self._SkipIfPathNotExists(test_file_path)
    test_file.Open(test_file_path)
if __name__ == '__main__':
  # Run all test cases in this module.
  unittest.main()
|
# Generated by Django 2.1.7 on 2019-04-20 06:42
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the 'price' field to 'Price' on the women_shops_clothes model.

    Auto-generated schema migration; applies after 0007_auto_20190420_1209.
    """
    dependencies = [
        ('amazon', '0007_auto_20190420_1209'),
    ]
    operations = [
        migrations.RenameField(
            model_name='women_shops_clothes',
            old_name='price',
            new_name='Price',
        ),
    ]
|
import sys
import os

# Competitive-programming harness: feed a local input file to stdin so the
# solution below can keep using plain input().
f = open("C:/Users/user/Documents/python/ant/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
MAX_N = 100000

# Greedy interval scheduling: sort jobs by finish time and take every job
# whose start is strictly after the last chosen finish.
N = int(input())
S = list(map(int,input().split()))
T = list(map(int,input().split()))
jobs = sorted(zip(T, S))  # (finish, start) pairs, ordered by finish time
ans = 0
last_finish = 0
for finish, start in jobs:
    if last_finish < start:
        ans += 1
        last_finish = finish
print(ans)
|
#################################################
#Introduction to Python Programming Constructs #
#Csci 1913 #
#Author(s): #
#Date: #
# #
#################################################
import math
#Step 3
def largest(a, b, c, d):
    """Return the largest of the four arguments."""
    result = a
    for candidate in (b, c, d):
        if candidate > result:
            result = candidate
    return result
#Test cases for step 3
print(largest(1, 2, 3, 4))#expected output: 4
#Step 4
def largest2(a, b, c, d):
    """Return the largest int/float argument, skipping non-numeric values.

    Fixes the original bug where seeding the running maximum with 0 made
    all-negative numeric inputs return 0. Still returns 0 when no argument
    is numeric (preserving the original's behavior for that case).
    """
    big = None
    for x in (a, b, c, d):
        if type(x) is int or type(x) is float:
            if big is None or x > big:
                big = x
    return 0 if big is None else big
#Step 4 test cases
print(largest2("a", "b", 4.5, 3))#expected output: 4.5
#Step 5
def largest3(*values):
    """Return the largest int among values; non-int arguments are ignored.

    Fixes the original bug where a 0 seed made all-negative int inputs
    return 0. Still returns 0 when no int is present.
    """
    big = None
    for x in values:
        if type(x) is int:
            if big is None or x > big:
                big = x
    return 0 if big is None else big
#Step 5 test cases
print(largest3(1, 80, 3, 4))#expected output: 80
#Step 6
def largest4(*values):
    """Return the largest int among values; non-int arguments are ignored.

    The original's 'while x > big: big = x' could run at most once and so
    acted as an 'if'; it is written as one here. Also fixes the 0 seed so
    all-negative int inputs return their true maximum (0 is still returned
    when no int is present).
    """
    big = None
    for x in values:
        if type(x) is int:
            if big is None or x > big:
                big = x
    return 0 if big is None else big
#Step 6 test cases
print(largest4(1, 2, 3, 4))#expected output: 4
#Step 7
def insert(item, t):
    """Return a new tuple equal to sorted tuple t with item inserted in order.

    Fixes two bugs in the original: item was silently dropped when it was
    greater than or equal to every element of t, and also when it equaled an
    existing element (neither comparison branch inserted it).
    """
    result = []
    placed = False
    for value in t:
        if not placed and item <= value:
            result.append(item)
            placed = True
        result.append(value)
    if not placed:
        # item is larger than everything in t, so it goes at the end.
        result.append(item)
    return tuple(result)
#Step 7 test cases
print(insert(5, (1, 2, 3, 10, 12))) # produces (1, 2, 3, 5, 10, 12)
print(insert(5, (1,))) # produces (1, 5)
print(insert(5, (1, 2, 3, 10, 12))) # produces (1, 2, 3, 5, 10, 12)
#Step 8
def make_largest(f, g):
    """Return a function h such that h(x) == max(f(x), g(x)).

    Simplified from the original, which returned a redundant
    'lambda x: applylargest(x)' wrapper around the inner function.
    """
    def applylargest(x):
        return max(f(x), g(x))
    return applylargest
#Step 8 test cases
maxsincos = make_largest(math.sin, math.cos)
print(maxsincos(1)) # expected output: sin(1) = 0.8414709848078965
print(maxsincos(0)) # expected output: cos(0) = 1.0
def square(x):
    """Return x squared."""
    return x**2
maxrootsquare = make_largest(math.sqrt, square)
print(maxrootsquare(5)) # expected output: 25 (square(5)=25 beats sqrt(5)~=2.236)
|
import tensorflow as tf
def lstm(input_feature=1000, output_feature = 46):
model = tf.keras.Sequential([
tf.keras.layers.Embedding(input_feature, 80),
tf.keras.layers.LSTM(80),
tf.keras.layers.Dense(output_feature, activation='sigmoid')
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
|
# Print a lower-triangular multiplication table: row x holds x*1 .. x*x.
for x in range(1, 11):
    # Stop the inner loop at x instead of filtering with an 'if' — same
    # output, no wasted iterations. Also removes a stray trailing '|' that
    # made the original last line a syntax error.
    for y in range(1, x + 1):
        print(x*y, end = ' ')
    # NOTE(review): "\r" emits a carriage return before print's newline;
    # a bare print() may have been intended — output preserved as-is.
    print("\r")
import os
import sys
sys.path.append('..')
sys.path.append('../..')
import argparse
import utils
from student_utils import *
"""
======================================================================
Complete the following function.
======================================================================
"""
import numpy as np
import networkx as nx
import student_utils as s_utils
import utils
#------------------
def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):
    """
    Write your algorithm here.
    Input:
        list_of_locations: A list of locations such that node i of the graph corresponds to name at index i of the list
        list_of_homes: A list of homes
        starting_car_location: The name of the starting location for the car
        adjacency_matrix: The adjacency matrix from the input file
    Output:
        A list of locations representing the car path
        A dictionary mapping drop-off location to a list of homes of TAs that got off at that particular location
        NOTE: both outputs should be in terms of indices not the names of the locations themselves
    """
    # NOTE(review): params is an unused mutable default argument.
    # Make networkx graph G
    #G = make_graph(adjacency_matrix)
    G, message = s_utils.adjacency_matrix_to_graph(adjacency_matrix)
    #print("G:", G.edges())
    # Get average distance in between locations ("x" = no edge, counted as 0)
    adj = np.array(adjacency_matrix)
    adj[adj == "x"] = 0
    adj = adj.astype(float)
    #print(adj[0][0])
    avg_dist = np.mean(adj)
    # Get indices of homes
    home_indices = [list_of_locations.index(i) for i in list_of_homes]
    location_indices = range(0, len(list_of_locations))
    starting_index = list_of_locations.index(starting_car_location)
    #print("start index:", starting_index)
    # Get adjacency dictionary of distances for each location
    adjacencies = make_dictionary(adjacency_matrix, location_indices)
    #print("adjacencies:", adjacencies)
    # Get nodes: clusters of homes around representative root locations
    nodes = make_nodes(adjacencies, location_indices, home_indices, starting_index, avg_dist)
    node_roots = list(nodes.keys())
    #print("node_roots:", node_roots)
    # Create graph of just nodes
    node_paths, node_G = shortest_paths(G, list_of_locations, node_roots)
    #print("node_G edges:", node_G.edges())
    #print("edge (5,6)", node_G.get_edge_data(5, 6,default=0) )
    #print("edge (5,8)", node_G.get_edge_data(5, 8,default=0) )
    #print(node_G.nodes())
    # TSP on node_G
    #path = tsp_solver(node_G, starting_index)
    tsp_path = christofides(node_G, starting_index)
    #print("node_path:", tsp_path)
    # Output path for driver IN LOCATIONS
    output_path = make_path(list_of_locations, node_paths, tsp_path, starting_index)
    #print("output path:", output_path)
    #print("tsp_path:", tsp_path, "\n path:", output_path, "\n homes:", home_indices, "\n nodes:", nodes)
    # Drop off points and TAs dropped
    #dropoff_mapping = dictionary of {dropoff_loc: [list of TAs dropped off], ...}
    dropoff_mapping = dropoffs(output_path, nodes, tsp_path, home_indices, list_of_locations)
    # Create output file
    #print("output:", output_path, dropoff_mapping)
    return output_path, dropoff_mapping
# Helpers --------------------------------
# Returns adjacency dictionary of distances for each location {location: [distances to every location], ...}
def make_dictionary(adjacency_matrix, location_indices):
    """Build {location index: row of edge weights}, mapping "x" (no edge) to None.

    :param adjacency_matrix: list of rows; entries are numeric weights or "x"
    :param location_indices: iterable of row indices to include
    :return: dict keyed by location index
    """
    return {
        location: [None if entry == "x" else entry for entry in adjacency_matrix[location]]
        for location in location_indices
    }
# Returns dictionary of {node_home: [homes belonging to node], ...}
def make_nodes(adjacencies, locations, home_indices, starting_index, avg_dist):
    """Cluster homes around candidate root locations.

    A root's cluster contains every home within avg_dist of it; the largest
    clusters are then kept greedily until every home is covered. The
    starting location is always kept as a root.

    :return: {kept root index: [home indices in its cluster]}
    """
    #limit = 12000
    limit = avg_dist #maximum distance away from node's base (average of all distances in adjacency)
    nodes = {} #create a node around every location
    #Create nodes for every home
    for loc in locations:
        if loc in home_indices:
            nodes[loc] = list()
            nodes[loc].append(loc) #Start every home's node with itself
        else:
            nodes[loc] = list()
        for index in home_indices:
            distance = adjacencies[loc][index]
            if (distance != None) and (distance < limit):
                #append other home that is within limit to the node starting at that home
                #current = nodes[loc]
                nodes[loc].append(index) #returns None
    #Clean up node dictionary to only contain largest nodes ------------
    deleted_nodes = nodes.copy()
    homes_represented = home_indices
    nodes_to_keep = list()
    #for node in node.keys():
    while homes_represented:
        # Pick the remaining root with the most homes attached.
        v = list(deleted_nodes.values())
        k = list(deleted_nodes.keys())
        biggest_node = k[v.index(max(v, key=len))]
        #remove homes that are already included in list
        #print(len(homes_represented))
        homes_represented = [x for x in homes_represented if x not in nodes[biggest_node]]
        deleted_nodes.pop(biggest_node, None)
        # Covered homes can no longer serve as roots themselves.
        for home in nodes[biggest_node]:
            deleted_nodes.pop(home, None)
        #print("deleted_nodes:", deleted_nodes)
        nodes_to_keep.append(biggest_node)
        #print(nodes_to_keep)
    if starting_index not in nodes_to_keep:
        nodes_to_keep.append(starting_index)
    return {key: nodes[key] for key in nodes_to_keep}
#returns dictionary of shortest paths between nodes {(node_1, node_2): [list of path], ...}
# for outputting driver path
#returns new graph of just nodes and their associated distances
# for TSP solving with dwave
def shortest_paths(G, list_of_locations, node_roots):
    """Compute pairwise shortest paths between node roots.

    :return: (node_paths, node_G) where node_paths maps (source, target)
        tuples to location-level paths and node_G is a complete weighted
        graph over the roots using shortest-path distances as edge weights.
    """
    node_paths = {}
    #node_distances = {}
    #Make new graph of nodes
    node_G = nx.Graph()
    for node in node_roots:
        node_G.add_node(node)
    #Get shortest path between every node and the distance of that path
    for node_s in node_roots:
        # NOTE(review): this scalar-keyed entry stays an empty list; all real
        # paths are stored under (node_s, node_t) tuple keys below.
        node_paths[node_s] = list()
        for node_t in node_roots:
            #index_s = list_of_locations.index(node_s)
            #index_t = list_of_locations.index(node_t)
            if (node_s != node_t):
                path = nx.shortest_path(G, source = node_s, target = node_t, weight = "weight")
                node_paths[(node_s, node_t)] = path
                path_weight = nx.shortest_path_length(G, source= node_s, target= node_t, weight= "weight")
                node_G.add_edge(node_s, node_t, weight= path_weight)
    return node_paths, node_G
# Christofides TSP Solver
def christofides(G, starting_node):
    """Approximate a TSP tour of G starting (and implicitly ending) at starting_node.

    Builds an MST, adds greedy matching edges between odd-degree vertices,
    then walks the result skipping already-visited vertices to form the tour.
    """
    optimal_path = list()
    MST = nx.minimum_spanning_tree(G, weight='weight') # generates a minimum spanning tree of G
    odd_vertices = [] #list containing vertices with odd degree
    for i in MST.nodes():
        if MST.degree(i)%2 != 0:
            odd_vertices.append(i) #if the degree of the vertex is odd, then append it to odd_vertices list
    minimumWeightedMatching(MST, G, odd_vertices) #adds minimum weight matching edges to MST
    # now MST has the Eulerian circuit
    start = starting_node
    visited = {node: False for node in MST.nodes()}
    # finds the hamiltonian circuit (skips repeated vertices)
    curr = start
    visited[curr] = True
    optimal_path.append(curr)
    # NOTE(review): 'next' shadows the builtin of the same name.
    next = None
    for nd in MST.neighbors(curr):
        if visited[nd] == False or nd == start:
            next = nd
            break
    if next == None:
        # Start vertex has no neighbors in the MST: trivial tour.
        return [start]
    while next != start:
        visited[next]=True
        optimal_path.append(next)
        # finding the shortest Eulerian path from MST
        curr = next
        for nd in MST.neighbors(curr):
            if visited[nd] == False:
                next = nd
                break
        if next == curr:
            # Dead end in the MST: fall back to any unvisited neighbor in G.
            for nd in G.neighbors(curr):
                if visited[nd] == False:
                    next = nd
                    break
        if next == curr:
            # Nothing left to visit: close the cycle back at the start.
            next = start
    optimal_path.append(next)
    return optimal_path
#utility function that adds minimum weight matching edges to MST
def minimumWeightedMatching(MST, G, odd_vertices):
    """Greedily pair odd-degree vertices, adding each pair's cheapest
    connecting edge (weight taken from G) to MST.

    Mutates both MST and odd_vertices in place; odd_vertices is consumed.
    """
    while odd_vertices:
        current = odd_vertices.pop()
        best_weight = float("inf")
        partner = 0
        for candidate in odd_vertices:
            candidate_weight = G[current][candidate]['weight']
            if candidate_weight < best_weight:
                best_weight = candidate_weight
                partner = candidate
        MST.add_edge(current, partner, weight=best_weight)
        odd_vertices.remove(partner)
# Create output path for driver
def make_path(list_of_locations, node_paths, tsp_path, starting_index):
    """Expand a TSP tour over root nodes into a full walk in the original graph.

    node_paths maps (source, target) tuples to the shortest path between them;
    for each consecutive tour pair the stored path (forward, or reversed when
    only the opposite direction is stored) is appended without repeating the
    shared endpoint.
    """
    output_path = list()
    p_prev = starting_index
    output_path.append(starting_index)
    for p_curr in tsp_path[1:]:
        # BUG FIX: this used to test node_paths[(p_prev, p_curr)][1] for
        # truthiness, which wrongly fell through when the second node on the
        # path was index 0 (falsy) and raised KeyError when only the reverse
        # pair was stored. Test key membership instead.
        if (p_prev, p_curr) in node_paths:
            path = node_paths[(p_prev, p_curr)][1:]
        else:
            # Only the reverse pair is stored: reverse it, then drop the
            # shared starting node.
            path = node_paths[(p_curr, p_prev)][::-1][1:]
        output_path.extend(path)
        p_prev = p_curr
    return output_path
# Create dictionary for drivers and paths
"""
dropped_TAs = Create dictionary of home_indices for whether or not they've been dropped off
dropoff_mapping = Create dictionary of every location on output_path and accompanying list for TAs dropped there
Iterate through output_path:
If path stop matches home, drop off TA
Mark in dropped_TAs
Add TA to dropoff_mapping for that location (in terms of INDEX)
Iterate through tsp_path:
For every node, drop off associated TAs if they haven't been dropped off yet
Mark in dropped_TAs
Add TA to dropoff_mapping for that node (in terms of INDEX)
Sort dropoff_mapping keys in order they are reached in output_path
return dropoff_mapping
"""
def dropoffs(output_path, nodes, tsp_path, home_indices, list_of_locations):
    """Decide where each TA is dropped along the driver's path.

    First pass: a TA whose home lies directly on output_path is dropped at
    home. Second pass: any remaining TA clustered at a tour node (nodes[loc])
    is dropped there. Raises ValueError if some TA is never dropped off.

    Returns {location_index: [ta_home_indices dropped there]}.
    """
    dropped_TAs = {home: False for home in home_indices}
    dropoff_mapping = {}
    # Pass 1: drop TAs whose homes the car drives through.
    for loc in output_path:
        if loc in home_indices and not dropped_TAs[loc]:
            dropped_TAs[loc] = True
            dropoff_mapping.setdefault(loc, []).append(loc)
    # Pass 2: drop each cluster's remaining TAs at its tour node.
    for loc in output_path:
        if loc in tsp_path:
            for ta in nodes[loc]:
                if not dropped_TAs[ta]:
                    dropped_TAs[ta] = True
                    dropoff_mapping.setdefault(loc, []).append(ta)
    # Sanity check: every TA must have been dropped somewhere.
    for ta, dropped in dropped_TAs.items():
        if not dropped:
            raise ValueError("This TA hasn't been dropped off: ", ta)
    return dropoff_mapping
"""
======================================================================
No need to change any code below this line
======================================================================
"""
"""
Convert solution with path and dropoff_mapping in terms of indices
and write solution output in terms of names to path_to_file + file_number + '.out'
"""
def convertToFile(path, dropoff_mapping, path_to_file, list_locs):
    """Translate an index-based solution into names and write it out.

    Output format: the car path on one line, the number of dropoff locations,
    then one line per dropoff (location followed by the TAs dropped there).
    """
    lines = []
    lines.append(' '.join(list_locs[node] for node in path))
    lines.append(str(len(dropoff_mapping.keys())))
    for dropoff in dropoff_mapping.keys():
        names = [list_locs[dropoff]]
        names.extend(list_locs[node] for node in dropoff_mapping[dropoff])
        lines.append(' '.join(names))
    utils.write_to_file(path_to_file, '\n'.join(lines) + '\n')
def solve_from_file(input_file, output_directory, params=[]):
    """Solve one input file and write the corresponding .out file."""
    print('Processing', input_file)
    input_data = utils.read_file(input_file)
    (num_of_locations, num_houses, list_locations, list_houses,
     starting_car_location, adjacency_matrix) = data_parser(input_data)
    car_path, drop_offs = solve(
        list_locations, list_houses, starting_car_location,
        adjacency_matrix, params=params)
    # Create the output directory on first use.
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    output_file = utils.input_to_output(input_file, output_directory)
    convertToFile(car_path, drop_offs, output_file, list_locations)
def solve_all(input_directory, output_directory, params=[]):
    """Run the solver over every '.in' file found in input_directory."""
    for input_file in utils.get_files_with_extension(input_directory, 'in'):
        solve_from_file(input_file, output_directory, params=params)
if __name__ == "__main__":
    # Command-line entry point: solve a single file, or every file in a
    # directory when --all is given.
    arg_parser = argparse.ArgumentParser(description='Parsing arguments')
    arg_parser.add_argument('--all', action='store_true', help='If specified, the solver is run on all files in the input directory. Else, it is run on just the given input file')
    arg_parser.add_argument('input', type=str, help='The path to the input file or directory')
    arg_parser.add_argument('output_directory', type=str, nargs='?', default='.', help='The path to the directory where the output should be written')
    arg_parser.add_argument('params', nargs=argparse.REMAINDER, help='Extra arguments passed in')
    parsed = arg_parser.parse_args()
    if parsed.all:
        solve_all(parsed.input, parsed.output_directory, params=parsed.params)
    else:
        solve_from_file(parsed.input, parsed.output_directory, params=parsed.params)
|
from image_diet.test_diet import DietTest
from image_diet.test_commands import DietCommandTest
|
# Tuple (immutable) holding the 20 clubs of the Brazilian championship table.
tabela = ('Atlético-PR', 'Atlético-GO', 'Atlético-MG', 'Bahia', 'Botafogo',
          'Ceará SC', 'Corinthinas', 'Coritiba', 'Flamengo', 'Fluminense',
          'Fortaleza', 'Goiás', 'Grêmio', 'Internacional', 'Palmeiras',
          'Bragantino-SP', 'Santos', 'Sport Recife', 'São Paulo', 'Vasco da Gama')
linha = '-=' * 15
print(linha)
print(f'Lista de times do Brasileirão: {tabela}')
print(linha)
print(f'Os 5 primerios são {tabela[0:5]}')
print(linha)
print(f'Os 4 últimos são {tabela[16:20]}')
print(linha)
# Normalise the club name, then report its 1-based position in the table.
time = 'sAnToS'.capitalize()
pos = tabela.index(time) + 1
print(f'O {time} está na {pos}ª posição')
|
# -*- mode: python -*-
import os
import shutil
# Remove every __pycache__ directory under the working tree before building,
# so stale bytecode is never bundled into the distribution.
for parent, subdirs, _files in os.walk("."):
    for name in subdirs:
        cache_dir = os.path.join(parent, name)
        if name == '__pycache__' and os.path.isdir(cache_dir):
            shutil.rmtree(cache_dir)
# PyInstaller build spec. Analysis/PYZ/EXE/COLLECT are injected into the
# namespace by PyInstaller when it executes this spec file.
block_cipher = None  # no bytecode encryption
a = Analysis(
    ['ACExplorer.py'],
    pathex=['D:\\Programs\\AC-Explorer'],
    binaries=[],
    # Ship resources, engine code, plugins and the icon alongside the exe.
    datas=[
        ('./resources', './resources'),
        ('./pyUbiForge', './pyUbiForge'),
        ('./plugins', './plugins'),
        ('./icon.ico', '.')
    ],
    # These PIL submodules are imported dynamically, so declare them here.
    hiddenimports=['PIL.Image', 'PIL.ImageDraw'],
    hookspath=[],
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False
)
pyz = PYZ(
    a.pure,
    a.zipped_data,
    cipher=block_cipher
)
exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='ACExplorer',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,  # keep a console window so log output stays visible
    icon='icon.ico'
)
coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    name='ACExplorer'
)
|
import json
import responses
from changes import packaging, vcs
from . import context, setup, teardown
def test_commit_version_change():
    """Smoke test: committing the version bump should not raise."""
    vcs.commit_version_change(context)
def test_tag_and_push():
    """Smoke test: tagging the release and pushing should not raise."""
    vcs.tag_and_push(context)
@responses.activate
def test_github_release():
    """create_github_release should POST to the GitHub releases API and
    return the upload_url from the JSON response."""
    # Mock the GitHub create-release endpoint.
    responses.add(
        responses.POST,
        'https://api.github.com/repos/michaeljoseph/test_app/releases',
        body=json.dumps(dict(
            id='release-id',
            upload_url='http://upload.url.com/'
        )),
        status=201,
        content_type='application/json'
    )
    upload_url = vcs.create_github_release(context, 'gh-token', 'Description')
    assert upload_url == 'http://upload.url.com/'
@responses.activate
def test_upload_release_distributions():
    """Each built distribution should be POSTed to the release upload URL."""
    # Temporarily disable dry-run so real distribution files get built.
    context.dry_run = False
    distributions = packaging.build_distributions(context)
    context.dry_run = True
    # Register one mocked upload endpoint per distribution file.
    for distribution in distributions:
        responses.add(
            responses.POST,
            'http://upload.url.com/',
            status=201,
            content_type='application/json'
        )
    vcs.upload_release_distributions(
        context,
        'gh-token',
        distributions,
        'http://upload.url.com/',
    )
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class VinciaToolfile(Package):
    """Spack package that only generates a SCRAM toolfile for Vincia."""
    # Placeholder source artifact; this package installs no real software.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('vincia')
    def install(self, spec, prefix):
        """Render vincia.xml with the dependency's version and prefix
        substituted, then hand it to write_scram_toolfile."""
        values = {}
        values['VER'] = spec['vincia'].version
        values['PFX'] = spec['vincia'].prefix
        fname = 'vincia.xml'
        # '$VER'/'$PFX' are template substitutions; '$$NAME' escapes to a
        # literal '$NAME' reference in the generated XML.
        contents = str("""
<tool name="vincia" version="$VER">
  <lib name="vincia"/>
  <lib name="VinciaMG4"/>
  <lib name="VinciaMG5"/>
  <client>
    <environment name="VINCIA_BASE" default="$PFX"/>
    <environment name="LIBDIR" default="$$VINCIA_BASE/lib"/>
    <environment name="INCLUDE" default="$$VINCIA_BASE/include"/>
  </client>
  <runtime name="VINCIADATA" value="$$VINCIA_BASE/share/Vincia/xmldoc"/>
  <use name="root_cxxdefaults"/>
  <use name="pythia8"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
|
from django.db import models
from taggit.managers import TaggableManager
# Create your models here.
# Blog Post Model
class Blogpost(models.Model):
    """A blog post with taggable content and a view counter."""
    title = models.CharField(max_length=250)
    tags = TaggableManager()  # django-taggit tag set
    image = models.ImageField(upload_to='post/', blank=True, null=True)  # optional cover image
    description = models.TextField(max_length=5000)  # post body
    time = models.DateTimeField(auto_now_add=True)  # set once at creation
    slug = models.SlugField(unique=True)  # URL identifier
    read = models.IntegerField(default=0)  # number of reads
# Comment Model
class Comment(models.Model):
    """A reader comment attached to a Blogpost; hidden until approved."""
    post = models.ForeignKey(Blogpost, on_delete= models.CASCADE, related_name='comments')
    name = models.CharField(max_length=100)
    body = models.TextField(max_length=1000)
    email = models.CharField(max_length=150)
    creation = models.DateTimeField(auto_now_add=True)  # set once at creation
    approve = models.BooleanField(default=False)  # moderation flag
|
from dataclasses import dataclass, astuple
from copy import deepcopy
@dataclass
class Pos:
    """A 2-D grid coordinate ordered and hashed by (y, x)."""
    y: int
    x: int

    def __hash__(self):
        # Hash by field values so positions can key dicts and sets.
        return hash((self.y, self.x))

    def __add__(self, other):
        return Pos(y=self.y + other.y, x=self.x + other.x)

    def __lt__(self, other):
        # Row-major ordering: compare y first, then x.
        return (self.y, self.x) < (other.y, other.x)

    def neighbours(self, min_p, max_p):
        """Yield the up-to-8 adjacent positions within [min_p, max_p]."""
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue  # skip self
                cand = Pos(y=self.y + dy, x=self.x + dx)
                if min_p.x <= cand.x <= max_p.x and min_p.y <= cand.y <= max_p.y:
                    yield cand
def print_map(forest):
    """Pretty-print the forest dict {Pos: char} in row-major order."""
    prev = None
    for pos, ch in sorted(forest.items(), key=lambda item: item[0]):
        # Start a new output line whenever the row (y) changes.
        if prev is None or prev.y != pos.y:
            print('\n', end='')
            prev = pos
        print(ch, end='')
    print('\n', end='')
def solution1(data, rounds):
    """Run the forest cellular automaton for `rounds` minutes.

    `data` yields (y, x, char) cells ('.' open, '|' trees, '#' lumberyard).
    Returns the product of the final counts of '|' and '#' cells.
    """
    forest = {}
    min_p = None
    max_p = None
    # Build the map and track the bounding corners.
    # NOTE(review): min/max use (y, x) lexicographic order; this gives the
    # true corners only for a rectangular grid anchored at the origin —
    # confirm inputs are always full rectangles.
    for y, x, c in data:
        p = Pos(y=y, x=x)
        forest[p] = c
        if min_p is None:
            min_p = p
        if max_p is None:
            max_p = p
        min_p = min(min_p, p)
        max_p = max(max_p, p)
    for i in range(rounds):
        # All cells change simultaneously: write into a copy while reading
        # the previous generation.
        new_forest = deepcopy(forest)
        for pos, c in forest.items():
            adj = []
            for n in pos.neighbours(min_p, max_p):
                adj.append(forest[n])
            if c == '.' and adj.count('|') >= 3:
                # Open ground with 3+ tree neighbours grows trees.
                new_forest[pos] = '|'
            elif c == '|' and adj.count('#') >= 3:
                # Trees with 3+ lumberyard neighbours become a lumberyard.
                new_forest[pos] = '#'
            elif c == '#':
                # A lumberyard persists only next to >=1 lumberyard and
                # >=1 trees; otherwise it reverts to open ground.
                if adj.count('#') >= 1 and adj.count('|') >= 1:
                    new_forest[pos] = '#'
                else:
                    new_forest[pos] = '.'
        forest = new_forest
    print_map(forest)
    chars = list(forest.values())
    return chars.count('|') * chars.count('#')
def parse_input1(input):
    """Yield (y, x, char) for every character of a newline-separated grid."""
    for row, line in enumerate(input.split('\n')):
        for col, ch in enumerate(line):
            yield row, col, ch
if __name__ == '__main__':
    # Part 1: run the automaton for 10 rounds over the puzzle input.
    with open('input.txt') as fh:
        print(solution1(parse_input1(fh.read()), 10))
|
import os
import pickle
class ConfigKeyError(Exception):
    """Raised when a ConfigDict lookup fails; reports the available keys."""
    def __init__(self, this, key):
        # Chain to the base Exception so .args and repr() behave
        # conventionally (the original skipped super().__init__ entirely).
        super().__init__(key)
        self._key = key
        self._keys = this.keys()
    def __str__(self):
        return "Key \"{}\" not found. Available Keys : {}".format(self._key, self._keys)
class ConfigDict(dict):
    """A dict persisted to a pickle file; every write saves immediately."""
    # Directory where config pickles live.
    config_directory = '/oop_python/'
    def __init__(self, config_name):
        # NOTE(review): _fname is deliberately kept as a class attribute —
        # unpickling a dict subclass invokes __setitem__ before instance
        # state is restored, so an instance attribute would break loading.
        # Side effect: creating a second ConfigDict redirects writes of all
        # existing instances; confirm single-config usage is intended.
        ConfigDict._fname = ConfigDict.config_directory + config_name + '.pickle'
        if not os.path.isfile(ConfigDict._fname):
            # Seed a brand-new config file with a default payload.
            with open(self._fname, 'wb') as fh:
                pickle.dump({'a': 1}, fh)
        with open(ConfigDict._fname, 'rb') as fh2:
            unpickledlist = pickle.load(fh2)
        self.update(unpickledlist)
    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        # Persist the whole mapping on every write.
        with open(ConfigDict._fname, 'wb') as fh:
            pickle.dump(self, fh)
    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # BUG FIX: narrowed from a bare `except:` so only missing keys
            # are translated; unrelated errors now propagate normally.
            raise ConfigKeyError(self, key)
#cd = ConfigDict('/a/b/doesnotexist.txt')
#cc = ConfigDict('config_file.txt')
#print(cc['non_existing_key'])
|
from django.contrib import admin
from .models import Roaster
# Register your models here.
# Expose the Roaster model in the Django admin with default ModelAdmin options.
admin.site.register(Roaster)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
import seaborn as sns
#load page_views and page_edits
df_edits = pd.read_csv('pageedits_agg_subset.txt', sep="\t", header=None,
                       names=["article","article_id","page_size","num_revisions",
                              "num_editors","avg_time_bet_edits","avg_edits_user"
                              ,"avg_edits_month","avg_edits_years",
                              "edits_day","edits_week","edits_month","edits_year"])
df_views = pd.read_csv('part-00000',sep=",", header=None, names=["article","page_views"])
#cleaning: strip the "('...", "u')" tuple-literal residue around the values
df_views['article'] = df_views['article'].map(lambda x: x.strip("('"))
df_views['page_views'] = df_views['page_views'].map(lambda x: x.strip("u')"))
# Drop the first three characters of the remaining page_views text.
# NOTE(review): assumes a fixed 3-char prefix (e.g. ", u") survives the strip
# above — confirm against the raw part-00000 format.
df_views['page_views'] = df_views['page_views'].map(lambda x: x[3:])
# join page_views and page_ids based on article name
df_edit_views = df_edits.merge(df_views)
#more cleaning: remove thousands separators before integer conversion
df_edit_views['edits_month'] = df_edit_views['edits_month'].map(lambda x: str(x).replace(',',''))
df_edit_views2 = df_edit_views[(df_edit_views['edits_month'].notnull()) & (df_edit_views['edits_month'] != 'nan')]
# NOTE(review): this mask is built from df_edit_views rather than
# df_edit_views2 — the indexes align here, but confirm it is intentional.
df_edit_views3 = df_edit_views2[df_edit_views['page_views'].notnull()]
# NOTE(review): page_views is still a string column at this point, so the
# `> 0` comparison is lexicographic — confirm intent (ints come later via
# remove_strings below).
df_edit_views4 = df_edit_views3[(df_edit_views3['page_views'] > 0) & (df_edit_views3['page_views'] != 'nan')]
df_edit_views4['edits_month'] = df_edit_views4['edits_month'].astype(int)
def remove_strings(x):
    """Coerce x to int, treating anything non-numeric as 0.

    BUG FIX: narrowed from a bare ``except`` so only conversion failures
    (ValueError/TypeError) are swallowed; unrelated errors such as
    KeyboardInterrupt now propagate.
    """
    try:
        return int(x)
    except (ValueError, TypeError):
        return 0
# Convert page_views to real ints, dropping rows that fail to parse.
df_edit_views4['page_views'] = df_edit_views4['page_views'].apply(remove_strings)
df_edit_views4 = df_edit_views4[df_edit_views4['page_views'] > 0]
df_edit_views4['page_views'] = df_edit_views4['page_views'].astype(int)
# calculate wr_ratio (edits per view) and sort descending
df_edit_views4['wr_ratio'] = df_edit_views4['edits_month'].astype(int)/df_edit_views4['page_views'].astype(int)
df_edit_views5 = df_edit_views4.sort_values(["wr_ratio"],ascending=False)
df_edit_views5_unique = df_edit_views5.drop_duplicates(['article_id'])
#plot: one point per article, ranked by wr_ratio
num = len(df_edit_views5_unique)
x = np.linspace(1, num, num)
fig = plt.figure(figsize=(15, 10))
plt.xlim([0,10470])
#plt.yscale('log')
#plt.xscale('log')
plt.ylabel('Edit/Read Ratio')
y = df_edit_views5_unique['wr_ratio']
# Bucket articles by ratio magnitude (labels currently unused in the plot).
labels = []
for i in df_edit_views5_unique['wr_ratio']:
    if i > 0.2:
        labels.append(0)
    elif i > 0.0001:
        labels.append(1)
    else:
        labels.append(2)
# Marker size proportional to the ratio.
s =[100*i for i in df_edit_views5_unique['wr_ratio']]
#pl.scatter(x,y)
plt.scatter(x,y,s=s)
#plt.scatter(x,y,color = (x,np.array(labels)),s=s)
plt.savefig('edit_reads_log_plot.pdf')
|
DEFAULT_DISCOUNT_PERCENT = 5  # Bonus-card discount, in percent
BONUS_CARD_NUMBER_LEN = 15  # Length of the full card number
BONUS_CARD_EMBOSSED_LEN = 6  # Length of the short embossed number
# Map the length of an entered number to the model field to search by.
CARD_SEARCH_MAP = {
    BONUS_CARD_EMBOSSED_LEN: 'embossed_number',
    BONUS_CARD_NUMBER_LEN: 'number',
}
LOGGER_NAME = 'errors'
CEILING = 50  # Roubles — the multiple amounts must be rounded up to
|
import os
import sys
import re
class Worker:
    """An elf worker: the current job letter ('.' means idle) and the
    seconds remaining on that job."""
    def __init__(self):
        # '.' marks an idle worker; timeRemaining counts down each second.
        self.job, self.timeRemaining = ".", 0
def setup():
    """Prompt for an input file and load its stripped lines into the global
    fileData list."""
    global fileHandle, fileData
    filename = input("Enter an input file name (default input2.txt): ")
    if filename == "":
        filename = "input2.txt"
    path = "./%s" % filename
    # BUG FIX: os.path.getsize() used to run even when the file did not
    # exist, raising OSError before the friendly message; and the bare
    # `exit` below was a no-op name reference, so execution continued with
    # an unopened file. Short-circuit the check and actually exit.
    if not (os.path.isfile(path) and os.path.getsize(path) > 0):
        print("File doesn't exist or is empty.")
        sys.exit(1)
    fileData = []
    with open(path, "r") as fileHandle:
        for line in fileHandle:
            fileData.append(line.rstrip())
def readInstructions():
    """Parse fileData lines into the step-dependency graph.

    Populates the globals:
      nextSteps      -- { X : [steps that come after X] }
      dependentSteps -- { X : [steps X depends on] }
      remainingSteps -- sorted list of all distinct steps
    """
    global nextSteps, dependentSteps, remainingSteps
    nextSteps = {} # { X : [steps after step X]}
    dependentSteps = {} # { X : [steps X is dependent on]}
    remainingSteps = []
    for entry in fileData:
        # Lines look like: "Step A must be finished before step B can begin."
        match = re.search('Step (\\w) must be finished before step (\\w) can begin.', entry)
        pre = match.group(1)
        post = match.group(2)
        if pre not in nextSteps:
            nextSteps.update({ pre : [post] })
        else:
            nextSteps[pre].append(post)
        if post not in dependentSteps:
            dependentSteps.update({ post : [pre] })
        else:
            dependentSteps[post].append(pre)
        # Build list of all steps to execute
        remainingSteps.append(pre)
        remainingSteps.append(post)
    # Deduplicate and sort the overall step list.
    remainingSteps = sorted(set(remainingSteps))
def findFirstSteps(list):
    """Return steps with no recorded prerequisites, sorted descending so the
    alphabetically first step is popped off the stack first.

    NOTE(review): the parameter shadows the builtin `list`; callers pass the
    dependentSteps mapping, so `not in list` means "has no prerequisites".
    """
    global remainingSteps
    temp = []
    for step in remainingSteps:
        if step not in list:
            temp.append(step)
    temp.sort(reverse=True)
    return temp # One or more first steps
def findLastStep(list):
    """Return the step that has no successors (callers pass nextSteps).

    NOTE(review): the parameter shadows the builtin `list`; returns the first
    such step found, assuming exactly one terminal step exists.
    """
    global remainingSteps
    for step in remainingSteps:
        if step not in list:
            return step # Just one last step
def markJobExecuted(worker):
    """Move the worker's finished job from remainingSteps to executedSteps."""
    global executedSteps, remainingSteps
    remainingSteps.remove(worker.job)
    executedSteps.append(worker.job)
def readyToExecute(step):
    """Return True when none of the step's prerequisites are still pending."""
    global dependentSteps, remainingSteps
    for predecessor in dependentSteps[step]:
        if predecessor in remainingSteps:
            # Early exit: one unfinished prerequisite already decides it.
            # (Previously the loop scanned all predecessors regardless.)
            return False
    return True
def reviewBacklog():
    """Move steps whose prerequisites are now complete from the backlog onto
    the instruction stack."""
    global backlog, instructionStack
    # BUG FIX: the original iterated `backlog` while removing from it, which
    # skips the element following each removal; iterate a snapshot instead.
    for step in list(backlog):
        if readyToExecute(step):
            backlog.remove(step)
            if step not in instructionStack:
                instructionStack.append(step)
def pushNextSteps(step):
    """Queue the successors of `step`: executable ones go on the stack,
    not-yet-ready ones into the backlog for later review.

    NOTE(review): the loop variable `next` shadows the builtin.
    """
    global lastStep, nextSteps, instructionStack, backlog
    if step == lastStep: # Nothing to do for last step
        return
    # Push onto the stack if ready to execute, otherwise push
    # into the backlog to be reviewed next execution cycle.
    for next in nextSteps[step]:
        if readyToExecute(next):
            if next not in instructionStack:
                instructionStack.append(next)
            if next in backlog:
                backlog.remove(next)
        elif next not in backlog:
            backlog.append(next)
    # Stack must remain sorted, unique and filled with executable steps only.
    instructionStack.sort(reverse=True)
def printHeader(workerCount):
    """Print the parsed graph state and the per-second table header."""
    print ("=========================")
    print ("Next", nextSteps)
    print ("Dependent", dependentSteps)
    print ("Starting Stack", instructionStack)
    print ("=========================")
    # Column headings: the second counter, one column per elf, then output.
    print ("Second ", " ".join(["Elf "+str(x+1) for x in range(workerCount)]), " Done")
def printStats(step):
    """Debug helper: dump the scheduler's mutable state after a step."""
    global instructionStack, backlog, remainingSteps, executedSteps
    print ("Stack", instructionStack, "=>", step)
    print ("Backlog", backlog)
    print ("Remaining", remainingSteps)
    print ("Order", executedSteps)
    print ("=========================")
def printSecondSummary(timer):
    """Print one table row: the second, each worker's job, executed steps."""
    global workerQueue, workerCount, executedSteps
    print(" {0} ".format(str(timer).zfill(4)), " ".join([workerQueue[i].job for i in range(workerCount)]), " ", "".join(executedSteps))
def secondsPerStep(step):
    """Duration of a step: 60 seconds base plus 1..26 for letters A..Z."""
    return ord(step) - ord("A") + 1 + 60
def isWorking(worker):
    """True when the worker has a real job assigned ('.' means idle)."""
    return worker.job != "."
def assignNextJob(worker):
    """Pop the next step off the stack for an idle worker and start its
    countdown timer; busy workers are left untouched."""
    global instructionStack, workerQueue
    index = workerQueue.index(worker)
    if not isWorking(worker) and len(instructionStack) > 0:
        workerQueue[index].job = instructionStack.pop()
        workerQueue[index].timeRemaining = secondsPerStep(workerQueue[index].job)
def assignInitialJobs():
    """Give every worker a job from the preloaded instruction stack."""
    global workerQueue
    for worker in workerQueue:
        assignNextJob(worker)
# --- Main driver: simulate 5 elves executing the step graph in parallel. ---
setup()
readInstructions()
firstSteps = findFirstSteps(dependentSteps) # First steps won't have predecessors
lastStep = findLastStep(nextSteps) # Last step won't have next steps
instructionStack = firstSteps # Preload the stack with the first steps
backlog = [lastStep] # Preload the backlog with the last step
executedSteps = [] # Output
# remainingSteps = [all steps]
stepCount = len(remainingSteps)
workerCount = 5
workerQueue = []
timer = 0
for i in range(workerCount):
    workerQueue.append(Worker())
printHeader(workerCount)
assignInitialJobs() # Assign the first jobs
# Loop-invariant: only executable steps are in the stack. Non-executable are in the backlog.
# Each cycle processes one second.
while len(remainingSteps) > 0:
    # First process all steps finished last round.
    for i in range(workerCount):
        # Process any steps finished last round and reset workers' jobs.
        if isWorking(workerQueue[i]) and workerQueue[i].timeRemaining == 0:
            markJobExecuted(workerQueue[i])
            reviewBacklog()
            pushNextSteps(workerQueue[i].job)
            workerQueue[i].job = "."
            #printStats(workerQueue[i].job)
    # Next, give workers jobs starting from Worker 1.
    for i in range (workerCount):
        assignNextJob(workerQueue[i])
        workerQueue[i].timeRemaining -= 1
    printSecondSummary(timer)
    timer += 1
# Final report: the execution order and elapsed time (timer overshoots by 1).
if len(executedSteps) == stepCount: # 1099
    print ("Step Order:", ''.join([step for step in executedSteps]), "// Time: {0} seconds".format(int(timer)-1) )
else:
    print ("Error in executing steps.")
'''
Given two arrays, write a function to compute their intersection.
Example 1:
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [9,4]
Note:
Each element in the result must be unique.
The result can be in any order.
'''
class Solution:
    def intersection(self, nums1, nums2):
        """Return the unique elements common to nums1 and nums2.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]

        Elements appear in nums1 first-occurrence order (same as before).
        Uses set lookups instead of repeated list scans, improving the
        original O(len(nums1) * len(nums2)) to O(len(nums1) + len(nums2)).
        """
        lookup = set(nums2)
        seen = set()
        result = []
        for value in nums1:
            if value in lookup and value not in seen:
                seen.add(value)
                result.append(value)
        return result
|
# Recursive backtracking; maintain the accompanying constraints carefully.
# Segments starting with '0' need special handling.
# Works directly on the string.
class Solution:
    def restoreIpAddresses(self, s: str) -> "List[str]":
        """Return every valid dotted IPv4 address that s can be split into.

        NOTE(review): the return annotation is a string forward-reference
        because `List` is not imported here (LeetCode injects it).
        """
        self.res = []
        # More than 12 digits can never form four 1-3 digit segments.
        if len(s) > 12:
            return self.res
        def recur(s:str, cnt:int, temp:str):
            # Prune: the rest of the string cannot fit the remaining segments.
            if len(s) > (4-cnt)*3:
                return
            # Whole string consumed: record the address if exactly 4 segments.
            if s == '':
                if cnt == 4:
                    # Drop the trailing '.'
                    self.res.append(temp[:-1])
                return
            # A segment starting with '0' must be exactly "0".
            if s[0] == '0':
                temp = temp + s[0] + '.'
                recur(s[1:], cnt+1, temp)
            else:
                if len(s) > 0:
                    # Take one digit.
                    recur(s[1:], cnt+1, temp + s[0] + '.')
                if len(s) > 1:
                    # Take two digits.
                    recur(s[2:], cnt+1, temp + s[0:2] + '.')
                if len(s) > 2 and int(s[0:3]) <= 255:
                    # Take three digits; a segment must not exceed 255.
                    recur(s[3:], cnt+1, temp + s[0:3] + '.')
        recur(s, 0, "")
        return self.res
|
#!/usr/bin/env python
# coding: utf-8
# # Assignment 4 Day 4
# Q.1 Find all occurences of substring in given string and print index.
#
# Input String: " what we think we become; we are python programmer"
# In[10]:
# Q1: count occurrences of `substring` in `mainStr` and report the first index.
mainStr = 'what we think we become; we are python programmer'
substring = 'we'
print('substring is:',substring)
count = mainStr.count(substring)
print("'we' sub string frequency / occurrence count : " , count)
index = mainStr.find(substring)
print('first index of substring is',index)
# ------------------------------------------------------------------------------------------------------
# BUG FIX: the separator above was previously a bare run of '-' characters,
# which is a SyntaxError in Python; it is now a comment.
# Q.2. Check different strings with the following functions:
#
# - islower()
#
# - isupper()
#
# NOTE(review): this file is a Jupyter-notebook export; the bare expressions
# below (e.g. `st1`, `st1.islower()`) displayed values in the notebook but
# have no effect when run as a plain script.
#
# In[11]:
st1='goodMorning'
# In[12]:
st1
# In[13]:
st1.islower()
# In[14]:
st1.isupper()
# In[15]:
st1='goodmorning'
# In[16]:
st1.islower()
# In[17]:
st1.isupper()
# In[18]:
st1='goodmorning2'
# In[19]:
st1.islower()
# In[20]:
st1.isupper()
# In[21]:
st1='gOOodmorning2'
# In[22]:
st1.islower()
# In[23]:
st1.isupper()
# In[24]:
st1='25682G##'
# In[25]:
st1.islower()
# In[26]:
st1.isupper()
# In[27]:
st1='GOODMORNING'
# In[28]:
st1.islower()
# In[29]:
st1.isupper()
# In[ ]:
|
import exceptions
import os
import time
import datetime
from wnodes.utils import utils
class MessageStoreError(exceptions.Exception):
    """Raised when an accounting message cannot be stored to disk."""
    pass
class MessageFormat(object):
    """Builds an APEL cloud accounting message from a list of records.

    NOTE: this module is Python 2 (`except IOError, err` syntax).
    """
    # Header line identifying the APEL message type and version.
    header = 'APEL-cloud-message: v0.1'
    def __init__(self, records_list = []):
        # NOTE(review): mutable default argument; safe only because it is
        # copied with [:] rather than stored directly.
        self.records_list = records_list[:]
    def add_record(self, record):
        """Adds a Record"""
        self.records_list.append(record)
    def get_information(self):
        """Gets message information"""
        # Concatenate the header with every record, in insertion order.
        message = MessageFormat.header
        for record in self.records_list:
            message += record
        return message
    def store_in_file(self, location):
        """Write the message to location/message_<guid>.txt.

        Raises MessageStoreError if the directory is missing or the write
        fails.
        """
        if not os.path.isdir(location):
            msg = ('The %s location of the file does not exist.'
                   % location)
            raise MessageStoreError(msg)
        else:
            # Unique suffix so concurrent messages never collide.
            suffix = utils.guid()
            try:
                sf = open(os.path.join(location, 'message_%s.txt' % suffix), 'w')
                records = self.get_information()
                sf.write(records)
                sf.close()
            except IOError, err:
                raise MessageStoreError(err)
|
import numpy as np
import pandas as pd
import datetime
import data_examples
import feature_engineering
import pipeline
if __name__ == '__main__':
    # Smoke-test the pipeline: predict CaO from the bundled example datasets.
    print(pipeline.predict_cao(
        data_examples.CHARGE_CHEMISTRY,
        data_examples.LIMESTONE_CONSUMPTIONS,
        data_examples.CHARGE_CONSUMPTIONS,
        data_examples.COKE_CONSUMPTIONS,
        data_examples.COKE_SIEVING,
        data_examples.LIMESTONE_CAO))
|
import requests
import re
from config import *
from requests.packages import urllib3
urllib3.disable_warnings()
def get_status_response(ip_addr):
    """Fetch the alarm-status page from the controller at ip_addr.

    Returns the response body on success, or an "Error: ..." string on any
    request failure. TLS verification is disabled (self-signed device certs).
    """
    url = 'https://' + ip_addr + ':2004/web/dynamic.php'
    data = dict(ref='/header', autostart=0, target='refreshAlarm', r=0)
    try:
        response = requests.get(url, params=data, verify=False)
        # Turn HTTP error statuses into exceptions handled below.
        response.raise_for_status()
        #print(response.text)
        return response.text
    except requests.exceptions.RequestException as e:
        return "Error: " + str(e)
def format_status(text):
    """Extract the status token from a controller's HTML response.

    Tries the new-style page first (Chinese status text between 'xml' and
    'dynamic_results'), then the legacy page (a word between 'index.php'
    and 'script'). Returns None when neither pattern matches.
    """
    new_style = re.search('xml.*?([\u4e00-\u9fa5]+).*?dynamic_results', text, re.S)
    if new_style:
        return new_style.group(1)
    legacy = re.search('index.php.*?(\w+).*?script', text, re.S)
    if legacy:
        return legacy.group(1)
    return None
def main():
    """Poll every controller in IP_LISTS and print one status line each."""
    print('Getting status')
    for ip_addr in IP_LISTS:
        response = get_status_response(ip_addr)
        # Parse the status once; the original re-ran format_status for every
        # comparison and again inside each print.
        status = format_status(response)
        if status == '系统正常':
            print(ip_addr + ':' + status)
        elif status == '系统异常':
            print(ip_addr + ':' + status)
        elif status == 'requirelogin':
            print(ip_addr + ':' + 'Web Version Too Low')
        else:
            # Unparseable page: dump the raw response (often an Error string).
            print(ip_addr + ':' + response)
# Script entry point.
if __name__ == '__main__':
    main()
|
# Generated by Django 2.2.7 on 2019-11-23 23:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-declare Compensation.category as a FK that takes no
    DB action on category deletion (DO_NOTHING)."""
    dependencies = [
        ('info', '0007_auto_20191123_1902'),
    ]
    operations = [
        migrations.AlterField(
            model_name='compensation',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='compensation', to='info.Category'),
        ),
    ]
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Mapping
from pants.base.parse_context import ParseContext
from pants.util.frozendict import FrozenDict
ContextAwareObjectFactory = Callable[[ParseContext], Callable[..., None]]
@dataclass(frozen=True)
class BuildFileAliases:
    """A structure containing sets of symbols to be exposed in BUILD files.
    There are three types of symbols that can be directly exposed:
    :API: public
    - targets: These are Target subclasses or TargetMacro.Factory instances.
    - objects: These are any python object, from constants to types.
    - context_aware_object_factories: These are object factories that are passed a ParseContext and
    produce one or more objects that use data from the context to enable some feature or utility;
    you might call them a BUILD file "macro" since they expand parameters to some final, "real"
    BUILD file object. Common uses include creating objects that must be aware of the current
    BUILD file path or functions that need to be able to create targets or objects from within the
    BUILD file parse.
    """
    # Validated, immutable alias maps; populated in __init__ via
    # object.__setattr__ because the dataclass is frozen.
    _objects: FrozenDict[str, Any]
    _context_aware_object_factories: FrozenDict[str, ContextAwareObjectFactory]
    @classmethod
    def _validate_alias(cls, category: str, alias: str, obj: Any) -> None:
        # Aliases become BUILD-file identifiers, so they must be strings.
        if not isinstance(alias, str):
            raise TypeError(
                "Aliases must be strings, given {category} entry {alias!r} of type {typ} as "
                "the alias of {obj}".format(
                    category=category, alias=alias, typ=type(alias).__name__, obj=obj
                )
            )
    @classmethod
    def _validate_objects(cls, objects: dict[str, Any] | None) -> FrozenDict[str, Any]:
        """Check object aliases and freeze the mapping (empty when None)."""
        if not objects:
            return FrozenDict()
        for alias, obj in objects.items():
            cls._validate_alias("objects", alias, obj)
        return FrozenDict(objects)
    @classmethod
    def _validate_context_aware_object_factories(
        cls, context_aware_object_factories: dict[str, ContextAwareObjectFactory] | None
    ) -> FrozenDict[str, ContextAwareObjectFactory]:
        """Check factory aliases are strings and factories are callable."""
        if not context_aware_object_factories:
            return FrozenDict()
        for alias, obj in context_aware_object_factories.items():
            cls._validate_alias("context_aware_object_factories", alias, obj)
            if not callable(obj):
                raise TypeError(
                    "The given context aware object factory {alias!r} must be a callable.".format(
                        alias=alias
                    )
                )
        return FrozenDict(context_aware_object_factories)
    def __init__(
        self,
        objects: dict[str, Any] | None = None,
        context_aware_object_factories: dict[str, ContextAwareObjectFactory] | None = None,
    ) -> None:
        """
        :API: public
        """
        # object.__setattr__ bypasses the frozen-dataclass write protection.
        object.__setattr__(self, "_objects", self._validate_objects(objects))
        object.__setattr__(
            self,
            "_context_aware_object_factories",
            self._validate_context_aware_object_factories(context_aware_object_factories),
        )
    @property
    def objects(self) -> FrozenDict[str, Any]:
        """
        :API: public
        """
        return self._objects
    @property
    def context_aware_object_factories(self) -> FrozenDict[str, ContextAwareObjectFactory]:
        """
        :API: public
        """
        return self._context_aware_object_factories
    def merge(self, other: BuildFileAliases) -> BuildFileAliases:
        """Merges a set of build file aliases and returns a new set of aliases containing both.
        Any duplicate aliases from `other` will trump.
        :API: public
        """
        if not isinstance(other, BuildFileAliases):
            raise TypeError(f"Can only merge other BuildFileAliases, given {other}")
        def _merge(item1: Mapping[str, Any], item2: Mapping[str, Any]) -> dict[str, Any]:
            # dict.update semantics: later (other's) entries win.
            merged: dict[str, Any] = {}
            merged.update(item1)
            merged.update(item2)
            return merged
        objects = _merge(self.objects, other.objects)
        context_aware_object_factories = _merge(
            self.context_aware_object_factories, other.context_aware_object_factories
        )
        return BuildFileAliases(
            objects=objects,
            context_aware_object_factories=context_aware_object_factories,
        )
|
from django.test import TestCase
from elections.models import ElectedRole
from elections.utils import ElectionBuilder
from organisations.tests.factories import OrganisationFactory
from .base_tests import BaseElectionCreatorMixIn
class TestElectoralSystems(BaseElectionCreatorMixIn, TestCase):
    """Checks ElectionBuilder assigns the correct voting system per territory."""
    def test_scotland_local_stv(self):
        """
        Scottish local elections have the type of `local` but unlike the
        rest of the UK that uses FPTP, it uses STV
        """
        # Elections without organisations don't have voting systems
        election_id = ElectionBuilder(
            "local", "2017-05-04"
        ).build_election_group()
        assert election_id.voting_system is None
        # "Normal" UK local election is FPTP
        eng_org = OrganisationFactory(territory_code="ENG")
        ElectedRole.objects.create(
            election_type=self.election_type1,
            organisation=eng_org,
            elected_title="Councillor",
            elected_role_name="Councillor for Foo Town",
        )
        election_id = (
            ElectionBuilder("local", "2017-05-04")
            .with_organisation(eng_org)
            .build_election_group()
        )
        assert election_id.voting_system == "FPTP"
        scot_org = OrganisationFactory(territory_code="SCT")
        ElectedRole.objects.create(
            election_type=self.election_type1,
            organisation=scot_org,
            elected_title="MSP",
            elected_role_name="MSP for Foo Town",
        )
        # Scottish local elections are STV
        scot_id = (
            ElectionBuilder("local", "2017-05-04")
            .with_organisation(scot_org)
            .build_organisation_group(None)
        )
        assert scot_id.voting_system == "STV"
|
from .drivers.driverchrome import DriverChrome
from .drivers.driver import IDriver |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.