text stringlengths 8 6.05M |
|---|
from django.shortcuts import get_object_or_404, render
from django.views.generic import View
from django.http import HttpResponse
from django.utils.html import escape
from django.core.urlresolvers import reverse
from django.views.generic import TemplateView
from crispy_forms.layout import Submit
from crispy_forms.helper import FormHelper
from crispy_forms.utils import render_crispy_form
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.context_processors import csrf
from jsonview.decorators import json_view
from wedding.rsvp_form import rsvpForm
#rsvp form view class
class weddingRsvpView(View):
    """Render and process the wedding RSVP form (AJAX-aware via @json_view)."""

    def get(self, request):
        """Display the RSVP page with the form."""
        # NOTE(review): the form CLASS (not an instance) is passed to the
        # template here — confirm the template/crispy tag accepts that.
        my_data_dictionary = {
            "headline": 'RSVP', "rsvp_form": rsvpForm,
        }
        return render_to_response('wedding/rsvp.html', my_data_dictionary, context_instance=RequestContext(request))

    @json_view
    def post(self, request):
        """Validate the submitted RSVP; on failure return the re-rendered form HTML."""
        # Bug fix: the original called render_crispy_form(form, ...) BEFORE
        # `form` was assigned, raising NameError on every POST.  The form is
        # now bound first, and the crispy re-render happens only on failure.
        form = rsvpForm(request.POST or None)
        if form.is_valid():
            # You could actually save through AJAX and return a success code here
            form.save()
            return render_to_response('wedding/rsvp.html', {'success': True}, context_instance=RequestContext(request))
        # RequestContext ensures CSRF token is placed in newly rendered form_html
        request_context = RequestContext(request)
        form_html = render_crispy_form(form, context=request_context)
        return render_to_response('weddingRsvpView.html', {'success': False, 'form_html': form_html}, context_instance=RequestContext(request))
|
# -*- coding: utf-8 -*-
# Copyright 2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains all the logic related to the actual model.
In this example, we use a random forest regressor.
Note the logic: a general class for building several types of models,
and one or more functions to build "standard" configurations. The functions
can be used to define common choices for the hyper-parameters and to make
experiments more scalable / repeatable.
"""
from sklearn.ensemble import RandomForestRegressor
class SimpleModel(RandomForestRegressor):
    """Random-forest regressor with project-standard defaults.

    Bug fix: the original __init__ accepted random_state, n_estimators and
    max_features but ignored all three, hard-coding n_estimators=10 and
    max_features='auto' and dropping random_state entirely (so runs were
    not reproducible).  The arguments are now forwarded to the parent.
    """
    def __init__(self, random_state=42, n_estimators=10, max_features='auto'):
        super(SimpleModel, self).__init__(
            n_estimators=n_estimators,
            max_features=max_features,
            random_state=random_state)
        # Human-readable identifier used by experiment logging.
        self.name = 'RandomForestRegressor'
def build_simple_model():
    """Factory: return a SimpleModel with the standard hyper-parameters."""
    return SimpleModel()
|
import json
from pyspark import SparkContext, SparkConf
# from pyspark.sql import SparkSession
# from pyspark.sql import functions as f
import sys
import time
import multiprocessing
# ---------------------------------------------------------------------------
# Flat Spark job (HW1 - Task 2):
#   A. average review stars per state (reviews joined to businesses via
#      business_id), sorted by stars desc then state asc;
#   B. timing comparison of two ways to print the top-5 states
#      (collect() vs take(5)).
# CLI args: review JSON path, business JSON path, CSV output, JSON output.
# ---------------------------------------------------------------------------
start_time = time.time()
# Run Configurations
input_path1 = sys.argv[1]
input_path2 = sys.argv[2]
output_path1 = sys.argv[3]
output_path2 = sys.argv[4]
# Level of Parallelism - Recommended by Spark
# http://spark.apache.org/docs/latest/tuning.html#level-of-parallelism
cpu_num = multiprocessing.cpu_count()
task_per_cpu = cpu_num * 2
# Spark Configurations
conf = SparkConf().setAppName('HW1 - Task 2').setMaster('local[*]')
sc = SparkContext(conf=conf)
# sc = SparkSession\
#     .builder \
#     .appName("HW1 - Task 2") \
#     .getOrCreate()
# Data Input
# Each input file holds one JSON object per line; parse lines into dicts.
distFileR = sc.textFile(input_path1).coalesce(task_per_cpu)
rdd_r = distFileR.map(json.loads)
distFileB = sc.textFile(input_path2).coalesce(task_per_cpu)
rdd_b = distFileB.map(json.loads)
# df_r = sc.read.json(input_path1)
# df_b = sc.read.json(input_path2)
# A. What are the average stars for each state? (DO NOT use the stars information in the business file) (2.5 point)
bid_state = rdd_b.map(lambda s: (s["business_id"], s["state"]))
bid_stars = rdd_r.map(lambda s: (s["business_id"], s["stars"]))
state_stars = bid_state.join(bid_stars)
# (business_id, (state, stars)) -> (state, (stars, 1)) -> summed -> average,
# sorted by average descending, state ascending as tie-break.
ss_avg_rdd = state_stars\
    .map(lambda s: (s[1][0], (s[1][1], 1)))\
    .reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\
    .map(lambda s: (s[0], s[1][0] / s[1][1]))\
    .sortBy(lambda s: (-s[1], s[0]))
# ss_avg_list = ss_avg_rdd.takeOrdered(20, key=lambda s: (-s[1], s[0]))
# state_bid = df_b.select(df_b["state"], df_b["business_id"])
# bid_stars = df_r.select(df_r["business_id"], df_r["stars"])
# state_stars = state_bid.join(bid_stars,
#                              state_bid["business_id"] == bid_stars["business_id"],
#                              how="left")
# ss_avg_df = state_stars.groupBy(state_stars["state"])\
#     .agg(f.avg(state_stars["stars"]).alias("stars"))
# B. You are required to use two ways to print top 5 states with highest stars. You need to compare the time difference
# between two methods and explain the result within 1 or 2 sentences. (3 point)
# Method1: Collect all the data, and then print the first 5 states
start_time1 = time.time()
m1_list = ss_avg_rdd.collect()
print(m1_list[:5])
# m1_list = ss_list_df.sort(f.col("stars").desc(), ss_avg_df["state"]).collect()
# print(m1_list[:5])
duration1 = time.time() - start_time1
# Method2: Take the first 5 states, and then print all
start_time2 = time.time()
m2_list = ss_avg_rdd.take(5)
print(m2_list)
# m2_list = ss_list_df.sort(f.col("stars").desc(), ss_avg_df["state"]).take(5)
# print(m2_list)
duration2 = time.time() - start_time2
# Output Data 1
# CSV of every state's average, in the sorted order computed above.
with open(output_path1, 'w') as op1:
    op1.write("state,stars\n")
    for line in m1_list:
        op1.write(str(line[0]) + "," + str(line[1]) + "\n")
# Output Data 2
# JSON object holding both timings and the written explanation.
answer = dict()
answer["m1"] = duration1
answer["m2"] = duration2
answer["explanation"] = "Method 2 is faster than Method 1, since it only gets the first 5 entities" \
                        " verses getting the entire column in Method 1 which takes more time."
with open(output_path2, 'w') as op2:
    json.dump(answer, op2)
duration = time.time() - start_time
print("Total time - Task 2: ", duration)
|
from webservice.ticketing.jira.jira import *
from webservice.ticketing.jira.issue import * |
"""
This solution was created by Dan, in collaboration with Connor, Taylor,
Eunkyu, Chris, Aurora, and Brandon. Though not as efficient as
Brandon's algorithm, I wanted to show other ways to approach this Ulam
spiral problem and some other Python tricks in practice. I didn't time
any of this so while some of the algorithms herein are slower than
Brandon's, some of them might not be.
"""
# We require three modules to import -- two should look familiar.
# The third, numpy.ma is an offshoot of numpy good for creating and
# manipulating "masked arrays" that have some values masked. This is
# ofthen used in arrays with bad pixels or data values in science.
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
def ulam_spiral(max_):
    """Create an Ulam spiral plot of the primes up to a maximum value.

    Arguments:
        max_ -- The maximum number utilized in the Ulam spiral.

    Outputs:
        primes -- A numpy array containing 1 and all prime numbers up to
                  "max_" (1 is deliberately included, as in the original).
        spiralArr -- A masked numpy array containing the spiral grid
                     plotted in the image (non-primes masked out).

    Side effect: saves the plot to 'ulam_spiral_dan.pdf'.
    """
    # Trial division: a candidate is prime iff no known prime <= sqrt(candidate)
    # divides it.  Start from 2 and test odd candidates only.
    primes = np.array([2])
    for num in range(3, max_ + 1, 2):
        root = np.floor(np.sqrt(num))
        primeArr = num % primes[primes <= root]
        # All remainders non-zero means no prime divides num -> num is prime.
        if primeArr.all():
            primes = np.append(primes, num)
    # 1 is prepended at the end: it cannot take part in the loop above
    # because any number modulo 1 is zero.
    primes = np.append(1, primes)
    # Bug fix: np.floor returns a float; reshape() and array construction
    # need an int side length (modern numpy raises TypeError on floats).
    sideLength = int(np.floor(np.sqrt(max_)))
    # Bug fix: assigning ma.masked only masks elements of a MASKED array;
    # the original assigned into a plain ndarray, so wrap the flattened
    # grid in ma.masked_array first.
    spiralArr1d = ma.masked_array(spiral_grid(sideLength).flatten())
    # Mask every grid value that is NOT in the primes array (invert=True
    # flags the non-primes), leaving only primes visible in the plot.
    spiralArr1d[np.in1d(spiralArr1d, primes, invert=True)] = ma.masked
    spiralArr = spiralArr1d.reshape(sideLength, sideLength)
    # Image plot of the masked grid; higher numbers plot brighter since
    # values are not reduced to ones.
    plt.imshow(spiralArr, cmap='gray', interpolation='nearest')
    plt.savefig('ulam_spiral_dan.pdf', dpi=300)
    return primes, spiralArr
def spiral_grid(box_length):
    """Return a square spiral grid of the inputted length (odd or even).

    Arguments:
        box_length -- the length of each side of the grid; floats such as
                      np.floor(...) results are accepted and truncated.

    Outputs:
        grid -- numpy array holding 1..box_length**2 arranged in a spiral
                around the centre cell.

    Raises:
        ValueError -- for a 1 x 1 grid, which has no spiral.
    """
    # Generalization: accept float side lengths (the caller passes
    # np.floor(...)); indices below must be ints.
    box_length = int(box_length)
    if box_length == 1:
        raise ValueError('SPIRAL_GRID: Cannot make spiral grid of length 1.')
    # Skeleton of the grid, filled in arm by arm below.
    grid = np.zeros([box_length, box_length])
    # Centre cell holds the value 1.
    oneIndex = (box_length - 1) // 2
    grid[oneIndex, oneIndex] = 1
    # num_array holds every value to place; startIndex tracks the next
    # values to write; position is the current anchor cell in the grid.
    # Odd arm lengths fill down then right; even arm lengths fill up then
    # left (number slices reversed with np.flipud so they land in order).
    num_array = np.arange(box_length * box_length) + 1
    Length = 1
    startIndex = 0
    position = [oneIndex, oneIndex]
    while Length < box_length:
        # Bug fix: the original compared with `Length % 2 is 1` / `is 0`.
        # `is` tests identity, not equality; it only "works" due to
        # CPython's small-int caching and raises SyntaxWarning on 3.8+.
        if Length % 2 == 1:
            grid[(position[0]+1):(position[0]+Length+1), (position[1])] = \
                num_array[startIndex+1:startIndex+Length+1]
            grid[(position[0]+Length), (position[1]+1):position[1]+Length+1] = \
                num_array[startIndex+Length+1:startIndex+Length+Length+1]
            position = [position[0]+Length, position[1]+Length]
            startIndex = startIndex + (Length*2)
        else:
            grid[(position[0]-Length):position[0], position[1]] = \
                np.flipud(num_array[startIndex+1:startIndex+Length+1])
            grid[position[0]-Length, position[1]-Length:position[1]] = \
                np.flipud(num_array[startIndex+Length+1:startIndex+(Length*2)+1])
            position = [position[0]-Length, position[1]-Length]
            startIndex = startIndex + (Length*2)
        Length += 1
    # Close the spiral: the final partial arm depends on grid parity.
    if box_length % 2 == 1:
        grid[1:box_length, 0] = num_array[(box_length-1)*-1:]
    else:
        grid[0:box_length-1, -1] = np.flipud(num_array[(box_length-1)*-1:])
    return grid
|
# Given a sequence of integers a[0..n-1] and a natural number k such that for any i, j: if j >= i + k then a[i] <= a[j], sort the sequence. The sequence may be very long. Required running time: O(n * log(k)). Extra memory: O(k).
# Use merging.
# Sample Input:
# 20
# 3 4 1 2 0 9 7 8 6 5 11 12 13 10 14 18 19 16 17 15
# 10
# Sample Output:
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
import sys
def mergeSort(alist):
    """Sort *alist* in place using recursive top-down merge sort."""
    if len(alist) <= 1:
        return
    middle = len(alist) // 2
    left = alist[:middle]
    right = alist[middle:]
    mergeSort(left)
    mergeSort(right)
    # Merge the two sorted halves back into alist.
    li = ri = wi = 0
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            alist[wi] = left[li]
            li += 1
        else:
            alist[wi] = right[ri]
            ri += 1
        wi += 1
    # Copy whichever half still has elements remaining.
    while li < len(left):
        alist[wi] = left[li]
        li += 1
        wi += 1
    while ri < len(right):
        alist[wi] = right[ri]
        ri += 1
        wi += 1
# Driver: reads n, the sequence, and k from stdin, sorts, prints the result
# space-separated.
# Bug fix: the original bound sys.stdin to the name `input`, shadowing the
# builtin; renamed to `stream`.
stream = sys.stdin
n = int(stream.readline())
arr = list(map(int, stream.readline().split()))
m = int(stream.readline())
# NOTE(review): the problem statement asks for an O(n*log(k)) merge using
# the k-sorted property, but `m` (k) is read and unused — a full merge
# sort is run instead.  Output is identical; complexity is not.
mergeSort(arr)
for value in arr:
    print(value, end=" ")
|
#! /usr/bin/env python
import os
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
from src.api.controller.BaseStaticFileHandler import BaseStaticFileHandler
from src.api.controller.ServerListController import ServerListController
from src.api.controller.InfoController import InfoController
from src.api.controller.CommandsController import CommandsController
from src.api.controller.InfoListController import InfoListController
from src.api.controller.StatusController import StatusController
from src.api.controller.SettingsController import SettingsController
from src.api.controller.SlowlogController import SlowlogController
from daemonized import daemonized
class RedisLive(daemonized):
    """Daemonized Tornado web server exposing the RedisLive API and static UI."""

    def run_daemon(self):
        """Daemon entry point: parse CLI options, build the app, serve forever."""
        # CLI options: --port (bind port) and --debug (Tornado debug mode).
        define("port", default=8888, help="run on the given port", type=int)
        define("debug", default=0, help="debug mode", type=int)
        tornado.options.parse_command_line()
        # print os.path.abspath('.')
        # Bootup
        # Route table: REST endpoints first, then a catch-all that serves
        # static files from ./webroot relative to the working directory.
        handlers = [
            (r"/api/servers", ServerListController),
            (r"/api/info", InfoController),
            (r"/api/status", StatusController),
            (r"/api/infolist", InfoListController),
            (r"/api/commands", CommandsController),
            (r"/api/settings", SettingsController),
            (r"/api/slowlog", SlowlogController),
            (r"/(.*)", BaseStaticFileHandler, {"path": os.path.abspath('.') + '/webroot'})
        ]
        server_settings = {'debug': options.debug}#log level?
        application = tornado.web.Application(handlers, **server_settings)
        application.listen(options.port)
        # NOTE(review): Python 2 print statement; also the banner hard-codes
        # port 8888 in the URL while the server binds options.port.
        print "start at:0.0.0.0:%d http://127.0.0.1:8888/index.html"%(options.port)
        tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    # Instantiate and launch the daemonized server.
    RedisLive().start()
|
#!/usr/bin/python
import numpy as np
import pylab as py
import os,sys
from COMMON import mpc, light, grav, msun, yr, nanosec, week, hub0, h0, omm, omv
import COMMON as CM
import mpmath
from scipy import integrate
#from time import time
#################################################
#INPUT PARAMETERS:
# Binary / observation parameters for the gravitational waveform plot.
inc=0. #Inclination.
phi0=0. #Phase at the coalescence.
mch=10**(10.3) #Chirp mass.
tobs=15.*yr #Interval of time that the binary is observed.
fmin=1e-8 #Frequency at which a binary is observed for the first time.
zvec=np.array([0.2, 2.7, 5.]) #Redshift of each plotted binary.
colorvec=np.array(['black', 'blue', 'red']) #One curve colour per redshift.
#linestylevec=np.array(['--', ':', '-.'])
linestylevec=np.array(['-', '-', '-'])
#Plotting parameters:
hfactor=1e-13 #Strain is plotted in units of hfactor.
minh=-2.
maxh=1.5
#Other parameters:
outputdir='../plots/'
oplot='waveforms.pdf'
minreds=1e-2 #Minimum redshift considered in the calculation of luminosity distances.
maxreds=1e2 #Maximum redshift.
zbin=1000 #Number of redshift bins.
tbin=1000 #Number of time bins.
#################################################
#Creating a vector of redshift and a vector of luminosity distance.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbin) #Vector of redshifts logarithmically spaced.
lumdistvec=np.zeros(len(reds)) #This will be D_L(z), the luminosity distance, in Mpc.
dist_const=light/(hub0*h0)/mpc #A constant that multiplies distances.
#Flat-LambdaCDM comoving-distance integral; (1+z) converts to luminosity
#distance.  NOTE: xrange / the print statement mean Python 2 only.
for zi in xrange(len(reds)):
    lumdistvec[zi]=(1.+reds[zi])*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*dist_const
print lumdistvec
#NOTE(review): exit() below looks like a debugging stop -- everything after
#this line (the whole plotting section) never runs; remove once lumdistvec
#is verified.
exit()
#################################################
#Choose plotting options that look optimal for the paper.
#(Currently unreachable while the exit() in the section above is in place.)
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=6.
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
})
left, right, top, bottom, cb_fraction=0.15, 0.94, 0.96, 0.16, 0.145 #Borders of the plot.
#Create a plot.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
py.ion()
#Overall strain range across all curves (only used by the commented-out
#set_ylim variant below).
absminh=1e10
absmaxh=0
#One h_+(t) curve (solid) plus its amplitude envelope (dotted) per redshift.
for ii in xrange(len(zvec)):
    redsi=zvec[ii]
    colori=colorvec[ii]
    linestyli=linestylevec[ii]
    #redsi=0.1 #Redshift of the binary (I will try different ones and make it a vector).
    lumdisti=lumdistvec[abs(reds-redsi).argmin()] #Luminosity distance corresponding to the chosen redshift of the binary.
    tvec=np.linspace(0., tobs, tbin) #Vector of time.
    #Keep only times before coalescence (CM.tcfun gives the coalescence time).
    tvec=tvec[tvec<CM.tcfun(mch, redsi, fmin)]
    hvec=CM.hplus(mch, redsi, lumdisti, CM.tcfun(mch, redsi, fmin), tvec, inc, phi0)
    hamp=CM.htime(mch, redsi, lumdisti, CM.tcfun(mch, redsi, fmin), tvec)
    ax.plot(tvec/yr, hvec/hfactor, color=colori, linestyle=linestyli, label='$z=%.1f$' %redsi)
    ax.plot(tvec/yr, hamp/hfactor, color=colori, linestyle=':')
    absminh=min(absminh, min(hvec))
    absmaxh=max(absmaxh, max(hvec))
#ax.grid()
#ax.plot(snrvec, np.ones(len(snrvec))*dpt, '--', color='black')
#ax.vlines(min(snrtvec), 0., 1., linestyle='', color='black')
#ax.vlines(max(snrtvec), 0., 1., '--', color='black')
#ax.fill_betweenx(np.ones(len(snrvec)), min(snrtvec), max(snrtvec), color='grey')
ax.set_xlabel('$\\mathrm{Time / yr}$')
ax.set_ylabel('$h_+/10^{%i}$' %np.log10(hfactor))
#ax.set_xscale('log')
#ax.set_yscale('log')
ax.set_xlim(0., tobs/yr)
#ax.set_xticks([ 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3])
#ax.set_xticklabels(["$-2$", "$ -1$", "$0$", "$1$", "$2$", "$3$"])
#ax.set_ylim(min(hvec/hfactor), max(hvec/hfactor))
#ax.set_ylim(absminh/hfactor, absmaxh/hfactor)
ax.set_ylim(minh, maxh)
ax.legend(loc='lower left')
#ax.set_yticks([1e-16, 1e-15, 1e-14, 1e-13])
#ax.set_yticklabels(["$-16$", "$-15$", "$-14$", "$-13$"])
#ax.text(10,1e-14,'$h^\\textrm{LSO}$',fontsize=9)
#ax.legend(loc='lower right',handlelength=3.5)
fig.savefig(outputdir+oplot, transparent=True)
|
from rest_framework import serializers
from api.models import Company, CompanyStuff
from authentication.serializers import UserSerializer
class CompanySerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Company's id and name."""
    class Meta:
        model = Company
        fields = ('id', 'name')
class CompanyStuffSerializer(serializers.ModelSerializer):
    """DRF serializer for a CompanyStuff membership record.

    Embeds the related user via UserSerializer under the 'stuff' key.
    """
    # Nested representation of the staff member's user account.
    stuff = UserSerializer(many=False)
    class Meta:
        model = CompanyStuff
        fields = ('company', 'stuff', 'role')
|
from nio import MatrixRoom
from dors import command_hook, Jenny, HookMessage
import requests
@command_hook(['bible'])
async def bible(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Reply with the Bible passage named by the first two command args.

    Usage (inferred from the args): two arguments, e.g. book and
    chapter:verse, joined with a URL-encoded space.
    """
    verse = event.args[0] + '%20' + event.args[1]
    # TODO: USE ASYNC!  requests.get blocks the event loop; an aiohttp/
    # matrix-nio-friendly client should be used here.
    i = requests.get('https://labs.bible.org/api/?passage=' + verse + '&type=json').json()
    # Concatenate every returned verse as " <num>: <text>".
    res = ''
    for v in i:
        res += " {0}: {1}".format(v['verse'], v['text'])
    resp = "{0}".format(res)
    # Truncate long replies and append a link to the full passage.
    if len(resp) > 370:
        resp = resp[:370] + '... >> https://labs.bible.org/api/?passage=' + verse
    # resp[1:] drops the leading space added before the first verse.
    await bot.message(room.room_id, resp[1:])
@command_hook(['quran', 'Quran'])
async def quran(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Reply with the English (Asad) translation of the requested ayah."""
    reference = event.args[0]
    url = 'https://api.alquran.cloud/ayah/' + reference + '/en.asad'
    payload = requests.get(url).json()
    # On success the API sets code=200 and the ayah text lives in data.text;
    # otherwise data carries the error description.
    if payload['code'] == 200:
        reply = "{0}".format(payload['data']['text'])
    else:
        reply = "{0}".format(payload['data'])
    # Truncate overly long replies.
    if len(reply) > 370:
        reply = reply[:370] + '...'
    await bot.message(room.room_id, reply)
|
import numpy as np
def maximization(X,gamma, chi):
    """M-step of EM (Baum-Welch style) for an HMM: re-estimate pi, A, B.

    Arguments:
        X     -- observation sequences; only X[n][0] is used, assumed
                 D x T per sequence — TODO confirm against the caller.
        gamma -- per-sequence state posteriors; gamma[n] is K x T.
        chi   -- pairwise state posteriors; chi[n, t, :, :] is K x K.

    Returns:
        [pi_update, A_update, B_update]
    """
    # Update model parameters: pi, A, B
    K = np.shape(gamma[0])[0]  # number of hidden states
    N = len(gamma)             # number of sequences
    T = np.shape(gamma[0])[1]  # sequence length (assumes all sequences equal length — TODO confirm)
    D = np.shape(X[0][0])[0]   # observation dimensionality
    # -------------- pi update ---------------
    # Average of the t=0 state posteriors across sequences.
    pi = np.zeros((1, K))
    for n in range(N):
        pi = pi + gamma[n][:,0]
    pi = pi/N
    # -------------- A update ---------------
    # Numerator: expected transition counts summed over sequences and t>=1.
    num = 0
    for n in range(N):
        for t in range(1,T):
            num += chi[n,t,:,:]
    # Denominator: the same counts summed out over the destination state.
    den = 0
    for n in range(N):
        for t in range(1,T):
            for k in range(K):
                den+= chi[n,t,:,k]
    A = num/den
    # NOTE(review): axis=0 makes the COLUMNS of A sum to 1; confirm this
    # matches the transition-matrix convention used by the E-step.
    A = A / A.sum(axis=0)
    # -------------- B update ---------------
    # Emission matrix: gamma-weighted observation sums per state, normalized
    # by each state's total posterior mass.
    B = np.zeros((K, D))
    for k in range(K):
        den = 0
        for n in range(N):
            prod = np.sum(X[n][0]*np.tile(gamma[n][k,:,np.newaxis],D).T, axis=1)
            B[k,:] = B[k,:] + prod
            den += np.sum(gamma[n][k,:])
        B[k,:]=B[k, :]/den
        #print(gamma[n].shape)
        #print(np.transpose(X[0][n]).shape)
        #prod = np.dot(gamma[n],np.transpose(X[0][n]))
        #B = B + prod
    # B = B / B.sum(axis = 0)
    pi_update = pi
    A_update = A
    B_update = B
    return [pi_update, A_update, B_update]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# from django.shortcuts import get_object_or_404, render
# from django.http import HttpResponseRedirect
# from django.urls import reverse
# from django.views import generic
# from django.utils import timezone
# class IndexView(generic.ListView):
# template_name = 'health_tracker/index.html'
# context_object_name = 'health tracker overview'
from django.http import HttpResponse
def index(request):
    """Plain-text placeholder view for the health tracker landing page."""
    message = "This is the health tracker index."
    return HttpResponse(message)
|
import sys
# Memo cache shared across calls: n -> 1 (happy) or 0 (unhappy).
REC = {}

def happy(n, path=None):
    """Return 1 if *n* is a happy number, else 0.

    A number is happy when repeatedly replacing it with the sum of the
    squares of its digits eventually reaches 1; unhappy numbers fall
    into a cycle instead.

    Arguments:
        n    -- the number to test.
        path -- numbers already seen on the current chain (internal,
                used for cycle detection).
    """
    if path is None:
        # Bug fix: the original used a mutable default argument (path=[]);
        # a set also gives O(1) membership instead of list scans.
        path = set()
    if n in REC:
        return REC[n]
    if n == 1:
        # Bug fix: the original tested `n % 10 == 1`, which wrongly marks
        # numbers such as 11 or 21 as happy; only reaching exactly 1 counts.
        REC[n] = 1
        return 1
    if n in path:
        # Looped back onto the current chain: every number on a cycle is unhappy.
        REC[n] = 0
        return 0
    REC[n] = happy(sum(d * d for d in map(int, str(n))), path | {n})
    return REC[n]
# Driver: read one integer per line from the file named on the command line
# and print the 1/0 happiness verdict for each.
# Fix: use the parenthesized print form, which behaves identically on
# Python 2 and 3 for a single argument (the bare statement was 2-only).
with open(sys.argv[1],'r') as f:
    for line in f:
        print(happy(int(line.strip())))
|
from django.test import TestCase, RequestFactory
from list_app.models import Entry, List
from django.utils.html import escape
from list_app.forms import EntryForm, ListForm, EMPTY_ENTRY_ERROR, EMPTY_LIST_ERROR
from django.contrib.auth.models import User
from django.http import HttpRequest
from list_app.views import new_list
class HomePageTest(TestCase):
    """Smoke tests for the home page: template used and form exposed."""
    def test_home_page_renders_home_template(self):
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'home.html')
    def test_home_page_uses_list_form(self):
        response = self.client.get('/')
        self.assertIsInstance(response.context['form'], ListForm)
class ListViewTestLoggedIn(TestCase):
    """List detail view behaviour for an authenticated user."""
    def setUp(self):
        # Every test in this class runs as a logged-in user1.
        self.user = User.objects.create_user(username='user1', email='user1@mockmyid.com', password='pass123')
        self.client.login(username='user1', password='pass123')
    def post_invalid_input_list(self):
        # Helper: POST an empty list title to the new-list endpoint.
        return self.client.post('/lists/new', data={'title': '' })
    def post_invalid_input(self):
        # Helper: POST an empty entry to an existing list's detail page.
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        return self.client.post('/lists/%d/%s/' % (list_.id, list_.slug), data={'artist': '', 'album': ''})
    def test_displays_entry_form(self):
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        response = self.client.get('/lists/%d/%s/' % (list_.id, list_.slug))
        self.assertIsInstance(response.context['form'], EntryForm)
        self.assertContains(response, 'name="artist')
        self.assertContains(response, 'name="album')
    def test_uses_list_template(self):
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        response = self.client.get('/lists/%d/%s/' % (list_.id, list_.slug))
        self.assertTemplateUsed(response, 'list.html')
    def test_displays_only_entries_for_that_list(self):
        correct_list = List.objects.create(title='Pierwsza Lista', user=self.user)
        Entry.objects.create(artist='Artist 1', album='Album 1', list=correct_list)
        Entry.objects.create(artist='Artist 2', list=correct_list)
        other_list = List.objects.create(title='Druga Lista', user=self.user)
        Entry.objects.create(artist='Artist 3', album='Album 3', list=other_list)
        Entry.objects.create(artist='Artist 4', album='Album 4', list=other_list)
        response = self.client.get('/lists/%d/%s/' % (correct_list.id,correct_list.slug))
        self.assertContains(response, 'Artist 1')
        self.assertContains(response, 'Album 1')
        self.assertContains(response, 'Artist 2')
        self.assertNotContains(response, 'Artist 3')
        self.assertNotContains(response, 'Artist 4')
    def test_passes_correct_list_to_template(self):
        correct_list = List.objects.create(title='Pierwsza Lista', user=self.user)
        response = self.client.get('/lists/%d/%s/' % (correct_list.id, correct_list.slug))
        self.assertEqual(response.context['list'], correct_list)
    def test_for_invalid_input_nothing_saved_to_db(self):
        self.post_invalid_input()
        self.assertEqual(Entry.objects.count(), 0)
    def test_form_invalid_input_list_nothing_saved_to_db(self):
        self.post_invalid_input_list()
        self.assertEqual(List.objects.count(), 0)
    def test_for_invalid_input_readers_list_template(self):
        # Invalid entry input re-renders the list page instead of redirecting.
        response = self.post_invalid_input()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')
    def test_for_invalid_input_passes_form_to_template(self):
        response = self.post_invalid_input()
        self.assertIsInstance(response.context['form'], EntryForm)
    def test_for_invalid_input_shows_error_on_page(self):
        response = self.post_invalid_input()
        self.assertContains(response, escape(EMPTY_ENTRY_ERROR))
    def test_logged_in_user_cant_open_other_user_list(self):
        # Lists are private to their owner; another user's list returns 403.
        user2 = User.objects.create_user('user2', 'user2@mockmyid.com', 'pass123')
        list_ = List.objects.create(title='Druga Lista', user=user2)
        response = self.client.get('/lists/%d/%s/' % (list_.id, list_.slug))
        self.assertEqual(response.status_code, 403)
class ListViewTestLoggedOut(TestCase):
    """Anonymous users are redirected to login from the list detail view."""
    def setUp(self):
        # A user exists but is deliberately NOT logged in.
        self.user = User.objects.create_user(username='user1', email='user1@mockmyid.com', password='pass123')
    def test_logged_out_user_cant_open_list_view_page(self):
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        response = self.client.get('/lists/%d/%s/' % (list_.id, list_.slug))
        self.assertRedirects(response, '/accounts/login/?next=/lists/%d/%s/' % (list_.id, list_.slug))
class NewListTestLoggedIn(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user('user1', 'user1@mockmyid.com', 'pass123')
self.client.login(username='user1', password='pass123')
def post_invalid_input(self):
return self.client.post('/lists/new', data={'artist': '', 'album': ''})
def post_invalid_input_list(self):
return self.client.post('/lists/new', data={'title': '' })
def test_list_can_have_user(self):
list_ = List.objects.create(title="Pierwsza Lista", user = self.user)
self.assertEqual(list_.user, self.user)
def test_saving_POST_request_for_logged_in_user(self):
response = self.client.post('/lists/new', data={'title': 'Pierwsza Lista', })
self.assertEqual(Entry.objects.count(), 0)
self.assertEqual(List.objects.count(), 1)
def test_redirect_after_POST_for_logged_in_user(self):
response = self.client.post('/lists/new', data={'title': 'Pierwsza Lista', })
new_list = List.objects.first()
self.assertRedirects(response, '/lists/%d/%s/' % (new_list.id, new_list.slug, ))
def test_for_invalid_input_renders_home_template(self):
response = self.post_invalid_input_list()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
def test_validation_error_are_shown_on_home_page(self):
response = self.post_invalid_input_list()
self.assertContains(response, escape(EMPTY_LIST_ERROR))
def test_for_invalid_input_passes_form_to_template(self):
response = self.post_invalid_input_list()
self.assertIsInstance(response.context['form'], ListForm)
def test_invalid_list_entries_not_saved(self):
self.post_invalid_input_list()
self.assertEqual(List.objects.count(), 0)
self.assertEqual(Entry.objects.count(), 0)
def test_can_save_a_POST_request_to_an_existing_list(self):
correct_list = List.objects.create(title='Pierwsza Lista', user=self.user)
self.client.post('/lists/%d/%s/' % (correct_list.id, correct_list.slug), data={'artist': 'Autechre', 'album': 'Amber'})
self.assertEqual(Entry.objects.count(), 1)
new_entry = Entry.objects.first()
self.assertEqual(new_entry.artist, 'Autechre')
self.assertEqual(new_entry.album, 'Amber')
self.assertEqual(new_entry.lastfm_artist_url, 'http://www.last.fm/music/Autechre')
self.assertEqual(new_entry.rym_artist_url, 'http://rateyourmusic.com/artist/autechre')
self.assertEqual(new_entry.spotify_artist_url, 'https://open.spotify.com/artist/6WH1V41LwGDGmlPUhSZLHO')
self.assertEqual(new_entry.spotify_album_url, 'https://open.spotify.com/album/7EfhvG3RwdhzXrFlkDVxg4')
self.assertEqual(new_entry.spotify_artist_uri, 'spotify:artist:6WH1V41LwGDGmlPUhSZLHO')
self.assertEqual(new_entry.spotify_album_uri, 'spotify:album:7EfhvG3RwdhzXrFlkDVxg4')
def test_POST_redirect_to_list_view(self):
    """Adding an entry redirects back to the same list's detail page."""
    correct_list = List.objects.create(title='Pierwsza Lista', user=self.user)
    response = self.client.post('/lists/%d/%s/' % (correct_list.id, correct_list.slug), data={'artist': 'Autechre', 'album': 'Amber', })
    self.assertRedirects(response, '/lists/%d/%s/' % (correct_list.id, correct_list.slug))
def test_logged_in_user_can_open_my_lists_page(self):
    """A logged-in user can view their own 'my lists' page."""
    list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
    Entry.objects.create(artist='Autechre', album='Amber', list=list_)
    response = self.client.get('/lists/user/%s/' % self.user.username)
    self.assertEqual(response.status_code, 200)
class NewListLoggedOut(TestCase):
    """Anonymous users must authenticate before creating a list."""
    def test_POST_redirect_to_login_page(self):
        """POST to /lists/new while logged out redirects to login with ?next=."""
        response = self.client.post('/lists/new', data={'title': 'Pierwsza Lista', })
        self.assertRedirects(response, '/accounts/login/?next=/lists/new')
class ListButtonsLoggedIn(TestCase):
    """External-link buttons (Last.fm / RYM / Spotify) on the list page.

    Fixture: one list with four entries -
    two real artists whose service lookups should succeed,
    one gibberish artist for the not-found cases,
    and one artist with a blank album for the blank-album cases.
    """
    def setUp(self):
        self.user = User.objects.create_user('user1', 'user1@mockmyid.com', 'pass123')
        self.client.login(username='user1', password='pass123')
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        Entry.objects.create(artist='Autechre', album='Amber', list=list_)
        Entry.objects.create(artist='King Midas Sound', album='Cool Out', list=list_)
        # deliberately unresolvable artist/album name
        Entry.objects.create(artist='xxccxasadsadsada', album='xxccxasadsadsada', list=list_)
        # real artist but blank album
        Entry.objects.create(artist='Aphex Twin', album='', list=list_)
    def test_list_page_show_lastfm_artist_link_for_correct_artist(self):
        """Last.fm URLs of both resolvable artists appear on the list page."""
        entry1 = Entry.objects.get(artist='Autechre')
        entry2 = Entry.objects.get(artist='King Midas Sound')
        entry1.lastfm_artist_getInfo()
        entry2.lastfm_artist_getInfo()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertContains(response, entry1.lastfm_artist_url)
        self.assertContains(response, entry2.lastfm_artist_url)
    def test_list_page_show_rym_artist_link_correct_artist(self):
        """RateYourMusic URLs of both resolvable artists appear on the page."""
        entry1 = Entry.objects.get(artist='Autechre')
        entry2 = Entry.objects.get(artist='King Midas Sound')
        entry1.rym_artist_geturl()
        entry2.rym_artist_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertContains(response, entry1.rym_artist_url)
        self.assertContains(response, entry2.rym_artist_url)
    def test_list_page_show_spotify_artist_url_link_correct_artist(self):
        """Spotify artist URLs of both resolvable artists appear on the page."""
        entry1 = Entry.objects.get(artist='Autechre')
        entry2 = Entry.objects.get(artist='King Midas Sound')
        entry1.spotify_artist_geturl()
        entry2.spotify_artist_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertContains(response, entry1.spotify_artist_url)
        self.assertContains(response, entry2.spotify_artist_url)
    def test_list_page_show_spotify_album_url_link_for_correct_artist(self):
        """Spotify album URLs of both resolvable entries appear on the page."""
        entry1 = Entry.objects.get(artist='Autechre')
        entry2 = Entry.objects.get(artist='King Midas Sound')
        entry1.spotify_album_geturl()
        entry2.spotify_album_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertContains(response, entry1.spotify_album_url)
        self.assertContains(response, entry2.spotify_album_url)
    def test_list_page_show_spotify_artist_uri_link_correct_artist(self):
        """Spotify artist URIs of both resolvable artists appear on the page."""
        entry1 = Entry.objects.get(artist='Autechre')
        entry2 = Entry.objects.get(artist='King Midas Sound')
        entry1.spotify_artist_geturi()
        entry2.spotify_artist_geturi()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertContains(response, entry1.spotify_artist_uri)
        self.assertContains(response, entry2.spotify_artist_uri)
    def test_list_page_show_spotify_album_uri_link_correct_artist_album(self):
        """Spotify album URIs of both resolvable entries appear on the page."""
        entry1 = Entry.objects.get(artist='Autechre')
        entry2 = Entry.objects.get(artist='King Midas Sound')
        entry1.spotify_album_geturi()
        entry2.spotify_album_geturi()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertContains(response, entry1.spotify_album_uri)
        self.assertContains(response, entry2.spotify_album_uri)
    def test_list_page_not_show_lastfm_artist_link_button_when_artist_not_exist(self):
        """No Last.fm button is rendered for an unresolvable artist."""
        entry1 = Entry.objects.get(artist='xxccxasadsadsada')
        entry1.lastfm_artist_getInfo()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="lastfm_artist_url"')
    def test_list_page_not_show_rym_artist_link_button_when_artist_not_exist(self):
        """No RYM button is rendered for an unresolvable artist."""
        entry1 = Entry.objects.get(artist='xxccxasadsadsada')
        entry1.rym_artist_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="rym_artist_url"')
    def test_list_page_not_show_spotify_artist_link_button_when_artist_not_exist(self):
        """No Spotify artist button is rendered for an unresolvable artist."""
        entry1 = Entry.objects.get(artist='xxccxasadsadsada')
        entry1.spotify_artist_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="spotify_artist_url"')
    def test_list_page_not_show_spotify_album_link_button_when_album_not_exist(self):
        """No Spotify album button is rendered for an unresolvable album."""
        entry1 = Entry.objects.get(artist='xxccxasadsadsada')
        entry1.spotify_album_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="spotify_album_url"')
    def test_list_page_not_show_spotify_album_link_button_when_album_is_blank(self):
        """No Spotify album button is rendered when the album field is blank."""
        entry1 = Entry.objects.get(artist='Aphex Twin')
        entry1.spotify_album_geturl()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="spotify_album_url"')
    def test_list_page_not_show_spotify_artist_uri_link_when_artist_not_exist(self):
        """No Spotify artist URI link is rendered for an unresolvable artist."""
        entry1 = Entry.objects.get(artist='xxccxasadsadsada')
        entry1.spotify_artist_geturi()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="spotify_artist_uri"')
    def test_list_page_not_show_spotify_album_link_uri_button_when_album_not_exist(self):
        """No Spotify album URI link is rendered for an unresolvable album."""
        entry1 = Entry.objects.get(artist='xxccxasadsadsada')
        entry1.spotify_album_geturi()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="spotify_album_uri"')
    def test_list_page_not_show_spotify_album_link_uri_button_when_album_is_blank(self):
        """No Spotify album URI link is rendered when the album field is blank."""
        entry1 = Entry.objects.get(artist='Aphex Twin')
        entry1.spotify_album_geturi()
        response = self.client.get('/lists/%d/%s/' % (entry1.list.id, entry1.list.slug))
        self.assertNotContains(response, 'id="spotify_album_uri"')
class DeleteEntryTest(TestCase):
    """Entry deletion by the list owner via GET /lists/<list>/<entry>/delete/."""
    def setUp(self):
        self.user = User.objects.create_user('user1', 'user1@mockmyid.com', 'pass123')
        self.client.login(username='user1', password='pass123')
    def test_entry_list_displays_delete_button(self):
        """The list page shows a delete URL for each entry."""
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        entry1 = Entry.objects.create(artist='Artist 1', album='Album 1', list=list_)
        entry2 = Entry.objects.create(artist='Artist 2', list=list_)
        response = self.client.get('/lists/%d/%s/' % (list_.id, list_.slug))
        self.assertContains(response, '/lists/%d/%d/delete/' % (list_.id, entry1.id))
        self.assertContains(response, '/lists/%d/%d/delete/' % (list_.id, entry2.id))
    def test_delete_url_redirect_to_list(self):
        """Deleting redirects back to the owning list's page."""
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        entry1 = Entry.objects.create(artist='Artist 1', album='Album 1', list=list_)
        response = self.client.get('/lists/%d/%d/delete/' % (list_.id, entry1.id,))
        self.assertRedirects(response, '/lists/%d/%s/' % (list_.id, list_.slug))
    def test_url_can_delete_entry(self):
        """Hitting the delete URL removes exactly the targeted entry."""
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        entry1 = Entry.objects.create(artist='Artist 1', album='Album 1', list=list_)
        entry2 = Entry.objects.create(artist='Artist 2', list=list_)
        self.assertEqual(Entry.objects.count(), 2)
        new_entry = Entry.objects.first()
        self.assertEqual(new_entry.artist, 'Artist 1')
        self.assertEqual(new_entry.album, 'Album 1')
        self.client.get('/lists/%d/%d/delete/' % (list_.id, entry1.id))
        self.assertEqual(Entry.objects.count(), 1)
        new_entry = Entry.objects.first()
        self.assertEqual(new_entry.artist, 'Artist 2')
        self.client.get('/lists/%d/%d/delete/' % (list_.id, entry2.id))
        self.assertEqual(Entry.objects.count(), 0)
    def test_invalid_delete_url_redirect_to_404(self):
        """Deleting a non-existent list/entry pair yields 404."""
        response = self.client.get('/lists/6/6/delete/')
        self.assertEqual(response.status_code, 404)
    def test_logged_in_user_cant_delete_entry_from_other_user_list(self):
        """Deleting from another user's list is forbidden (403) and leaves data intact."""
        user2 = User.objects.create(username='wronguser', email='wrong@owner.com', password='wrongwrong')
        list_ = List.objects.create(title='Pierwsza Lista', user=user2)
        entry1 = Entry.objects.create(artist='Artist 1', album='Album 1', list=list_)
        self.assertEqual(Entry.objects.count(), 1)
        response = self.client.get('/lists/%d/%d/delete/' % (list_.id, entry1.id))
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Entry.objects.count(), 1)
class DeleteEntryLoggedOutTest(TestCase):
    """Entry deletion requires authentication."""
    def setUp(self):
        # note: no client.login() here - all requests are anonymous
        self.user = User.objects.create_user('user1', 'user1@mockmyid.com', 'pass123')
    def test_logged_out_user_cant_delete_entry_redirect_to_login_page(self):
        """Anonymous delete attempts are redirected to login with ?next=."""
        list_ = List.objects.create(title='Pierwsza Lista', user=self.user)
        entry1 = Entry.objects.create(artist='Artist 1', album='Album 1', list=list_)
        response = self.client.get('/lists/%d/%d/delete/' % (list_.id, entry1.id))
        self.assertRedirects(response, '/accounts/login/?next=/lists/%d/%d/delete/' % (list_.id, entry1.id))
class MyListsTest(TestCase):
    """The per-user 'my lists' page for a logged-in user."""
    def setUp(self):
        self.user = User.objects.create_user('user1', 'user1@mockmyid.com', 'pass123')
        self.client.login(username='user1', password='pass123')
    def test_my_lists_page_use_template(self):
        """The page renders with the my_lists.html template."""
        response = self.client.get('/lists/user/%s/' % self.user.username)
        self.assertTemplateUsed(response, 'my_lists.html')
    def test_passes_correct_owner_to_template(self):
        """The template context 'user' is the page owner, not another account."""
        User.objects.create(username='wronguser', email='wrong@owner.com', password='wrongwrong')
        correct_user = self.user
        response = self.client.get('/lists/user/user1/')
        self.assertEqual(response.context['user'], correct_user)
    def test_logged_in_user_cant_open_my_list_page_other_user(self):
        """Viewing another user's lists page is forbidden (403)."""
        User.objects.create(username='wronguser', email='wrong@owner.com', password='wrongwrong')
        response = self.client.get('/lists/user/wronguser/')
        self.assertEqual(response.status_code, 403)
class MyListsLoggedOutTest(TestCase):
    """The 'my lists' page requires authentication."""
    def test_logged_out_user_cant_open_my_lists_page(self):
        """Anonymous requests are redirected to login with ?next=."""
        user = User.objects.create_user('user1', 'user1@mockmyid.com', 'pass123')
        list_ = List.objects.create(title='Pierwsza Lista', user=user)
        Entry.objects.create(artist='Autechre', album='Amber', list=list_)
        response = self.client.get('/lists/user/%s/' % user.username)
        self.assertRedirects(response, '/accounts/login/?next=/lists/user/%s/' % user.username)
import random

# canonical move names, shared by the game loop and the outcome helper
POSSIBLE_ACTIONS = ["rock", "paper", "scissors"]


def determine_outcome(user_action, computer_action):
    """Return the result message for one round of rock-paper-scissors.

    :param user_action: the player's move ("rock", "paper" or "scissors")
    :param computer_action: the computer's move
    :return: a human-readable outcome message
    """
    if user_action == computer_action:
        return f"Both players selected {user_action}. It's a tie!"
    if user_action == "rock":
        if computer_action == "scissors":
            return "Rock smashes scissors! You win!"
        return "Paper covers rock! You lose."
    if user_action == "paper":
        if computer_action == "rock":
            return "Paper covers rock! You win!"
        return "Scissors cuts paper! You lose."
    if user_action == "scissors":
        if computer_action == "paper":
            return "Scissors cuts paper! You win!"
        return "Rock smashes scissors! You lose."
    # BUG FIX: the original fell through all branches and printed nothing
    # when the user typed an unrecognised move
    return f"Invalid choice {user_action!r}; pick rock, paper or scissors."


def main():
    """Run the interactive game loop until the player declines to continue."""
    while True:
        user_action = input("Enter a choice (rock, paper, scissors): ")
        computer_action = random.choice(POSSIBLE_ACTIONS)
        print(f"\nYou chose {user_action}, computer chose {computer_action}.\n")
        print(determine_outcome(user_action, computer_action))
        play_again = input("Play again? (y/n): ")
        if play_again.lower() != "y":
            break


# guard so importing this module does not start the interactive loop
if __name__ == "__main__":
    main()
|
## https://github.com/DigitalCraftsStudents/Instructor-Notes-Clint/blob/master/Programming-102/8-function-return-value.md
# def add_numbers(a,b):
# result = a + b
# return result
# final = add_numbers(1,3) / add_numbers(4,6)
# print(final)
## implicit returns - in python the implicit return is always "None", if no 'return' statement, see below
# def multi_numbers(a,b):
# a*b
# print(multi_numbers(5,6))
## explicit return - return is explicitly stated below, using 'return'
# def multi_numbers(a,b):
# return a*b
# print(multi_numbers(5,6))
###==============
# def make_dictionary(first, last, phone, zip):
# return{
# "first_name":first,
# "last_name":last,
# "phone_number":phone,
# "zip_code":zip
# }
# clint_data = make_dictionary("clint", "fleet", "803-222-0090", "30903")
# for key in clint_data:
# print(key)
###==============
#exercise 1 - Write a program that has a function with two parameters.
#return the concatenated value of the two parameters.
#print the results
# def first_last(a,b):
# full_name = f"{a} {b}"
# return(full_name)
# print(first_last("chad","reynolds"))
##exercise 2
# - Write a program that has a function named total_count that
# expects a list of strings as its argument when the function is called.
#Have the returned value be a dictionary with the keys 'list_length' and 'total_chars'.
#The list_length value needs to be the length of the list and the total_chars needs to be the total count of characters of all of the items in the array.
#Call the function 3 times with 3 different lists.
#(hint) len is usable on lists and strings.
#below is wrong
def total_count(strings):
    """Return summary statistics for a list of strings.

    :param strings: list of strings to measure
    :return: dict with 'list_length' (number of items) and
             'total_chars' (combined character count of all items)
    """
    # BUG FIX: the original had unbalanced parentheses, recursed into itself
    # infinitely, and took no argument; this matches the exercise spec above.
    return {
        "list_length": len(strings),
        "total_chars": sum(len(item) for item in strings),
    }


# call the function three times with three different lists, per the exercise
print(total_count(["i", "am", "great"]))
print(total_count(["chad", "reynolds"]))
print(total_count([]))
from tkinter import *
import random as rnd
# --- module-level state shared by the GUI callbacks below ---
length = 0          # requested password length (set by get_enter)
password = []       # characters of the password being built (generator)
usr_password = ''   # assembled password string shown to the user
ne = ''             # NOTE(review): appears unused in this file - confirm before removing
def get_enter():
    """Read the requested password length from the Entry widget into `length`."""
    global length
    # NOTE(review): int() raises ValueError on non-numeric input - no handling here
    length = int(pass_len.get())
def func():
    """Exit-button callback: close the main window."""
    win.destroy()
def generator():
    """Generate a random password of `length` characters and display it.

    Reads the module-global `length` (set by get_enter) and publishes the
    result through the StringVar `a`, which the existing label2 is bound to.
    """
    global length
    charset = ["'", 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
               't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
               'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '1', '2', '3', '4', '5', '6', '7',
               '8', '9', '0', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+', '=', '{', '}', '[', ']',
               '|', ':', ';', "'", '>', '<', '/', '?', ',', '.', '~', '`']
    # rnd.choice picks uniformly from the full 92-symbol charset; this replaces
    # the original index/concatenation loop and its leftover debug print
    new_password = ''.join(rnd.choice(charset) for _ in range(length))
    # BUG FIX: the original also created a brand-new, never-placed Label on
    # every click, leaking orphan widgets; updating the StringVar is enough
    # to refresh the label already placed in the window
    a.set(new_password)
def one():
    """Generate-button callback: read the length, then build the password."""
    get_enter()
    generator()
# --- build the main window and widgets ---
win = Tk()
# win.wm_attributes("-fullscreen", True)
win.title("RANDOM PASSWORD GENERATOR")
frame = Frame(win, height=800, width=1000, bg='light blue')
frame.pack()
label1 = Label(frame, text="How long do you want your password to be?")
label1.place(x=433, y=200)
# `a` holds the generated password; label2 re-renders whenever it changes
a = StringVar()
label2 = Label(win, textvariable=a)
label2.place(x=445, y=295)
label3 = Label(win, text='Your password is:- ')
label3.place(anchor='ne', x=440, y=295)
# input field for the desired password length (read by get_enter)
pass_len = Entry(frame)
pass_len.place(width=170, x=470, y=250)
pass_button = Button(frame, text='Generate', command=one)
pass_button.place(x=490, y=340)
exit_button = Button(frame, text='Exit', command=func)
exit_button.place(height=25, width=40, x=570, y=340)
# blocks until the window is closed
win.mainloop()
# 64087
from panda3d.core import *
import numpy as np
from Geometry import normalizer
def TupleSum(args):
    '''
    Concatenate the tuples contained in a list into a single tuple.

    :param args: list of tuples
    :return: one tuple with all elements in their original order
    '''
    assert isinstance(args, list)  # idiomatic check (was: type(args) == list)
    # sum() with an empty-tuple start value concatenates the tuples in order
    return sum(args, ())
class RectangleSurface:
    """Triangulated rectangular mesh for Panda3D.

    Builds a GeomNode containing a Vl x Vw grid of vertices over an l x w
    rectangle (centred on the origin, at Z = 0), triangulated with one
    GeomTristrips band per row. Exposes readers for position/normal data
    and a deform() method that rewrites vertex positions in place.
    """
    def __init__(self,l,w,Vl,Vw):
        # l, w: physical length/width; Vl, Vw: vertex counts along each axis
        self.GeomNode = self.create(Vl,Vw,l,w)
        self.NormalTool = normalizer()
        self.SizeData = (l,w,Vl,Vw)
        return None
    def create(self,Vlenght,Vwidth,length,width):
        '''
        Create a triangulated rectangle and return its GeomNode.

        Vlenght/Vwidth are the vertex counts along the length/width axes;
        length/width are the physical dimensions of the plate.
        '''
        VertexCount = Vlenght*Vwidth
        array = GeomVertexArrayFormat()
        array.add_column('vertex',3, Geom.NTFloat32, Geom.CPoint) # we'll work only with vertex coordinates rn, I don't want to mess with lighting and shit
        array.add_column('normal',3, Geom.NTFloat32, Geom.CNormal)
        # NOTE(review): the hand-built `array` above is not registered; the
        # predefined V3n3 format below is what is actually used
        format = GeomVertexFormat.getV3n3() # calling the format this way makes it already predefined
        LocalVdata = GeomVertexData('DynamicPlate', format, Geom.UH_static)
        LocalVdata.setNumRows(VertexCount)
        # writers for the two vertex columns
        vertex = GeomVertexWriter(LocalVdata,'vertex')
        normal = GeomVertexWriter(LocalVdata,'normal')
        #LSpacing , WSpacing = length/Vlenght , width/Vwidth # not necessary since we're using numpy to calculate coordinates
        # evenly spaced grid coordinates centred on the origin
        LCoord , WCoord = np.linspace(-length/2,length/2,Vlenght) , np.linspace(-width/2,width/2,Vwidth)
        localZ = 0 # defines Z height of the plane (DynamicPlate)
        for x in LCoord:
            for y in WCoord:
                vertex.addData3f(x,y,localZ)
                normal.addData3d(0,0,-1) # initial vector
        # vertex data has been created, we still need the geomprimitives
        #GPrimList = []
        tempGeom = Geom(LocalVdata)
        for i in range(Vwidth-1):
            TempData = TupleSum([(x+i-1,x+i) for x in range(1,VertexCount,Vwidth)]) # this tuple contains the list of indexes for the vertices of each geomtristrip (one band at a time)
            '''
            primitive = GeomTristrips(Geom.UHStatic)
            for j in TempData:
                #assert j < LocalVdata.get_num_rows() # debug
                primitive.add_vertex(j)
            primitive.close_primitive()
            #GPrimList.append(primitive)
            tempGeom.add_primitive(primitive)
            '''
            # kinda long code for such a simple thing
            # reorder indices pairwise so the strip winds consistently
            TempData = list(TempData)
            bufferData = list(tuple(TempData[:1]) + TupleSum([(TempData[x],TempData[x-1]) for x in range(2, len(TempData),2)]))
            if len(bufferData) != len(TempData):
                bufferData.append(TempData[len(TempData)-1])
            TempData = bufferData
            TempData = tuple(TempData)
            primitive = GeomTristrips(Geom.UHStatic)
            for j in TempData:
                primitive.add_vertex(j)
            primitive.close_primitive()
            tempGeom.add_primitive(primitive)
        '''
        WARNING: FURTHER AUTOMATED NORMAL CALCULATION SHOULD BE INSERTED HERE
        (the vertices and primitives have been defined, but the node hasn't
        been created yet)
        ...
        nvm solved it
        '''
        PlateNode = GeomNode('gnode')
        PlateNode.addGeom(tempGeom)
        #PlateNodePath = render.attachNewNode(PlateNode)
        return PlateNode
    def GetPosData(self):
        '''
        Provides positional data for each vertex. Output format: LVecBase3f List
        '''
        # https://docs.panda3d.org/1.10/python/programming/internal-structures/other-manipulation/reading-existing-geometry#reading-existing-geometry-data
        PosOutput = [] # vertices
        for i in range(self.GeomNode.getNumGeoms()): # we know it only contains one in this particular algorithm
            geom = self.GeomNode.getGeom(i)
            #state = self.GeomNode.getGeomState(i) # unused variable (that's why I commented it)
            vdata = geom.getVertexData() # at this point we have all the positions stored here
            # creating readers
            vertex = GeomVertexReader(vdata, "vertex")
            # I need to transfer the positional data to the ouput list (one sublist per geom)
            BufferPosList = []
            # vertex scanning
            while not vertex.isAtEnd():
                BufferPosList.append(vertex.getData3()) # stored data for this particular geom
            PosOutput.append(BufferPosList)
        return PosOutput[0] # format: 1D array, one sublist per encountered geom, each sublist contains LVecBase3f positional values (we only need the first and only geom)
    def GetNormalData(self):
        '''
        Provides normal data for each vertex. Output format: LVecBase3f List
        '''
        NormalOutput = []
        for i in range(self.GeomNode.getNumGeoms()):
            geom = self.GeomNode.getGeom(i)
            vdata = geom.getVertexData()
            normal = GeomVertexReader(vdata, "normal")
            vertex = GeomVertexReader(vdata, "vertex")
            BufferNormalList = []
            while not vertex.isAtEnd():
                vertex.getData3() # while condition toggling
                BufferNormalList.append(normal.getData3()) # a - sign is necessary
            NormalOutput.append(BufferNormalList)
        return NormalOutput[0] # there's only one geomNode, I created a list in case I need to add more stuff
    def deform(self,data): # data is the position map
        '''
        Rewrite every vertex position from *data* (list of float triples),
        then recompute and apply normals via the NormalTool helper.
        '''
        geom = self.GeomNode.modifyGeom(0)
        vdata = geom.modifyVertexData()
        # prim = geom.modifyPrimitive(0) # not necessary here, could be usefull in any other situation tho
        vertexWriter = GeomVertexRewriter(vdata, 'vertex')
        for i in range(len(data)):
            assert (type(data[i][0]) == float and type(data[i][1]) == float and type(data[i][2]) == float)
            vertexWriter.setRow(i)
            vertexWriter.setData3f(data[i])
        output = self.NormalTool.compute_data(data, self.SizeData)
        self.NormalTool.blit_normals(output, geom) # apply changes
        return None # this function modifies the geomNode as a global var
# NOTE(review): leftover scratch code - builds an unused vertex format at
# import time; confirm nothing imports `array` from this module before removing
array = GeomVertexArrayFormat()
array.addColumn("vertex",3,Geom.NTFloat32,Geom.CPoint)
# This program solves the farmer, grain, goose, fox problem
'''
Character Code Reference
F = Farmer
G = Grain
E = Goose
X = Fox
'''
# Define position of characters in state list.
# A state is a list of four booleans meaning "is on the starting bank"
# for Farmer, Grain, goosE and foX (see the character-code reference above).
char_pos = {
    'F': 0,
    'G': 1,
    'E': 2,
    'X': 3
}


# returns the result of an action on a given state
def get_result(state, action):
    """Return the state produced by applying *action* to *state*.

    :param state: list of four bank flags (not mutated)
    :param action: string of character codes crossing the river, e.g. 'FG'
    :return: a new state list with each traveller's flag toggled
    """
    new_state = state.copy()
    for char in action:
        # `not` replaces the original `False if ... else True` round-trip;
        # direct indexing fails fast on an unknown character code
        new_state[char_pos[char]] = not new_state[char_pos[char]]
    return new_state
# return if given state is valid
def is_state_valid(state):
    """A state is safe unless the goose shares a bank with the grain or the
    fox while the farmer is on the other bank."""
    farmer, grain, goose, fox = state
    # goose eats the grain if they share a bank the farmer is not on
    if goose == grain and farmer != goose:
        return False
    # fox eats the goose if they share a bank the farmer is not on
    if goose == fox and farmer != goose:
        return False
    return True
# returns list of valid actions for given state
def get_valid_actions(state):
    """Return the crossings from *state* whose resulting state is safe."""
    # the farmer always crosses, alone or with exactly one passenger
    return [
        action
        for action in ['F', 'FG', 'FE', 'FX']
        if is_state_valid(get_result(state, action))
    ]
# returns if given state matches the end goal
def is_goal_state(state):
    """True when every traveller has crossed, i.e. all bank flags are False.

    The goal state is [False, False, False, False]; the original element-wise
    comparison loop is equivalent to checking that no flag is truthy.
    """
    return not any(state)
# find a valid sequence of actions to solve the problem
def find_sequence(states, sequence):
    """Depth-first search for a crossing sequence that reaches the goal.

    :param states: states visited on the current path (last entry is current)
    :param sequence: actions taken so far along this path
    :return: a winning list of actions, or None if this branch is a dead end
    """
    current = states[-1]
    for action in get_valid_actions(current):
        nxt = get_result(current, action)
        if nxt in states:
            # already visited on this path - avoid cycling forever
            continue
        extended_seq = sequence + [action]
        if is_goal_state(nxt):
            return extended_seq
        found = find_sequence(states + [nxt], extended_seq)
        if found:
            return found
    return None
# main function
def main():
    """Solve the puzzle from the everyone-on-the-start-bank state and print it."""
    start_state = [True, True, True, True]
    print(find_sequence([start_state], []))


if __name__ == '__main__':
    main()
from common.run_method import RunMethod
import allure
@allure.step("极运营/班主任/知识库/知识分类/新增")
def documentDirectory_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Create a knowledge-base category (POST /service-crm/documentDirectory).

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "极运营/班主任/知识库/知识分类/新增"
    # plain string literal: the original used an f-string with no placeholders
    url = "/service-crm/documentDirectory"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任/知识库/知识分类/修改")
def documentDirectory_put(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Update a knowledge-base category (PUT /service-crm/documentDirectory).

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "极运营/班主任/知识库/知识分类/修改"
    # plain string literal: the original used an f-string with no placeholders
    url = "/service-crm/documentDirectory"
    res = RunMethod.run_request("PUT", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任/知识库/知识分类/id查询")
def documentDirectory_id_get(id, params=None, header=None, return_json=True, **kwargs):
    '''
    Fetch a knowledge-base category by id (GET /service-crm/documentDirectory/{id}).

    :param id: category id, interpolated into the URL path
    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "极运营/班主任/知识库/知识分类/id查询"
    url = f"/service-crm/documentDirectory/{id}"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任/知识库/知识分类/列表查询")
def documentDirectory_list_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Query the paged category list (GET /service-crm/documentDirectory/list).

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "极运营/班主任/知识库/知识分类/列表查询"
    # plain string literal: the original used an f-string with no placeholders
    url = "/service-crm/documentDirectory/list"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任/知识库/知识分类/查询所有")
def documentDirectory_all_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Fetch all categories (GET /service-crm/documentDirectory/all).

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "极运营/班主任/知识库/知识分类/查询所有"
    # plain string literal: the original used an f-string with no placeholders
    url = "/service-crm/documentDirectory/all"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/班主任/知识库/知识分类/删除")
def documentDirectory_ids_delete(ids, params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Delete categories by id(s) (DELETE /service-crm/documentDirectory/{ids}).

    :param ids: id(s) to delete, interpolated into the URL path
    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "极运营/班主任/知识库/知识分类/删除"
    url = f"/service-crm/documentDirectory/{ids}"
    res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("知识/知识列表/列表树查询")
def documentDirectory_queryChildDirectory_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Query child directories (GET /service-crm/documentDirectory/queryChildDirectory).

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "知识/知识列表/列表树查询"
    # plain string literal: the original used an f-string with no placeholders
    url = "/service-crm/documentDirectory/queryChildDirectory"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("知识/知识列表/目录树查询")
def documentDirectory_queryDirectoryTree_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Query the directory tree (GET /service-crm/documentDirectory/queryDirectoryTree).

    :param params: query-string parameters appended to the URL
    :param return_json: return the JSON-decoded response when True (default)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod (e.g. target host)
    :return: JSON-decoded response by default; raw response when return_json=False
    '''
    name = "知识/知识列表/目录树查询"
    # plain string literal: the original used an f-string with no placeholders
    url = "/service-crm/documentDirectory/queryDirectoryTree"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
"""
spine @ rig
a simple spline ik setup
"""
import maya.cmds as mc
from .. base import module
from .. base import control
def build(
        spineJoints,
        rootJoint,
        spineCurve,
        bodyLocator,
        chestLocator,
        pelvisLocator,
        prefix = 'spine',
        rigScale =1.0,
        baseRig = None
        ):
    """
    Build a spline-IK spine rig module.

    @param spineJoints: list( str ), list of spine joints
    @param rootJoint: str, root joint
    @param spineCurve: str, name of spine cubic curve with CVs matching first spine joints
    @param bodyLocator: str, reference transform for position of body control
    @param chestLocator: str, reference transform for position of chest control
    @param pelvisLocator: str, reference transform for position of pelvis control
    @param prefix: str, prefix to name new objects
    @param rigScale: float, scale factor for size of controls
    @param baseRig: instance of base.module.Base class
    @return: dictionary with rig module objects ('module' and 'bodyCtl' keys)
    """
    #make rig Module
    rigModule = module.Module(prefix = prefix, baseObj = baseRig)
    #make spineCurve clusters - one cluster per CV, used to drive the IK curve
    spineCurveCVs = mc.ls(spineCurve + '.cv[*]', flatten =1)
    numSpineCVs = len(spineCurveCVs)
    spineCurveClusters = []
    for i in range(numSpineCVs):
        cls = mc.cluster(spineCurveCVs[i], n = prefix + 'cluster%d'%(i+1))[1]
        spineCurveClusters.append(cls)
    mc.hide(spineCurveClusters)
    mc.parent( spineCurve, rigModule.partsNoTransGrp )
    #make controls: body is the parent of chest/pelvis/midSection
    bodyCtl = control.Control(prefix = prefix + 'Body',
                              translateTo = bodyLocator,
                              scale = rigScale*4,
                              parent = rigModule.controlsGrp,
                              orient = [1,0,0]
                              )
    chestCtl = control.Control(prefix = prefix + 'Chest',
                               translateTo = chestLocator,
                               scale = rigScale*6,
                               parent = bodyCtl.C,
                               orient = [0,0,1]
                               )
    pelvisCtl = control.Control(prefix = prefix + 'Pelvis',
                                translateTo = pelvisLocator,
                                scale = rigScale*6,
                                parent = bodyCtl.C,
                                orient = [0,0,1]
                                )
    midSectionCtl = control.Control(prefix = prefix + 'MidSection',
                                    translateTo = spineCurveClusters[2],
                                    scale = rigScale*6,
                                    parent = bodyCtl.C,
                                    orient = [0,0,1]
                                    )
    _offSetBodyCtlShape( bodyCtl, spineJoints, rigScale )
    #parent controls - sr = skipRotation: midSection follows chest+pelvis translation only
    mc.parentConstraint(chestCtl.C, pelvisCtl.C, midSectionCtl.Off, sr = ['x','y','z'], mo =1)
    #parent clusters: first two to pelvis, middle to midSection, last two to chest
    mc.parent(spineCurveClusters[:2], pelvisCtl.C)
    mc.parent(spineCurveClusters[2:-2], midSectionCtl.C)
    mc.parent(spineCurveClusters[-2:], chestCtl.C)
    #attachChestJoint
    mc.orientConstraint(chestCtl.C, spineJoints[-2], mo=1)
    #ik spline setup (ccv=0/parentCurve=0: reuse the existing curve, don't reparent it)
    spineIK = mc.ikHandle(n= prefix + '_ikh',solver = 'ikSplineSolver', sj = spineJoints[0],
                          ee = spineJoints[-2], c= spineCurve, ccv=0, parentCurve=0)[0]
    mc.hide(spineIK)
    mc.parent(spineIK, rigModule.partsNoTransGrp)
    #setup ik twist (using advanced twist maya stuff)
    mc.setAttr(spineIK + '.dTwistControlEnable', 1)
    mc.setAttr(spineIK + '.dWorldUpType', 4)
    mc.connectAttr(chestCtl.C + '.worldMatrix', spineIK + '.dWorldUpMatrixEnd')
    mc.connectAttr(pelvisCtl.C + '.worldMatrix', spineIK + '.dWorldUpMatrix')
    #parent root joint
    mc.parentConstraint(pelvisCtl.C, rootJoint, mo =1)
    return {'module':rigModule, 'bodyCtl':bodyCtl}
def _offSetBodyCtlShape(bodyCtl, spineJoints, rigScale):
    """Offset the body control's curve shape along Y without moving its transform.

    Clusters the control's shape nodes, parents the cluster under a temporary
    group attached to the third spine joint, moves it 10 * rigScale in object
    space Y, then deletes construction history to bake the offset in.
    """
    offsetGrp = mc.group( em = 1, p = bodyCtl.C )
    mc.parent( offsetGrp, spineJoints[2] )
    ctrlCls = mc.cluster( mc.listRelatives( bodyCtl.C, s = 1 ) )[1]
    mc.parent( ctrlCls, offsetGrp )
    mc.move( 10 * rigScale, offsetGrp, moveY = 1, relative = 1, objectSpace = 1 )
    mc.delete( bodyCtl.C, ch = 1 )
|
from cudatext import *
"""
in the Editor of created dialog: links are unclickable
"""
class Command:
    """CudaText plugin command that opens a dialog hosting an Editor control.

    Repro case for the header note: links typed into the dialog's Editor
    are not clickable, whether the dialog is shown modal or non-modal.
    """
    def run(self):
        # plugin entry point invoked by CudaText
        h, editor = self.init_form()
        # neither modal nor non-modal showing makes the links clickable
        #dlg_proc(h, DLG_SHOW_NONMODAL)
        dlg_proc(h, DLG_SHOW_MODAL)
    def init_form(self):
        """Create a 200x100 dialog with a client-aligned editor containing a URL.

        Returns (dialog_handle, Editor) so the caller can show the dialog.
        """
        h = dlg_proc(0, DLG_CREATE)
        dlg_proc(h, DLG_PROP_SET, prop={
            'w': 200,
            'h': 100,
        })
        n = dlg_proc(h, DLG_CTL_ADD, 'editor')
        dlg_proc(h, DLG_CTL_PROP_SET, index=n, prop={
            'align': ALIGN_CLIENT,
        })
        # wrap the raw control handle in the scripting Editor API
        h_ed = dlg_proc(h, DLG_CTL_HANDLE, index=n)
        ed = Editor(h_ed)
        ed.set_text_all('\nhttp://t.co\n')
        return h, ed
|
# Evaluate a trained network on the held-out slice and plot predictions
# against targets (translated from the original Chinese comment).
from keras.models import Sequential
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt

model_name = "ResNet50"

X = np.load('drive/app/X_data.npy')
Y = np.load('drive/app/Y_data.npy')
# NOTE(review): dividing by 25 is unusual for normalisation (255 is typical
# for image data) - confirm this matches the scaling used during training
X = X / 25
x_test = X[5000:]
y_test = Y[5000:]

# BUG FIX: the original created `model = Sequential()` and immediately
# overwrote it with load_model's return value - the dead assignment is removed
model = load_model('drive/app/' + model_name + '_model.h5')
y_pre = model.predict(x_test, batch_size=None, verbose=0, steps=None)

# scatter of predictions vs. ground truth, saved next to the model
plt.title(model_name + " test")
plt.xlabel("y_test")
plt.ylabel("y_pre")
plt.scatter(y_test, y_pre)
#plt.plot(x_test,x_pre,"ob")
plt.savefig('drive/app/' + model_name + '_test.jpg', dpi=200)
plt.show()
from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.template import RequestContext
from .models import Petition
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
# Create your views here.
class PetitionView(generic.ListView):
    """List view rendering a single petition on the petition page."""
    template_name = 'petition/petition.html'
    context_object_name = 'latest_petition_list'
    def get_queryset(self):
        # only one petition is exposed to the template
        # NOTE(review): .all()[:1] relies on the model's default ordering - confirm
        return Petition.objects.all()[:1]
def vote(request, petition_id):
    """Register a vote for a petition, or re-render the page with an error.

    Looks up the Vote selected in request.POST['vote']; on a missing key or
    unknown vote it re-renders the petition page with an error message,
    otherwise increments the counter and redirects to the petition view.
    """
    p = get_object_or_404(Petition, pk=petition_id)
    try:
        selected_vote = p.vote_set.get(pk=request.POST['vote'])
    except (KeyError, Vote.DoesNotExist):
        # NOTE(review): Vote is referenced but not imported in this file -
        # it likely needs `from .models import Vote`; confirm against models.py
        # BUG FIX: the context dict was outside the render() call, so the view
        # returned a (response, dict) tuple instead of an HttpResponse
        return render(request, 'petition/petition.html', {
            'petition' : p,
            'error_message' : "you didn't support the petition"
        })
    else:
        selected_vote.votes += 1
        selected_vote.save()
        return HttpResponseRedirect(reverse('petition:petition'))
|
def compute_sum(n, total):
    """
    Recursively compute the sum 0 + 1 + ... + n.

    :param n: non-negative upper bound of the range
    :param total: single-element list used as a running accumulator;
                  total[0] holds the most recent partial sum after each call
    :return: the sum of the integers from 0 to n
    """
    # base case: the sum of range 0..0 is 0
    if n == 0:
        return 0
    # BUG FIX: the original added the recursive call's implicit None return
    # into total[0] (TypeError for n >= 2) and never returned a value on the
    # recursive path; each frame now returns its partial sum explicitly.
    result = n + compute_sum(n - 1, total)
    total[0] = result
    return result
n = 5  # expected 15
total = [0]
# BUG FIX: the original called compute_sum(3, total), contradicting the
# "expected 15" note above; use n so the comment and output agree
print(compute_sum(n, total))
|
import os
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms
def convrelu(in_channels, out_channels, kernel, padding):
    """Conv2d followed by an in-place ReLU, as one Sequential block."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def normal_init(m, mean, std):
    """Initialize conv / transposed-conv weights from N(mean, std) and zero the bias.

    Modules of any other type are left untouched.
    """
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        m.bias.data.zero_()
class ResNetUNet(nn.Module):
    """UNet-style network with a ResNet-18 encoder and skip connections.

    The pretrained ResNet stages form the downsampling path; 1x1 conv+ReLU
    blocks adapt each encoder output before it is concatenated into the
    upsampling path. A separate full-resolution branch on the raw input is
    merged at the end, and a final 1x1 conv maps to `output_channel_num`
    channels at the input resolution.
    """
    def __init__(self, input_channel, output_channel_num):
        super(ResNetUNet,self).__init__()
        # NOTE: pretrained=True downloads ImageNet weights on first use.
        base_model = models.resnet18(pretrained=True)
        # Replace the stem conv so the encoder accepts `input_channel` channels.
        base_model.conv1 = torch.nn.Conv2d(input_channel,64,kernel_size=(7, 7),stride=(2, 2),padding=(3, 3),bias=False)
        self.base_layers = list(base_model.children())
        self.layer0 = nn.Sequential(
            *self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
        self.layer0_1x1 = convrelu(64, 64, 1, 0)
        self.layer1 = nn.Sequential(
            *self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
        self.layer1_1x1 = convrelu(64, 64, 1, 0)
        self.layer2 = self.base_layers[5]  # size=(N, 128, x.H/8, x.W/8)
        self.layer2_1x1 = convrelu(128, 128, 1, 0)
        self.layer3 = self.base_layers[6]  # size=(N, 256, x.H/16, x.W/16)
        self.layer3_1x1 = convrelu(256, 256, 1, 0)
        self.layer4 = self.base_layers[7]  # size=(N, 512, x.H/32, x.W/32)
        self.layer4_1x1 = convrelu(512, 512, 1, 0)
        # Shared x2 bilinear upsampler used between every decoder stage.
        self.upsample = nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=True)
        # Decoder convs; input channels = skip channels + incoming channels.
        self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
        self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
        self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
        self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
        # Full-resolution branch computed directly from the raw input.
        self.conv_original_size0 = convrelu(input_channel, 64, 3, 1)
        self.conv_original_size1 = convrelu(64, 64, 3, 1)
        self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
        self.conv_last = nn.Conv2d(64, output_channel_num, 1)
    def forward(self, input):
        # Full-resolution features from the raw input (merged in at the end).
        x_original = self.conv_original_size0(input)
        x_original = self.conv_original_size1(x_original)
        # Encoder path: progressively downsampled ResNet stages.
        layer0 = self.layer0(input)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        # Decoder path: at each stage — upsample, adapt the skip connection
        # with its 1x1 conv, concatenate along channels, then convolve.
        layer4 = self.layer4_1x1(layer4)
        x = self.upsample(layer4)
        layer3 = self.layer3_1x1(layer3)
        x = torch.cat([x, layer3], dim=1)
        x = self.conv_up3(x)
        x = self.upsample(x)
        layer2 = self.layer2_1x1(layer2)
        x = torch.cat([x, layer2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample(x)
        layer1 = self.layer1_1x1(layer1)
        x = torch.cat([x, layer1], dim=1)
        x = self.conv_up1(x)
        x = self.upsample(x)
        layer0 = self.layer0_1x1(layer0)
        x = torch.cat([x, layer0], dim=1)
        x = self.conv_up0(x)
        # Back at input resolution: merge with the full-resolution branch.
        x = self.upsample(x)
        x = torch.cat([x, x_original], dim=1)
        x = self.conv_original_size2(x)
        out = self.conv_last(x)
        return out
    def weight_init(self, mean, std):
        # Re-initialize every (transposed) conv submodule from N(mean, std).
        for m in self._modules:
            normal_init(self._modules[m], mean, std)
#====================================
# Generators
#====================================
class Pix2PixUNetGenerator( nn.Module ):
    """
    Generator with a UNet architecture.
    """
    def __init__(
        self,
        n_in_channels = 3,
        n_out_channels = 3,
        n_fmaps = 64,
        dropout = 0.5 # The input noise z for generator G is realized as noise in the sense of applying dropout directly.
    ):
        super( Pix2PixUNetGenerator, self ).__init__()
        def conv_block( in_dim, out_dim ):
            # Two 3x3 convs with batch norm, LeakyReLU and dropout; spatial size preserved.
            model = nn.Sequential(
                nn.Conv2d( in_dim, out_dim, kernel_size=3, stride=1, padding=1 ),
                nn.BatchNorm2d( out_dim ),
                nn.LeakyReLU( 0.2, inplace=True ),
                nn.Conv2d( out_dim, out_dim, kernel_size=3, stride=1, padding=1 ),
                nn.BatchNorm2d( out_dim ),
                nn.Dropout( dropout )
            )
            return model
        def dconv_block( in_dim, out_dim ):
            # Transposed conv that doubles the spatial resolution.
            model = nn.Sequential(
                nn.ConvTranspose2d( in_dim, out_dim, kernel_size=3, stride=2, padding=1,output_padding=1 ),
                nn.BatchNorm2d(out_dim),
                nn.LeakyReLU( 0.2, inplace=True ),
                nn.Dropout( dropout )
            )
            return model
        # Encoder (downsampling)
        self.conv1 = conv_block( n_in_channels, n_fmaps )
        self.pool1 = nn.MaxPool2d( kernel_size=2, stride=2, padding=0 )
        self.conv2 = conv_block( n_fmaps*1, n_fmaps*2 )
        self.pool2 = nn.MaxPool2d( kernel_size=2, stride=2, padding=0 )
        self.conv3 = conv_block( n_fmaps*2, n_fmaps*4 )
        self.pool3 = nn.MaxPool2d( kernel_size=2, stride=2, padding=0 )
        self.conv4 = conv_block( n_fmaps*4, n_fmaps*8 )
        self.pool4 = nn.MaxPool2d( kernel_size=2, stride=2, padding=0 )
        # Bottleneck between encoder and decoder.
        self.bridge=conv_block( n_fmaps*8, n_fmaps*16 )
        # Decoder (upsampling); the up* blocks consume the skip concatenation.
        self.dconv1 = dconv_block( n_fmaps*16, n_fmaps*8 )
        self.up1 = conv_block( n_fmaps*16, n_fmaps*8 )
        self.dconv2 = dconv_block( n_fmaps*8, n_fmaps*4 )
        self.up2 = conv_block( n_fmaps*8, n_fmaps*4 )
        self.dconv3 = dconv_block( n_fmaps*4, n_fmaps*2 )
        self.up3 = conv_block( n_fmaps*4, n_fmaps*2 )
        self.dconv4 = dconv_block( n_fmaps*2, n_fmaps*1 )
        self.up4 = conv_block( n_fmaps*2, n_fmaps*1 )
        # Output layer
        self.out_layer = nn.Sequential(
            nn.Conv2d( n_fmaps, n_out_channels, 3, 1, 1 ),
            nn.Tanh(),
        )
        return
    def forward( self, input ):
        # Encoder (downsampling)
        conv1 = self.conv1( input )
        pool1 = self.pool1( conv1 )
        conv2 = self.conv2( pool1 )
        pool2 = self.pool2( conv2 )
        conv3 = self.conv3( pool2 )
        pool3 = self.pool3( conv3 )
        conv4 = self.conv4( pool3 )
        pool4 = self.pool4( conv4 )
        # Bottleneck
        bridge = self.bridge( pool4 )
        # Decoder (upsampling) & skip connection
        dconv1 = self.dconv1(bridge)
        concat1 = torch.cat( [dconv1,conv4], dim=1 )
        up1 = self.up1(concat1)
        dconv2 = self.dconv2(up1)
        concat2 = torch.cat( [dconv2,conv3], dim=1 )
        up2 = self.up2(concat2)
        dconv3 = self.dconv3(up2)
        concat3 = torch.cat( [dconv3,conv2], dim=1 )
        up3 = self.up3(concat3)
        dconv4 = self.dconv4(up3)
        concat4 = torch.cat( [dconv4,conv1], dim=1 )
        up4 = self.up4(concat4)
        # Output layer
        output = self.out_layer( up4 )
        return output
    def weight_init(self, mean, std):
        # Re-initialize every (transposed) conv submodule from N(mean, std).
        for m in self._modules:
            normal_init(self._modules[m], mean, std)
#====================================
# Discriminators
#====================================
class Pix2PixPatchGANDiscriminator( nn.Module ):
    """
    PatchGAN discriminator.
    """
    def __init__(
        self,
        n_in_channels = 3,
        n_fmaps = 32
    ):
        super( Pix2PixPatchGANDiscriminator, self ).__init__()
        # The discriminator network adopts PatchGAN, but it does not directly
        # cut out patches or stride over them; instead this is expressed with
        # convolutions.
        # Each pixel of the feature map produced by the CNN is influenced only
        # by a certain region of the input image (its receptive field) —
        # conversely, only that region can influence the pixel.
        # Therefore "making the final output a feature map of some size and
        # judging real/fake at each pixel" is equivalent to "splitting the
        # input image into patches and judging real/fake per patch".
        def discriminator_block1( in_dim, out_dim ):
            # Strided conv + LeakyReLU (no normalization on the first block).
            model = nn.Sequential(
                nn.Conv2d( in_dim, out_dim, 4, stride=2, padding=1 ),
                nn.LeakyReLU( 0.2, inplace=True )
            )
            return model
        def discriminator_block2( in_dim, out_dim ):
            # Strided conv + instance norm + LeakyReLU.
            model = nn.Sequential(
                nn.Conv2d( in_dim, out_dim, 4, stride=2, padding=1 ),
                nn.InstanceNorm2d( out_dim ),
                nn.LeakyReLU( 0.2, inplace=True )
            )
            return model
        # Input is the channel-wise concatenation of the two images (x, y).
        self.layer1 = discriminator_block1( n_in_channels * 2, n_fmaps )
        self.layer2 = discriminator_block2( n_fmaps, n_fmaps*2 )
        self.layer3 = discriminator_block2( n_fmaps*2, n_fmaps*4 )
        self.layer4 = discriminator_block2( n_fmaps*4, n_fmaps*8 )
        self.output_layer = nn.Sequential(
            nn.ZeroPad2d( (1, 0, 1, 0) ),
            nn.Conv2d( n_fmaps*8, 1, 4, padding=1, bias=False ),
            nn.Sigmoid()
        )
    def forward(self, x, y ):
        # Concatenate source and target images along the channel axis.
        output = torch.cat( [x, y], dim=1 )
        output = self.layer1( output )
        output = self.layer2( output )
        output = self.layer3( output )
        output = self.layer4( output )
        output = self.output_layer( output )
        output = output.view(-1)  # flatten the per-patch score map into a 1-D vector
        return output
    def weight_init(self, mean, std):
        # Re-initialize every (transposed) conv submodule from N(mean, std).
        for m in self._modules:
            normal_init(self._modules[m], mean, std)
|
"""
If you run this program from the cmd you can search for a keyword in the last crawler result.
You can run this from the cmd with:
$ searcher.py search keyword
The search result is printed in a cmd compatible version and saved to "./search_results/result.json".
TODO:
- make use of some cool regex!
"""
import json
import os
import fire
from crawler import Crawler
class Searcher:
    """
    this class provides an interface to the crawlers last result.
    when initialized the self.data is synced with the latest crawler result in "./res/"
    and made searchable.
    important functions:
    - manual_search(keyword, save=True)
        searches all article titles/descriptions for the given keyword and returns the matches as a list.
    - search()
        updates the current dataset and does a full search based on all users in self.users and the
        filters set there.
    - update()
        updates the internal dataset with the newest from "./crawler_results/".
    """

    def __init__(self):
        # setting paths
        self.__SEP = os.path.sep
        self.__BASE_PATH = os.path.abspath('.')
        self.__RESULTS_PATH = self.__BASE_PATH + self.__SEP + 'search_results'
        self.__USERS_PATH = self.__BASE_PATH + self.__SEP + 'users.json'
        # init __CRAWLER instance for getting the data
        self.__CRAWLER = Crawler()
        # get the data
        self.data = self.__CRAWLER.get_last_article_result()
        # get users ("with" already closes the file; the explicit close() was redundant)
        with open(self.__USERS_PATH, 'r') as fp:
            self.__USERS = json.load(fp)

    @staticmethod
    def _article_matches(article, keyword):
        """Return True if `keyword` occurs in the article's title or, when
        present, its description. `keyword` must already be lower-cased.
        """
        if keyword in article['title'].lower():
            return True
        description = article['description']
        return description is not None and keyword in description.lower()

    def _find_matches(self, keyword):
        """Collect every article in self.data that matches `keyword`
        (already lower-cased). Shared by manual_search() and search().
        """
        return [article
                for source in self.data
                for article in source['articles']
                if self._article_matches(article, keyword)]

    def manual_search(self, keyword, save=True):
        """
        this function is for a search from the command line. Gets saved to
        "./search_results/manual_search_result.json" per default.
        $ searcher.py search keyword
        :param keyword: keyword to search for
        :param save: boolean; if set to true, result is saved to file.
        :return: result
        """
        self.update()
        keyword = str(keyword).lower()
        full_result = self._find_matches(keyword)
        # save result to file
        if save:
            filename_path = self.__RESULTS_PATH + self.__SEP + "manual_search_result.json"
            with open(filename_path, "w") as fp:
                json.dump(full_result, fp)
        return full_result

    def search(self):
        """
        updates the current dataset and does a full search based on all users in self.users and the
        filters set there. the result is saved to "./search_results/result.json".
        :return: result-dictionary - [{"username" : "name",
        "results": ["filter":"filtername", "results":[list of articles]]]
        """
        self.update()
        result = []
        # iterate through each user and its filters
        # (the loop variable was renamed from `filter`, which shadowed the builtin)
        for user in self.__USERS:
            tmp_result = dict(
                username=user['name'],
                results=list()
            )
            for user_filter in user['filters']:
                keyword = user_filter['keyword'].lower()
                matches = self._find_matches(keyword)
                # only record filters that produced at least one hit
                if matches:
                    tmp_result['results'].append(dict(filter=keyword, results=matches))
            result.append(tmp_result)
        # write the whole thing to disk
        filename = self.__RESULTS_PATH + self.__SEP + "result.json"
        with open(filename, 'w') as fp:
            json.dump(result, fp)
        return result

    def update(self):
        """ updates self.data with the newest file in "./crawler_results/"
        """
        self.data = self.__CRAWLER.get_last_article_result()
#####
# the following functions are only for providing cli via fire library.
#####
def fire_search(keyword):
    """CLI entry point: search the latest crawl for `keyword` and print the matches."""
    matches = Searcher().manual_search(keyword, save=False)
    fire_print(matches)
def fire_print(search_result):
    """Print each result entry to the console, separated by dashed rules."""
    separator = 50 * '-'
    for entry in search_result:
        print()
        print(separator)
        for key, value in entry.items():
            print("{0}: {1}".format(key, value))
        print(separator)
if __name__ == '__main__':
    # Expose "search" as a CLI command via the fire library.
    fire.Fire({'search': fire_search})
from django.db import models
import parser
from django.contrib.auth.models import User
class Comic(models.Model):
    """A webcomic (e.g.: "xkcd", "Questionable Content").
    Each webcomic has a strategy for retrieving its data:
    - Next Button Harvesting: The system will search for a "Next" link on the comic's last
    page, checking if there is a new comic pointed there
    - Archive Listing: The system will monitor an "archives" page, trying to find the next
    comic there
    - URL Pattern: The system will look for numbered URLs, e.g., it would see
    "http://mycomic.com/view.php?id=1234" and try to find the next one as 1235.
    """
    # Single-letter codes stored in `strategy`, matching the docstring above.
    STRATEGY_CHOICES = (
        ("N", "Next Button Harvesting"),
        ("L", "Archive Listing"),
        ("U", "URL pattern")
    )
    name = models.CharField(max_length=255)
    home_url = models.CharField(max_length=2000, null=False, db_index=True);
    strategy = models.CharField(max_length=1, choices=STRATEGY_CHOICES)
    # XPath of the "next" link and the HTML expected there (strategy "N").
    next_button_xpath = models.CharField(max_length=500, null=True)
    next_button_expected_html = models.CharField(max_length=2000, null=True)
    # Optional XPath used to extract an episode's title from its page.
    episode_title_xpath = models.CharField(max_length=500, null=True)
    archive_url = models.CharField(max_length=2000, null=True)  # strategy "L"
    url_pattern = models.CharField(max_length=2000, null=True)  # strategy "U"
    def __unicode__(self):
        return self.name
    def checkNewEpisode(self):
        """Checks if a comic has a new episode, and, if so, update the comic"""
        # Only the "Next Button Harvesting" strategy is implemented here;
        # the "L" and "U" strategies fall through and return False.
        if self.strategy == "N":
            # The latest known episode is the one with the highest `order`.
            last_episode = self.episode_set.order_by("-order")[0]
            next_comic_url = parser.getNext(
                last_episode.url,
                self.next_button_xpath,
                self.next_button_expected_html,
            )
            if next_comic_url:
                e = Episode()  # Episode is defined later in this module; resolved at runtime
                e.comic = self
                e.order = last_episode.order + 1
                e.url = next_comic_url
                if self.episode_title_xpath:
                    e.title = parser.getTextForXpath(next_comic_url, self.episode_title_xpath)
                else:
                    # TODO
                    pass
                e.save()
                return True
        return False
class Episode(models.Model):
    """Each webcomic is divided in episodes, which are required to have unique URLs"""
    comic = models.ForeignKey(Comic)
    order = models.IntegerField()  # position within the comic; starts at 1 (see initNextBasedComic)
    title = models.CharField(max_length=500)
    url = models.CharField(max_length=2000, db_index=True)
    def __unicode__(self):
        return self.comic.name + " - " + self.title
    def next(self):
        """Returns the "next" episode, if available"""
        # Look up the episode with the following `order` for the same comic.
        episodes = Episode.objects.filter(comic=self.comic).filter(order=self.order + 1)
        return episodes[0] if episodes else None
class UserProfile(models.Model):
    """Users are stored on Django's facility. This class extends it to allow linking them to comics, episodes and such"""
    user = models.ForeignKey(User, unique=True)
    read_episodes = models.ManyToManyField(Episode, related_name="read_by_users")
    # read() keeps at most one "last read" episode per comic in this relation.
    last_read_episodes = models.ManyToManyField(Episode, related_name="last_read_by_users")
    favorite_comics = models.ManyToManyField(Comic)
    def read(self, episode):
        """Marks an episode as "read" (also making it the last-read-episode) """
        self.read_episodes.add(episode)
        # Replace the previous last-read episode for this episode's comic, if any.
        last_read = self.last_read_episodes.filter(comic=episode.comic)
        if last_read:
            self.last_read_episodes.remove(last_read[0])
        self.last_read_episodes.add(episode)
    def unread(self, episode):
        """Removes the "read" status from an episode (without affecting the last-read)"""
        self.read_episodes.remove(episode)
def initNextBasedComic(name, home_url, url_episode_1, url_episode_2, url_episode_3, title_episode_2="", episode_title_xpath=""):
    """Creates a new next-harvesting-based webcomic on the database.
    It needs the comic name, URL for the first three episodes and title for the third
    (to deduce the title xpath)
    It can search for the title xpath by receving the second episode's title, or receive
    the xpath directly. If no xpath is supplied or searched, episodes will have
    auto-generated titles.
    Returns the newly created comic"""
    # Comic setup (finding the next button and, if needed, title xpath)
    c = Comic()
    c.name = name
    c.home_url = home_url
    c.strategy = "N"
    # Deduce the "next" button by finding what links episode 2 to episode 3.
    links = parser.findLinks(url_episode_2, url_episode_3)
    if links:
        (c.next_button_xpath, c.next_button_expected_html) = links[0]
    else:
        raise ValueError("Can't find link from " + url_episode_2 + " to " + url_episode_3)
    if not episode_title_xpath:
        if title_episode_2:
            # Locate the element holding the known title to infer the xpath.
            episode_title_xpath = parser.findXpathFor(url_episode_2, title_episode_2)
            if not episode_title_xpath:
                raise ValueError("Can't find element containing title '" + title_episode_2 + "' at " + url_episode_2)
    # Initial episodes setup
    (e1, e2) = (Episode(), Episode())
    (e1.order, e2.order) = (1, 2)
    (e1.url, e2.url) = (url_episode_1, url_episode_2)
    if episode_title_xpath:
        c.episode_title_xpath = episode_title_xpath
        (e1.title, e2.title) = (parser.getTextForXpath(url_episode_1, episode_title_xpath),
                                parser.getTextForXpath(url_episode_2, episode_title_xpath))
    # Persist the comic, then the episodes
    # (the object association is lost if you do it before saving)
    c.save()
    (e1.comic, e2.comic) = (c, c)
    e1.save()
    e2.save()
    return c
"""Cool trick: auto-create user profiles when they are accessed.
from: http://www.djangorocks.com/hints-and-tips/automatically-create-a-django-profile.html"""
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
|
This is my new command.
|
# Generated by Django 3.0.4 on 2020-03-12 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters `created_at` on the `wplyw` model.
    dependencies = [
        ('main', '0004_auto_20200312_1829'),
    ]
    operations = [
        migrations.AlterField(
            model_name='wplyw',
            name='created_at',
            # NOTE(review): auto_now=True refreshes the timestamp on *every*
            # save; for a creation timestamp auto_now_add=True is usually
            # intended — confirm this is deliberate.
            field=models.DateTimeField(auto_now=True, null=True),
        ),
    ]
|
def cross_product(x, y):
    """Return the dot product of the overlapping prefix of x and y, as a float.

    (Despite the name, this computes a dot product; the name is kept for
    compatibility with existing callers.) Extra elements of the longer
    sequence are ignored.
    """
    return float(sum(a * b for a, b in zip(x, y)))
def norma(x):
    """Return the Euclidean (L2) norm of the sequence x."""
    squared_sum = sum(value * value for value in x)
    return squared_sum ** 0.5
def scalar_product(x, y):
    """Return the product of the Euclidean norms of x and y, as a float."""
    product = norma(x) * norma(y)
    return float(product)
def get_user_vector(user):
    """Return the user's values as a list, in iteration (insertion) order."""
    return [user[key] for key in user]
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
# Used to parse the XML for the VirtualMachineDescriptor.
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from vmutils import VirtualMachineException
import os
import re
################################################################################################################
# Represents an XML description of a VM.
################################################################################################################
class VirtualMachineDescriptor(object):
    """Wraps a libvirt XML domain descriptor and provides helpers to edit it.

    NOTE: this is Python 2 code (print statements, dict.iteritems).
    """
    # The namespace and nodes used for QEMU parameters.
    qemuXmlNs = "http://libvirt.org/schemas/domain/qemu/1.0"
    qemuCmdLineNodeName = "{%s}commandline" % qemuXmlNs
    qemuArgNodeName = "{%s}arg" % qemuXmlNs

    ################################################################################################################
    # Constructor.
    ################################################################################################################
    def __init__(self, xmlDescriptorString):
        # Load the XML root element from the XML descriptor string.
        self.xmlRoot = ElementTree.fromstring(xmlDescriptorString)

    ################################################################################################################
    # Checks whether a new VM name will fit in the raw XML header.
    ################################################################################################################
    @staticmethod
    def does_name_fit(xml_string, new_name):
        # Any new data must not be bigger than the previous one, or it won't fit in the raw header.
        new_name_will_fit = False
        original_name = VirtualMachineDescriptor.get_raw_name(xml_string)
        if original_name:
            new_name_will_fit = len(new_name) <= len(original_name)
        return new_name_will_fit

    ################################################################################################################
    # Gets the name from a raw xml descriptor string.
    ################################################################################################################
    @staticmethod
    def get_raw_name(xml_string):
        name = None
        matches = re.search(r"<name>([\w\-]+)</name>", xml_string)
        if matches:
            name = matches.group(1)
        return name

    ################################################################################################################
    # Updates the name and id of an xml by simply replacing the text, without parsing, to ensure the result will
    # have exactly the same length as before.
    ################################################################################################################
    @staticmethod
    def update_raw_name_and_id(saved_xml_string, uuid, name):
        updated_xml = re.sub(r"<uuid>[\w\-]+</uuid>", "<uuid>%s</uuid>" % uuid, saved_xml_string)
        updated_xml = re.sub(r"<name>[\w\-]+</name>", "<name>%s</name>" % name, updated_xml)
        return updated_xml

    ################################################################################################################
    # Returns an XML string with the contents of this VMDescriptor
    ################################################################################################################
    def getAsString(self):
        xmlString = ElementTree.tostring(self.xmlRoot)
        return xmlString

    ################################################################################################################
    # Returns the port the VNC server is listening on, if any.
    ################################################################################################################
    def getVNCPort(self):
        vnc_node = self.xmlRoot.find("devices/graphics[@type='vnc']")
        if vnc_node is not None:
            vnc_port = vnc_node.get("port")
            return vnc_port
        else:
            raise VirtualMachineException("VNC not set up for this VM.")

    ################################################################################################################
    # Sets the realtek network driver instead of the default virtio one. Needed for Windows-based VMs that do
    # not have the virtio driver installed (which does come installed in Linux distributions).
    ################################################################################################################
    def setRealtekNetworkDriver(self):
        # Get the devices node
        devices = self.xmlRoot.find('devices')
        # We assume the VM has exactly 1 network interface.
        network_card = devices.find("interface")
        model = network_card.find("model")
        model.set("type", "rtl8139")

    ################################################################################################################
    # Will enable bridged mode in the XML.
    ################################################################################################################
    def enableBridgedMode(self, adapter):
        # Get the devices node
        devices = self.xmlRoot.find('devices')
        # Find the network card, change its type to bridge.
        # We assume the VM has exactly 1 network interface.
        network_card = devices.find("interface")
        network_card.set("type", "bridge")
        # Update or add the source element, needed for bridged mode.
        network_card_source = network_card.find("source")
        if network_card_source is not None:
            network_card_source.set("bridge", adapter)
        else:
            network_card.append(ElementTree.fromstring('<source bridge="%s"/>' % adapter))

    ################################################################################################################
    # Will enable the non-bridged mode in the XML.
    ################################################################################################################
    def enableNonBridgedMode(self, adapter):
        # Get the devices node
        devices = self.xmlRoot.find('devices')
        # Find the network card, change its type to ethernet.
        # We assume the VM has exactly 1 network interface.
        network_card = devices.find("interface")
        network_card.set("type", "user")
        network_card.set("name", adapter)
        # Bridged-mode leftovers: drop the <source> element if present.
        network_card_source = network_card.find("source")
        if network_card_source is not None:
            network_card.remove(network_card_source)

    ################################################################################################################
    # Sets the mac address to the given value.
    # We assume the VM has exactly 1 network interface.
    ################################################################################################################
    def setMACAddress(self, mac_address):
        # Get the network card.
        network_card = self.xmlRoot.find('devices/interface')
        # Update or add the mac element.
        mac_element = network_card.find("mac")
        if mac_element is not None:
            mac_element.set("address", mac_address)
        else:
            network_card.append(ElementTree.fromstring('<mac address="%s"/>' % mac_address))

    ################################################################################################################
    # Ensures that VNC is enabled and accessible remotely.
    ################################################################################################################
    def enableRemoteVNC(self):
        self.enableVNC("0.0.0.0")

    ################################################################################################################
    # Ensures that VNC is enabled and accessible locally only.
    ################################################################################################################
    def enableLocalVNC(self):
        self.enableVNC("127.0.0.1")

    ################################################################################################################
    # Ensures VNC is enabled.
    ################################################################################################################
    def enableVNC(self, listening_address):
        vnc_graphics = self.xmlRoot.find("devices/graphics[@type='vnc']")
        if vnc_graphics is None:
            # No VNC element yet: add one with autoport so libvirt picks a port.
            devices_node = self.xmlRoot.find("devices")
            devices_node.append(ElementTree.fromstring('<graphics type="vnc" port="-1" autoport="yes" keymap="en-us" listen="' + listening_address + '"/>'))
        else:
            vnc_graphics.set("listen", listening_address)
            # Keep any nested <listen type="address"> element in sync.
            vnc_address = self.xmlRoot.find("devices/graphics/listen[@type='address']")
            if vnc_address is not None:
                vnc_address.set("address", listening_address)

    ################################################################################################################
    # Disables VNC access.
    ################################################################################################################
    def disableVNC(self):
        vnc_node = self.xmlRoot.find("devices/graphics[@type='vnc']")
        if vnc_node is not None:
            print 'Disabling VNC access.'
            devices_node = self.xmlRoot.find("devices")
            devices_node.remove(vnc_node)

    ################################################################################################################
    # Removes the security label.
    ################################################################################################################
    def removeSecLabel(self):
        sec_label = self.xmlRoot.find('seclabel')
        if sec_label is not None:
            print 'Removing security label.'
            self.xmlRoot.remove(sec_label)

    ################################################################################################################
    # Sets the path to the main disk image.
    ################################################################################################################
    def setDiskImage(self, newDiskImagePath, newDiskType):
        # Find the first disk in the description.
        diskElements = self.xmlRoot.findall('devices/disk')
        mainDiskImageNode = None
        mainDiskDriverNode = None
        for diskElement in diskElements:
            diskType = diskElement.attrib['device']
            if diskType == 'disk':
                mainDiskImageNode = diskElement.find('source')
                mainDiskDriverNode = diskElement.find('driver')
                break
        # Check if we found a disk.
        # NOTE(review): "is None" is the idiomatic comparison here.
        if mainDiskImageNode == None or mainDiskDriverNode == None:
            raise VirtualMachineException("No disk found in XML descriptor.")
        # Set the path to the new disk image.
        mainDiskImageNode.set("file", os.path.abspath(newDiskImagePath))
        mainDiskDriverNode.set("type", newDiskType)

    ################################################################################################################
    # Sets the VM name.
    ################################################################################################################
    def setName(self, newName):
        nameElement = self.xmlRoot.find('name')
        if nameElement is None:
            raise VirtualMachineException("No name node found in XML descriptor.")
        nameElement.text = newName

    ################################################################################################################
    # Sets the VM id.
    ################################################################################################################
    def setUuid(self, newUUID):
        uuidElement = self.xmlRoot.find('uuid')
        if uuidElement is None:
            raise VirtualMachineException("No UUID node found in XML descriptor.")
        uuidElement.text = newUUID

    ################################################################################################################
    # Gets the VM id.
    ################################################################################################################
    def getUuid(self):
        uuidElement = self.xmlRoot.find('uuid')
        if uuidElement is None:
            raise VirtualMachineException("No UUID node found in XML descriptor.")
        return str(uuidElement.text)

    ################################################################################################################
    # Sets port redirection commands for qemu.
    ################################################################################################################
    def setPortRedirection(self, portMappings):
        # Get the node with qemu-related arguments.
        qemuElement = self.xmlRoot.find(self.qemuCmdLineNodeName)
        # If the node was not there, add it.
        if qemuElement == None:
            qemuElement = Element(self.qemuCmdLineNodeName)
            self.xmlRoot.append(qemuElement)
        # Values for redirect arguments.
        portRedirectionCommand = '-redir'
        # First we will remove all redirections that contain either the host or guest port.
        qemuArgumentElements = qemuElement.findall(self.qemuArgNodeName)
        lastRedirElement = None
        for qemuArgument in qemuArgumentElements:
            # Get the actual value to check.
            qemuArgumentValue = qemuArgument.get('value')
            # Store "redir" commands since, if we have to remove a redirection, we also have to remove this previous node.
            if(portRedirectionCommand in qemuArgumentValue):
                lastRedirElement = qemuArgument
                continue
            # We will assume that only redirections will have the :%d::%d format. If we find any argument
            # with this format and the host or guest ports redirected, we will remove it, along with
            # the previous redir command argument.
            #if(':%d::' % int(hostPort) in qemuArgumentValue) or ('::%d' % int(guestPort) in qemuArgumentValue):
            # We will assume that only redirection arguments have "tcp:" in them, and we will remove them all.
            if('tcp:' in qemuArgumentValue):
                qemuElement.remove(lastRedirElement)
                qemuElement.remove(qemuArgument)
            if('-usb' in qemuArgumentValue):
                qemuElement.remove(qemuArgument)
        # Now we setup the redirection for all the port mappings that were provided.
        # Each mapping becomes a pair of args: "-redir" followed by "tcp:host::guest".
        for hostPort, guestPort in portMappings.iteritems():
            #break
            portRedirectionValue = 'tcp:%d::%d' % (int(hostPort), int(guestPort))
            qemuElement.append(Element(self.qemuArgNodeName, {'value':portRedirectionCommand}))
            qemuElement.append(Element(self.qemuArgNodeName, {'value':portRedirectionValue}))
        #break
|
# Print the greeting three times; `count` ends at 3, as before.
count = 0
while True:
    count += 1
    print("Hello Tanawin")
    if count >= 3:
        break
# I pledge my honor that I have abided by the Stevens Honor System
# I understand that I may access the course textbook and course lecture notes but I am not to access any other resource.
# I also pledge that I worked alone on this exam.
def python_operations():
    """Interactive menu of simple operations.

    Code 1 offers add/subtract/multiply/divide on two integers; code 2
    offers vowel counting or letter-group "encryption" of a string.
    Raises ValueError for menu codes outside the advertised range.
    All prompts and outputs are unchanged from the original version.
    """
    i = int(input("Please enter '1' for mathematical functions and '2' for string operations: "))
    if i != 1 and i != 2:
        raise ValueError(f"Invalid code {i}. Must be 1 or 2")
    if i == 1:
        print("Mathematical Operations")
        j = int(input("Enter '1' for Addition, '2' for Subtraction, '3' for Multiplication, and '4' for Division: "))
        if j != 1 and j != 2 and j != 3 and j != 4:
            raise ValueError(f"Invalid code {j}. Must be a number from 1 to 4")
        # The first prompt is identical for every operation, so ask once.
        k = int(input("You will now enter two numbers. Enter the first: "))
        if j == 1:
            l = int(input("Enter the second. This will be added to the first: "))
            print("The sum is " + str(k + l))
        elif j == 2:
            l = int(input("Enter the second. This number will be subtracted from the first: "))
            print("The result is " + str(k - l))
        elif j == 3:
            l = int(input("Enter the second. This will be multiplied by the first: "))
            print("The product is " + str(k * l))
        else:
            l = int(input("Enter the second. The first will be divided by this number: "))
            # Division by zero still propagates as ZeroDivisionError, as before.
            print("The quotient is " + str(k / l))
    else:
        print("String Operations")
        m = int(input("Enter '1' to determine the number of vowels and '2' to encrypt a string: "))
        if m != 1 and m != 2:
            raise ValueError(f"Invalid code {m}. Must be 1 or 2")
        if m == 1:
            vowels = "AEIOUaeiou"
            n = input("Enter a string of letters, and it will return the number of vowels: ")
            number = sum(1 for ch in n if ch in vowels)
            print("The number of vowels in this string is " + str(number))
        else:
            # Letters map to group numbers 1-4; non-letters are skipped.
            groups = {
                1: "abcdef",
                2: "ghijkl",
                3: "mnopqrs",
                4: "tuvwxyz",
            }
            p = input("Enter a string of lowercase letters, and it will encrypt it for you: ")
            code = []
            for ch in p:
                for digit, letters in groups.items():
                    if ch in letters:
                        code.append(digit)
                        break
            print()
            print("The encrypted code, in a list, is: ")
            print(code)


if __name__ == "__main__":
    # Previously the menu ran unconditionally on import; guard it.
    python_operations()
#Practice questions Week 4
# Print to canvas
###################################################
# Student should add code where relevant to the following.
import simplegui

# Draw handler: paint the message near the centre of the canvas.
def draw(canvas):
    position = [120, 112]
    canvas.draw_text("It works!", position, 48, "Red")

# Create the frame, register the draw handler, then start it.
frame = simplegui.create_frame("It works", 400, 200)
frame.set_draw_handler(draw)
frame.start()
###################################################
# Display "This is easy?"
# Student should add code where relevant to the following.
import simplegui

# Draw handler: write the prompt in the top-left corner.
def draw(canvas):
    message = 'This is easy?'
    canvas.draw_text(message, (20, 20), 12, 'Red')

# Build the frame and wire up the handler.
frame = simplegui.create_frame("This is easy", 400, 200)
frame.set_draw_handler(draw)
# Start the frame animation
frame.start()
###################################################
# Display an X
# Student should add code where relevant to the following.
import simplegui

# Draw handler: a single large 'X' on a tiny canvas.
def draw_handler(canvas):
    canvas.draw_text('X', (5, 40), 48, 'Red')

# Create the 96x96 test frame and start it.
frame = simplegui.create_frame('Testing', 96, 96)
frame.set_draw_handler(draw_handler)
frame.start()
# Start the frame animation
###################################################
# Define a function that returns formatted minutes and seconds
# Time formatting function
# Student should enter function on the next lines.
def format_time(tot_seconds):
    """Format a duration in seconds as 'M minutes and S seconds'."""
    minutes, seconds = divmod(tot_seconds, 60)
    return '{} minutes and {} seconds'.format(minutes, seconds)
# Tests
# Fix: the originals used the Python-2-only `print expr` statement form.
# Parenthesised print() produces identical output under Python 2 for a
# single argument, and also runs under Python 3.
print(format_time(23))
print(format_time(1237))
print(format_time(0))
print(format_time(1860))
# Output to console
#0 minutes and 23 seconds
#20 minutes and 37 seconds
#0 minutes and 0 seconds
#31 minutes and 0 seconds
###################################################
# Move a ball
# Student should add code where relevant to the following.
import simplegui

# Define globals - Constants are capitalized in Python
HEIGHT = 400
WIDTH = 400
RADIUS_INCREMENT = 5
ball_radius = 20

# Draw handler: a green circle centred in the canvas.
def draw_handler(canvas):
    centre = (HEIGHT/2, WIDTH/2)
    canvas.draw_circle(centre, ball_radius, 5, 'Green')

# Button callback: grow the ball by one increment.
def increase_radius():
    global ball_radius
    ball_radius = ball_radius + RADIUS_INCREMENT

# Button callback: shrink the ball, but never to zero or below.
def decrease_radius():
    global ball_radius
    if ball_radius - RADIUS_INCREMENT > 0:
        ball_radius = ball_radius - RADIUS_INCREMENT

# Create frame and assign callbacks to event handlers
frame = simplegui.create_frame("Ball control", WIDTH, HEIGHT)
frame.set_draw_handler(draw_handler)
frame.add_button("Increase radius", increase_radius)
frame.add_button("Decrease radius", decrease_radius)

# Start the frame animation
frame.start()
|
# Test of fcsreader.py
# (cc) 2017 Ali Rassolie
# Formagna
# An illustration of how to access the arguments.
# This will save what the user has entered, which is worth understanding.
# That is, when the user runs e.g. `python 1.py -a hello world` (because -a
# accepts two arguments), the values are stored in a Namespace object,
# it seems, where each argument name (without the dash) is bound to the
# values the user entered, as strings!
# We should look on SSC-A on FSC-A, then FSC-H on FSC-W, then
# APC-A on SSC-A, count on APC-A, then APC-A on SSC-A
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from pandas import DataFrame as df
from pandas import concat, cut
from fcsreader import fcsReader
from subprocess import call
from math import log
from mpld3 import plugins, utils
from matplotlib import use
from bs4 import BeautifulSoup as bs
from plotly.graph_objs import Scatter, Layout
import numpy as np
import plotly as ply
import matplotlib.pyplot as plt, mpld3
import seaborn
import os
__version__ = "0.2"
class Analyze:
    """Read flow-cytometry (.fcs) files and provide clustering/plotting helpers.

    The config file is parsed as flat ``KEY: value`` lines (not real YAML);
    ``PARENT`` must point at the directory tree containing the .fcs files.
    """

    def __init__(self, config="config/config.yaml", pos=0, name=None, *args, **kwargs):
        self.pos = pos      # index into the discovered file list when no name is given
        self.name = name    # optional substring used to select a specific file
        print("[GLOBAL] Starting the analysis")
        print("[GLOBAL] Opening config")
        with open(config, "r") as file:
            self.config = {i.split(": ")[0]: i.split(": ")[1].replace("\n", "") for i in file.readlines()}
        self.path = self.config["PARENT"]
        print(f"[GLOBAL] The parent-path: {self.path}")

    def read(self, file=None, **kwargs):
        """Parse *file* (default: the file selected by pos/name) into self.meta / self.dataset."""
        if not file:
            self.__files()
            file = self.file
        process = fcsReader(file)
        self.meta = process.meta
        self.dataset = process.data

    def __files(self, top=None, delimiter="\\", **kwargs):
        """Walk *top* (default self.path) collecting .fcs paths into self.files/self.names."""
        if not top: top = self.path
        self.files = [f"{i[0]}{delimiter}{k}" for i in os.walk(top) for k in i[-1] if k.endswith(".fcs")]
        if self.name: self.file = [i for i in self.files if self.name in i][0]
        else:
            self.file = self.files[self.pos]
        self.names = [f"{i.split(delimiter)[-2]}_{i.split(delimiter)[-1]}" for i in self.files]

    ########################
    ### Analysis methods ###
    ########################

    def kmeans(self, dataset=None, nclusters=2, logx=False, logy=False, limit_dataset=None, transpose=False, channels=None, **kwargs):
        """KMeans-cluster the events and scatter-plot two channels coloured by cluster.

        channels: required two-element sequence of channel names.
        limit_dataset: optional column subset used for the clustering itself.
        """
        print("[KMeans] Running KMeans clustering")
        if dataset is None: dataset = self.dataset
        if channels is None:
            # Previously fell through to an UnboundLocalError on x/y below.
            raise ValueError("kmeans requires a two-element 'channels' sequence")
        if limit_dataset: pr_dataset = df(dataset, index=dataset.index, columns=limit_dataset)
        else: pr_dataset = df(dataset, index=dataset.index)
        # Necessary for seaborn, providing the number of clusters to color
        predict = KMeans(n_clusters=nclusters, **kwargs).fit(pr_dataset)
        predicted = df(predict.predict(pr_dataset), columns=["mapping"])
        dataset = concat([dataset, predicted], axis=1)
        if not transpose: x, y = channels[0], channels[1]
        else: x, y = channels[1], channels[0]
        print(f"[KMeans] x: {x}, y: {y}")
        if logx is True: dataset[x] = dataset[x].apply(func=lambda x: self.log(x))
        if logy is True: dataset[y] = dataset[y].apply(func=lambda y: self.log(y))
        # Using seaborn so as to color map the scatter plot
        order = [i for i in range(nclusters)]
        fg = seaborn.FacetGrid(data=dataset, hue="mapping", hue_order=order, aspect=1.61)
        fg.map(plt.scatter, x, y).add_legend()

    def histo(self, dataset=None, channels=None):
        """Placeholder: resolves defaults but draws nothing yet."""
        if dataset is None: dataset = self.dataset
        # NOTE(review): self.channels is never assigned anywhere in this class,
        # so the default branch below raises AttributeError — confirm intent.
        if channels is None: channels = self.channels

    def log(self, i):
        """Natural log for positive values; None otherwise (keeps plots finite)."""
        if i > 0: return log(i)
        else: return None

    def plot(self, dataset=None, xfunc=None, yfunc=None, transpose=False, save=False, threeD=False, **kwargs):
        """2-D plot via DataFrame.plot; kwargs must include 'x' and 'y' column names."""
        # Fix: `if not dataset` raises ValueError for DataFrames (ambiguous
        # truth value); compare against None explicitly.
        if dataset is None: dataset = self.dataset
        if transpose:
            kwargs["x"], kwargs["y"] = kwargs["y"], kwargs["x"]
        if xfunc: dataset[kwargs["x"]] = dataset[kwargs["x"]].apply(func=lambda x: xfunc(x))
        if yfunc: dataset[kwargs["y"]] = dataset[kwargs["y"]].apply(func=lambda y: yfunc(y))
        # Fix: plot the (possibly transformed) dataset that was passed in,
        # not unconditionally self.dataset.
        dataset.plot(**kwargs)
        if not save: plt.show()

    def plot_3d(self, dataset=None, xfunc=None, yfunc=None, zfunc=None, save=False, threeD=False, kind="scatter", transpose=False, **kwargs):
        """3-D scatter of the columns named by kwargs 'x', 'y' and 'z'."""
        if dataset is None: dataset = self.dataset
        if transpose:
            kwargs["x"], kwargs["y"] = kwargs["y"], kwargs["x"]
        if xfunc: dataset[kwargs["x"]] = dataset[kwargs["x"]].apply(func=lambda x: xfunc(x))
        if yfunc: dataset[kwargs["y"]] = dataset[kwargs["y"]].apply(func=lambda y: yfunc(y))
        # Fix: the z column was gated on and transformed by *yfunc*; use zfunc.
        if zfunc: dataset[kwargs["z"]] = dataset[kwargs["z"]].apply(func=lambda z: zfunc(z))
        threedee = plt.figure().gca(projection="3d")
        if kind == "scatter": threedee.scatter(dataset[kwargs["x"]], dataset[kwargs["y"]], dataset[kwargs["z"]])
        threedee.set_xlabel(kwargs["x"])
        threedee.set_ylabel(kwargs["y"])
        threedee.set_zlabel(kwargs["z"])
        if not save: plt.show()

    def freq(self, column, dataset=None, scope=64, func=None, *args, **kwargs):
        """Histogram counts of *column* over *scope* equal-width bins."""
        print("[FREQ] Running frequency method")
        if dataset is None: dataset = self.dataset
        if func: dataset[column] = dataset[column].apply(func=lambda x: func(x))
        _min, _max = dataset[column].min(), dataset[column].max()
        res = (_max - _min)/scope
        frequency = dataset[column].groupby(cut(dataset[column], np.arange(_min, _max, res))).count()
        return frequency

    def saveplots(self, func=None, folder=None, rdata=False, delimiter="\\", description=None, log_overwrite=True, logfile="log.txt", *args, **kwargs):
        """Render one plot per discovered .fcs file and save each as a PNG under *folder*.

        NOTE(review): relies on self.files being populated by an earlier
        read()/__files() call — confirm call order.
        """
        if not folder: folder = self.config["OUTPUT"]
        use("Agg")
        for pos, file in enumerate(self.files):
            self.read(file=file)
            name = f"{folder}{self.names[pos].replace('.fcs','')}{description}{pos}.png"
            if not func:
                # Fix: `dataset` was an undefined name here (NameError);
                # plot the dataset just loaded by self.read above.
                self.dataset.plot(**kwargs)
            elif func and not rdata:
                func(*args, **kwargs)
            elif func and rdata:
                data = func(*args, **kwargs)
                data.plot()
            plt.savefig(name)
            plt.close()
            if log_overwrite and pos == 0:
                # Truncate the log file on the first iteration.
                with open(f"{folder}{logfile}", "w"):
                    pass
            # self.logger(file=name, logfile=f"{folder}{logfile}")

    def limiter(self, channels, dataset=None, xmax=None, xmin=None, ymax=None, ymin=None, nclusters=2, save=False, **kwargs):
        """Gate events by per-channel min/max thresholds and plot coloured by the gate."""
        print("[LIMITER] Running the limiter")
        if dataset is None: dataset = self.dataset
        x = channels[0]
        y = channels[1]
        # Small helper lambdas; the overhead is negligible here.
        upper_limit = lambda name, _max: dataset[name].apply(lambda k: 1 if k <= _max else 0)
        lower_limit = lambda name, _min: dataset[name].apply(lambda k: 1 if k >= _min else 0)
        # NOTE(review): each condition overwrites "mapping", so only the last
        # supplied limit takes effect — confirm whether limits should combine.
        if xmax: dataset["mapping"] = upper_limit(x, xmax)
        if ymax: dataset["mapping"] = upper_limit(y, ymax)
        if xmin: dataset["mapping"] = lower_limit(x, xmin)
        if ymin: dataset["mapping"] = lower_limit(y, ymin)
        order = [i for i in range(nclusters)]
        fg = seaborn.FacetGrid(data=dataset, hue="mapping", hue_order=order, aspect=1.61)
        fg.map(plt.scatter, x, y).add_legend()
        if not save: plt.show()

    def plot_map(self, x=None, y=None, dataset=None, xfunc=None, yfunc=None, transpose=False, save=False, threeD=False, nclusters=2, **kwargs):
        """Scatter-plot *x* vs *y* coloured by an existing 'mapping' column."""
        print("[MAPPER] Running the map plotter")
        if dataset is None: dataset = self.dataset
        order = [i for i in range(nclusters)]
        fg = seaborn.FacetGrid(data=dataset, hue="mapping", hue_order=order, aspect=1.61)
        fg.map(plt.scatter, x, y).add_legend()
        if not save: plt.show()

    def logger(self, file, clusters=2, dataset=None, map="mapper", logfile="log.txt", state="a", folder=None, **kwargs):
        """Append the debris/total ratio of the 'mapping' column to *logfile*."""
        if dataset is None: dataset = self.dataset
        data = list(dataset["mapping"])
        first = 0
        tot = len(data)
        for i in data:
            if i == 1: first += 1
        ratio = f"{(first/tot)*100}%"
        to_write = f"**************\nSample: {file}\n debris/total: {ratio}\n"
        # Fix: the handle was bound to `file`, shadowing the parameter above.
        with open(logfile, state) as fh:
            fh.write(to_write)

    def gen_html(self, dataset=None, channels=["FSC-A", "SSC-A"]):
        """Write an interactive mpld3 scatter plot to the_figure.html (+ the_figure.js)."""
        if dataset is None: dataset = self.dataset
        data = [dataset[i].values for i in channels]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot = ax.scatter(data[0], data[1])
        plugins.clear(fig)
        plugins.connect(fig, plugins.LinkedBrush(plot), plugins.ClickSendToBack(plot))
        the_html = mpld3.fig_to_html(fig)
        with open("initialfigure.html", "w") as file:
            file.write(the_html)
        o = bs(open("initialfigure.html"), "html.parser")
        script = str(o.find_all("script")[0])
        script_2 = script.replace("<script>", "").replace("</script>", "")
        with open("the_figure.js", "w") as file:
            file.write(script_2)
        with open("the_figure.html", "w") as file:
            # Replace the inline script with a reference to the extracted .js file.
            the_html = the_html.replace(script, "<script src='.\\the_figure.js'></script>")
            file.write(the_html)

    def gen_html_ply(self, dataset=None, channels=["FSC-A", "SSC-A"]):
        """Open an interactive plotly scatter of two channels in the browser."""
        # NOTE(review): the mutable default is only read, never mutated, so it
        # is safe, but a None default would be more conventional.
        if dataset is None: dataset = self.dataset
        data = [dataset[i].values for i in channels]
        # Note that u should be looking for the zoomlayer class, to get the box selection
        ply.offline.plot({"data": [Scatter(x=data[0], y=data[1], mode="markers")]}, )
def _log(i):
if i > 0: return log(i)
else: return None
if __name__ == '__main__':
    # Select a non-interactive matplotlib backend before any figure is created.
    use("Agg")
    run = Analyze()
    # NOTE(review): hard-coded absolute Windows path to a sample .fcs file —
    # only valid on the original author's machine.
    run.read(file="C:\\Users\\Ali Rassolie\\Desktop\\Emb_data\\exporteddebrisembla\\160420_O8-289\\72307.fcs")
    run.gen_html_ply()
from model.assistance.assistanceDao import AssistanceDAO
from model.assistance.justifications.justifications import Justification
from model.assistance.justifications.status import Status
from model.assistance.justifications.status import StatusDAO
from model.assistance.justifications.justifications import SingleDateJustification
from model.assistance.utils import Utils
from model.users.users import UserDAO
import datetime
import uuid
class InformedAbsenceJustificationDAO(AssistanceDAO):
    """DAO for 'absence with notice' justifications, backed by the
    assistance.justification_informed_absence table."""

    # DAOs whose schemas must exist before this one is created.
    dependencies = [UserDAO, StatusDAO]

    @classmethod
    def _createSchema(cls, con):
        """Create the schema and table if they do not exist (idempotent)."""
        super()._createSchema(con)
        cur = con.cursor()
        try:
            sql = """
              CREATE SCHEMA IF NOT EXISTS assistance;
              create table IF NOT EXISTS assistance.justification_informed_absence (
                  id varchar primary key,
                  user_id varchar not null references profile.users (id),
                  owner_id varchar not null references profile.users (id),
                  jdate date default now(),
                  notes varchar,
                  created timestamptz default now()
              );
            """
            cur.execute(sql)
        finally:
            cur.close()

    @classmethod
    def _fromResult(cls, con, r):
        """Map a DB row to an InformedAbsenceJustification, attaching its latest status."""
        j = InformedAbsenceJustification()
        j.id = r['id']
        j.userId = r['user_id']
        j.ownerId = r['owner_id']
        j.date = r['jdate']
        j.notes = r['notes']
        j.setStatus(Status.getLastStatus(con, j.id))
        return j

    @classmethod
    def persist(cls, con, j):
        """Insert or update justification *j*; a uuid id is generated on first
        insert. Returns the justification id."""
        assert con is not None
        assert j is not None
        cur = con.cursor()
        try:
            if not hasattr(j, 'id') or j.id is None:
                j.id = str(uuid.uuid4())
            # Insert when the id is not yet stored, otherwise update in place.
            if len(j.findById(con, [j.id])) <= 0:
                r = j.__dict__
                cur.execute('insert into assistance.justification_informed_absence(id, user_id, owner_id, jdate, notes) '
                            'values (%(id)s, %(userId)s, %(ownerId)s, %(date)s, %(notes)s)', r)
            else:
                r = j.__dict__
                cur.execute('update assistance.justification_informed_absence set user_id = %(userId)s, owner_id = %(ownerId)s, '
                            'jdate = %(date)s, notes = %(notes)s where id = %(id)s', r)
            return j.id
        finally:
            cur.close()

    @classmethod
    def findById(cls, con, ids):
        """Fetch the justifications whose id is in the list *ids*."""
        assert isinstance(ids, list)
        cur = con.cursor()
        try:
            cur.execute('select * from assistance.justification_informed_absence where id in %s', (tuple(ids),))
            return [ cls._fromResult(con, r) for r in cur ]
        finally:
            cur.close()

    @classmethod
    def findByUserId(cls, con, userIds, start, end):
        """Fetch justifications for *userIds* with jdate in [start, end].

        NOTE(review): the eDate fallback suggests end=None should be allowed,
        but the isinstance assert above it rejects None — confirm intent.
        NOTE(review): returns None for an empty userIds list while other
        paths return a list — confirm callers cope with that.
        """
        assert isinstance(userIds, list)
        assert isinstance(start, datetime.date)
        assert isinstance(end, datetime.date)
        if len(userIds) <= 0:
            return
        cur = con.cursor()
        try:
            eDate = datetime.date.today() if end is None else end
            cur.execute('select * from assistance.justification_informed_absence where user_id in %s and jdate BETWEEN %s AND %s', (tuple(userIds), start, eDate))
            return [ cls._fromResult(con, r) for r in cur ]
        finally:
            cur.close()
class InformedAbsenceJustification(SingleDateJustification):
    """Single-date justification 'Ausente con aviso' (absent with notice).

    Business rules enforced on creation: at most 6 per calendar year and
    at most 2 per month.
    """
    dao = InformedAbsenceJustificationDAO

    def __init__(self, date = None, userId = None, ownerId = None):
        super().__init__(date, userId, ownerId)
        self.identifier = "Ausente con aviso"
        # NOTE(review): classType is set to the *parent* class name; confirm
        # this should not be InformedAbsenceJustification.__name__.
        self.classType = SingleDateJustification.__name__

    def getIdentifier(self):
        """Return the human-readable identifier of this justification type."""
        return self.identifier

    @classmethod
    def create(cls, con, date, userId, ownerId):
        """Create a justification after enforcing the yearly/monthly limits."""
        cls._checkConstraints(con, date, userId)
        return super().create(con, date, userId, ownerId)

    @classmethod
    def _getYearJustifications(cls, con, date, userId):
        """All of the user's justifications in date's calendar year whose
        status is 1 or 2 (presumably pending/approved — confirm)."""
        yearStart = Utils._cloneDate(date).replace(month=1,day=1)
        yearEnd = Utils._cloneDate(date).replace(month=12,day=31)
        justs = cls.dao.findByUserId(con, [userId], yearStart, yearEnd)
        return [j for j in justs if j.getStatus().status == 1 or j.getStatus().status == 2]

    @classmethod
    def _getMonthJustifications(cls, date, justs):
        """Filter *justs* down to those in date's month (status 1 or 2)."""
        actualMonth = date.month
        jMonth = [j for j in justs if j.getDate().month == actualMonth and (j.getStatus().status == 1 or j.getStatus().status == 2)]
        return jMonth

    @classmethod
    def _checkConstraints(cls, con, date, userId):
        """
        Enforced limits:
          6 per year
          2 per month
        """
        justs = cls._getYearJustifications(con, date, userId)
        if len(justs) >= 6:
            raise Exception('Límite anual alcanzado')
        if len(cls._getMonthJustifications(date, justs)) >= 2:
            raise Exception('Límite mensual alcanzado')

    @classmethod
    def getData(cls, con, userId, date, schedule):
        """Extend the base data with the remaining yearly (yStock) and
        monthly (mStock) quotas."""
        data = super().getData(con, userId, date, schedule)
        justs = cls._getYearJustifications(con, date, userId)
        data['yStock'] = 6 - len(justs)
        data['mStock'] = 2 - len(cls._getMonthJustifications(date, justs))
        return data
|
import h5py
import numpy as np
import sys
# Probe for the AUG-specific equilibrium libraries, which exist only on the
# IPP cluster; AVAILABLE records whether they could be imported.
try:
    sys.path.append('/afs/ipp/aug/ads-diags/common/python/lib')
    from sf2equ_20200525 import EQU
    import mapeq_20200507 as meq
    AVAILABLE = True
except Exception:
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers ImportError and path errors.
    AVAILABLE = False
def isAvailable():
    """
    Returns ``True`` if this module can be used to fetch equilibrium data
    on this system.
    """
    # Reading a module-level name requires no 'global' declaration.
    return AVAILABLE
def getLUKE(shot, time, npsi=80, ntheta=80, filename=None):
    """
    Returns magnetic equilibrium data for the given time of the specified
    AUG shot. If ``filename`` is provided, the data is also saved to the
    named LUKE equilibrium data file.
    The shape of the returned 2D arrays are (ntheta, npsi).
    :param shot: ASDEX Upgrade shot to fetch equilibrium data for.
    :param time: Time to fetch equilibrium data for.
    :param npsi: Number of radial (normalized poloidal flux) grid points.
    :param ntheta: Number of poloidal angle grid points.
    :param filename: Name of file to store data in.
    """
    equ = EQU(shot)
    # Radial grid (in normalized poloidal flux); the axis point rho=0 is dropped.
    rhop = np.linspace(0, 1, npsi+1)[1:]
    # Poloidal angle
    theta = np.linspace(0, 2*np.pi, ntheta)
    # Flux surface (R, Z) coordinates
    R, Z = meq.rhoTheta2rz(equ, rhop, theta, t_in=time, coord_in='rho_pol')
    # Keep the single requested time slice (leading axis is time).
    R = R[0,:]
    Z = Z[0,:]
    # Poloidal flux psi
    psi = meq.rho2rho(equ, rhop, t_in=time, coord_in='rho_pol', coord_out='Psi')[0,:]
    # Calculate aspect ratio and normalize poloidal flux
    tidx = meq.get_nearest_index(equ.time, [time])[0][0]
    Rp = equ.Rmag[tidx]  # magnetic-axis major radius
    Zp = equ.Zmag[tidx]  # magnetic-axis height
    # assumes R is (ntheta, npsi) so R[0,-1] is the edge at theta[0] — TODO confirm
    a = R[0,-1]-Rp
    ieps = Rp / a        # aspect ratio Rp/a (inverse of epsilon = a/Rp)
    psi_apRp = psi / ieps  # i.e. psi * a / Rp, the normalization LUKE expects — confirm
    # Magnetic field components evaluated on the flux-surface grid
    Br, Bz, Bphi = meq.rz2brzt(equ, r_in=R.flatten(), z_in=Z.flatten(), t_in=time)
    Br = Br[0,:].reshape(R.shape)
    Bz = Bz[0,:].reshape(R.shape)
    Bphi = Bphi[0,:].reshape(R.shape)
    # Assemble the LUKE equilibrium structure (ptx/pty are axis-centred coords).
    equil = {
        'id': 'ASDEX Upgrade #{} t={:.4f}s'.format(shot, time),
        'Rp': np.array([Rp]), 'Zp': np.array([Zp]),
        'psi_apRp': psi_apRp,
        'theta': theta,
        'ptx': R-Rp, 'pty': Z-Zp,
        'ptBx': Br, 'ptBy': Bz, 'ptBPHI': Bphi
    }
    if filename:
        # Save every field under the 'equil' group of an HDF5 file.
        with h5py.File(filename, 'w') as f:
            f.create_group('equil')
            for key in equil.keys():
                f['equil/{}'.format(key)] = equil[key]
    return equil
def getVolume(shot, time, filename=None):
    """
    Returns the plasma volume enclosed by a given flux surface.

    :param shot: ASDEX Upgrade shot to fetch data for.
    :param time: Time to fetch data for.
    :param filename: Optional .npz file to also save the data to.
    """
    # Fix: `equ` was referenced without ever being created, so this function
    # always raised NameError; open the equilibrium like getLUKE does.
    equ = EQU(shot)
    tidx = meq.get_nearest_index(equ.time, [time])[0][0]
    data = {'psiN': equ.psiN[tidx,:], 'vol': equ.vol[tidx,:]}
    if filename:
        np.savez(filename, **data)
    return data
|
#!thesis/api
from flask import make_response, jsonify
from core.databaseMongo import nodesDB as db, mainDB
from validator import validateNodeRequest as validate, cleanUpNode as clean
from pymongo.errors import OperationFailure
"""
def computeAvailability(resp, nodeId):
alist = [n["_id"] for n in actionsDB.getActions()]
for actionId in alist:
actionsDB.updateAvailability(actionId, nodeId)
"""
def newNode(request):
    """Validate the request and register a new node.

    Returns 400 on validation/DB failure, 406 when the name is taken,
    otherwise 200 with the new node id.
    """
    valid, resp = validate(request)
    if not valid:
        return make_response(jsonify(resp), 400)
    if db.getNode(resp["name"]):
        error = {"error": "Name '" + resp["name"] + "' already in use"}
        return make_response(jsonify(error), 406)
    """
    if resp.pop('setup'):
        user = resp.pop("ssh_user")
        password = resp.pop("ssh_password")
    """
    # TODO RUN SETUP
    resp = clean(resp)  # remove unwanted fields before storing in DB
    try:
        # Fix: local renamed from `id`, which shadowed the builtin.
        node_id = db.insertNode(resp)
    except OperationFailure as e:
        # Fix: `print e` was Python-2-only syntax; print(e) works in both.
        print(e)
        return make_response(jsonify({"error": "First run the mongoDB instance on the target node"}), 400)
    # computeAvailability(resp, node_id)
    return make_response(node_id, 200)
def deleteNode(request, token):
    """Remove the node identified by *token*; 406 if it does not exist."""
    existing = db.getNode(token)
    if not existing:
        return make_response(jsonify({'error': "No node with id " + token}),
                             406)
    msg = db.deleteNode(token)
    return make_response(msg, 200)
def getNodes(request):
    """Return every registered node as a JSON object."""
    return make_response(jsonify({"nodes": db.getNodes()}), 200)
def reset():
    """Reset the Mongo replica set and acknowledge with a plain 200."""
    mainDB.resetReplicaSet()
    return make_response("OK", 200)
|
import bs4
import re
def Capture_Hashtags(text):
    """Strip HTML and links from *text*, then return the hashtag words (without '#')."""
    # Flatten the HTML to plain text and normalise line breaks.
    plain = bs4.BeautifulSoup(text, 'lxml').get_text()
    plain = plain.replace('\n', ' ').replace('\r', '.')
    # Drop http(s) links so their fragments are not mistaken for tags.
    plain = re.sub('https?://[A-Za-z0-9./~]+', '', plain)
    return re.findall(r'#(\w+)', plain)
|
# NOTE(review): django.conf.urls.defaults and string-based view names in
# patterns() belong to legacy Django (removed in 1.6/1.10) — this module
# only works on an old Django release.
from django.conf.urls.defaults import patterns, url

urlpatterns = patterns('test_skryaga.main.views',
    # Request list page.
    url(r'^requests/$', 'request_list', name='request_list'),
    # AJAX endpoint for editing a request's priority.
    url(r'^requests_change/$', 'ajax_request_priority',
        name='request_priority_edit'),
)
|
# -*- coding: utf-8 -*-
"""
Evaluation script for DOA estimation
"""
# interfacing with matlab on my machine required unsetting this env variable
import os
os.unsetenv('MKL_NUM_THREADS')
import argparse
import torch
from model import CRNN, ConvNet, LSTM_FIRST, LSTM_FULL, LSTM_LAST
from doa_math import DoaClasses,to_cartesian,to_class
from doa_stats import ToleranceScore
from graphics import plot_curve
from config import Config,Dropouts
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def compute_tolerance_score(config):
    """Run config.model over the test split and return the accumulated ToleranceScore.

    Handles both classification output (doa_classes set: argmax then map back
    to cartesian vectors) and regression output, with optional per-frame LSTM
    outputs reduced to a single prediction.
    """
    model = config.model
    _,_,test_loader = config.get_loaders()
    doa_classes = config.doa_classes
    use_all_lstm_frames = config.use_all_lstm_frames()
    tolerance_score = ToleranceScore(config.thresholds,doa_classes)
    # NOTE(review): these counters are never updated or read below — dead state?
    CC = CX = XC = XX = 0
    with torch.no_grad():
        total = 0
        for i,(X,Y) in enumerate(test_loader):
            total += len(Y)
            X = X.float().to(device)
            Y = Y.to(device)
            Yhat = model(X)
            if doa_classes:
                if use_all_lstm_frames:
                    # Sum class scores over the frame dimension before argmax.
                    Yhat = torch.sum(Yhat, 2)
                Y = Y[:, 0] # Can take the 0th b/c labels identical for frames
                _, Yhat = torch.max(Yhat, 1)
                # Map predicted class indices back to cartesian unit vectors.
                Yhat = [to_cartesian(x,doa_classes) for x in Yhat]
            else:
                if use_all_lstm_frames:
                    # Average the regression output over the 25 frames.
                    Yhat = torch.sum(Yhat, 1)/25
                    Y = Y[:, 0]
            tolerance_score.update(Yhat,Y)
    return tolerance_score
def plot_SNR_curve(curve):
    """Delegate rendering of an SNR curve to graphics.plot_curve."""
    plot_curve(curve)
def compute_stats(config):
    """Compute and return the tolerance score for *config*.

    Fix: the score used to be computed and then discarded (the function
    returned None). Returning it is backward compatible — callers that
    ignored the old None return are unaffected.
    """
    tolerance_score = compute_tolerance_score(config)
    return tolerance_score
def inference_model(network,lstm_out,out_format,model_path):
    """Build the requested network, load its weights, and return (model, DoaClasses).

    Fix: unknown `network` or `out_format` values previously fell through and
    crashed later with UnboundLocalError; they now raise ValueError up front.
    """
    doa_classes = DoaClasses()
    if out_format == "cartesian":
        out_dim = 3
    elif out_format == "class":
        out_dim = len(doa_classes.classes)
    else:
        raise ValueError(f"Unknown out_format: {out_format}")
    if network == "CNN":
        model = ConvNet(device, Dropouts(0,0,0), out_dim, doa_classes)
    elif network == "CRNN":
        model = CRNN(device, Dropouts(0,0,0), out_dim, doa_classes, lstm_out)
    else:
        raise ValueError(f"Unknown network: {network}")
    model.load_state_dict(torch.load(model_path,map_location=device))
    model.eval()
    model.to(device)
    return model,doa_classes
def test_run(network,lstm_out,out_format,data_dirs,log_dirs,model_path):
    """Evaluate the saved model on each dataset and return a list of ToleranceScores.

    data_dirs / log_dirs are comma-separated strings that are split and
    zipped together pairwise (extra entries on either side are dropped).
    """
    model,doa_classes = inference_model(network, lstm_out, out_format,model_path)
    data_dirs = data_dirs.split(',')
    log_dirs = log_dirs.split(',')
    tolerance_scores = []
    for data_dir,log_dir in zip(data_dirs,log_dirs):
        # One evaluation Config per (data, log) directory pair.
        config = Config(data_folder=data_dir,
                        model=model,
                        doa_classes=doa_classes,
                        lstm_output=lstm_out,
                        results_dir=log_dir,
                        thresholds=[5,10,15],
                        batch_size=32)
        score = compute_tolerance_score(config)
        tolerance_scores.append(score)
    return tolerance_scores
def main():
    """CLI entry point: parse arguments and evaluate the saved DOA model."""
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    parser = argparse.ArgumentParser(prog='evaluate',
        description="""Script to evaluate the DOA estimation system""")
    # NOTE(review): `default=` is meaningless alongside required=True on the
    # arguments below.
    parser.add_argument("--data_dirs", "-d", default="../data", required=True,
        help="Directory where data and labels are", type=str)
    parser.add_argument("--log_dirs", "-log", default=".", required=True,
        help="Path to log results", type=str)
    parser.add_argument("--model_path", "-m", required=True,
        help="Path to saved model")
    parser.add_argument("--network", "-n", required=True,
        choices=["CNN", "CRNN"],
        help="Specify network type", type=str)
    parser.add_argument("--lstm_out", "-lo", default=LSTM_FULL, required=True,
        choices=[LSTM_FULL, LSTM_FIRST, LSTM_LAST],
        help="Choose which LSTM output the model uses", type=str)
    # NOTE(review): choices are "reg"/"class" but inference_model() checks
    # for "cartesian"/"class" — passing "reg" appears unhandled; confirm.
    parser.add_argument("--out_format", "-of", type=str,
        choices=["reg", "class"], required=True,
        help="Choose output format")
    args = parser.parse_args()
    test_run(args.network,args.lstm_out,args.out_format,
        args.data_dirs,args.log_dirs,args.model_path)

if __name__ == "__main__":
    main()
|
def noonerize(numbers):
    """Swap the leading digits of the two numbers in *numbers* and return the
    absolute difference, or 'invalid array' when either item is not an int."""
    first, second = numbers
    if not (isinstance(first, int) and isinstance(second, int)):
        return 'invalid array'
    s_first, s_second = str(first), str(second)
    swapped_first = int(s_second[0] + s_first[1:])
    swapped_second = int(s_first[0] + s_second[1:])
    return abs(swapped_first - swapped_second)
'''
Spoonerize... with numbers... numberize?... numboonerize?... noonerize? ...anyway!
You will create a function which takes an array of two positive integers,
spoonerizes them, and returns the positive difference between them as a
single number or 0 if the numbers are equal:
[123, 456] = 423 - 156 = 267
Your code must test that all array items are numbers and return "invalid array"
if it finds that either item is not a number. The provided array will always contain 2 elements.
When the inputs are valid, they will always be integers, no floats will be passed.
However, you must take into account that the numbers will be of varying magnitude,
between and within test cases
'''
|
"""
Print start, end, elapsed times
"""
import atexit
from time import time, strftime, localtime
from datetime import timedelta
def sec2str(elapsed=None):
    """Format *elapsed* seconds as H:MM:SS; with no argument, return the
    current local time as 'YYYY-mm-dd HH:MM:SS'."""
    if elapsed is not None:
        return str(timedelta(seconds=elapsed))
    return strftime("%Y-%m-%d %H:%M:%S", localtime())
def log(s, elapsed=None):
    """Print *s* between separator lines, stamped with the current time;
    optionally report an elapsed-time string."""
    separator = '=' * 60
    print(separator)
    print(sec2str(), '--', s)
    if elapsed is not None:
        print("Elapsed time:", elapsed)
    print(separator)
    print()
def exitlog(start, func=''):
    """Log the end of the program together with its elapsed run time."""
    elapsed = time() - start
    log("End Program " + func, sec2str(elapsed))
def runtime(func=''):
    """Log program start and register an atexit hook that logs the total
    run time on normal interpreter termination."""
    start = time()
    atexit.register(exitlog, start, func)
    log("Start Program " + func)
def runtimef(func, *args, **kwargs):
    """Call func(*args, **kwargs), logging start and elapsed time around it.

    Raises ValueError when *func* is not callable.
    """
    if not callable(func):
        raise ValueError("'%s' is not a function"%(func))
    start = time()
    log("Start Program " + func.__name__)
    func(*args, **kwargs)
    exitlog(start, func.__name__)
|
import json
import os.path
from .path import relative
CONFIG_FILENAME = 'config.json'
def _get_config_fp():
    """Return a readable file object for the config file, creating it with an
    empty JSON object on first use.

    Fix: the original wrote the file and then called itself recursively;
    straight-line logic avoids the re-entry and the second existence check.
    The returned handle is intentionally left open for the caller to consume.
    """
    path = relative(CONFIG_FILENAME)
    if not os.path.exists(path):
        with open(path, 'w') as f:
            f.write('{}')
    return open(path)
class VVCConfig(dict):
    """Dict-backed config that persists itself to CONFIG_FILENAME on every
    key assignment."""

    def __init__(self):
        # Load the current on-disk config (created on demand).
        super().__init__(
            json.load(_get_config_fp()))

    def save(self):
        """Write the whole mapping back to disk.

        Fix: the file handle from open() was never closed; use a context
        manager so the data is flushed and the descriptor released.
        """
        with open(relative(CONFIG_FILENAME), 'w') as f:
            json.dump(self, f)

    def __setitem__(self, key :str, value :str):
        # Persist immediately so the on-disk file never lags behind.
        super().__setitem__(key, value)
        self.save()
CONFIG = VVCConfig()
|
from django.urls import path
from .views import homePageView, messageView, registerView, userPage

# URL routes for the messaging app.
urlpatterns = [
    path('', homePageView, name='home'),
    # Per-user message list.
    path('users/<str:username>/messages', messageView, name='messages'),
    path('register/', registerView, name='register'),
    # Public profile page for a user.
    path('users/<str:username>', userPage, name='userpage'),
]
import datetime
from django.db import models
from django.db.models import Q
from django.db.models.aggregates import Count
from django.contrib.postgres.search import TrigramSimilarity
class LibroManager(models.Manager):
    """Custom manager with query helpers for the Libro (book) model."""

    def listar_libros(self, kword):
        """Books whose title contains *kword*, dated between 2000 and 2020."""
        resultado = self.filter(
            titulo__icontains=kword,
            fecha__range=('2000-01-01', '2020-12-30')
        )
        return resultado

    def listar_libros2(self, kword, fecha1, fecha2):
        """Title search within [fecha1, fecha2] ('%Y-%m-%d' strings), ordered by date."""
        date1 = datetime.datetime.strptime(fecha1, "%Y-%m-%d")
        date2 = datetime.datetime.strptime(fecha2, "%Y-%m-%d")
        resultado = self.filter(
            titulo__icontains=kword,
            fecha__range=(date1, date2)
        ).order_by('fecha')
        return resultado

    def listar_libros_categoria(self, categoria):
        """Books in the given category id, ordered by title."""
        return self.filter(categoria__id=categoria).order_by('titulo')

    def add_autor_libro(self, libro_id, autor):
        """Attach *autor* to the book's author M2M set and return the book."""
        libro = self.get(id=libro_id)
        libro.autor.add(autor)
        return libro

    def libros_num_prestamos(self):
        """Aggregate the total number of loans across all books."""
        resultado = self.aggregate(
            num_prestamos=Count('libro_prestamo')
        )
        return resultado

    def listar_libros_trg(self, kword):
        """Fuzzy title search via PostgreSQL trigram similarity; returns the
        first 10 books when *kword* is empty."""
        if kword:
            resultado = self.filter(
                titulo__trigram_similar=kword,
            )
            return resultado
        else:
            return self.all()[:10]
class CategoriaManager(models.Manager):
    """Custom manager with query helpers for the Categoria model."""

    def categoria_por_autor(self, autor):
        """Distinct categories containing at least one book by the given author id."""
        return self.filter(categoria_libro__autor__id=autor).distinct()

    def listar_categoria_libros(self):
        """Annotate each category with its book count (num_libros).

        NOTE(review): the print loop below is leftover debug output — consider
        removing it; it also forces queryset evaluation here.
        """
        resultado = self.annotate(
            num_libros=Count("categoria_libro")
        )
        for r in resultado:
            print('*************')
            print(r, r.num_libros)
        return resultado
|
#!/usr/bin/env python2.7
# py2exe build script (Python 2 only): packages ADSToOrigin.py into a
# Windows console executable via `python setup.py py2exe`.
from distutils.core import setup
import py2exe
setup(console=['ADSToOrigin.py'])
|
# Scan a whitespace-split condition string for the first pure equality join
# (two qualified columns joined by '=' with no !, < or > operators).
String = ("T1.B=>T2.B AND T2.B!=T3.B OR T4.B=T5.B AND T1.B=+4").split(" ")
joinCondition = ""
for token in String:
    looks_like_join = "=" in token and token.count(".") == 2
    if looks_like_join and "!" not in token and "<" not in token and ">" not in token:
        joinCondition = token
        break
|
from torch import gt
from backpack.core.derivatives.elementwise import ElementwiseDerivatives
class LeakyReLUDerivatives(ElementwiseDerivatives):
    """Element-wise derivatives of the LeakyReLU activation."""

    def hessian_is_zero(self):
        """`LeakyReLU''(x) = 0`."""
        return True

    def df(self, module, g_inp, g_out):
        """First LeakyReLU derivative:
        `LeakyReLU'(x) = negative_slope if x < 0 else 1`."""
        derivative = gt(module.input0, 0).float()
        # Entries that are exactly 0 here correspond to inputs <= 0.
        derivative[derivative == 0] = module.negative_slope
        return derivative
|
import yfinance as yf
import streamlit as st
import pandas as pd
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from datetime import date
import matplotlib as plt
from termcolor import colored
# Static page header / description (markdown).
st.write("""
# Beat the market with Hades
This app tries to predict the future price of any company, cryptocurrency or currency and its future trend.
This model only takes historical price values as input. In the coming days, we will be incorporating fundamental analysis variables into the model.
That said, and considering the market as an unpredictable phenomenon, this model should not be taken as an investment strategy. \n
............................................................................................................................................................ \n
**Input: **History data from yahoo finance library \n
**Output**: Future Price of any company or currency \n
**Model**: Neural Network with LSTM architecture \n
**Libraries:** yfinance, pandas, tensorflow and keras, matplotlib, sklearn, numpy.
............................................................................................................................................................
**Choose the company** you want in the **left panel**. You can also change the parameters that train the neuroanl network. Then click on the button below
""")
st.sidebar.header('User Input Parameters')
# Ticker picker: stocks, crypto pairs and FX pairs (yahoo finance symbols).
add_selectbox = st.sidebar.selectbox(
    "What company or cryptocurrency are you interested in?",
    ("AAPL", "GOOGL", "MELI","GGAL", "TSLA","MSFT", "AMZN", "FB", "V", "JNJ", "MA", "JPM", "NVDA", "INTC", "NFLX", "KO", "ADBE", "PYPL", "PEP", "TM", "ORCL", "CVX", "MCD", "IBM", "SNEJF", "BTC-USD", "ETH-USD", "ETH-BTC", "USDARS=X"),
)
today = date.today()
# Selected ticker, wrapped in a dict/DataFrame for later display and reuse.
input_data = {"company" : add_selectbox,
}
features = pd.DataFrame(input_data, index=[0])
# Neural Network model to predict the stock market
# Libraries
import pandas_datareader as web
import numpy as np
import math
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from datetime import datetime
from datetime import timedelta, date
# Candidate day counts (1..499) offered by the sidebar selectboxes below.
# (Replaces a manual append loop with the idiomatic list(range(...)).)
days_prev = list(range(1, 500))
# Input data
stock = input_data["company"]
start = "2000-01-01"
end = datetime.today()
tomorrow = pd.to_datetime(end) + pd.DateOffset(days=1)
# Look-back window fed to the model (default index 19 -> 20 days).
previous_days = st.sidebar.selectbox(
    "How many days used for train the model?",
    days_prev,
    19
)
# Forecast horizon (default index 9 -> 10 days).
post_days = st.sidebar.selectbox(
    "How many days do you want to predict?",
    days_prev,
    9
)
# Amount of history shown next to the prediction in the final graph.
days_before_in_graph = st.sidebar.selectbox(
    "How many days do you want to see in the graph with the predition?",
    days_prev,
    100
)
button = st.button("Click here to get the prediction!")
# Full pipeline: download data, train an LSTM, predict `post_days` ahead,
# render charts and a simple trend verdict. Runs only on button click.
if button:
    with st.spinner('Wait for it...'):
        # Import the stock dataset
        df = web.DataReader(stock, data_source = "yahoo", start = start, end = end)
        # Dataset only with Close column
        data = df.filter(["Close"]) # New dataset with only "Close" column
        dataset = data.values # transform the data into a numpy array
        # Decide the amount of instances for train and test (80/20 split)
        train_examples = math.ceil(len(dataset)*0.8) # amount of train instances
        test_explames = len(dataset) - train_examples# amount of test instances
        # Generate the train and test dataset
        train_data = dataset[0:train_examples, :]
        test_data = dataset[train_examples-previous_days: , : ]
        # Dataset scaled (values between 0-1)
        scaler = MinMaxScaler(feature_range = (0,1))
        train_data_scaled = scaler.fit_transform(train_data)
        # NOTE(review): re-fitting the scaler on the test split leaks test
        # statistics; scaler.transform(test_data) would be the usual choice.
        test_data_scaled = scaler.fit_transform(test_data)
        # Split the train set into inputs (x) and outputs (y):
        # each sample is `previous_days` closes, target is the next `post_days`.
        x_train = []
        y_train = []
        for i in range(previous_days, len(train_data_scaled)):
            if i < len(train_data_scaled)-post_days:
                x_train.append(train_data_scaled[i-previous_days:i, 0])
                y_train.append(train_data_scaled[i:i+post_days,0])
        # COnvert into numpy arrays
        x_train = np.array(x_train)
        y_train = np.array(y_train)
        # Split the test set into inputs(x) and outputs(y)
        x_test = []
        y_test = []
        #y_test = scaler.fit_transform(dataset[train_examples: , : ])
        for i in range(previous_days, len(test_data_scaled)):
            if i < len(test_data_scaled)-post_days:
                x_test.append(test_data_scaled[i-previous_days:i, 0])
                y_test.append(test_data_scaled[i:i+post_days,0])
        # COnvert into numpy arrays
        x_test = np.array(x_test)
        y_test = np.array(y_test)
        # Reshape the data to (samples, timesteps, features) for the LSTM
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
        x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
        # Deep Learning model
        model = Sequential() #creation of the model
        model.add(LSTM(50, return_sequences=True, input_shape = (x_train.shape[1], 1))) # input layer
        model.add(LSTM(50, return_sequences=False)) # hidden layer
        model.add(Dense(25)) # hidden layer
        model.add(Dense(post_days)) # output layer
        # Compile the neural network
        model.compile(optimizer = "adam", loss = "mean_squared_error")
        # Train the model (single epoch to keep the app responsive)
        model.fit(x_train, y_train, batch_size = 1, epochs = 1,verbose=0)
        #Evaluate the model with the test set
        prediction_test = model.predict(x_test) # output of the model
        prediction_test = scaler.inverse_transform(prediction_test) # output of the model
        y_test = scaler.inverse_transform(y_test)
        RMSE = np.sqrt(np.mean((prediction_test- y_test)**2))
        # FUTURE PRICE PREDICTION: feed the last `previous_days` closes
        df_future = df.filter(["Close"])
        last_60_days = df_future[-previous_days:].values
        last_60_days_scaled = scaler.transform(last_60_days)
        x_future = []
        x_future.append(last_60_days_scaled)
        x_future = np.array(x_future)
        x_future = np.reshape(x_future, (x_future.shape[0], x_future.shape[1], 1))
        price_future = model.predict(x_future)
        price_future = scaler.inverse_transform(price_future)
        # History followed by the predicted horizon, with integer x positions.
        new_dataset = []
        dates = []
        for i in range(0,len(dataset)):
            new_dataset.append(dataset[i][0])
        for j in range(0,post_days):
            new_dataset.append(price_future[0][j])
        for k in range(0,len(new_dataset)):
            dates.append(k)
        # Success message
        st.success("Price predicted succsessfully!")
        # Displaying the Top 10 similar profiles
        #Graphs
        st.write(f"""
## **{input_data["company"]} Closing Price**
""")
        fig = plt.figure(figsize = (16,8))
        plt.title(f"${stock} Close Price History", size = 30)
        plt.plot(df["Close"])
        plt.xlabel("Date", fontsize = 18)
        plt.ylabel("Close Price", fontsize = 18)
        plt.legend([f"${stock} Close Price"], fontsize=20)
        plt.grid()
        plt.grid(linestyle='-', linewidth='0.5', color='red')
        #plt.show()
        st.pyplot()
        st.write(f"""
## **{input_data["company"]} Volume Price**
""")
        fig = plt.figure(figsize = (16,8))
        plt.title(f"${stock} Volume", size = 30)
        plt.plot(df["Volume"])
        plt.xlabel("Date", fontsize = 18)
        plt.ylabel("Volume", fontsize = 18)
        plt.legend([f"${stock} Volume"], fontsize=20)
        plt.grid()
        plt.grid(linestyle='-', linewidth='0.5', color='red')
        #plt.show()
        st.pyplot()
        st.write(f"""
## **{input_data["company"]} Price Predicted for the next {post_days} days**:
**Last Price Values**:
""")
        st.dataframe(df["Close"].tail(), width=300, height=900)
        st.write(f"""**Future Stock Price:**""")
        st.write(f"The percentages of each day indicate how much the price will be compared to today.")
        total_percentages = []
        for j in range(0,post_days):
            # Percent change of each predicted day vs. the latest known close.
            percent = ((price_future[0][j])-(last_60_days[previous_days-1][0]))*100/(price_future[0][j])
            total_percentages.append(percent)
            if percent < 0:
                st.write(f"""{pd.to_datetime(end)+pd.DateOffset(days=j-1)}: ${price_future[0][j]} ({round(percent,2)}%)""")
            else:
                st.write(f"""{pd.to_datetime(end)+pd.DateOffset(days=j-1)}: ${price_future[0][j]} **({round(percent,2)}%)**""")
        st.write(f"""**Graph**""")
        plt.figure(figsize = (16,8))
        plt.title(f"${stock} Close Price History with {post_days} days future prediction", size = 30)
        plt.plot(dates[len(new_dataset)-post_days-days_before_in_graph:len(new_dataset)-post_days], new_dataset[len(new_dataset)-post_days-days_before_in_graph:len(new_dataset)-post_days], c="b",label="Close Price Data")
        plt.plot(dates[len(new_dataset)-post_days-1:],new_dataset[len(new_dataset)-post_days-1:], c="g",label=f"Future Close Price for the next {post_days} days")
        plt.xlabel("Date", fontsize = 18)
        plt.ylabel("Close Price", fontsize = 18)
        plt.legend([f"${stock} Close Price"], fontsize=20)
        plt.grid()
        plt.grid(linestyle='-', linewidth='0.5', color='red')
        plt.legend( prop={'size': 20})
        st.pyplot()
        # Majority vote over the predicted horizon decides the trend message.
        posittive_trend = 0
        negative_trend = 0
        for i in total_percentages:
            if i < 0:
                negative_trend = negative_trend + 1
            else:
                posittive_trend = posittive_trend + 1
        if posittive_trend > negative_trend:
            st.write(f"**This stock shows a positive trend, it could be a good investment!**")
        else:
            st.write(f"**This stock doesn´t show a positive trend.**")
|
from flask import Flask, render_template, jsonify, request
app = Flask(__name__)
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient # pymongo를 임포트 하기(패키지 인스톨 먼저 해야겠죠?)
client = MongoClient('localhost', 27017) # mongoDB는 27017 포트로 돌아갑니다.
db = client.dbsparta # 'dbsparta'라는 이름의 db를 만듭니다.
## HTML을 주는 부분 (serves the HTML page)
@app.route('/')
def home():
    # Serve the main page template.
    return render_template('index.html')
## API 역할을 하는 부분 (API endpoints)
@app.route('/sresults', methods=['POST'])
def searching():
    """Query the Naver shopping search API for the submitted keyword."""
    keyword_receive = request.form['keyword_give']
    # NOTE(review): client id/secret are hard-coded and committed — move them
    # to environment variables or config; rotating these keys is advisable.
    response = requests.get("https://openapi.naver.com/v1/search/shop.json",
                            params={"query": keyword_receive, "display": 1},
                            headers={"X-Naver-Client-Id": "3T2wQJ3_WgsPtjM1hqgp", "X-Naver-Client-Secret": "BrHLav3UBB"})
    # Response body is currently unused; only the status code is logged.
    print(response.status_code)
    return jsonify({'result':'success', 'msg': '검색이 완료되었습니다'})
if __name__ == '__main__':
app.run('0.0.0.0',port=5000,debug=True) |
import networkx as nx
f = open("input", "r")
orbits = [x.strip('\n').split(')') for x in f.readlines()]
def part1():
    """Total number of direct and indirect orbits (sum of depths to COM)."""
    graph = nx.DiGraph()
    for center, satellite in orbits:
        graph.add_node(center)
        graph.add_node(satellite)
        # Edge points from satellite to the body it orbits.
        graph.add_edge(satellite, center)
    # Each node has exactly one path to COM, so the shortest path is the path.
    total = sum(
        nx.shortest_path_length(graph, node, 'COM')
        for node in graph.nodes
        if node != 'COM'
    )
    print(total)
def part2():
    """Minimum orbital transfers: distance between YOU's and SAN's parents."""
    graph = nx.Graph()
    you_parent = None
    san_parent = None
    for center, satellite in orbits:
        graph.add_node(center)
        graph.add_node(satellite)
        graph.add_edge(satellite, center)
        if satellite == 'SAN':
            san_parent = center
        if satellite == 'YOU':
            you_parent = center
    print(nx.shortest_path_length(graph, you_parent, san_parent))
part1()
part2()
f.close() |
#-*- coding: utf-8 -*-
from django.contrib import admin
from models import Cidade, Endereco, Pais, Uf
class CidadeAdmin(admin.ModelAdmin):
    # Admin list columns and search fields for Cidade (city).
    list_display = ('nome', 'uf',)
    search_fields = ('nome', 'uf', )
class EnderecoAdmin(admin.ModelAdmin):
    # Admin list columns and search fields for Endereco (address).
    list_display = ('logradouro', 'complemento', 'bairro', 'cidade', 'cep',)
    search_fields = ('logradouro', 'complemento', 'bairro', 'cidade', 'cep',)
class PaisAdmin(admin.ModelAdmin):
    # Admin list columns and search fields for Pais (country).
    list_display = ('nome', 'sigla',)
    search_fields = ('nome', 'sigla',)
class UfAdmin(admin.ModelAdmin):
    # Admin list columns and search fields for Uf (federative unit / state).
    list_display = ('nome', 'sigla', 'pais',)
    search_fields = ('nome', 'sigla', 'pais',)
admin.site.register(Cidade, CidadeAdmin)
admin.site.register(Endereco, EnderecoAdmin)
admin.site.register(Pais, PaisAdmin)
admin.site.register(Uf, UfAdmin)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 21:46:48 2021
@author: chanchanchan
"""
import streamlit as st
import pandas as pd
from matplotlib import pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import DissertationPlotwithDataMain as main
import FastFouriorTransform as faft
def _plot_spectrum(y_values, trace_name, title, yaxis_title, mode='lines', y_range=None):
    """Build and render one spectrum figure (magnitude or stacked phase).

    x data is always faft.change_in_frequency; x-range fixed to 0-20 kHz.
    `y_range` is applied only when given (originally only 3 kHz magnitude
    plots and all stacked-phase plots had fixed y-ranges).
    """
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=faft.change_in_frequency, y=y_values, mode=mode, name=trace_name))
    fig.update_layout(title={'text': title, 'y': 0.85, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title="Frequency (kHz)", yaxis_title=yaxis_title)
    fig.update_xaxes(range=[0, 20])
    if y_range is not None:
        fig.update_yaxes(range=y_range)
    st.write(fig)

def app():
    """Streamlit page: FFT input/output magnitude and stacked-phase plots
    for the drive frequency chosen in the sidebar.

    The original five near-identical if/elif branches (3-7 kHz) are collapsed
    by looking up the faft data arrays by name.
    """
    st.header('Fast Fourior Transfrom')
    # generate different select tabs with frequencies
    signal = st.sidebar.selectbox('Frequency of Input Signal:', ['3kHz', '4kHz', '5kHz', '6kHz', '7kHz'])
    num = signal[0]                 # numeric prefix: '3'..'7'
    trace_name = num + ' kHz'
    # Only the 3 kHz magnitude plots carried a fixed y-range originally.
    mag_range = [0, 700] if signal == '3kHz' else None
    # plotting graphs: input magnitude, output magnitude, stacked output phase
    _plot_spectrum(getattr(faft, 'mag_fft_data' + num + '_input'), trace_name,
                   "Input Signals", "Magnitude (Arbitary Units)", y_range=mag_range)
    _plot_spectrum(getattr(faft, 'mag_fft_data' + num + '_output'), trace_name,
                   "Output Signals", "Magnitude (Arbitary Units)", y_range=mag_range)
    _plot_spectrum(getattr(faft, 'stacked_phase' + num + 'out'), trace_name,
                   "Output Signals", "Stacked Phase (Degrees)", mode='markers', y_range=[0, 8000])
|
import os
from vibepy.load_logger import load_logger
from vibepy.read_config import read_config
import vibepy.class_postgres as class_postgres
from run_single_batch import process_batch
from traineval.output_to_postgres import truncate_all_sm_tables
def main():
    """Process a hand-picked batch of train mot-segments end to end."""
    load_logger(log_config_folder=os.path.dirname(__file__))
    CONFIG = read_config(ini_filename='application.ini', ini_path=os.path.dirname(__file__))
    DB = class_postgres.PostgresManager(CONFIG, 'database')
    # Either process [all] since Jan 1st 2016 or only [new] ones.
    # batch_id, list_mot_id = get_all_ids(DB) # ALL
    # batch_id, list_mot_id = get_new_ids(DB) # NEW
    # batch_id, list_mot_id = get_all_ids_since_date(DB) # SINCE DATE
    # batch_id, list_mot_id = get_specific_vid(DB)
    # Hard-coded ids for this run; swap in one of the selectors above instead.
    list_mot_id = ['28d1cf6e-21f2-4309-ac00-165ab93a59f7','90d34597-d901-4d64-873d-f16caac3a219']
    # list_mot_id = list_mot_id[:6]
    process_batch(list_mot_id, CONFIG, DB)
    # for li in list_mot_id:
    #     try:
    #         process_batch(batch_id, [li], CONFIG, DB)
    #     except:
    #         print 'error with id', li
    return
def get_specific_vid(DB):
    """Return distinct mot_segment ids for one hard-coded vehicle id (vid),
    restricted to locations created after 2016-06-01."""
    sql = """SELECT DISTINCT mot_segments.id
FROM mot_segments
JOIN locations
ON locations.mot_segment_id=mot_segments.id
AND locations.datetime_created>'2016-06-01T00:00:00'
WHERE locations.vid IN ('AVANDDE3-6C12-5288-C199-B032CFC3269E')
;
"""
    query_output = DB.query_fetchall(sql)
    # Each row is a 1-tuple; unwrap to a flat list of ids.
    list_mot_id = [x[0] for x in query_output]
    return list_mot_id
def get_all_ids_since_date(DB):
    """Return distinct train mot_segment ids with locations created after
    2016-05-14 (cut-off date is hard-coded in the SQL)."""
    sql = """SELECT DISTINCT mot_segments.id
FROM mot_segments
JOIN locations
ON locations.mot_segment_id=mot_segments.id
AND locations.datetime_created>'2016-05-14T00:00:00'
WHERE mot='train';
"""
    query_output = DB.query_fetchall(sql)
    list_mot_id = [x[0] for x in query_output]
    return list_mot_id
def get_all_ids(DB):
    """Return distinct whitelisted train mot_segment ids in Europe/Zurich
    since 2016-01-01. The final vid filter is commented out in the SQL."""
    sql = """SELECT DISTINCT mot_segments.id
FROM mot_segments
JOIN locations
ON locations.mot_segment_id=mot_segments.id
AND locations.timezone_coordinate='Europe/Zurich'
AND locations.datetime_created>'2016-01-01T00:00:00'
JOIN vid_whitelist w on w.vid=locations.vid
WHERE mot='train'
--AND locations.vid NOT LIKE 'AVAND%';
"""
    query_output = DB.query_fetchall(sql)
    list_mot_id = [x[0] for x in query_output]
    return list_mot_id
def get_new_ids(DB):
    """Return train mot_segment ids not yet present in sbb.sm_trips
    (i.e. not yet processed), for AVAND vehicles since 2016-01-01."""
    # TODO this is at risk of constantly re-running failed points... (when get_stops returns nothing...) double check!
    # Get new MoT IDs
    sql_mot = """
SELECT DISTINCT m.id
FROM mot_segments m
JOIN locations l
ON l.mot_segment_id=m.id
AND l.timezone_coordinate='Europe/Zurich'
AND l.datetime_created>'2016-01-01T00:00:00'
WHERE mot='train'
AND vid LIKE 'AVAND%'
AND NOT EXISTS (
SELECT 1 -- it's mostly irrelevant what you put here
FROM sbb.sm_trips t
WHERE m.id = t.mot_segment_id::uuid
);
"""
    query_output = DB.query_fetchall(sql_mot)
    list_mot_id = [x[0] for x in query_output]
    return list_mot_id
if __name__ == "__main__":
main()
|
# For each test case, print the k-th positive integer not divisible by n.
Test = int(input())
for _ in range(Test):
    n, k = map(int, input().split())
    # Closed form replacing the original O(k) list-building loop:
    # every run of n consecutive integers keeps n-1 survivors, so
    # (k-1)//(n-1) multiples of n are skipped before the k-th survivor.
    print(k + (k - 1) // (n - 1))
|
from typing import List
from pydantic import BaseModel
# Fields shared by all article schemas (comments rather than docstrings:
# pydantic copies class docstrings into the generated schema description).
class ArticlesBase(BaseModel):
    title: str
    key_words: List[str] = []
# Payload accepted when creating an article.
class ArticlesCreate(ArticlesBase):
    tags: List[str] = []
# Full article representation returned from the API, read from ORM objects.
class Articles(ArticlesBase):
    id: int
    tags: List[str] = []
    is_hot: bool = False
    user_id: int

    class Config:
        # Allow constructing this schema from ORM model attributes.
        orm_mode = True
|
# Pattern Generator
import argparse, itertools
def generatePattern(letters, length):
    """Return a cyclic test pattern of exactly `length` characters.

    The pattern is built from fixed-width units of `nchar` characters, where
    nchar is the smallest width such that len(letters)**nchar distinct units
    exist to cover `length` characters.

    Fixes two defects in the original:
    - the unit width was computed with a hard-coded base of 26, although
      `letters` may have any size (the default alphabet here has 36 chars);
    - the cut-off counted generated *units* instead of characters, so the
      returned string was nchar times longer than requested.
    """
    base = len(letters)
    nchar = 1
    while base ** nchar < length:
        nchar += 1
    pieces = []
    total = 0
    for unit in itertools.product(letters, repeat=nchar):
        pieces.append("".join(unit))
        total += nchar
        if total >= length:
            break
    # Trim the final unit so the result is exactly `length` characters.
    return "".join(pieces)[:length]
'''
MAIN
'''
if __name__ == "__main__":
    # CLI: -l/--length = number of chars to generate, -f/--find = substring
    # whose offset in the pattern should be reported.
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--length", type=int, help="Length to generate")
    parser.add_argument("-f", "--find", type=str, help="String to find")
    args = parser.parse_args()
    length = args.length
    find = args.find
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    # NOTE: Python 2 print statements — this script targets Python 2.7.
    print "\nGenerating a string of " + str(length) + " chars\n"
    pattern = generatePattern(letters, length)
    if find:
        position = pattern.find(find)
        if position >= 0:
            print "[ " + find + " ] is at " + str(position)
        else:
            print "Could not find [ " + find + " ]"
    else:
        print pattern
|
def evenator(s):
return ' '.join([x if len(x)%2==0 else x+x[-1] for x in s.translate(None,'.,?!_').split()])
'''
Mr. E Ven only likes even length words. Please create a translator so that he
doesn't have to hear those pesky odd length words. For some reason he also
hates punctuation, he likes his sentences to flow.
Your translator should take in a string and output it with all odd length
words having an extra letter (the last letter in the word). It should also
remove all punctuation (.,?!) as well as any underscores (_).
"How did we end up here? We go?" translated becomes-> "Howw didd we endd up here We go"
'''
|
"""
Vehicles - TO BE TESTED
Automated Vehicles
"""
from evennia import utils, settings, CmdSet
from typeclasses.objects import Object
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
# ------------------------------------------------------------------------------
# Vehicle Commands - Allows departing from vehicles.
# ------------------------------------------------------------------------------
class TestCmdSet(CmdSet):
    """Command set bundling the vehicle boarding/departing test commands."""

    def at_cmdset_creation(self):
        # Called once when the cmdset is first created; register commands here.
        self.add(CmdTest1())
        self.add(CmdTest2())
class CmdTest1(COMMAND_DEFAULT_CLASS):
    """
    entering the train

    Usage:
      board

    This will be available to players in the same location
    as the vehicle and allows them to embark.
    """
    # NOTE(review): key is "test" while the docstring documents `board` —
    # confirm which command name is intended.
    key = "test"
    # Only usable from OUTSIDE the vehicle.
    locks = "cmd:not cmdinside()"

    def func(self):
        self.caller.msg("You board the train.")
class CmdTest2(COMMAND_DEFAULT_CLASS):
    """
    leaving the train

    Usage:
      depart

    This will be available to everyone inside the
    vehicle. It allows them to exit to the vehicle's
    current location.
    """
    key = "depart"
    # Only usable from INSIDE the vehicle.
    locks = "cmd:cmdinside()"

    def func(self):
        # Departure is deferred until the vehicle stops; message only for now.
        self.caller.msg("You must wait until you reach your destination.")
|
from marshmallow import Schema, fields, post_dump
class SchemaWithoutNoneFields(Schema):
    """Schema base class that drops serialized fields whose value is None."""

    # Values filtered out of the dumped payload.
    SKIP_VALUES = {None}

    @post_dump
    def remove_skip_values(self, data, **kwargs):
        # **kwargs keeps the hook compatible with marshmallow 3.x, which
        # passes extra keyword arguments (e.g. many=) to post_dump hooks;
        # under marshmallow 2.x nothing extra is passed and kwargs is empty.
        return {
            key: value for key, value in data.items()
            if value not in self.SKIP_VALUES
        }
# features.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import numpy as np
import util
import samples
DIGIT_DATUM_WIDTH=28
DIGIT_DATUM_HEIGHT=28
def basicFeatureExtractor(datum):
    """
    Returns a binarized and flattened version of the image datum.

    Args:
        datum: 2-dimensional numpy.array representing a single image.

    Returns:
        A 1-dimensional numpy.array of features indicating whether each pixel
        in the provided datum is white (0) or gray/black (1).
    """
    # Boolean mask of non-white pixels, cast to 0/1 ints, then flattened.
    return (datum > 0).astype(int).flatten()
def enhancedFeatureExtractor(datum):
    """
    Returns a feature vector of the image datum.

    Args:
        datum: 2-dimensional numpy.array representing a single image.

    Returns:
        A 1-dimensional numpy.array of features designed by you. The features
        can have any length.

    ## DESCRIBE YOUR ENHANCED FEATURES HERE...
    Calculate the number of connected regions. Add a 3 dimensional vector to
    the feature vector, with the value of the ith entry equal to 1 iff there
    are i connected regions in the image.
    ##
    """
    features = basicFeatureExtractor(datum)
    "*** YOUR CODE HERE ***"
    arr = features.tolist()
    num_connected_regions = get_connected_regions(datum)
    # Append a 3-entry one-hot suffix: index i is 1 iff the image has exactly
    # i connected background regions (counts >= 3 leave all three entries 0).
    for i in range(3):
        if i != num_connected_regions:
            arr.append(0)
        else:
            arr.append(1)
    return np.array(arr)
def get_connected_regions(datum):
    """Count 8-connected regions of zero-valued (background) pixels.

    Ink pixels (> 0) are skipped; each BFS flood-fill from an unvisited
    background pixel marks one region (e.g. an '8' presumably yields three:
    the outside plus two enclosed holes — depends on stroke thickness).
    """
    visited = set()
    num_connected_regions = 0
    for x in range(DIGIT_DATUM_WIDTH):
        for y in range(DIGIT_DATUM_HEIGHT):
            # Skip ink pixels and background already assigned to a region.
            if datum[x, y] > 0 or (x, y) in visited:
                continue
            bfs(datum, (x, y), visited)
            num_connected_regions += 1
    return num_connected_regions
def bfs(datum, start, visited):
    """Flood-fill from `start` over zero-valued (background) pixels.

    Explores the 8-connected neighborhood and adds every reached pixel to
    `visited` (mutated in place). Traversal order is identical to the
    original; only the queue was changed from a list (O(n) pop(0)) to a
    deque (O(1) popleft).
    """
    from collections import deque  # local import: O(1) popleft vs list.pop(0)
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node in visited:
            continue
        visited.add(node)
        node_x, node_y = node
        # Offsets include (0, 0); the re-enqueued self is discarded by the
        # visited check above, matching the original behavior.
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nbr_x = node_x + dx
                nbr_y = node_y + dy
                if nbr_x < 0 or nbr_x >= DIGIT_DATUM_WIDTH:
                    continue
                if nbr_y < 0 or nbr_y >= DIGIT_DATUM_HEIGHT:
                    continue
                # Only background pixels are part of a region.
                if datum[nbr_x, nbr_y] > 0:
                    continue
                queue.append((nbr_x, nbr_y))
def analysis(model, trainData, trainLabels, trainPredictions, valData, valLabels, validationPredictions):
    """
    This function is called after learning.
    Include any code that you want here to help you analyze your results.

    Use the print_digit(numpy array representing a training example) function
    to the digit

    An example of use has been given to you.

    - model is the trained model
    - trainData is a numpy array where each row is a training example
    - trainLabel is a list of training labels
    - trainPredictions is a list of training predictions
    - valData is a numpy array where each row is a validation example
    - valLabels is the list of validation labels
    - valPredictions is a list of validation predictions

    This code won't be evaluated. It is for your own optional use
    (and you can modify the signature if you want).
    """
    # Intentionally empty hook: put any post-training analysis here.
    # Put any code here...
    # Example of use (Python 2 syntax, kept from the original template):
    # for i in range(len(trainPredictions)):
    #     prediction = trainPredictions[i]
    #     truth = trainLabels[i]
    #     if (prediction != truth):
    #         print "==================================="
    #         print "Mistake on example %d" % i
    #         print "Predicted %d; truth is %d" % (prediction, truth)
    #         print "Image: "
    #         print_digit(trainData[i,:])
## =====================
## You don't have to modify any code below.
## =====================
def print_features(features):
    """Print an ASCII rendering of a feature-index collection: '#' where the
    flattened pixel index (i * height + j) is in `features`, space otherwise.

    Fix: the original accumulated into a local named `str`, shadowing the
    builtin, and built the string with quadratic `+=`; this version uses a
    list plus ''.join and produces byte-identical output.
    """
    width = DIGIT_DATUM_WIDTH
    height = DIGIT_DATUM_HEIGHT
    rows = []
    for i in range(width):
        row_chars = []
        for j in range(height):
            feature = i * height + j
            row_chars.append('#' if feature in features else ' ')
        rows.append(''.join(row_chars))
    # Original ended every row (including the last) with '\n' before print.
    print('\n'.join(rows) + '\n')
def print_digit(pixels):
    """Render a flat pixel vector as a width x height Datum image and print it."""
    width = DIGIT_DATUM_WIDTH
    height = DIGIT_DATUM_HEIGHT
    # Ignore any trailing entries beyond one full image (e.g. appended features).
    pixels = pixels[:width*height]
    image = pixels.reshape((width, height))
    datum = samples.Datum(samples.convertToTrinary(image),width,height)
    print(datum)
def _test():
    """Smoke test: print every digit in the tiny MNIST training set."""
    import datasets
    train_data = datasets.tinyMnistDataset()[0]
    for i, datum in enumerate(train_data):
        print_digit(datum)
if __name__ == "__main__":
_test()
|
#!/usr/bin/python
from __future__ import division
import sys
import os
import math
import numpy as np
# argv[1]: file listing one sequence name per line; for every
# <name>/<name>_CORRELATION* file, report whether any correlation value
# reaches the threshold ('korelovane' = correlated, 'nekorelovane' = not).
sequence_names = sys.argv[1]

with open(sequence_names, 'r') as f:
    names = [line.strip('\n') for line in f]

threshold = 1
for name in names:
    # Collect this sequence's correlation output files.
    files = [filename for filename in os.listdir('./' + name)
             if filename.startswith(name + '_CORRELATION')]
    for corr_file in files:
        # `with` closes the file; the original's extra close() calls on
        # already-closed handles were redundant and have been removed.
        with open('./' + name + '/' + corr_file, 'r') as corr:
            items = [float(line) for line in corr]
        # Correlated iff any value reaches the threshold (replaces the
        # manual counter over all items).
        if any(i >= threshold for i in items):
            is_conserved = 'korelovane'
        else:
            is_conserved = 'nekorelovane'
        print(corr_file+' :'+ is_conserved)
|
# Index of the "dominant" element: the maximum must be at least twice every
# other value. Print its index, or -1 if it is not dominant.
a = [3, 6, 1, 0]
m = max(a)
rest = a.copy()
rest.remove(m)  # drop one occurrence of the maximum, keep the others
dominant = all(m >= 2 * value for value in rest)
print(a.index(m) if dominant else -1)
|
from django.urls import path
from . import views
app_name = "serah_terima"

# All CRUD routes for this app are currently disabled; uncomment to re-enable.
urlpatterns = [
    # path("", views.index, name="index"),
    # path("cari", views.cari, name="cari"),
    # path("tambah", views.tambah, name="tambah"),
    # path("tampil/<int:id>", views.tampil, name="tampil"),
    # path("ubah/<int:id>", views.ubah, name="ubah"),
    # path("hapus/<int:id>", views.hapus, name="hapus"),
]
"""OctreeLoader class.
Uses ChunkLoader to load data into OctreeChunks in the octree.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, List, Set
from napari.layers.image.experimental._chunk_set import ChunkSet
from napari.layers.image.experimental.octree import Octree
if TYPE_CHECKING:
from napari.components.experimental.chunk import (
ChunkRequest,
LayerRef,
OctreeLocation,
)
from napari.layers.image.experimental.octree_chunk import OctreeChunk
LOGGER = logging.getLogger("napari.octree.loader")
LOADER = logging.getLogger("napari.loader.futures")
# TODO_OCTREE make this a config. This is how many levels "up" we look
# for tiles to draw at levels above the ideal how. These tiles give
# us lots of coverage quickly, so we load and draw then even before
# the ideal level
NUM_ANCESTOR_LEVELS = 3
class OctreeLoader:
    """Load data into the OctreeChunks in the octree.

    The loader is given drawn_set, the chunks we are currently drawing, and
    ideal_chunks, the chunks which are in view at the desired level of the
    octree.

    The ideal level was chosen because its image pixels best match the
    screen pixels. Using higher resolution than that is okay, but it's
    wasted time and memory. Using lower resolution is better than nothing,
    but it's going to be blurrier than the ideal level.

    Our get_drawable_chunks() method iterates through the ideal_chunks
    choosing what chunks to load, in what order, and producing the set of
    chunks the visual should draw.

    Choosing what chunks to load and draw is the heart of octree rendering.
    We use the tree structure to find child or parent chunks, or chunks
    further up the tree: ancestor chunks.

    The goal is to pretty quickly load all the ideal chunks, since that's
    what we really want to draw. But in the meantime we load and display
    chunks at lower or higher resolutions. In some cases because they are
    already loaded and even already being drawn. In other cases though we
    load chunks from a high level because they provide "coverage" quickly.

    As you go up to higher levels from the ideal level, the chunks on those
    levels cover more and more chunks on the ideal level. As you go up
    levels they cover this number of ideal chunks: 4, 16, 64.

    The data from higher levels is blurry compared to the ideal level, but
    getting something "reasonable" on the screen quickly often leads to the
    best user experience. For example, even "blurry" data is often good
    enough for them to keep navigating, to keep panning and zooming looking
    for whatever they are looking for.

    Parameters
    ----------
    octree : Octree
        We are loading chunks for this octree.
    layer_ref : LayerRef
        A weak reference to the layer the octree lives in.

    Attributes
    ----------
    _octree : Octree
        We are loading chunks for this octree.
    _layer_ref : LayerRef
        A weak reference to the layer the octree lives in.
    """

    def __init__(self, octree: Octree, layer_ref: LayerRef) -> None:
        self._octree = octree
        self._layer_ref = layer_ref

    def get_drawable_chunks(
        self,
        drawn_set: Set[OctreeChunk],
        ideal_chunks: List[OctreeChunk],
        ideal_level: int,
    ) -> List[OctreeChunk]:
        """Return the chunks that should be drawn.

        The ideal chunks are within the bounds of the OctreeView, but they
        may or may not be in memory. We only return chunks which are in
        memory.

        Generally we want to draw the "best available" data. However, that
        data might not be at the ideal level.

        So we look in two directions:
        1) Up, to find a chunk at a higher (coarser) level.
        2) Down, to look for a drawable chunk at a lower (finer) level.

        The TiledImageVisual can draw overlapping tiles/chunks. For example
        suppose below B and C are ideal chunks, but B is drawable while C
        is not. We search up from C and find A.

            ---------
            |   A   |
            |---|---|
            | B | C |
            ---------

        TiledImageVisual will render A first, because it's at a higher
        level, and then B. So the visual will render B and A with B on top.
        The region defined by C is showing A, until C is ready to draw.

        The first thing we do is find the best single chunk in memory that
        will cover all the ideal chunks, and draw that. Drawing this chunk
        will happen right away and ensure that something is always drawn
        and the canvas never flickers to empty. Worst case we draw the root
        tile.

        Next we look through all the ideal chunks and see what are the
        already drawn chunks that we should just leave there. If the ideal
        chunk has been drawn then it does not need any additional coverage
        and we move on. We next look to see if all four children are
        already drawn, this happens most often when zooming out, and if so
        we leave them there. If the ideal chunk is in memory we'll draw it
        too.

        If not, we then look to see what the closest in-memory ancestor and
        closest drawn ancestor are. If they are the same then we'll draw
        that chunk, along with the ideal chunk if it is in memory too. If
        they are not the same then we'll draw the closest drawn ancestor
        and either the closest in-memory chunk or the ideal chunk if it is
        in memory too.

        Finally, we will start loading any ideal chunks that aren't in
        memory that we want to draw.

        Parameters
        ----------
        drawn_set : Set[OctreeChunk]
            The chunks which the visual is currently drawing.
        ideal_chunks : List[OctreeChunk]
            The chunks which are visible to the current view.
        ideal_level : int
            Index of the ideal octree level; used to prioritize loads.

        Returns
        -------
        List[OctreeChunk]
            The chunks that should be drawn.
        """
        LOGGER.debug(
            "get_drawable_chunks: Starting with draw_set=%d ideal_chunks=%d",
            len(drawn_set),
            len(ideal_chunks),
        )
        # This is an ordered set. It's a set because many ideal chunks will
        # have the same ancestors, but we only want them in here once.
        seen = ChunkSet()
        # Find the closest ancestor that will cover all the ideal chunks
        # that is in memory. Worst case take the root tile. This chunk
        # ensures that the best thing that we can immediately draw will
        # always be drawn and so the canvas will never flicker to empty
        # which is very disconcerting.
        seen.add(self._get_closest_ancestor(ideal_chunks))
        # Now get coverage for the ideal chunks. The coverage chunks might
        # include the ideal chunk itself and/or chunks from other levels.
        for ideal_chunk in ideal_chunks:
            seen.add(self._get_coverage(ideal_chunk, drawn_set))
        # Add the ideal chunks AFTER all the coverage ones, we want to load
        # these after, because the coverage ones cover a much bigger area,
        # better to see them first, even though they are lower resolution.
        seen.add(ideal_chunks)
        # Cancel in-progress loads for any chunks we can no longer see.
        # When panning or zooming rapidly, it's very common that chunks
        # fall out of view before the load was even started. We need to
        # cancel those loads or it will tie up the loader loading chunks
        # we aren't even going to display.
        self._cancel_unseen(seen)
        drawable = []
        # Load everything in seen if needed.
        for chunk in seen.chunks():
            # The ideal level is priority 0, 1 is one level above ideal, etc.
            priority = chunk.location.level_index - ideal_level
            if chunk.needs_load:
                self._load_chunk(chunk, priority)
            if chunk.in_memory:
                drawable.append(chunk)  # It was a sync load, ready to draw.
        # Useful for debugging but very spammy.
        # log_chunks("drawable", drawable)
        return drawable

    def _get_closest_ancestor(
        self, ideal_chunks: List[OctreeChunk]
    ) -> List[OctreeChunk]:
        """Get closest in-memory ancestor chunk.

        Look through all the in-memory ancestor chunks to determine the
        closest one. If none are found then use the root tile.

        Parameters
        ----------
        ideal_chunks : List[OctreeChunk]
            Ideal chunks.

        Returns
        -------
        List[OctreeChunk]
            Closest in-memory ancestor chunk.
        """
        ancestors = []
        for ideal_chunk in ideal_chunks:
            # Get the in-memory ancestors of the current chunk.
            chunk_ancestors = self._octree.get_ancestors(
                ideal_chunk, create=False, in_memory=True
            )
            ancestors.append(chunk_ancestors)
        # Only ancestors shared by every ideal chunk can cover all of them.
        # NOTE(review): assumes ideal_chunks is non-empty;
        # set.intersection(*[]) would raise TypeError -- confirm callers.
        common_ancestors = list(set.intersection(*map(set, ancestors)))
        if len(common_ancestors) > 0:
            # Find the common ancestor with the smallest level, i.e. the
            # highest resolution.
            level_indices = [c.location.level_index for c in common_ancestors]
            best_ancestor_index = level_indices.index(min(level_indices))
            return [common_ancestors[best_ancestor_index]]
        # No in-memory common ancestors were found so return the root tile.
        # We say create=True because the root is not part of the current
        # intersection. However since it's permanent once created and
        # loaded it should always be available. As long as we don't garbage
        # collect it!
        root_tile = self._octree.levels[-1].get_chunk(0, 0, create=True)
        return [root_tile]

    def _get_permanent_chunks(self) -> List[OctreeChunk]:
        """Get any permanent chunks we want to always draw.

        Right now it's just the root tile. We draw this so that we always
        have at least some minimal coverage when the camera moves to a new
        place. On a big enough dataset though when zoomed in we might be
        "inside" a single pixel of the root tile. So it's just providing a
        background color at that point.

        NOTE(review): duplicates the root-tile fallback at the end of
        _get_closest_ancestor and is not called anywhere in this class --
        candidate for removal or reuse; confirm no external callers.

        Returns
        -------
        List[OctreeChunk]
            Any extra chunks we should draw.
        """
        # We say create=True because the root is not part of the current
        # intersection. However since it's permanent once created and
        # loaded it should always be available. As long as we don't garbage
        # collect it!
        root_tile = self._octree.levels[-1].get_chunk(0, 0, create=True)
        return [root_tile]

    def _get_coverage(
        self, ideal_chunk: OctreeChunk, drawn_set: Set[OctreeChunk]
    ) -> List[OctreeChunk]:
        """Return the chunks to draw for this one ideal chunk.

        If the ideal chunk is already being drawn, we return it alone. It's
        all we need to draw to cover the chunk. If it's not being drawn we
        look up and down the tree to find what chunks we can draw to
        "cover" this chunk.

        Note that drawn_set might be smaller than what
        get_drawable_chunks has been returning, because it only contains
        chunks that actually got drawn to the screen, that are in VRAM.

        The visual might take time to load chunks into VRAM. So we might
        return the same chunks from get_drawable_chunks() many times in a
        row before it gets drawn. It might only load one chunk per frame
        into VRAM, for example.

        Parameters
        ----------
        ideal_chunk : OctreeChunk
            The ideal chunk we'd like to draw.
        drawn_set : Set[OctreeChunk]
            The chunks which the visual is currently drawing.

        Returns
        -------
        List[OctreeChunk]
            The chunks that should be drawn to cover this one ideal chunk.
        """
        # If the ideal chunk is already being drawn, that's all we need,
        # there is no point in returning more than that.
        if ideal_chunk.in_memory and ideal_chunk in drawn_set:
            return [ideal_chunk]
        # If not, get alternates for this chunk, from other levels.
        # If the ideal chunk is in memory then we'll want to draw that one
        # too though.
        best_in_memory_chunk = [ideal_chunk] if ideal_chunk.in_memory else []
        # First get any direct children which are in memory. Do not create
        # OctreeChunks or use children that are not already in memory
        # because it's better to create and load higher levels.
        children = self._octree.get_children(
            ideal_chunk, create=False, in_memory=True
        )
        # Only keep the children which are already drawn, as drawing is
        # expensive, don't want to draw them unnecessarily.
        children = [chunk for chunk in children if chunk in drawn_set]
        # If all children are in memory and are already drawn just return
        # them as they will cover the whole chunk.
        ndim = 2  # right now we only support a 2D quadtree
        if len(children) == 2**ndim:
            return children + best_in_memory_chunk
        # Get the closest ancestor that is already in memory that
        # covers the ideal chunk. Don't create chunks because it is better
        # to just create the ideal chunks. Note that the most distant
        # ancestor is returned first, so need to look at the end of the
        # list to get the closest one.
        ancestors = self._octree.get_ancestors(
            ideal_chunk, create=False, in_memory=True
        )
        # Get the drawn ancestors.
        drawn_ancestors = [chunk for chunk in ancestors if chunk in drawn_set]
        # Get the closest in-memory ancestor.
        if len(ancestors) > 0:
            ancestors = [ancestors[-1]]
        # Get the closest drawn ancestor.
        if len(drawn_ancestors) > 0:
            drawn_ancestors = [drawn_ancestors[-1]]
        # If the closest ancestor is drawn just take that one.
        if len(ancestors) > 0 and ancestors == drawn_ancestors:
            return children + drawn_ancestors + best_in_memory_chunk
        # If the ideal chunk is in memory take that one.
        if len(best_in_memory_chunk) > 0:
            return children + drawn_ancestors + best_in_memory_chunk
        # Otherwise take the closest in-memory ancestor.
        return children + drawn_ancestors + ancestors

    def _load_chunk(self, octree_chunk: OctreeChunk, priority: int) -> bool:
        """Load the data for one OctreeChunk.

        Parameters
        ----------
        octree_chunk : OctreeChunk
            Load the data for this chunk.
        priority : int
            Load priority; 0 is the ideal level, higher is coarser.

        Returns
        -------
        bool
            True if the load completed synchronously (data is ready now),
            False if an async load was started.
        """
        # We only want to load a chunk if it's not already in memory, if a
        # load was not started on it.
        assert not octree_chunk.in_memory
        assert not octree_chunk.loading
        # The ChunkLoader takes a dict of chunks that should be loaded at
        # the same time. Today we only ever ask it to a load a single chunk
        # at a time. In the future we might want to load multiple layers at
        # once, so they are in sync, or load multiple locations to bundle
        # things up for efficiency.
        chunks = {'data': octree_chunk.data}
        # Mark that this chunk is being loaded.
        octree_chunk.loading = True
        # Imported here (not at module top) -- presumably to avoid an
        # import cycle; confirm before hoisting.
        from napari.components.experimental.chunk import (
            ChunkRequest,
            chunk_loader,
        )
        # Create the ChunkRequest and load it with the ChunkLoader.
        request = ChunkRequest(octree_chunk.location, chunks, priority)
        satisfied_request = chunk_loader.load_request(request)
        if satisfied_request is None:
            # An async load was initiated. The load will probably happen in a
            # worker thread. When the load completes QtChunkReceiver will call
            # OctreeImage.on_chunk_loaded() with the data.
            return False
        # The load was synchronous. Some situations where the
        # ChunkLoader loads synchronously:
        #
        # 1) The force_synchronous config option is set.
        # 2) The data already was an ndarray, there's nothing to "load".
        # 3) The data is Dask or similar, but based on past loads it's
        #    loading so quickly that we decided to load it synchronously.
        # 4) The data is Dask or similar, but we already loaded this
        #    exact chunk before, so it was in the cache.
        #
        # Whatever the reason, the data is now ready to draw.
        octree_chunk.data = satisfied_request.chunks.get('data')
        # The chunk has been loaded, it's now a drawable chunk.
        assert octree_chunk.in_memory
        return True

    def _cancel_unseen(self, seen: ChunkSet) -> None:
        """Cancel in-progress loads not in the seen set.

        Parameters
        ----------
        seen : ChunkSet
            The set of chunks the loader can see.
        """
        from napari.components.experimental.chunk import chunk_loader

        def _should_cancel(chunk_request: ChunkRequest) -> bool:
            """Cancel if we are no longer seeing this location."""
            return not seen.has_location(chunk_request.location)

        cancelled = chunk_loader.cancel_requests(_should_cancel)
        for request in cancelled:
            self._on_cancel_request(request.location)

    def _on_cancel_request(self, location: OctreeLocation) -> None:
        """Request for this location was cancelled.

        Parameters
        ----------
        location : OctreeLocation
            Set that this chunk is no longer loading.
        """
        # Get chunk for this location, don't create the chunk, but it ought
        # to be there since there was a load in progress.
        chunk: OctreeChunk = self._octree.get_chunk_at_location(
            location, create=False
        )
        if chunk is None:
            LOADER.error("_cancel_load: Chunk did not exist %s", location)
            return
        # Chunk is no longer loading.
        chunk.loading = False
|
#!/usr/bin/env python
import sys
from os import path
from setuptools import setup, find_packages
# Make the in-repo 'src' layout importable so the package version can be
# read without installing the package first.
sys.path.append(path.join(path.dirname(__file__), 'src'))
from graph_db import __version__ as version

setup(
    name='not4oundGraph DB',
    version=version,
    description='Simple Distributed Graph Database',
    # NOTE(review): open() uses the platform default encoding and the
    # handle is never closed -- consider io.open(..., encoding='utf-8').
    long_description=open(path.join(path.dirname(__file__), 'README.md')).read(),
    author='ilya16, zyfto & Borisqa',
    url='https://github.com/ilya16/graph-db',
    # Packages live under src/ (the "src layout").
    packages=find_packages('src', include=['graph_db', 'graph_db.*']),
    package_dir={'graph_db': 'src/graph_db'},
    entry_points={
        'console_scripts':
        ['n4Graph = graph_db.console.console:run']
    },
    test_suite='src.tests',
    install_requires=['rpyc'],
    # Ship bundled config files with the package.
    package_data={'': ['configs/*']}
)
|
#!/bin/python
# Minimal UDP sink: bind PORT on all interfaces and echo each datagram's
# sender address and payload to stdout.
# Fixed: the original used the Python 2 `print` statement (a SyntaxError
# under Python 3) and a wildcard `from socket import *`.
from socket import socket, AF_INET, SOCK_DGRAM

PORT = 24600

s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', PORT))  # '' = listen on every local interface
while True:
    # Read one datagram (up to 200 bytes) and its sender address.
    data, addr = s.recvfrom(200)
    # decode() keeps the original "[ip] text" output format under Python 3,
    # where recvfrom() returns bytes.
    print("[%s] %s" % (addr[0], data.decode(errors="replace").strip()))
|
n = 5
# (n+1) x n grid of zeros; row 0 will hold arr, rows 1..n are work rows.
matrix = [[0 for i in range(n)] for j in range(n+1)]
arr = [5,4,3,2,1]
stor = [1,2,3,4,5]
# Copy arr into row 0.  NOTE(review): the outer loop is redundant -- the
# identical row-0 assignment is repeated n+1 times.
for i in range(n+1):
    for j in range(n):
        matrix[0][j] = arr[j]
# NOTE(review): rows 1..n are still all zeros here and arr contains no
# zeros, so this condition never fires and rows 1..n stay zero.  Possibly
# `matrix[0][j]` was intended instead of `matrix[i][j]` -- TODO confirm.
for i in range(1,n+1,+1):
    for j in range(n):
        if matrix[i][j] == arr[j]:
            matrix[i][j] = stor[j]+1
# Print the grid, one row per line.
for i in range(n+1):
    for j in range(n):
        print(matrix[i][j],end=" ")
    print()
|
import requests
import unittest
from common.logger import Log
from lxml import etree
class Test(unittest.TestCase):
    '''SDK position admin-API tests (login via CAS, then call the adx
    admin endpoint).'''

    log=Log()

    def login(self):
        # Log into the CAS server and return the session id ('sid') cookie
        # used to authenticate subsequent admin-API calls.
        # SECURITY(review): username/password are hardcoded in source --
        # move them to configuration/secrets storage.
        url1 = 'https://cas.zuoyebang.cc/login?service=http://qa-adx2.suanshubang.com/adx-admin/auth-callback'
        url2 = 'https://cas.zuoyebang.cc/login'
        # Fetch the login page to scrape the CAS 'lt' token and 'service'
        # hidden form fields.
        r1 = requests.get(url1, verify=False)
        demo = etree.HTML(r1.content)
        nodes = demo.xpath("//input[@id='lt']")
        lt = nodes[0].get('value')
        nodes2 = demo.xpath('//input[@id="service"]')
        service = nodes2[0].get('value')
        body = {
            'username': 'yanlingyu',
            'password': 'Yanly6900-',
            'lt': lt,
            'service': service,
            'from': ''
        }
        # Submit credentials; CAS answers with a redirect whose Location
        # carries the service ticket.
        r = requests.post(url2, data=body, allow_redirects=False, verify=False)
        location = r.headers['Location']
        print(location)
        # Following the redirect (without auto-redirect) sets the 'sid'
        # session cookie we need.
        r2 = requests.get(location, allow_redirects=False)
        return r2.cookies['sid']

    def setUp(self):
        # Authenticate once per test and build the common request headers.
        sid=self.login()
        self.headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134',
            'cookie': 'sid=%s'%sid
        }

    def test_modify(self):
        '''Modify SDK: update the SDK position with id 1048.'''
        self.url='http://qa-adx2.suanshubang.com/adx-resource/sdk/position/update'
        self.log.info("测试的url地址:%s"%self.url)
        body = {
            "adxPositionId": "",
            "app": "",
            "system": "",
            "type": 0,
            "requestCount": '-1',
            "status": 1,
            "remark": "1",
            "priority": 1,
            "sdkPositionId": "6040440949805533",
            "id": "1048"
        }
        r=requests.post(self.url,headers=self.headers,json=body)
        result=r.json()
        self.log.info("获取请求结果 %s"%result)
        data=result['data']
        self.log.info("获取%s值与1进行比较" % data)
        # NOTE(review): assertTrue(data, 1) treats 1 as the failure
        # *message*, not an expected value -- assertEqual(data, 1) was
        # probably intended.  TODO confirm before changing behavior.
        self.assertTrue(data,1)
# Run the suite when executed as a script.
if __name__=='__main__':
    unittest.main()
|
import numpy as np
import h5py
from scipy import stats, mgrid, c_, reshape
from dateutil import parser
from prime_utils import runningAvg, prediction_filename
def getKDE(spl,nskip=0,nthin=1,npts=100,bwfac=1.0):
    r"""
    Compute 1D and 2D marginal PDFs via Kernel Density Estimate

    Parameters
    ----------
    spl: numpy array
        MCMC chain [number of samples x number of parameters]
    nskip: int
        number of initial samples to skip when sampling the MCMC chain
    nthin: int
        use every 'nthin' samples
    npts: int
        number of grid points
    bwfac: double
        bandwidth factor

    Returns
    -------
    dict: dictionary with results
        'x1D': list of numpy arrays with grids for the 1D PDFs;
        'p1D': list of numpy arrays with 1D PDFs;
        'x2D': list of numpy arrays of x-axis grids for the 2D PDFs;
        'y2D': list of numpy arrays of y-axis grids for the 2D PDFs;
        'p2D': list of numpy arrays containing 2D PDFs
    """
    # Local import keeps this function working on modern SciPy, where the
    # deprecated scipy.stats.kde submodule and the scipy-level numpy
    # aliases (mgrid, c_, reshape) have been removed.
    from scipy.stats import gaussian_kde

    # Burn-in and thinning of the chain.
    spl = spl[nskip::nthin, :]

    # Compute 1D marginal pdf's, one per parameter.
    x1D = []
    p1D = []
    for i in range(spl.shape[1]):
        print(" - Evaluate 1D pdf for variable", i + 1)
        spls = spl[:, i]
        x1D.append(np.linspace(spls.min(), spls.max(), npts))
        kern = gaussian_kde(spls)
        p1D.append(kern(x1D[i]))

    # Compute 2D joint pdf's for every pair (i, j) with i < j.
    npts = 1j * npts  # imaginary step -> np.mgrid treats it as a point count
    x2D = []
    y2D = []
    p2D = []
    for j in range(1, spl.shape[1]):
        for i in range(j):
            print(" - Evaluate 2D pdf for variables", i + 1, j + 1)
            kern = gaussian_kde(np.c_[spl[:, i], spl[:, j]].T)
            kern.set_bandwidth(bw_method=kern.factor * bwfac)
            xmin = spl[:, i].min(); xmax = spl[:, i].max()
            ymin = spl[:, j].min(); ymax = spl[:, j].max()
            x, y = np.mgrid[xmin:xmax:npts, ymin:ymax:npts]
            z = np.reshape(kern(np.c_[x.ravel(), y.ravel()].T).T, x.T.shape)
            x2D.append(x)
            y2D.append(y)
            p2D.append(z)

    return {'x1D': x1D, 'p1D': p1D, 'x2D': x2D, 'y2D': y2D, 'p2D': p2D}
def distcorr(spl):
    r"""
    Compute distance correlation between random vectors

    Parameters
    ----------
    spl: numpy array [number of samples x number of variables]
        first dimension is the number of samples,
        second dimension is the number of random vectors

    Returns
    -------
    Returns a 2D array of distance correlations between pairs of random vectors;
    only entries 0<=j<i<no. of random vectors are populated

    References:
    http://en.wikipedia.org/wiki/Distance_correlation
    """
    nspl, nvars = spl.shape
    if nspl > 5000:
        print('Warning ! This might be a lengthy calculation: nspl=', nspl)

    # Build the double-centered pairwise-distance matrix for each variable.
    # Broadcasting replaces the original O(nspl^2) pure-Python loops and
    # the deprecated np.matrix type, with identical results.
    As = []
    for i in range(nvars):
        col = np.asarray(spl[:, i], dtype=float)
        Amat = np.abs(col[:, None] - col[None, :])
        # Subtract row means and column means, add back the grand mean.
        Amat = (Amat
                - Amat.mean(axis=1, keepdims=True)
                - Amat.mean(axis=0, keepdims=True)
                + Amat.mean())
        As.append(Amat)

    dCor = np.zeros((nvars, nvars))
    # Distance variance of each variable: sqrt(mean of squared entries).
    dVarX = [np.sqrt(np.sum(As[i] * As[i]) / (nspl * nspl))
             for i in range(nvars)]
    print("Variances:")
    print(dVarX)
    # Distance correlation for each pair (only the lower triangle i > j).
    for i in range(1, nvars):
        for j in range(i):
            dCov = np.sqrt(np.sum(As[i] * As[j]) / (nspl * nspl))
            dCor[i, j] = dCov / np.sqrt(dVarX[i] * dVarX[j])
    return dCor
def computeAICandBIC(run_setup,verbose=0):
    """
    Compute Akaike Information Criterion (AIC) and Bayesian Information Criterion (BIC)

    Parameters
    ----------
    run_setup: dictionary with run settings; see the Examples section in the manual
    verbose: int
        print extra diagnostic information when > 0

    Returns
    -------
    AIC: float
    BIC: float
    """
    #-------------------------------------------------------
    # definitions
    fdata = run_setup["regioninfo"]["regionname"]+".dat"
    fchno = run_setup["regioninfo"]["fchain"]
    #-------------------------------------------------------
    # retrieve log likelihoods from the MCMC chain file
    # ('with' guarantees the file is closed even on error; the old code
    # also shadowed the builtin name 'file')
    with h5py.File(fchno, 'r') as fh:
        loglik = np.array(fh["minfo"][:,1])
    if verbose>0:
        print(loglik.shape)
    #-------------------------------------------------------
    # compute AIC and BIC
    # Read in initial parameter guess to obtain number of parameters
    Nparam = len(run_setup["mcmcopts"]["cini"])
    # compute number of data points used (number of days)
    rawdata = np.loadtxt(fdata,dtype=str)
    ndays = rawdata.shape[0]
    # AIC = 2k - 2 ln(L_max);  BIC = k ln(n) - 2 ln(L_max).
    # Fixed: the previous BIC used 2*k*ln(n), doubling the parameter
    # penalty relative to the standard definition.
    AIC = 2 * Nparam - 2 * np.max(loglik)
    BIC = Nparam * np.log(ndays) - 2 * np.max(loglik)
    return AIC, BIC
def computeCRPS(run_setup):
    """
    Compute Continuous Rank Predictive Score (CRPS)

    Parameters
    ----------
    run_setup: dictionary with run settings; see the Examples section in the manual

    Returns
    -------
    CRPS: float
        daily CRPS values averaged over all days with data (lower is better)
    """
    #-------------------------------------------------------
    # definitions
    fdata = run_setup["regioninfo"]["regionname"]+".dat"
    fchno = run_setup["regioninfo"]["fchain"]
    day0 = run_setup["regioninfo"]["day0"]
    #-------------------------------------------------------
    # extract data from raw data (column 0: date string, column 1: count)
    rawdata = np.loadtxt(fdata,dtype=str)
    ndays_data = rawdata.shape[0]
    # NOTE(review): days_since_day0 is computed but never used below.
    days_since_day0 = np.array([(parser.parse(rawdata[i,0])-parser.parse(day0)).days for i in range(ndays_data)])
    # 7-day running average of the daily counts.
    new_cases = runningAvg(np.array([float(rawdata[i,1]) for i in range(ndays_data)]),7)
    #------------------------------------------------------
    # read model predictions [samples x days]
    # NOTE(review): 'file' shadows the builtin and is not closed on error.
    filename = prediction_filename(run_setup)
    file = h5py.File(filename, 'r')
    pred = np.array(file["predictions"])
    file.close()
    # only need predictions for days on which data is available
    pred = pred[:,:ndays_data]
    #------------------------------------------------------
    # compute CRPS integral for each day:
    # integral of (F(y) - H(y - obs))^2 dy over the prediction samples
    CRPS_daily = np.zeros(ndays_data)
    for i in range(ndays_data):
        pred_i = pred[:,i]
        # compute empirical pdf and cdf
        y = np.sort(pred_i)
        ecdf = np.array(range(1,len(pred_i)+1)) / float(len(pred_i))
        # compute heaviside function (step at the observed value)
        h_i = np.ones(ecdf.shape) * (y > new_cases[i])
        # compute integrand
        integrand = (ecdf - h_i) ** 2
        # average for trapezoid rule integration
        integrand = (integrand[1:] + integrand[:-1]) / 2
        # compute integral for day i (trapezoid rule on sample spacing)
        CRPS_daily[i] = np.sum( (y[1:] - y[:-1]) * integrand)
    # average over days
    CRPS = np.sum(CRPS_daily) / ndays_data
    return CRPS
|
class Solution:
    def checkValidString(self, s: str) -> bool:
        """Return True if s ('(', ')', '*') can be a valid parenthesis
        string, where '*' may act as '(', ')' or the empty string.

        Tracks the feasible range [min_open, max_open] of unmatched '('
        counts; the string is valid iff zero stays reachable throughout.
        """
        min_open = 0
        max_open = 0
        for c in s:
            if c == '(':
                min_open += 1
                max_open += 1
            elif c == ')':
                # Never let the lower bound go negative: a '*' treated as
                # empty could have absorbed the mismatch.
                min_open = max(min_open - 1, 0)
                max_open -= 1
            else:  # '*' -- wildcard widens the range in both directions
                min_open = max(min_open - 1, 0)
                max_open += 1
            # Even treating every '*' as '(' cannot fix this prefix.
            if max_open < 0:
                return False
        return min_open == 0
|
# Read the instruction list (one command per line, e.g. "F10" or "R90").
h = open('Day12/numbers.txt', 'r')
# Reading from the file
content = h.readlines()
# Strip the trailing newline from each command.
for x in range(len(content)):
    if content[x].endswith('\n'):
        content[x] = content[x][:-1]
def turn(command, direction):
    """Return the new heading after a rotation command.

    command   : 'R<deg>' (clockwise) or 'L<deg>' (counter-clockwise);
                any other prefix leaves the heading unchanged.
    direction : current heading in degrees.
    """
    amount = int(command[1:])
    if command[0] == 'R':
        direction = (direction - amount) % 360
    elif command[0] == 'L':
        direction = (direction + amount) % 360
    return direction
def move(command, direction):
    """Return the [dx, dy] displacement for one movement command.

    'N'/'S'/'E'/'W' move in the compass direction regardless of heading;
    any other command (forward) moves along the current heading, where
    0 = +x (east), 90 = +y (north), 180 = -x, 270 = -y.
    """
    action = command[0]
    amount = int(command[1:])
    delta = [0, 0]
    if action == 'N':
        delta[1] = amount
    elif action == 'S':
        delta[1] = -amount
    elif action == 'E':
        delta[0] = amount
    elif action == 'W':
        delta[0] = -amount
    else:
        # Forward: translate the heading into a compass displacement.
        if direction == 90:
            delta[1] = amount
        elif direction == 270:
            delta[1] = -amount
        elif direction == 0:
            delta[0] = amount
        elif direction == 180:
            delta[0] = -amount
    return delta
# Start facing east (0 degrees) at the origin.
direction = 0
# NOTE(review): 'degree' is never used below.
degree = 0
position = []
position.append(0)
position.append(0)
# Apply each command: rotations change the heading, everything else moves.
for x in range(len(content)):
    if str(content[x][0]) == 'R' or str(content[x][0]) == 'L' :
        direction = turn(content[x], direction)
    else:
        movingpos = move(content[x], direction)
        position[0] = position[0] + movingpos[0]
        position[1] = position[1] + movingpos[1]
print(position)
# NOTE(review): this prints x + y; if the Manhattan distance is intended
# (as in AoC day 12), abs() is missing -- TODO confirm.
print(int(position[0]) + int(position[1]))
|
#Setup
import praw, re, csv, random
#Validate Reddit Access
# Authenticated Reddit client for the bot account.
# SECURITY(review): client secret and password are hardcoded in source --
# move them to environment variables or a config file outside VCS.
reddit = praw.Reddit(client_id='Dn_ef002ikq0dw',
                     client_secret='B_8gGLkYtz6aDmZ4tkP5Dj3BFIo',
                     password='zzzzzz',
                     user_agent='pix3lbot_scrape by /u/pix3lbot',
                     username='pix3lbot')
# All commands below operate on this one subreddit.
subreddit = reddit.subreddit('pix3lspace')
# Used both as the input() prompt and as a comment separator suffix.
space = "\n"
sep = "----------"
#Download
def download(channel):
    # Return the top-level comments of the first hot submission whose
    # title matches `channel` (case-insensitive regex), as a dict keyed
    # by 1-based position, each value suffixed with a newline.
    # NOTE(review): returns None when no title matches -- callers that
    # iterate the result will crash in that case; confirm intended.
    for submission in subreddit.hot(limit=1000):
        if re.search(channel, submission.title, re.IGNORECASE):
            foreigncomments = {}
            counter = 0
            for top_level_comment in submission.comments:
                comment = top_level_comment.body
                counter = counter + 1
                foreigncomments[counter] = comment + space
            return foreigncomments
#Sync
def sync(collect,channel):
    # Append all remote comments for `channel` to localcopy.txt, then
    # compare the local line count with the number written; when they
    # differ, append (and echo) the comment at index `collect`.
    # NOTE(review): the `collect` parameter is immediately overwritten, so
    # the incoming value is never used -- confirm intent.
    foreigncomments = download(channel)
    collect = 0
    for key in foreigncomments:
        with open("localcopy.txt", "a") as fp:
            fp.write(foreigncomments[key])
        collect = collect + 1
    # Count the lines now present in the local copy.
    with open("localcopy.txt", "r") as fp:
        counter = 0
        for row in fp:
            counter = counter + 1
    if collect == counter:
        return
    else:
        # foreigncomments is keyed 1..len, so foreigncomments[collect]
        # is the last comment fetched in this pass.
        with open("localcopy.txt", "a") as fp:
            fp.write(foreigncomments[collect])
            print((foreigncomments[collect]))
#Perform
def perform(collect,command,prechannel):
    """Dispatch one user command.

    "1"  : prompt for a channel, sync it, then keep re-syncing via run().
    "1n" : re-sync the previously chosen channel (used for the refresh
           loop driven by run()).
    "2"  : prompt for a channel; the branch looks unfinished -- the value
           is read but never used afterwards.
    anything else: return so run() prompts again.
    """
    if command == "1":
        channel = input(space)
        sync(collect,channel)
        run(collect,"1n",channel)
    elif command == "2":
        # Fixed: this line used '==' (a no-op comparison whose result was
        # discarded) where an assignment was clearly intended.  The value
        # is still unused, suggesting this command was never finished.
        channel = input(space)
    elif command == "1n":
        sync(collect,prechannel)
        run(collect,"1n",prechannel)
    else:
        return
#Run
def run(collect,command,channel):
    # Interactive driver: "1n" keeps refreshing the current channel;
    # anything else prompts the user for the next command.
    # NOTE(review): run() and perform() recurse into each other without a
    # base case, so a long session will hit Python's recursion limit.
    if command == "1n":
        perform(collect,command,channel)
    else:
        command = input(space)
        perform(collect,command,0)
    collect = collect + 1
    run(collect,"",channel)
# Truncate the local cache, then enter the interactive loop.
with open("localcopy.txt", "w") as fp:
    fp.write("")
run(0,"",0)
|
# -*- coding: utf-8 -*-
class Solution:
    def threeSum(self, nums):
        """Return all unique triplets [a, b, c] from nums with a+b+c == 0.

        Sorts nums in place (as the original did), then for every candidate
        middle element walks two pointers inward from both ends; a set
        de-duplicates repeated triplets.
        """
        nums.sort()
        found = set()
        for mid, _ in enumerate(nums):
            lo, hi = 0, len(nums) - 1
            while lo < mid and hi > mid:
                total = nums[lo] + nums[mid] + nums[hi]
                if total == 0:
                    found.add((nums[lo], nums[mid], nums[hi]))
                    lo += 1
                    hi -= 1
                elif total < 0:
                    lo += 1
                else:
                    hi -= 1
        return [list(triplet) for triplet in found]
# Smoke tests executed when this file is run as a script; order-insensitive
# comparison because threeSum's set iteration order is unspecified.
if __name__ == "__main__":
    solution = Solution()
    nums = [-1, 0, 1, 2, -1, -4]
    expected = [
        [-1, 0, 1],
        [-1, -1, 2],
    ]
    result = solution.threeSum(nums)
    assert sorted(expected) == sorted(result)
    nums = [3, 0, -2, -1, 1, 2]
    expected = [
        [-2, -1, 3],
        [-2, 0, 2],
        [-1, 0, 1],
    ]
    result = solution.threeSum(nums)
    assert sorted(expected) == sorted(result)
|
def match(usefulness, months):
    """Return "Match!" when the husband's total usefulness meets the
    woman's needs, which start at 100 and decay by 15% per month;
    otherwise return "No match!".
    """
    # (1 - 0.15) kept verbatim so the float result matches the original.
    needs = 100 * (1 - 0.15) ** months
    if sum(usefulness) >= needs:
        return "Match!"
    return "No match!"
'''
It is 2050 and romance has long gone, relationships exist solely for practicality.
MatchMyHusband is a website that matches busy working women with perfect house husbands.
You have been employed by MatchMyHusband to write a function that determines who matches!!
The rules are... a match occurs providing the husband's "usefulness" rating is greater
than or equal to the woman's "needs".
The husband's "usefulness" is the SUM of his cooking, cleaning and childcare abilities
and takes the form of an array .
usefulness example --> [15, 26, 19] (15 + 26 + 19) = 60
Every woman that signs up, begins with a "needs" rating of 100. However, it's realised
that the longer women wait for their husbands, the more dissatisfied they become with our
service. They also become less picky, therefore their needs are subject to exponential
decay of 15% per month. https://en.wikipedia.org/wiki/Exponential_decay
Given the number of months since sign up, write a function that returns "Match!" if the
husband is useful enough, or "No match!" if he's not.
'''
|
from numpy.core.umath import sign
from numpy.ma import exp
import numpy
from pylab import *
from scipy import linalg
from PIL import Image
class Camera(object):
    """A pin-hole camera described by the 3x4 projection matrix P = K[R|t]."""

    def __init__(self, P):
        """Store P; calibration/rotation/translation/center start unset."""
        self.P = P
        self.K = None  # calibration matrix
        self.R = None  # rotation
        self.t = None  # translation
        self.c = None  # camera center

    def project(self, X):
        """Project homogeneous points X (4*n array), normalized so the
        third coordinate of every column becomes 1."""
        projected = dot(self.P, X)
        for row in range(3):
            projected[row] /= projected[2]
        return projected

    def factor(self):
        """Factorize the camera matrix into K, R, t with P = K[R|t]."""
        # RQ-factor the leftmost 3*3 part of P.
        K, R = linalg.rq(self.P[:, :3])

        # RQ is only unique up to sign: force K's diagonal positive while
        # keeping the product (and hence P) unchanged.
        T = diag(sign(diag(K)))
        if linalg.det(T) < 0:
            T[1, 1] *= -1

        self.K = dot(K, T)
        self.R = dot(T, R)  # T is its own inverse
        self.t = dot(linalg.inv(self.K), self.P[:, 3])
        return self.K, self.R, self.t

    def center(self):
        """Compute (and cache) the camera center c = -R^T t."""
        if self.c is None:
            self.factor()  # make sure R and t are available
            self.c = -dot(self.R.T, self.t)
        return self.c
def rotation_matrix(a):
    """Return a 4x4 homogeneous rotation matrix about the axis vector a.

    The rotation angle in radians is the magnitude of a; the 3x3 part is
    the matrix exponential of the skew-symmetric cross-product matrix.
    """
    skew = [[0, -a[2], a[1]],
            [a[2], 0, -a[0]],
            [-a[1], a[0], 0]]
    R = eye(4)
    R[:3, :3] = linalg.expm(skew)
    return R
def my_calibration(sz):
    """Build an approximate calibration matrix K for an image of size sz.

    sz : (rows, cols) of the image.

    Focal lengths are scaled from reference values measured on a
    2592x1936 image (fx=2555, fy=2586); the principal point is assumed to
    sit at the image center.
    """
    row, col = sz
    K = diag([2555 * col / 2592,  # fx, scaled to this image width
              2586 * row / 1936,  # fy, scaled to this image height
              1])
    # Optical axis assumed to intersect the image at its center.
    K[0, 2] = 0.5 * col
    K[1, 2] = 0.5 * row
    return K
def example_project_3d():
    # Demo: project the 'house.p3d' point cloud with a simple camera,
    # then repeatedly rotate the camera and re-project.
    # NOTE(review): requires 'house.p3d' in the working directory and an
    # interactive matplotlib backend (figure/plot/show come from pylab).
    # load points
    points = loadtxt('house.p3d').T
    # Append a row of ones -> homogeneous 4*n coordinates.
    points = vstack((points, ones(points.shape[1])))
    # setup camera: identity orientation, translated 10 units along -z
    P = hstack((eye(3), array([[0], [0], [-10]])))
    cam = Camera(P)
    x = cam.project(points)
    # plot projection
    figure()
    plot(x[0], x[1], 'k.')
    # create transformation: small random rotation axis
    r = 0.05 * numpy.random.random(3)
    rot = rotation_matrix(r)
    # rotate camera incrementally and project each step into one figure
    figure()
    for t in range(20):
        cam.P = dot(cam.P, rot)
        x = cam.project(points)
        plot(x[0], x[1], 'k.')
    show()
def cube_points(c, wid):
    """Return a 3*17 array of points outlining a cube for plotting.

    c : cube center (x, y, z); wid : half side length.

    The first five points trace the bottom square (closed), the next five
    the top square (closed), and the remaining seven walk the vertical
    sides (some edges repeated so one plot() call can draw the cube).
    """
    x, y, z = c[0], c[1], c[2]
    lo, hi = -wid, wid
    bottom = [[x + lo, y + lo, z + lo],
              [x + lo, y + hi, z + lo],
              [x + hi, y + hi, z + lo],
              [x + hi, y + lo, z + lo],
              [x + lo, y + lo, z + lo]]  # repeat first point to close the square
    top = [[x + lo, y + lo, z + hi],
           [x + lo, y + hi, z + hi],
           [x + hi, y + hi, z + hi],
           [x + hi, y + lo, z + hi],
           [x + lo, y + lo, z + hi]]  # repeat first point to close the square
    sides = [[x + lo, y + lo, z + hi],
             [x + lo, y + hi, z + hi],
             [x + lo, y + hi, z + lo],
             [x + hi, y + hi, z + lo],
             [x + hi, y + hi, z + hi],
             [x + hi, y + lo, z + hi],
             [x + hi, y + lo, z + lo]]
    return array(bottom + top + sides).T
def example_pose_estimation():
    # Demo: estimate the pose of a planar object (a book) from two images
    # and overlay a projected cube on the perspective view.
    # NOTE(review): depends on local project modules (sift, homography),
    # the two JPG images, and an external SIFT binary -- not runnable
    # standalone.
    from n2_image_to_image_mappings import homography
    from n1_local_image_descriptors import sift
    # compute SIFT features for both images
    sift.process_image('book_frontal.JPG', 'im0.sift')
    l0, d0 = sift.read_features_from_file('im0.sift')
    sift.process_image('book_perspective.JPG', 'im1.sift')
    l1, d1 = sift.read_features_from_file('im1.sift')
    # match features and estimate homography (RANSAC rejects outliers)
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()
    H = homography.H_from_ransac(fp, tp, model)[0]
    # camera calibration for a 300x400 image
    K = my_calibration((300, 400))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube_points([0, 0, 0.1], 0.1)
    # project bottom square in first image (camera looking at the plane)
    cam1 = Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))
    # compute second camera matrix from cam1 and H
    cam2 = Camera(dot(H, cam1.P))
    # Rebuild a proper rotation for cam2: the third column is replaced by
    # the cross product of the first two before re-applying K.
    A = dot(linalg.inv(K), cam2.P[:, :3])
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)
    # project the full cube with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    im0 = array(Image.open('book_frontal.JPG'))
    im1 = array(Image.open('book_perspective.JPG'))
    # 2D projection of bottom square
    figure()
    imshow(im0)
    plot(box_cam1[0, :], box_cam1[1, :], linewidth=3)
    # 3D cube overlaid on the perspective image
    figure()
    imshow(im1)
    plot(box_cam2[0, :], box_cam2[1, :], linewidth=3)
    show()
# import pickle
#
# with open('ar_camera.pkl', 'bw') as f:
# pickle.dump(K, f)
# pickle.dump(dot(linalg.inv(K), cam2.P), f) # Rt
# example_project_3d()
# example_pose_estimation()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def stride_repeat_0(a, stride, repeat):
    """Split *a* into consecutive chunks of *stride* elements and emit each
    chunk *repeat* times in order.

    e.g. stride_repeat_0([0,1,2,3], 2, 3) -> [0,1, 0,1, 0,1, 2,3, 2,3, 2,3]
    Elements past the last full chunk are dropped.
    """
    o = []
    # BUG FIX: `/` yields a float on Python 3, which breaks range(); the
    # chunk count needs integer division.
    it = len(a) // stride
    for item in range(0, it):
        for r in range(0, repeat):
            for offset in range(0, stride):
                j = item * stride + offset
                o.append(a[j])
    return o
def stride_repeat_1(a, stride, repeat):
    """Flat-loop variant of stride_repeat_0: same output, single loop.

    The source index for flat position k is
    stride*(k // (stride*repeat)) + (k % stride).
    """
    o = []
    sr = stride * repeat
    # BUG FIX: both divisions below were `/`, which produce floats on
    # Python 3 (breaking range() and the index expression); use `//`.
    it = len(a) // stride
    n = sr * it
    for k in range(0, n):
        j = stride * (k // sr) + (k % stride)
        o.append(a[j])
    return o
def repeat_0(a, repeat):
    """Repeat each element of *a* `repeat` times, preserving order.

    e.g. repeat_0([0,1,2,3], 2) -> [0,0,1,1,2,2,3,3]
    """
    return [item for item in a for _ in range(repeat)]
def repeat_1(a, repeat):
    """Unnest the repeat loop: same output as repeat_0 from a single loop.

    Flat position k maps back to source index k // repeat.
    """
    o = []
    n = len(a) * repeat
    for k in range(0, n):
        # BUG FIX: was `a[_/repeat]` — float index on Python 3; use `//`.
        o.append(a[k // repeat])
    return o
def stride_0(a, stride, offset):
    """Return [stride * v + offset] for the first len(a)//stride VALUES of *a*.

    NOTE(review): this maps over element values (stride*a[k] + offset), which
    only equals "take every stride-th element starting at offset" when
    a[k] == k, as in the module self-test — confirm intended semantics.
    """
    o = []
    # BUG FIX: was `len(a)/stride` — float on Python 3, breaking range().
    n = len(a) // stride
    for k in range(0, n):
        o.append(stride * a[k] + offset)
    return o
if __name__ == '__main__':
    # Smoke tests for the stride/repeat helpers above.
    seq = [0, 1, 2, 3]
    expected_stride_even = [0, 2]
    expected_stride_odd = [1, 3]
    expected_sr23 = [0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3]
    expected_r2 = [0, 0, 1, 1, 2, 2, 3, 3]
    assert stride_repeat_0(seq, 2, 3) == expected_sr23
    assert stride_repeat_1(seq, 2, 3) == expected_sr23
    assert repeat_0(seq, 2) == expected_r2
    assert repeat_1(seq, 2) == expected_r2
    assert stride_0(seq, 2, 0) == expected_stride_even
    assert stride_0(seq, 2, 1) == expected_stride_odd
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 11:19:37 2021
@author: anusk
"""
import cv2
import numpy as np
import scipy.interpolate as spi
from matplotlib import pyplot as plt
def EBMA(targetFrame, anchorFrame, blocksize):
    """Exhaustive Block Matching Algorithm (integer-pel full search).

    For every blocksize x blocksize block of *anchorFrame*, search a +/-16
    pixel window in *targetFrame* for the minimum sum-of-absolute-differences
    match and record the resulting motion vector.

    Returns (predicted frame as uint8, [ox, oy] block origins, [dx, dy] MVs).

    NOTE(review): indentation of this function was reconstructed from a
    whitespace-stripped source; as placed here, `predictFrame` is filled from
    the LAST candidate tested rather than the best one — that line probably
    belongs under the `if temp_error < error:` branch.  Confirm against the
    original file.
    """
    accuracy = 1  # integer-pel search (no sub-pel refinement)
    p =16  # half-size of the search window in pixels
    frameH, frameW = anchorFrame.shape
    print(anchorFrame.shape)
    predictFrame = np.zeros(anchorFrame.shape)
    k=0  # running block index into the motion-vector arrays
    # one entry per block; assumes dims divisible by blocksize — TODO confirm
    dx =np.zeros(int(frameH*frameW/blocksize**2))
    dy=np.zeros(int(frameH*frameW/blocksize**2))
    ox = np.zeros(int(frameH*frameW/blocksize**2))
    oy =np.zeros(int(frameH*frameW/blocksize**2))
    rangestart = [0,0]
    rangeEnd =[0,0]
    for n in range(0, frameH, blocksize):
        # clamp the vertical search window to the frame
        rangestart[0] = n*accuracy -p*accuracy
        rangeEnd[0] = n*accuracy + blocksize*accuracy + p*accuracy
        if rangestart[0] < 0:
            rangestart[0] =0
        if rangeEnd[0]> frameH*accuracy:
            rangeEnd[0] = frameH*accuracy
        for m in range(0, frameW, blocksize):
            # clamp the horizontal search window to the frame
            rangestart[1] = m*accuracy -p*accuracy
            rangeEnd[1] = m*accuracy + blocksize*accuracy + p*accuracy
            if rangestart[1] < 0:
                rangestart[1] =0
            if rangeEnd[1]> frameW*accuracy:
                rangeEnd[1] = frameW*accuracy
            """
            EBMA ALGORITHM
            """
            anchorblock = anchorFrame[n:n+blocksize, m:m+blocksize]
            mv_x = 0
            mv_y = 0
            error = 255*blocksize*blocksize*100  # larger than any possible SAD
            for x in range(rangestart[1], rangeEnd[1]-blocksize):
                for y in range(rangestart[0], rangeEnd[0]-blocksize):
                    targetblock = targetFrame[y:y+blocksize, x:x+blocksize]
                    anchorblock = np.float64(anchorblock)
                    targetblock = np.float64(targetblock)
                    # NOTE(review): casting |difference| to uint8 wraps values
                    # above 255 before summing — confirm this is intended.
                    temp_error = np.sum(np.uint8(np.absolute(anchorblock -targetblock)))
                    if temp_error < error:
                        error = temp_error
                        mv_x = x/accuracy-m
                        mv_y = y/accuracy-n
            predictFrame[n:n+blocksize, m:m+blocksize] = targetblock
            dx[k]= mv_x
            dy[k]= mv_y
            ox[k] = m
            oy[k] = n
            k = k + 1
    mv_d = [dx, dy]
    mv_o = [ox, oy]
    return np.uint8(predictFrame), mv_o, mv_d
def HBMA(targetFrame, anchorFrame, blocksize,L):
    """Hierarchical Block Matching Algorithm (coarse-to-fine motion estimation).

    A pyramid is built by repeated 2x downsampling; an exhaustive block search
    runs at the coarsest level, then the motion vectors are doubled and
    refined at each finer level (the level selection below expects L == 3).

    Returns [predicted frame (uint8), [ox, oy] block origins, [dx, dy] MVs].

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source.  Two suspicious spots to confirm against the original:
    (1) in the coarsest-level pass `error` is never reset per block, so later
        blocks may keep stale motion vectors;
    (2) at refinement levels `predictFrame` is filled from the LAST candidate
        tested, not the best one — that line probably belongs under the
        `if temp_error<error:` branch.
    """
    anchorFrame = anchorFrame.astype('uint16')
    targetFrame = targetFrame.astype('uint16')
    predictFrame = np.zeros(anchorFrame.shape)
    accuracy = 1  # integer-pel search
    p =16
    frameH, frameW = anchorFrame.shape
    accuracy = 1
    # full-resolution search window half-sizes (scaled to the coarsest level below)
    rangs = np.array([-32,-32])
    rang6= np.array([32,32])
    m=0  # running block index
    factor=2**(L-1)  # total downsampling factor of the coarsest level
    # tiny epsilon — presumably to force float division before int(); TODO confirm
    e = 0.0000000000000000000001
    #initial motion vectors
    mv_x = 0
    mv_y = 0
    dx =[]
    dy=[]
    ox = np.zeros(int(frameH*frameW/blocksize**2))
    oy=np.zeros(int(frameH*frameW/blocksize**2))
    error = 255*blocksize*blocksize*100  # larger than any possible SAD
    #Upownsample
    # 2x bilinear upsampling of the anchor frame (computed but unused below)
    upanchorframe = np.zeros([frameH*2,frameW*2], dtype = np.uint16)
    upanchorframe[0:(frameH*2-1):2, 0:(frameW*2-1):2] = anchorFrame
    upanchorframe[0:(frameH*2-1):2, 1:(frameW*2-2):2] = (anchorFrame[:,0:frameW-1]+anchorFrame[:,1:frameW])/2
    upanchorframe[1:(frameH*2-2):2, 0:(frameW*2-1):2] = (anchorFrame[0:frameH-1, :]+anchorFrame[1:frameH, :])/2
    upanchorframe[1:(frameH*2-2):2, 1:(frameW*2-2):2] = (anchorFrame[0:frameH-1,0:frameW-1]+ anchorFrame[0:frameH-1, 1:frameW]+anchorFrame[1:frameH, 0:frameW-1]+anchorFrame[1:frameH,1:frameW])/4
    #Downsample
    # level 1 = full resolution, level 2 = half, level 3 = quarter (nearest-neighbour)
    anchorDown1 = np.copy(anchorFrame)
    targetDown1 = np.copy(targetFrame)
    targetDown2 = np.zeros([int(frameH/2),int(frameW/2)], dtype = np.uint16)
    targetDown2[0:int(frameH/2),0:int(frameW/2)] = targetFrame[0:frameH:2,0:frameW:2]
    targetDown3 = np.zeros([int(frameH/4),int(frameW/4)], dtype = np.uint16)
    targetDown3[0:int(frameH/4),0:int(frameW/4)] = targetDown2[0:int(frameH/2):2,0:int(frameW/2):2]
    anchorDown2 = np.zeros([int(frameH/2),int(frameW/2)], dtype = np.uint16)
    anchorDown2[0:int(frameH/2),0:int(frameW/2)] = anchorFrame[0:frameH:2,0:frameW:2]
    anchorDown3 = np.zeros([int(frameH/4),int(frameW/4)], dtype = np.uint16)
    anchorDown3[0:int(frameH/4),0:int(frameW/4)] = anchorDown2[0:int(frameH/2):2,0:int(frameW/2):2]
    predictFrame = np.copy(anchorFrame)
    #Search fields range for each level
    rangs = rangs/(factor+e)
    rang6 =rang6/(factor+e)
    frameH = int(frameH/(factor+e))
    frameW = int(frameW/(factor+e))
    rangestart = [0,0]
    rangeEnd =[0,0]
    # ---- coarsest-level exhaustive search ----
    for i in range(0, frameH-blocksize+1, blocksize):
        rangestart[0] = int(i + rangs[0])
        rangeEnd[0] = int(i + blocksize + rang6[0]) #-1
        if rangestart[0] < 0:
            rangestart[0] =0
        if rangeEnd[0]> frameH:
            rangeEnd[0] = frameH
        for j in range(0, frameW-blocksize+1, blocksize):
            rangestart[1] = int(j + rangs[1])
            rangeEnd[1] = int(j + blocksize + rang6[1]) #-1
            if rangestart[1] < 0:
                rangestart[1] =0
            if rangeEnd[1]> frameW*accuracy:
                rangeEnd[1] = int(frameW*accuracy)
            tmpt = np.zeros(targetDown3.shape, dtype = np.int16)
            tmpa = np.zeros(targetDown3.shape, dtype = np.int16)
            tmpt[:,:] = targetDown3[:,:]
            tmpa[:,:] = anchorDown3[:,:]
            #EBMA SCRIPT
            anchorBlock = np.zeros([blocksize,blocksize], np.int16)
            anchorBlock = tmpa[i:i+blocksize, j:j+blocksize]
            for y in range(rangestart[0], rangeEnd[0]-blocksize+1):
                for x in range(rangestart[1], rangeEnd[1]-blocksize+1):
                    downtargetFrame = tmpt[y:y+accuracy*blocksize:accuracy, x:x+accuracy*blocksize:accuracy]
                    #calculate error
                    temp_error = np.sum(np.absolute(anchorBlock -downtargetFrame))
                    if temp_error < error:
                        error = temp_error
                        # grow the MV lists on demand to hold index m
                        while len(dx)<=m:
                            dx.append(0)
                            dy.append(0)
                        mv_x = x/accuracy-j
                        mv_y = y/accuracy-i
            dx[m]= mv_x
            dy[m]= mv_y
            ox[m] =i
            oy[m] =j
            m= m+1
    dy = np.asarray(dy)
    dx = np.asarray(dx)
    # ---- refinement: propagate MVs down the pyramid, level by level ----
    for ii in range(L-1 , 0, -1):
        print(ii)
        dx= dx*2
        dy = dy*2
        frameH = frameH*2
        lineW = np.floor(frameW/blocksize)  # blocks per row at the coarser level
        frameW = frameW*2
        ttt = dy.size -1  # last valid index into the coarser-level MV arrays
        m = 0
        dxx =np.zeros(int(frameH*frameW/blocksize**2))
        dyy=np.zeros(int(frameH*frameW/blocksize**2))
        for i in range(0, frameH-blocksize+1, blocksize):
            baseline = round(((i+1)/2)/blocksize) * lineW
            for j in range(0, frameW-blocksize+1, blocksize):
                # index of the parent block's MV at the coarser level (clamped)
                mindx = int(np.floor(baseline+ round(((j+1)/2)/blocksize)+1))
                if mindx>ttt:
                    mindx = ttt
                # centre the search window on the inherited motion vector
                rangestart[0] = np.int16(i+dy[mindx]+rangs[0])
                rangeEnd[0]= np.int16(i+dy[mindx]+blocksize+rang6[0])
                if rangestart[0] < 0:
                    rangestart[0] =0
                if rangeEnd[0]> frameH:
                    rangeEnd[0] = frameH
                rangestart[1] = np.int16(j + dx[mindx]+rangs[1])
                rangeEnd[1] = np.int16(j + dx[mindx] + blocksize +rang6[1])
                if rangestart[1] < 0:
                    rangestart[1] =0
                if rangeEnd[1]> frameW*accuracy:
                    rangeEnd[1] = int(frameW*accuracy)
                #Level 2
                if ii==2:
                    tmpt=targetDown2[:,:]
                    tmpa = anchorDown2[:,:]
                if ii==1:
                    tmpt=targetDown1[:,:]
                    tmpa = anchorDown1[:,:]
                tmpt = np.int16(tmpt)
                tmpa = np.int16(tmpa)
                anchorBlock = tmpa[i:i+blocksize, j:j+blocksize]
                mv_x =0
                mv_y=0
                error = 255*blocksize*blocksize*100  # reset per block at this level
                for y in range(rangestart[0], rangeEnd[0]-blocksize+1):
                    for x in range(rangestart[1], rangeEnd[1]-blocksize+1):
                        downtargetFrame = tmpt[y:y+accuracy*blocksize:accuracy, x:x+accuracy*blocksize:accuracy]
                        temp_error = np.sum(np.absolute(anchorBlock -downtargetFrame))
                        if temp_error<error:
                            error = temp_error
                            mv_x = x/accuracy-j
                            mv_y = y/accuracy-i
                dxx[m]= mv_x
                dyy[m]= mv_y
                predictFrame[i:i+blocksize, j:j+blocksize] = downtargetFrame
                ox[m] = j
                oy[m] =i
                m = m+1
        dx = dxx
        dy = dyy
    mv_d = [dx,dy]
    mv_o = [ox, oy]
    return [np.uint8(predictFrame), mv_o, mv_d]
def PhaseCorrelation(anchorFrame, targetFrame):
    """Phase-correlation motion estimation on 16x16 blocks with half-pel flags.

    For each interior block, the normalised cross-power spectrum of the padded,
    windowed block pair is inverse-transformed to locate the translation peak;
    a quadratic interpolation around the peak marks half-pel offsets.  Plots
    the motion field and the motion-compensated prediction.

    NOTE(review): the function ends without a return statement and the final
    `rangey` value is unused — the source appears truncated here; confirm
    against the original file.  Indentation was reconstructed from a
    whitespace-stripped source.
    """
    anchorFrame =np.double(anchorFrame)
    targetFrame =np.double(targetFrame)
    frame = np.dstack((anchorFrame, targetFrame))  # stacked pair (unused below)
    dimy = anchorFrame.shape[0]
    dimx = anchorFrame.shape[1]
    blockx = 16
    blocky = 16
    # per-block integer match offsets and half-pel flags
    matchy=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)
    matchx=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)
    halfy=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)
    halfx=np.zeros([int(dimy/blocky),int(dimx/blockx)], np.double)
    #window de fft
    # separable window; with alpha = 0 this reduces to sinc((winv - T/2)/T)
    T = 32
    winv=np.arange(32)
    alpha=0
    a =(winv-(T/2))/T
    b = np.cos(alpha*np.pi*((winv-(T/2))/T))
    c = 1-np.square(2*alpha*(winv-(T/2))/T)
    window= np.array([np.sinc(a*b/c)])
    windowT = window.T
    windowT.T  # no-op expression statement (kept as in the original)
    window = windowT @ window  # outer product -> 2-D window
    for loopi in range(2,int(dimy/blocky)):
        for loopj in range(2,int(dimx/blockx)):
            ybound1 = (loopi-1)*blocky
            ybound2 = loopi*blocky
            xbound1 = (loopj-1)*blockx
            xbound2 = loopj*blockx
            #divide frame into blocks
            # blocks padded by 8 px on every side -> 32x32 support
            previous = anchorFrame[ybound1-8:ybound2+8, xbound1-8:xbound2+8]
            block = targetFrame[ybound1-8:ybound2+8, xbound1-8:xbound2+8]
            B_prev = np.fft.fft2(previous,[blocky*2,blockx*2])
            B_curr = np.fft.fft2(block*window,[blocky*2,blockx*2])
            mul = B_curr*np.conj(B_prev)
            mag = np.abs(mul)
            mag[mag==0] = 1e-31  # avoid division by zero when normalising
            C = mul/mag  # normalised cross-power spectrum
            c=np.fft.fftshift(np.abs(np.fft.ifft2(C)))
            [tempy,tempx] = np.where(c==c.max())  # correlation peak position
            matchy[loopi-1,loopj-1]=tempy[0]-blocky
            matchx[loopi-1,loopj-1]=tempx[0]-blockx
            # quadratic interpolation around the peak to detect half-pel shifts
            if tempy[0]>=1 and tempy[0]+1<=31:
                tt = np.arange(-1,2)
                ppy = np.array([c[tempy[0]-1,tempx[0]],
                                c[tempy[0],tempx[0]],
                                c[tempy[0]+1,tempx[0]]])
                ii=np.arange(-1,1.5,0.5)
                iiy= spi.interp1d(tt,ppy,kind="quadratic", fill_value="extrapolate")(ii)
                # NOTE(review): both branches assign -1; the first presumably
                # should assign +1 (cf. the halfy == 1 case used below) — confirm.
                if iiy[1]>c[tempy[0],tempx[0]]:
                    halfy[loopi-1,loopj-1]=-1
                elif iiy[3]>c[tempy[0],tempx[0]]:
                    halfy[loopi-1,loopj-1]=-1
            if tempx[0]>=1 and tempx[0]+1<31:
                tt = np.arange(-1,2)
                ppx = np.array([c[tempy[0],tempx[0]-1],
                                c[tempy[0],tempx[0]],
                                c[tempy[0],tempx[0]+1]])
                ii=np.arange(-1,1.5,0.5)
                iix= spi.interp1d(tt,ppx,kind="quadratic", fill_value="extrapolate")(ii)
                if iix[1]>c[tempy[0],tempx[0]]:
                    halfx[loopi-1,loopj-1]=-1
                elif iix[3]>c[tempy[0],tempx[0]]:
                    halfx[loopi-1,loopj-1]=-1
    # visualise the estimated motion field
    fig,ax = plt.subplots()
    ax.quiver(matchx,matchy)
    plt.show()
    #MC prediction
    predict = np.zeros([dimy,dimx], np.double)
    for loopi in range(1, int(dimy/blocky)+1):
        for loopj in range(1, int(dimx/blockx)+1):
            ybound1 = (loopi-1)*blocky
            ybound2 = loopi*blocky
            xbound1 = (loopj-1)*blockx
            xbound2 = loopj*blockx
            offy = -matchy[loopi-1,loopj-1]
            offx = -matchx[loopi-1,loopj-1]
            # abs() guards against negative slice indices — TODO confirm intent
            pred = anchorFrame[abs(int(ybound1+offy)):abs(int(ybound2+offy)), abs(int(xbound1+offx)):abs(int(xbound2+offx))]
            if halfy[loopi-1,loopj-1] == 1:
                # average with the one-pixel-shifted block for a half-pel estimate
                average = anchorFrame[abs(int(ybound1+offy))-1:abs(int(ybound2+offy))-1,
                                      abs(int(xbound1+offx)):abs(int(xbound2+offx))]
                pred = 0.5*(pred+average)
            elif halfy[loopi-1,loopj-1] ==-1:
                average = anchorFrame[abs(int(ybound1+offy))+1:abs(int(ybound2+offy))+1,
                                      abs(int(xbound1+offx)):abs(int(xbound2+offx))]
                pred = 0.5*(pred+average)
            predict[ybound1:ybound2,xbound1:xbound2] = pred
    plt.figure()
    plt.imshow(predict,cmap='gray')
    plt.show()
    # half-pel corrected motion field over the interior blocks
    matchyy= matchy +0.5*halfy
    matchxx = matchx +0.5*halfx
    dy = matchyy[1:int(dimy/blocky)-1, 1:int(dimx/blockx)-1]
    dx = matchxx[1:int(dimy/blocky)-1, 1:int(dimx/blockx)-1]
    rangey = np.arange(np.min(dy), np.max(dy)+0.5,.05)
|
'''
# -*- coding: utf-8 -*-
Copyright of DasPy:
Author - Xujun Han (Forschungszentrum Juelich, Germany)
x.han@fz-juelich.de, xujunhan@gmail.com
DasPy was funded by:
1. Forschungszentrum Juelich, Agrosphere (IBG 3), Juelich, Germany
2. Cold and Arid Regions Environmental and Engineering Research Institute, Chinese Academy of Sciences, Lanzhou, PR China
3. Centre for High-Performance Scientific Computing in Terrestrial Systems: HPSC TerrSys, Geoverbund ABC/J, Juelich, Germany
Please include the following references related to DasPy:
1. Han, X., Li, X., He, G., Kumbhar, P., Montzka, C., Kollet, S., Miyoshi, T., Rosolem, R., Zhang, Y., Vereecken, H., and Franssen, H. J. H.:
DasPy 1.0 : the Open Source Multivariate Land Data Assimilation Framework in combination with the Community Land Model 4.5, Geosci. Model Dev. Discuss., 8, 7395-7444, 2015.
2. Han, X., Franssen, H. J. H., Rosolem, R., Jin, R., Li, X., and Vereecken, H.:
Correction of systematic model forcing bias of CLM using assimilation of cosmic-ray Neutrons and land surface temperature: a study in the Heihe Catchment, China, Hydrology and Earth System Sciences, 19, 615-629, 2015a.
3. Han, X., Franssen, H. J. H., Montzka, C., and Vereecken, H.:
Soil moisture and soil properties estimation in the Community Land Model with synthetic brightness temperature observations, Water Resour Res, 50, 6081-6105, 2014a.
4. Han, X., Franssen, H. J. H., Li, X., Zhang, Y. L., Montzka, C., and Vereecken, H.:
Joint Assimilation of Surface Temperature and L-Band Microwave Brightness Temperature in Land Data Assimilation, Vadose Zone J, 12, 0, 2013.
'''
import shutil
def copyLargeFile(src, dest, buffer_size=1024*1024*1024):
    """Copy *src* to *dest* using a large copy buffer.

    Parameters
    ----------
    src, dest : str
        Source and destination file paths.
    buffer_size : int
        Chunk size handed to shutil.copyfileobj.  NOTE: the historical default
        of 1 GiB means the whole chunk is held in memory at once.

    Falls back to shutil.copyfile if the buffered copy fails.
    """
    try:
        # Nested context managers guarantee both handles are closed.
        with open(src, 'rb') as fsrc:
            with open(dest, 'wb') as fdest:
                shutil.copyfileobj(fsrc, fdest, buffer_size)
    except OSError:
        # BUG FIX: was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt); only I/O failures should trigger the fallback.
        shutil.copyfile(src, dest)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 04 15:53:04 2017
11/01/2017
add_oc - changed xtalk dependence from 2*sqrt(i) i
12/01/2017
removed round() in add_oc
improved hparm() to allow bins, range[0] and binwidth to define
thresh set to 0 - threshing only necessary for prior processed area data
@author: Jon
"""
import numpy as np
import matplotlib.pyplot as plt
from utils.draw import draw_gaussian
from scipy.special import factorial
from scipy.signal import find_peaks
from lmfit import Model, Parameters
def draw_gaussian(x, area, centre, sigma):
    """
    Generate a Gaussian distribution sampled on *x* whose discrete integral
    (sum of samples times bin width) equals *area*.

    Parameters
    ----------
    x : array
        Evenly spaced x coordinates.
    area : scalar
        Number of events under the Gaussian.
    centre : scalar
        Centre of the Gaussian.
    sigma : scalar
        Standard deviation.

    Returns
    -------
    ndarray
        The Gaussian samples.
    """
    # Bin width of the (assumed uniform) x grid.
    step = (x[-1] - x[0]) / (len(x) - 1)
    # Peak amplitude so that sum(values) ~= area.
    amplitude = area * step / (sigma * np.sqrt(2.0 * np.pi))
    z = (x - centre) / sigma
    return amplitude * np.exp(-0.5 * z ** 2)
def poisson(mu, arrsz):
    """
    Array of Poisson probabilities for a given mean number per event.

    Parameters
    ----------
    mu : float scalar
        The mean number per event.
    arrsz : int scalar
        The array size to return probabilities.

    Returns
    -------
    ndarray
        Discrete Poisson distribution P(k; mu) for k = 0 .. arrsz-1.
    """
    k = np.arange(arrsz)
    # P(k) = exp(-mu) * mu^k / k!
    return np.exp(-mu) * np.power(mu, k) / factorial(k)
def genpoisson(mu, lmbda, n):
    """
    Array of Generalized Poisson probabilities for a given mean number per
    event and per xtalk event.

    Parameters
    ----------
    mu : float scalar
        The mean number per event.
    lmbda : float scalar
        The mean number per xtalk event.
    n : int scalar
        The array size to return probabilities.

    Returns
    -------
    gpdist : ndarray
        Generalized Poisson distribution GP(k; mu, lmbda) of size 'n'.
        Reduces to the plain Poisson distribution for lmbda == 0.
    """
    k = np.arange(n)
    # BUG FIX: `np.float` was removed in NumPy 1.24 — use the builtin float.
    mu = np.ones_like(k, dtype=float) * mu
    gpdist = mu * np.power(mu + k * lmbda, k - 1) * np.exp(-mu - k * lmbda) / factorial(k)
    return gpdist
# phd fitting function
def sipm_fitfunc(x, xoff, mu, nev, xtalk, v_pe, v_n, v_gain, thresh=0, nz_pe=0):
    """Model of a SiPM pulse-height distribution: a sum of Gaussian photo-electron
    peaks whose areas follow a Generalized Poisson distribution.

    x: pulse-height axis; xoff: pedestal offset; mu: mean pe per event;
    nev: total number of events; xtalk: optical cross-talk probability;
    v_pe: peak spacing (gain); v_n: electronic noise width; v_gain: gain
    spread per pe; thresh: bins below this index are zeroed; nz_pe=1 drops
    the pedestal (0 pe) peak.
    """
    arrsz = max(int(mu * 10), 30)
    # Plain Poisson, kept alongside for normalisation experiments.
    p = poisson(mu, arrsz)
    gp = genpoisson(mu, xtalk, arrsz)
    if nz_pe == 1:
        # Triggered-only data with no pedestal events: suppress the 0 pe peak.
        gp[0] = 0
    # Absolute number of events expected in each pe peak (no renormalisation).
    sipm_dist = gp * nev
    output = np.zeros_like(x)
    for npe, weight in enumerate(sipm_dist):
        # Peak width grows with pe number through the gain spread.
        width = np.sqrt(v_n ** 2 + npe * v_gain ** 2)
        output += draw_gaussian(x, weight, npe * v_pe + xoff, width)
    # Emulate an acquisition threshold on the lowest bins.
    output[0:thresh] = 0
    return output
def sipm_phdfit(x, y, npk, nz_pe=0):
    """
    Fit a SiPM pulse-height distribution with a Generalized Poisson peak model.

    Parameters
    ----------
    x : float array
        Typically the pulse height in mV.
    y : int array
        The number of events per bin.
    npk : int scalar
        The number of distinguishable peaks in the PHD - for initial parameter guess.
    nz_pe : If 0 - include pedestal in PHD
            If 1 - exclude pedestal

    Returns
    -------
    result : lmfit result object
        Including initial guess, best fit, and all fit parameters.
    """
    ymax = y.max()
    # Relax the prominence requirement step by step until npk peaks emerge.
    for step in range(50):
        peaks, props = find_peaks(y, prominence=ymax*(1 - step/50), height=ymax/10)
        if len(peaks) >= npk:
            break
    # Initial parameter estimates from the detected peaks.
    heights = props['peak_heights']
    mu = np.sum(heights*np.arange(nz_pe, npk + nz_pe))/np.sum(heights)
    nev = np.sum(y)
    xtalk = 0.5  # based on CHEC-S devices
    v_pe = np.mean(np.diff(x[peaks]))  # gain: spacing of neighbouring peaks
    xoff = x[peaks[0]] - v_pe*nz_pe
    v_n = v_pe*0.2
    v_gain = v_pe*0.1
    thresh = 0
    model = Model(sipm_fitfunc)
    pars = Parameters()
    # parameter constraints
    # (Name, Value, Vary, Min, Max, Expr, Brute Step)
    pars.add_many(('xoff', xoff, True, -50.0, 50.0, None, None),
                  ('mu', mu, True, 0.01, 50.0, None, None),
                  ('nev', nev, True, 1, 1e8, None, None),
                  ('xtalk', xtalk, True, 0.0, 0.75, None, None),
                  ('v_pe', v_pe, True, 0.0, 50.0, None, None),
                  ('v_n', v_n, True, 0.0, 50.0, None, None),
                  ('v_gain', v_gain, True, 0.0, 50.0, None, None),
                  ('thresh', thresh, False, 0, 500, None, None),
                  ('nz_pe', nz_pe, False, 0, 1, None, None))
    # solve
    return model.fit(y, params=pars, x=x, method='leastsq')
def main():
    """Load a saved PHD, fit it, plot the result, and append the fit
    parameters to a text file (a simple test procedure)."""
    filename = './set2_67.4_C1_peaks.npz'
    npzfile = np.load(filename)
    x, y = npzfile['arr_0'], npzfile['arr_1']
    # input number of distinguishable peaks
    npk = 4
    result = sipm_phdfit(x, y, npk, nz_pe=1)
    # plot measured data against the initial guess and the best fit
    plt.plot(x, y, 'b', linewidth=1, label='raw data')
    plt.plot(x, result.init_fit, 'k-', linewidth=1, label='guess')
    plt.plot(x, result.best_fit, 'r-', linewidth=1, label='best fit')
    plt.legend(loc='best')
    plt.show()
    # Append the fit parameters as one comma-separated line.
    # BUG FIX: the original referenced an undefined `initdir` (NameError) and
    # joined the path with a literal backslash; write to the working directory.
    fields = [filename]
    fields += [result.params[name].value
               for name in ('xoff', 'mu', 'nev', 'xtalk', 'v_pe',
                            'v_n', 'v_gain', 'thresh', 'nz_pe')]
    fields += [result.redchi, np.sum(y), np.sum(result.best_fit)]
    with open('phd_fit_params.txt', 'a') as datafile:
        datafile.write(''.join('{0}, '.format(f) for f in fields))
    # print result
    print(result.fit_report())


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -
from .api import ClientAPI
from .api import TodoAppApiException
from .api import Token
__all__ = ["ClientAPI", "TodoAppApiException", "Token"]
|
### stacked RNN ###
# Character-level language model: a 3-layer stacked LSTM that learns to
# predict the next character of `sentence` (TensorFlow 1.x / tf.contrib API).
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
tf.set_random_seed(777)
tf.reset_default_graph()  # reset the default graph so the script can be re-run
sentence = ("if you want to build a ship, don't drum up people together to "
            "collect wood and don't assign them tasks and work, but rather "
            "teach them to long for the endless immensity of the sea.")
char_set = list(set(sentence))  # character vocabulary
char_dic = {w: i for i, w in enumerate(char_set)}  # char -> index lookup
# Build (input, target) windows: the target is the input shifted by one char.
dataX = []
dataY = []
seq_length = 10  # arbitrary sequence length (how much context each sample sees)
for i in range(0, len(sentence) - seq_length):
    x_str = sentence[i: i + seq_length]
    y_str = sentence[i + 1: i + seq_length + 1]
    dataX.append([char_dic[c] for c in x_str])
    dataY.append([char_dic[c] for c in y_str])
# hyper parameter #
learning_rate = 0.1
batch_size = len(dataX)  # number of training windows
seq_length = 10  # sequence length (memory span)
hidden_size = len(char_dic)  # LSTM output width
num_classes = len(char_dic)  # number of output classes (vocabulary size)
## RNN multicell ##
# BUG FIX: the stack was built as MultiRNNCell([cell] * 3), which reuses ONE
# cell object for every layer (shared weights) and raises a variable-scope
# ValueError on TF >= 1.1.  Each layer needs its own fresh cell instance.
def lstm_cell():
    # One LSTM layer; called once per stacked layer.
    return tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size)
cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(3)], state_is_tuple=True)
# tf building #
X = tf.placeholder(dtype=tf.int64, shape=[None, seq_length])  # only seq_length fixed
Y = tf.placeholder(dtype=tf.int64, shape=[None, seq_length])
X_one_hot = tf.one_hot(X, num_classes)  # x_one_hot
# initial state #
init_state = cell.zero_state(batch_size, dtype=tf.float32)
# run the stacked cells #
output, _state = tf.nn.dynamic_rnn(cell, X_one_hot, initial_state=init_state, dtype=tf.float32)
## reshape for softmax ##
X_for_softmax = tf.reshape(output, shape=[-1, hidden_size])
outputs = tf.contrib.layers.fully_connected(X_for_softmax, num_classes, activation_fn=None)
# output reshape #
output = tf.reshape(outputs, shape=[batch_size, seq_length, hidden_size])
## cost ##
weights = tf.ones([batch_size, seq_length])  # equal weight for every position
seq_loss = tf.contrib.seq2seq.sequence_loss(logits=output, targets=Y, weights=weights)
mean_loss = tf.reduce_mean(seq_loss)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
train = opt.minimize(mean_loss)
# session #
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
    cost_var, _, y_hat = sess.run([mean_loss, train, output],
                                  feed_dict={X: dataX, Y: dataY})
    if i % 100 == 0:
        print(cost_var)
        for j, res in enumerate(y_hat):
            index = np.argmax(res, axis=1)
            print(i, j, ''.join([char_set[t] for t in index]), 1)
# Final pass: stitch the per-window predictions back into one string.
results = sess.run(output, feed_dict={X: dataX})
for j, result in enumerate(results):
    index = np.argmax(result, axis=1)
    if j == 0:
        print(''.join([char_set[t] for t in index]), 1)
    else:
        print(char_set[index[-1]], end='')
sess.close()
|
from os import EX_OSFILE
import sqlite3
db_name = "table_edu.db"
def sql(query, values=(), return_data=False):
    """Execute *query* against the module database (db_name).

    return_data=False: INSERT/UPDATE/DELETE; committed on success, returns None.
    return_data=True : SELECT; returns [keys, rows] where keys is the list of
    column names and rows is cur.fetchall().
    """
    with sqlite3.connect(db_name) as db:
        cur = db.cursor()
        # Enforce foreign-key constraints (SQLite default is OFF).
        # BUG FIX: the pragma was previously issued only on the read path,
        # where it has no effect; writes are the statements that need it.
        cur.execute("pragma foreign_keys = on")
        cur.execute(query, values)
        if return_data:
            keys = [d[0] for d in cur.description]
            return [keys, cur.fetchall()]
def view_table(query):
    """Run a SELECT through sql() and return [keys, rows], or an error string
    on database failure."""
    try:
        # BUG FIX: was a bare `except:` that swallowed every error (including
        # programming mistakes); only database failures are expected here.
        return sql(query, (), True)
    except sqlite3.Error:
        return "Error: SQLite3 Error view()"
def validations(data, condition, optionals=[]):
    """Validate and convert request data against a type specification.

    data: dict of {key: single value or list}.
    condition: {key: expected type}; for int, string values are converted.
    optionals: keys that may be absent (recorded as None).
    Returns a tuple of verified values (nested tuple for list values, None
    for absent-optional or falsy values), or an error string.
    """
    verified_data = ()
    for key in condition:
        try:
            # BUG FIX: the absent-optional check used to run after data[key]
            # was accessed, so a missing optional raised KeyError and returned
            # the generic error instead of None.  Check membership first.
            if key not in data and key in optionals:
                verified_data += (None, )
                continue
            if type(data[key]) == list:
                # Lists are validated element-wise as ints via recursion.
                data_list = list(map(int, data[key]))
                condition_list = dict.fromkeys(range(len(data_list)), int)
                verified_data += (validations(data_list, condition_list), )
                continue
            if not data[key]:
                # Falsy values (empty string, 0, None) count as "not provided".
                verified_data += (None, )
                continue
            # Convert str to int when an int is expected.
            if condition[key] == int:
                data[key] = int(data[key])
            # Check the (possibly converted) value against the expected type.
            if type(data[key]) == condition[key]:
                # Important: appending keeps the values in condition order.
                verified_data += (data[key], )
            else:
                return f"{key} value is incorrect - input type: {type(data[key])}, expected type: {condition[key]}"
        except (KeyError, ValueError, TypeError, IndexError):
            # BUG FIX: was a bare `except:`; catch only lookup/conversion errors.
            return "Not all required information has been entered"
    return verified_data
def add_edit_data(request_type, query, data, condition, optionals=[]):
    """Validate *data* and execute an INSERT (request_type == 1) or UPDATE
    (request_type == 2).

    query: parameterised SQL whose placeholders follow the key order of
           *condition*; for updates the id placeholder must come last (it is
           appended automatically — do not include it in *condition*).
    data: raw request dict; condition: {key: expected type};
    optionals: keys that may be missing or empty.
    Returns a human-readable status string.
    """
    # Check input validation first; a string result is an error message.
    verified_data = validations(data, condition, optionals)
    if type(verified_data) == str:
        return verified_data
    if request_type == 2 and not "id" in data:
        return "The ID has not been entered"
    try:
        if request_type == 2:
            # Check ID validity; the id is bound as the last SQL parameter.
            id = int(data["id"])
            verified_data += (id, )
        # Query
        sql(query, verified_data)
    except (ValueError, TypeError, sqlite3.Error):
        # BUG FIX: was a bare `except:`; catch only conversion and DB errors.
        if request_type == 2:
            return "Not all required information has been entered OR Incorrect ID number"
        return "This name or other data already exists"
    if request_type == 2:
        # BUG FIX: messages read "successfully data has been modified
        # successfully" (duplicated word) and the add path claimed "modified".
        return f"Data has been modified successfully - ID: {data['id']}"
    return "Data has been added successfully"
|
import numpy as np
class Fmeasure:
    """Scores candidate split words by the best achievable F-measure gain."""

    def __init__(self, relnum, beta = 1.0):
        # relnum: total number of relevant documents (recall denominator).
        # beta: recall/precision trade-off of the F_beta measure.
        self.relnum = relnum
        self.beta = beta

    def gain(self, node):
        """Return [(word, gain)] where gain is the best F-measure of either
        side of the split induced by the word, minus the unsplit F-measure."""
        base_f = self._f(node.instances)
        by_id = {inst.id: inst for inst in node.instances}
        gains = []
        for word in node.dictionary:
            # Partition the node's instances on presence of the word.
            matched = [by_id[i] for i in node.index[word] if i in by_id]
            unmatched = [inst for i, inst in by_id.items() if i not in node.index[word]]
            if len(matched) == 0 or len(unmatched) == 0:
                best_f = 0.0
            else:
                best_f = self._max_f([matched, unmatched])
            gains.append((word, best_f - base_f))
        return gains

    def _max_f(self, datas):
        # Best F-measure over the candidate partitions.
        return np.max([self._f(chunk) for chunk in datas])

    def _f(self, data):
        """F_beta measure of *data* against self.relnum relevant documents."""
        if len(data) == 0:
            return 0
        hits = sum(1 for d in data if d.label == 1)
        precision = float(hits) / len(data)
        recall = float(hits) / self.relnum
        if precision == 0 or recall == 0:
            return 0.0
        b2 = self.beta ** 2
        return (1.0 + b2) / ((b2 / recall) + (1.0 / precision))
|
# String indexing demo: s[i] yields the single character at position i.
fruit = "banana"
letter = fruit[1]  # second character, 'a'
print(letter)
x = 3
w = fruit[x - 1]  # third character, 'n'
print(w)
|
from datetime import datetime
from collections import namedtuple
from django.shortcuts import render
from django.views.generic import TemplateView
from django.shortcuts import redirect
from django.db.models import Q
from .models import Room, Booking, Checkin
class AdminView(TemplateView):
    """Static admin landing page; renders the template with default context."""
    template_name = 'admin.html'
class AddBookingView(TemplateView):
    """Form view that creates a Booking, rejecting date ranges that overlap an
    existing booking for the same room."""
    template_name = 'add_booking.html'
    def get(self, request, *args, **kwargs):
        # Empty form: just offer the list of rooms.
        context = {}
        context['rooms'] = Room.objects.all()
        return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        # Build the Booking from POST data and save it only when no
        # overlapping booking exists for the chosen room.
        context = {}
        post = request.POST
        print(post)
        context['rooms'] = Room.objects.all()
        try:
            action = post['action']  # raises KeyError on malformed form posts
            room = Room.objects.get(id=int(post['room']))
            customer = post['customer']
            starting_date = post['starting_date']
            end_date = post['end_date']
            booking = Booking()
            context['booking'] = booking
            booking.room = room
            booking.customer = customer
            booking.start_datetime = datetime.strptime(str(starting_date),'%Y-%m-%d')#starting_date
            booking.end_datetime = datetime.strptime(str(end_date),'%Y-%m-%d')
            booking.last_modified = datetime.now()
            # Overlap query for the same room (excluding this booking):
            #  1) an existing booking starts or ends inside the new range,
            #  2) an existing booking fully contains the new range,
            #  3) an existing booking lies fully inside the new range.
            Qd = Q() # & ~Q(id=booking.id)
            Qd |= Q(Q(Q(start_datetime__range=[starting_date, end_date]) | Q(end_datetime__range=[starting_date, end_date])) & Q(room__id=booking.room.id) & ~Q(id=booking.id))
            Qd |= Q(Q(Q(start_datetime__lte=starting_date) & Q(end_datetime__gte=end_date)) & Q(room__id=booking.room.id) & ~Q(id=booking.id))
            Qd |= Q(Q(Q(start_datetime__gte=starting_date) & Q(end_datetime__lte=end_date)) & Q(room__id=booking.room.id) & ~Q(id=booking.id))
            current_bookings = Booking.objects.filter(Qd)
            objs = current_bookings.count()
            print("\n\n")
            print(objs)
            if(objs==0):
                booking.save()
                context['booking'] = booking
                context['success'] = "The change was applied succesfully!"
            else:
                context['error'] = "Some old bookings overlap! Booking with ID "+str(current_bookings.first().id)
        except Exception as e:
            # Any failure (missing field, bad date, DB error) becomes a form error.
            context['error'] = "One problem occured: "+str(e)
            print(e.__dict__)
        return render(request, self.template_name, context)
class BookingEditView(TemplateView):
    """Edit or delete an existing Booking, re-checking date overlaps on save."""
    template_name = 'edit_booking.html'
    def get(self, request, *args, **kwargs):
        # Show the edit form pre-filled with the selected booking.
        booking_id = kwargs['id']
        booking = Booking.objects.get(id=booking_id)
        context = {}
        context['booking'] = booking
        context['rooms'] = Room.objects.all()
        return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        # Handle 'delete' immediately; otherwise update the booking only when
        # no other booking of the same room overlaps the new date range.
        context = {}
        context['rooms'] = Room.objects.all()
        post = request.POST
        print(post)
        try:
            action = post['action']
            booking_id = kwargs['id']
            if action == 'delete':
                booking = Booking.objects.get(id=booking_id)
                # BUG FIX: was `bookingbooking.delete()` — a NameError that
                # made deletion impossible.
                booking.delete()
                return redirect('bookings')
            else:
                # int() cast for consistency with AddBookingView.
                room = Room.objects.get(id=int(post['room']))
                customer = post['customer']
                starting_date = post['starting_date']
                end_date = post['end_date']
                booking = Booking.objects.get(id=booking_id)
                context['booking'] = booking
                booking.room = room
                booking.customer = customer
                booking.start_datetime = datetime.strptime(str(starting_date),'%Y-%m-%d')
                booking.end_datetime = datetime.strptime(str(end_date),'%Y-%m-%d')
                booking.last_modified = datetime.now()
                # Overlap query for the same room (excluding this booking):
                #  1) an existing booking starts or ends inside the new range,
                #  2) an existing booking fully contains the new range,
                #  3) an existing booking lies fully inside the new range.
                Qd = Q()
                Qd |= Q(Q(Q(start_datetime__range=[starting_date, end_date]) | Q(end_datetime__range=[starting_date, end_date])) & Q(room__id=booking.room.id) & ~Q(id=booking.id))
                Qd |= Q(Q(Q(start_datetime__lte=starting_date) & Q(end_datetime__gte=end_date)) & Q(room__id=booking.room.id) & ~Q(id=booking.id))
                Qd |= Q(Q(Q(start_datetime__gte=starting_date) & Q(end_datetime__lte=end_date)) & Q(room__id=booking.room.id) & ~Q(id=booking.id))
                current_bookings = Booking.objects.filter(Qd)
                objs = current_bookings.count()
                print("\n\n")
                print(objs)
                if(objs==0):
                    booking.save()
                    context['booking'] = booking
                    context['success'] = "The change was applied succesfully!"
                else:
                    # BUG FIX: message said "checkins" but this view handles bookings.
                    context['error'] = "Some old bookings overlap! Booking with ID "+str(current_bookings.first().id)
        except Exception as e:
            context['error'] = "One problem occured: "+str(e)
        return render(request, self.template_name, context)
class AddCheckinView(TemplateView):
    """Form view that creates a Checkin, rejecting date ranges that overlap an
    existing check-in for the same room."""
    template_name = 'add_checkin.html'
    def get(self, request, *args, **kwargs):
        # Empty form: just offer the list of rooms.
        context = {}
        context['rooms'] = Room.objects.all()
        return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        # Build the Checkin from POST data and save it only when no
        # overlapping check-in exists for the chosen room.
        context = {}
        post = request.POST
        print(post)
        context['rooms'] = Room.objects.all()
        try:
            action = post['action']  # raises KeyError on malformed form posts
            room = Room.objects.get(id=int(post['room']))
            customer = post['customer']
            starting_date = post['starting_date']
            end_date = post['end_date']
            checkin = Checkin()
            context['checkin'] = checkin
            checkin.room = room
            checkin.customer = customer
            checkin.start_datetime = datetime.strptime(str(starting_date),'%Y-%m-%d')#starting_date
            checkin.end_datetime = datetime.strptime(str(end_date),'%Y-%m-%d')
            checkin.last_modified = datetime.now()
            # Overlap query for the same room (excluding this check-in):
            #  1) an existing check-in starts or ends inside the new range,
            #  2) an existing check-in fully contains the new range,
            #  3) an existing check-in lies fully inside the new range.
            Qd = Q() # & ~Q(id=checkin.id)
            Qd |= Q(Q(Q(start_datetime__range=[starting_date, end_date]) | Q(end_datetime__range=[starting_date, end_date])) & Q(room__id=checkin.room.id) & ~Q(id=checkin.id))
            Qd |= Q(Q(Q(start_datetime__lte=starting_date) & Q(end_datetime__gte=end_date)) & Q(room__id=checkin.room.id) & ~Q(id=checkin.id))
            Qd |= Q(Q(Q(start_datetime__gte=starting_date) & Q(end_datetime__lte=end_date)) & Q(room__id=checkin.room.id) & ~Q(id=checkin.id))
            current_checkins = Checkin.objects.filter(Qd)
            objs = current_checkins.count()
            print("\n\n")
            print(objs)
            if(objs==0):
                checkin.save()
                context['checkin'] = checkin
                context['success'] = "The change was applied succesfully!"
            else:
                context['error'] = "Some old checkins overlap! Checkin with ID "+str(current_checkins.first().id)
        except Exception as e:
            # Any failure (missing field, bad date, DB error) becomes a form error.
            context['error'] = "One problem occured: "+str(e)
            print(e.__dict__)
        return render(request, self.template_name, context)
class CheckinEditView(TemplateView):
    """Edit page for an existing Checkin (update or delete).

    The checkin is identified by the URL kwarg 'id'.
    """
    template_name = 'edit_checkin.html'

    def get(self, request, *args, **kwargs):
        """Render the edit form pre-filled with the checkin's data."""
        checkin = Checkin.objects.get(id=kwargs['id'])
        context = {'rooms': Room.objects.all(), 'checkin': checkin}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        """Update or delete the checkin.

        POST field 'action' == 'delete' removes it; any other value updates
        'room', 'customer', 'starting_date' and 'end_date' ('YYYY-MM-DD').
        The update is saved only when no other checkin for the same room
        overlaps the requested date range.
        """
        context = {'rooms': Room.objects.all()}
        post = request.POST
        try:
            action = post['action']
            checkin_id = kwargs['id']
            if action == 'delete':
                checkin = Checkin.objects.get(id=checkin_id)
                checkin.delete()
                return redirect('checkins')
            else:
                room = Room.objects.get(id=post['room'])
                customer = post['customer']
                starting_date = post['starting_date']
                end_date = post['end_date']
                checkin = Checkin.objects.get(id=checkin_id)
                context['checkin'] = checkin
                checkin.room = room
                checkin.customer = customer
                checkin.start_datetime = datetime.strptime(str(starting_date), '%Y-%m-%d')
                checkin.end_datetime = datetime.strptime(str(end_date), '%Y-%m-%d')
                checkin.last_modified = datetime.now()
                # Overlap test: another checkin conflicts when one of its
                # endpoints falls inside the requested range, or it fully
                # contains / is contained by the range (same room only; the
                # checkin being edited is excluded).
                Qd = Q()
                Qd |= Q(Q(Q(start_datetime__range=[starting_date, end_date]) | Q(end_datetime__range=[starting_date, end_date])) & Q(room__id=checkin.room.id) & ~Q(id=checkin.id))
                Qd |= Q(Q(Q(start_datetime__lte=starting_date) & Q(end_datetime__gte=end_date)) & Q(room__id=checkin.room.id) & ~Q(id=checkin.id))
                Qd |= Q(Q(Q(start_datetime__gte=starting_date) & Q(end_datetime__lte=end_date)) & Q(room__id=checkin.room.id) & ~Q(id=checkin.id))
                current_checkins = Checkin.objects.filter(Qd)
                if current_checkins.count() == 0:
                    checkin.save()
                    context['checkin'] = checkin
                    context['success'] = "The change was applied successfully!"
                else:
                    context['error'] = "Some old checkins overlap! Checkin with ID " + str(current_checkins.first().id)
        except Exception as e:
            # Broad catch is deliberate: any failure is reported on the page.
            context['error'] = "One problem occurred: " + str(e)
        return render(request, self.template_name, context)
class BookingsListView(TemplateView):
    """Read-only listing of every Booking."""
    template_name = 'bookings_list.html'

    def get(self, request, *args, **kwargs):
        """Render the bookings list template with all bookings."""
        return render(request, self.template_name,
                      {'bookings': Booking.objects.all()})
class CheckinsListView(TemplateView):
    """Read-only listing of every Checkin."""
    template_name = 'checkins_list.html'

    def get(self, request, *args, **kwargs):
        """Render the checkins list template with all checkins."""
        return render(request, self.template_name,
                      {'checkins': Checkin.objects.all()})
class RoomsListView(TemplateView):
    """Read-only listing of every Room."""
    template_name = 'roomslist.html'

    def get(self, request, *args, **kwargs):
        """Render the rooms list template with all rooms."""
        return render(request, self.template_name,
                      {'rooms': Room.objects.all()})
class RoomAddView(TemplateView):
    """Form page for creating a new Room."""
    template_name = 'add_room.html'

    def get(self, request, *args, **kwargs):
        """Render the empty add-room form."""
        return render(request, self.template_name, {})

    def post(self, request, *args, **kwargs):
        """Create a Room from POST fields 'bed_type', 'facilities', 'price'."""
        context = {}
        post = request.POST
        try:
            bed_type = post['bed_type']
            facilities = post['facilities']
            price = post['price']
            room = Room()
            room.bed_type = bed_type
            room.facilities = facilities
            room.price = price
            room.last_modified = datetime.now()
            room.save()
            context['success'] = "The change was applied successfully!"
        except Exception as e:
            # Broad catch is deliberate: any failure is reported on the page.
            context['error'] = "One problem occurred: " + str(e)
        return render(request, self.template_name, context)
class RoomDetailView(TemplateView):
    """Detail page for a single Room, with update and delete handling.

    The room is identified by the URL kwarg 'id'.
    """
    template_name = 'room.html'

    def get(self, request, *args, **kwargs):
        """Render the detail page for the requested room."""
        room = Room.objects.get(id=kwargs['id'])
        return render(request, self.template_name, {'room': room})

    def post(self, request, *args, **kwargs):
        """Update or delete the room.

        POST field 'action' == 'delete' removes it; any other value updates
        'bed_type', 'facilities' and 'price'.
        """
        context = {}
        post = request.POST
        try:
            action = post['action']
            room_id = kwargs['id']
            if action == 'delete':
                room = Room.objects.get(id=room_id)
                room.delete()
                return redirect('rooms')
            else:
                bed_type = post['bed_type']
                facilities = post['facilities']
                price = post['price']
                room = Room.objects.get(id=room_id)
                room.bed_type = bed_type
                room.facilities = facilities
                room.price = price
                room.last_modified = datetime.now()
                room.save()
                context['room'] = room
                context['success'] = "The change was applied successfully!"
        except Exception as e:
            # Broad catch is deliberate: any failure is reported on the page.
            context['error'] = "One problem occurred: " + str(e)
        return render(request, self.template_name, context)
|
"""boloIndya URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from audio.views import main, songs, songpost, deleteSong, uploadSong, UpdateSong, filterSong, UpdateMood, UpdateLanguage, UpdateAlbum, UpdateVocalist, UpdateHashtag
from django.conf.urls.static import static
from django.conf import settings
# URL routing table for the audio project: home/upload pages, per-song
# detail/update/delete endpoints (keyed by integer song id), search, and the
# Django admin. Order matters: more specific 'songs/...' prefixes are listed
# before the bare 'songs/' listing route.
urlpatterns = [
    path('', main, name='main'),
    path('upload/', uploadSong, name='uploadSong'),
    path('audio/', include('audio.urls')),
    path('songs/<int:id>', songpost, name='songpost'),
    path('songs/update-album/<int:id>', UpdateAlbum, name="UpdateAlbum"),
    path('songs/update-hashtag/<int:id>', UpdateHashtag, name="UpdateHashtag"),
    path('songs/update-mood/<int:id>', UpdateMood, name="UpdateMood"),
    path('songs/update-title/<int:id>', UpdateSong, name="UpdateSong"),
    path('songs/delete/<int:id>', deleteSong, name="deleteSong"),
    path('songs/update-language/<int:id>',
         UpdateLanguage, name="UpdateLanguage"),
    path('songs/update-vocalist/<int:id>',
         UpdateVocalist, name="UpdateVocalist"),
    path('songs/', songs, name='songs'),
    path('search/', filterSong, name="filterSong"),
    path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media files
|
from .serializer import resident, securityGuard
def jwt_response_payload_handler(token, user=None, request=None):
    """Shape the JWT login response for a resident user.

    Returns a dict with the issued token and the serialized user record.
    """
    serialized_user = resident.UserSerializer(user).data
    return {'token': token, 'user': serialized_user}
def sjwt_response_payload_handler(token, user=None, request=None):
    """Shape the JWT login response for a security-guard user.

    Returns a dict with the issued token and the serialized guard record.
    """
    serialized_guard = securityGuard.SecuritySerializer(user).data
    return {'token': token, 'user': serialized_guard}
# -*- coding: utf-8 -*-
#폴더 내 각각의 .json파일에 대괄호로 묶기.
import os
import glob
# Wrap the content of every .json file in the named folder in square brackets
# so each file holds a single JSON array, dropping any trailing comma that
# would make the array invalid.
print("이름입력하시오!:"+'\n')
names = input()
path = '/Users/junha_lee/Documents/Junha/School/Projects/SentimentName/sentiment/tmp_twitter/' + names
extension = 'json'
os.chdir(path)
result = glob.glob('*.{}'.format(extension))
for file_name in result:
    # Bug fix: the original opened the file for reading and never closed it;
    # 'with' guarantees both handles are closed even on error.
    with open(file_name, 'r') as read_handle:
        content = read_handle.read()
    # "[...]" wraps the records; ",]" appears when the file ended with a
    # trailing comma, so collapse it to a plain "]".
    wrapped = ("[" + content + "]").replace(",]", "]")
    with open(file_name, 'w') as write_handle:
        write_handle.write(wrapped)
|
# Seed inventory: maps a manufacturer to one model.
cars = {
    'Ford': 'Mustang',
    'Nissan': 'Sunny',
    'Toyota': 'Corolla',
    'Bugatti': 'Veyron',
}


def add_car(make: str, model: str):
    """Register (or overwrite) the model stored for the given make."""
    cars[make] = model
# Interactive menu: loops until the user chooses 0 (quit).
while True:
    choice = input(
        """
Here are some cars.
To display the cars, select 1.
To add a car, select 2.
To quit, select 0\n
""")
    if choice == '0':
        break
    elif choice == '1':
        # Same output as printing key + ', ' + cars[key] for every key.
        for make, model in cars.items():
            print(make + ', ' + model)
    elif choice == '2':
        make = input('Type the make of your car: ')
        model = input('Type the model of your car: ')
        add_car(make, model)
    else:
        print('Invalid input detected. Please type 0, 1, or 2')
"""
SceneManager is a collection of classes and functions written in Python for use with Pygame.
(This module was originally distributed under the name "pyghelpers", pronounced "pig helpers".)
Developed by Irv Kalb - Irv at furrypants.com
Full documentation at: https://SceneManager.readthedocs.io/en/latest/
SceneManager contains the following classes:
- Timer - a simple timer
- CountUpTimer - a timer that counts up from zero
- CountDownTimer - a timer that counts down from a starting point
- SceneMgr - allows for a Pygame program with multiple scenes
- Scene - base class for a scene managed by the SceneMgr
SceneManager also contains the following functions:
- textYesNoDialog - a text-based dialog box allowing for one or two answers (yes/no, or just OK)
- customYesNoDialog - a dialog box with custom graphics (yes/no, or just OK)
- textAnswerDialog - a text-based dialog box allowing the user to enter a string
- customAnswerDialog - a dialog box with custom graphics that allows the user to enter a string
- fileExists - find out if a file at a given path exists
- readFile - reads from a (text) file
- writeFile - writes to a (text) file
- openFileForWriting - opens a (text) file for writing line by line
- writeALine - writes a line of text to an open file
- openFileForReading - opens a text file for reading line by line
- readALine - reads a line of text from an open file
- closeFile - closes an open file
"""
import pygame
from pygame.locals import *
import pygwidgets
import sys
import time
# Timer classes:
# Timer (simple)
# CountUpTimer
# CountDownTimer
#
# Timer
#
class Timer():
    """A simple one-shot timer.

    Construct with a duration, call start() to begin timing, then poll
    update() once per frame. update() returns True exactly once — on the
    first poll after the duration has elapsed — and fires the optional
    callback with the timer's nickname at that moment.

    Parameters:
    | timeInSeconds - duration of the timer in seconds (int or float)
    Optional keyword parameters:
    | nickname - an internal name to associate with this timer
    | callBack - a callable invoked (with the nickname) when the timer ends
    """
    def __init__(self, timeInSeconds, nickname=None, callBack=None):
        self.timeInSeconds = timeInSeconds
        self.nickname = nickname
        self.callBack = callBack
        self.running = False  # update() is a no-op until start() is called

    def start(self):
        """Begin (or restart) timing from now."""
        self.startTime = time.time()
        self.running = True

    def update(self):
        """Per-frame poll.

        Returns:
        | False - while idle or still counting
        | True - once, on the frame the duration has elapsed
        """
        if not self.running:
            return False
        elapsed = time.time() - self.startTime
        if elapsed < self.timeInSeconds:
            return False
        # Duration reached: stop, notify, and report completion once.
        self.running = False
        if self.callBack is not None:
            self.callBack(self.nickname)
        return True
#
# CountUpTimer class
#
class CountUpTimer():
    """A timer that counts up from zero.

    Typical use:
    1) Create a CountUpTimer object:
           myTimer = CountUpTimer()
    2) Call myTimer.start() to start (or restart) it.
    3) Read the elapsed time each frame with one of:
           myTimer.getTime()          # float seconds
           myTimer.getTimeInSeconds() # integer seconds
           myTimer.getTimeInHHMMSS()  # 'H:MM:SS' style string
    4) Call myTimer.stop() to freeze the elapsed time.
    """
    NSECONDS_PER_HOUR = 60 * 60
    NSECONDS_PER_MINUTE = 60

    def __init__(self):
        self.running = False
        self.savedSecondsElapsed = 0  # frozen elapsed time once stopped

    def start(self):
        """Start the timer running (starts at zero)."""
        self.secondsStart = time.time()
        self.running = True
        self.savedSecondsElapsed = 0

    def getTime(self):
        """Return the elapsed time as a float number of seconds.

        While running this is live; once stopped it is the frozen value.
        """
        if self.running:
            return time.time() - self.secondsStart
        return self.savedSecondsElapsed

    def getTimeInSeconds(self):
        """Return the elapsed time as an integer number of seconds."""
        return int(self.getTime())

    def getTimeInHHMMSS(self):
        """Return the elapsed time as a formatted string.

        Format: 'H:MM:SS' when hours > 0, 'M:SS' when only minutes > 0,
        otherwise just the seconds ('S').

        Bug fix: the original emitted the leftover seconds where the hour
        count belonged (e.g. '125:02:05' instead of '1:02:05'), used '>'
        instead of '>=' at the hour/minute boundaries, and could omit the
        minutes field entirely for times just over an hour.
        """
        nSeconds = int(self.getTime())
        nHours, remainder = divmod(nSeconds, CountUpTimer.NSECONDS_PER_HOUR)
        nMinutes, nSecs = divmod(remainder, CountUpTimer.NSECONDS_PER_MINUTE)
        if nHours > 0:
            return '{}:{:02d}:{:02d}'.format(nHours, nMinutes, nSecs)
        if nMinutes > 0:
            return '{}:{:02d}'.format(nMinutes, nSecs)
        return str(nSecs)

    def stop(self):
        """Stop the timer; subsequent reads return the frozen elapsed time.

        Bug fix: the original unconditionally read self.secondsStart, so
        calling stop() before start() raised AttributeError, and a second
        stop() silently grew the saved elapsed time.
        """
        if self.running:
            self.savedSecondsElapsed = time.time() - self.secondsStart
            self.running = False
#
# CountDownTimer class
#
class CountDownTimer():
    """A timer that counts down from a given starting number of seconds.

    Typical use:
    1) Create a CountDownTimer object:
           myTimer = CountDownTimer(60)  # start the timer at 60 seconds
    2) Call myTimer.start() to start (or restart) it.
    3) Read the remaining time each frame with one of:
           myTimer.getTime()          # float seconds
           myTimer.getTimeInSeconds() # integer seconds
           myTimer.getTimeInHHMMSS()  # 'H:MM:SS' style string
    4) Call myTimer.stop() to freeze the remaining time.

    Parameters:
    | nStartingSeconds - starting point for the timer, in seconds
    Optional keyword parameters:
    | stopAtZero - stop (and fire the callback) on reaching zero (default True)
    | nickname - an internal name used to refer to this timer
    | callBack - a callable invoked (with the nickname) when zero is reached
    """
    NSECONDS_PER_HOUR = 60 * 60
    NSECONDS_PER_MINUTE = 60

    def __init__(self, nStartingSeconds, stopAtZero=True, nickname=None, callBack=None):
        self.running = False
        self.secondsSavedRemaining = 0
        self.nStartingSeconds = nStartingSeconds
        self.stopAtZero = stopAtZero
        self.nickname = nickname
        self.callBack = callBack
        # Bug fix: initialize here so ended() is safe before start() is
        # called (the original raised AttributeError).
        self.reachedZero = False

    def start(self):
        """Start the timer running (counts down from nStartingSeconds)."""
        self.secondsEnd = time.time() + self.nStartingSeconds
        self.reachedZero = False
        self.running = True

    def getTime(self):
        """Return the remaining time as a float number of seconds.

        When stopAtZero is True, reaching zero stops the timer, marks it as
        ended, and fires the callback (once). When stopAtZero is False the
        value may go negative.
        """
        if not self.running:
            return self.secondsSavedRemaining
        secondsRemaining = self.secondsEnd - time.time()
        if self.stopAtZero and (secondsRemaining <= 0):
            secondsRemaining = 0
            self.running = False
            self.reachedZero = True
            if self.callBack is not None:
                self.callBack(self.nickname)
        return secondsRemaining

    def getTimeInSeconds(self):
        """Return the remaining time as an integer number of seconds."""
        return int(self.getTime())

    def getTimeInHHMMSS(self):
        """Return the remaining time as a formatted string.

        Format: 'H:MM:SS' when hours > 0, 'M:SS' when only minutes > 0,
        otherwise just the seconds ('S').

        Bug fix: the original emitted the leftover seconds where the hour
        count belonged (e.g. '125:02:05' instead of '1:02:05'), used '>'
        instead of '>=' at the hour/minute boundaries, and could omit the
        minutes field entirely for times just over an hour.
        """
        nSeconds = int(self.getTime())
        nHours, remainder = divmod(nSeconds, CountDownTimer.NSECONDS_PER_HOUR)
        nMinutes, nSecs = divmod(remainder, CountDownTimer.NSECONDS_PER_MINUTE)
        if nHours > 0:
            return '{}:{:02d}:{:02d}'.format(nHours, nMinutes, nSecs)
        if nMinutes > 0:
            return '{}:{:02d}'.format(nMinutes, nSecs)
        return str(nSecs)

    def stop(self):
        """Stop the timer; subsequent reads return the frozen remaining time.

        Bug fix: guarded so that calling stop() before start() no longer
        raises AttributeError (secondsEnd does not exist yet).
        """
        if self.running:
            self.secondsSavedRemaining = self.secondsEnd - time.time()
        self.running = False

    def ended(self):
        """Return True once the timer has reached zero (False before start)."""
        return self.reachedZero
#
#
# Scene Manager
#
#
class SceneMgr():
    """SceneMgr (Scene Manager) allows you to build a program with multiple scenes.
    The SceneMgr manages any number of scenes built as subclasses of the "Scene" class.
    For more details, see the "Scene" class.
    Typical use:
    1) Instantiate as many Scenes as you want:
    |
    | oScene1 = Scene("StartingScene")
    | oScene2 = Scene("MainScene")
    | oScene3 = Scene("SomeOtherScene")
    2) Build a dictionary of these scenes with unique keys:
    mySceneDict = {'Splash': oScene1, 'Main': oScene2, 'Other': oScene3}
    3) Instantiate *one* SceneMgr (a singleton):
    oSceneMgr = SceneMgr(mySceneDict, 'Splash', 30)
    4) Call the run method to start the SceneMgr running:
    oSceneMgr.run()
    Parameters:
    | scenesDict - is a dictionary that consists of:
    |    {<sceneKey>:<sceneObject>, <sceneKey>:<sceneObject>, ...}
    |    where each sceneKey is a unique string identifying the scene
    |    and each sceneObject is an object instantiated from a scene class
    |    (For details on Scenes, see the Scene class)
    | startingSceneKey - is the string identifying which scene is the starting scene
    | fps - is the frames per second at which the program should run
    Based on a concept of a "Scene Manager" by Blake O'Hare of Nerd Paradise (nerdparadise.com)
    """
    def __init__(self, scenesDict, startingSceneKey, fps):
        self.scenesDict = scenesDict
        # Fail fast if the caller asks to start in a scene that doesn't exist.
        if startingSceneKey not in self.scenesDict:
            raise Exception("The starting scene '" + startingSceneKey + \
                            "' is not a key in the dictionary of scenes.")
        self.currentSceneKey = startingSceneKey
        self.oCurrentScene = self.scenesDict[startingSceneKey]
        self.framesPerSecond = fps
        # Give each scene a reference back to the SceneMgr.
        # This allows any scene to do a goToScene, request,
        # and send back to the Scene Manager
        for key in self.scenesDict:
            oScene = self.scenesDict[key]
            oScene._setRefToSceneMgr(self)

    def run(self):
        """ This method implements the main pygame loop.
        It should typically be called as the last line of your main program.
        It is designed to call a standardized set of methods in the current scene.
        Therefore, all scenes must implement these methods (polymorphism):
        |    handleInputs  # called in every frame
        |    draw          # called in every frame
        The following methods can be implemented in a scene.  If they are not
        implemented, then the default version in the Scene subclass will be used.
        (Those methods do not do anything):
        |    enter   # called once whenever the scene is entered
        |    update  # called in every frame
        |    leave   # called once whenever the scene is left
        """
        clock = pygame.time.Clock()
        # 6 - Loop forever
        while True:
            keysDownList = pygame.key.get_pressed()
            # 7 - Check for and handle events
            # Collect this frame's events so the scene sees all of them at once;
            # QUIT or ESCAPE shuts the whole program down after notifying the
            # current scene via leave().
            eventsList = []
            for event in pygame.event.get():
                if (event.type == pygame.QUIT) or \
                        ((event.type == pygame.KEYDOWN) and (event.key == pygame.K_ESCAPE)):
                    self.oCurrentScene.leave()  # tell current scene we are leaving
                    pygame.quit()
                    sys.exit()
                eventsList.append(event)
            # Here, we let the current scene process all events,
            # do any 'per frame' actions in its update method,
            # and draw everything that needs to be drawn.
            self.oCurrentScene.handleInputs(eventsList, keysDownList)
            self.oCurrentScene.update()
            self.oCurrentScene.draw()
            # 11 - Update the screen
            pygame.display.update()
            # 12 - Slow things down a bit
            clock.tick(self.framesPerSecond)

    def _goToScene(self, nextSceneKey, dataForNextScene):
        """Internal method, called by a Scene tells the SceneMgr to go to another scene
        (From the Scene's point of view, it just needs to call its own goToScene method)
        This method:
        - Tells the current scene that it is leaving, calls leave method
        - Gets any data the leaving scene wants to send to the new scene
        - Tells the new scene that it is entering, calls enter method
        """
        if nextSceneKey is None:  # meaning, exit
            pygame.quit()
            sys.exit()
        else:
            # Call the leave method of the old scene to allow it to clean up
            # Look up the new scene (based on the key),
            # Call the enter method of the new scene.
            self.oCurrentScene.leave()
            if nextSceneKey not in self.scenesDict:
                raise Exception("Trying to go to unknown scene '" + nextSceneKey + \
                                "' but that key is not in the dictionary of scenes.")
            self.oCurrentScene = self.scenesDict[nextSceneKey]
            self.oCurrentScene.enter(dataForNextScene)

    def _request_respond(self, targetSceneKey, infoRequested):
        """Internal method, called by a Scene tells SceneMgr to query another scene for information.
        (From the Scene's point of view, it just needs to call its own request method)
        The target scene must implement a method named "respond"
        """
        oTargetScene = self.scenesDict[targetSceneKey]
        info = oTargetScene.respond(infoRequested)
        return info

    def _send_receive(self, targetSceneKey, infoType, info):
        """Internal method, called by a Scene tells the Scene Manager to send information to another scene
        (From the sending scene's point of view, it just needs to call its own send method)
        The target scene must implement a method named "receive"
        """
        oTargetScene = self.scenesDict[targetSceneKey]
        oTargetScene.receive(infoType, info)

    def _sendAll_receive(self, oSenderScene, infoType, info):
        """Internal method, called by a Scene tells the Scene Manager to send information to all scenes (other than itself)
        (From the sending scene's point of view, it just needs to call its own sendAll method)
        All scenes must implement a method named "receive"
        """
        for sceneKey in self.scenesDict:
            oTargetScene = self.scenesDict[sceneKey]
            # Skip the sender itself; everyone else gets the broadcast.
            if oTargetScene != oSenderScene:
                oTargetScene.receive(infoType, info)
class Scene():
    """Abstract base class for a scene managed by SceneMgr.

    Subclass this for each scene in your program. The code creating a scene
    passes in a window to draw into and a unique scene key; your subclass's
    __init__ must save both:

    |    def __init__(self, window, sceneKey):
    |        self.window = window
    |        self.sceneKey = sceneKey
    |        # Add any initialization you want to do here.

    While a scene is active, the SceneMgr calls a standard set of methods
    in it every frame, so every subclass MUST implement:
    |    handleInputs  # called in every frame
    |    draw          # called in every frame
    These MAY optionally be overridden (the defaults here do nothing):
    |    enter   # called once whenever the scene is entered
    |    update  # called in every frame
    |    leave   # called once whenever the scene is left
    To switch scenes, call self.goToScene(<key>, <optional data>); to quit
    the program from a scene, call self.quit().
    Scenes can also communicate: request/send/sendAll here are answered by
    the target scene's respond/receive methods.
    """
    def __del__(self):
        """Internal method, called when the scene is about to die."""
        self.oSceneMgr = None  # eliminate the reference to the SceneMgr

    def _setRefToSceneMgr(self, oSceneMgr):
        """Internal method to save a reference to the SceneMgr object.

        Used by goToScene, request, send and sendAll below.
        Do not change or override this method.
        """
        self.oSceneMgr = oSceneMgr

    def enter(self, data):
        """Called once whenever this scene becomes the current scene.

        Override if you expect data from the previous scene (data can be of
        any type the two scenes agree upon).
        """
        pass

    def handleInputs(self, events, keyPressedList):
        """Called every frame with pending events and the key-pressed list.

        Your subclass MUST override this method.
        Parameters:
        | events - a list of events your method should handle
        | keyPressedList - a list of Booleans, one per key
        """
        # Bug fix: the original message named a non-existent method
        # 'handleInput' (missing the trailing 's').
        raise NotImplementedError('Your scene subclass must implement the method: handleInputs')

    def update(self):
        """Called every frame; override to do per-frame processing."""
        pass

    def draw(self):
        """Called every frame to draw the scene.

        Your subclass MUST override this method.
        """
        raise NotImplementedError('Your scene subclass must implement the method: draw')

    def leave(self):
        """Called once whenever the user leaves this scene; override to clean up."""
        pass

    def quit(self):
        """Quit the program from inside this scene."""
        self.goToScene(None)

    def goToScene(self, nextSceneKey, data=None):
        """Switch to another scene.

        Parameters:
        | nextSceneKey - the scene key (string) of the scene to go to
        | data - optional data passed to the next scene's enter method
        """
        self.oSceneMgr._goToScene(nextSceneKey, data)

    def request(self, targetSceneKey, infoRequested):
        """Ask another scene for information.

        The target scene must implement a method named respond; it may
        return any data the two scenes agree upon.
        """
        info = self.oSceneMgr._request_respond(targetSceneKey, infoRequested)
        return info

    def send(self, targetSceneKey, infoType, info):
        """Send information to another scene.

        The target scene must implement a method named receive.
        """
        self.oSceneMgr._send_receive(targetSceneKey, infoType, info)

    def sendAll(self, infoType, info):
        """Send information to every other scene (all must implement receive)."""
        self.oSceneMgr._sendAll_receive(self, infoType, info)  # self identifies the sender

    def respond(self, infoRequested):
        """Override to answer request() calls from other scenes."""
        raise NotImplementedError('Your scene subclass must implement the method: respond')

    def receive(self, infoType, info):
        """Override to handle send()/sendAll() data from other scenes."""
        raise NotImplementedError('Your scene subclass must implement the method: receive')
#
# DIALOG Functions
#
DIALOG_BACKGROUND_COLOR = (0, 200, 200)
DIALOG_BLACK = (0, 0, 0)
def textYesNoDialog(theWindow, theRect, prompt, trueButtonText='OK', \
                    falseButtonText='Cancel', backgroundColor=DIALOG_BACKGROUND_COLOR):
    """Puts up a text-based two-button modal dialog (typically Yes/No or OK/Cancel)
    It can also be used to put up a single button alert dialog (typically with an OK button)
    Parameters:
    |    theWindow - the window to draw in
    |    theRect - the rectangle (left, top, width, height) of the dialog box in the application window
    |    prompt - prompt (title) string to be displayed in the dialog box
    Optional keyword parameters:
    |    trueButtonText - text on the True button (defaults to 'OK')
    |    falseButtonText - text on the False button (defaults to 'Cancel')
    |        Note:  If falseButtonText is None or the empty string, the false button will not be drawn
    |        This way, you can present an "alert" box with only an 'OK' button
    |    backgroundColor - rgb background color for the dialog box (defaults to (0, 200, 200))
    Returns:
    |    trueOrFalse - True means true button was pressed, False means false button was pressed
    Note: this function blocks (runs its own event loop) until a button is
    pressed; QUIT or ESCAPE exits the whole program.
    """
    dialogLeft = theRect[0]
    dialogTop = theRect[1]
    dialogWidth = theRect[2]
    dialogHeight = theRect[3]
    # Inner frame drawn one pixel inside the dialog rect.
    frameRect = pygame.Rect(dialogLeft + 1, dialogTop + 1, dialogWidth - 2, dialogHeight - 2)
    INSET = 30  # inset buttons from the edges of the dialog box
    promptText = pygwidgets.DisplayText(theWindow, (dialogLeft, dialogTop + 30), prompt,
                                        fontSize=24, width=dialogWidth, justified='center')
    # Create buttons, fix locations after finding out the size of the button(s)
    hideFalseButton = (falseButtonText is None) or (falseButtonText == '')
    showFalseButton = not hideFalseButton
    if showFalseButton:
        falseButton = pygwidgets.TextButton(theWindow, (0, 0), falseButtonText)
    trueButton = pygwidgets.TextButton(theWindow, (0, 0), trueButtonText)
    trueButtonRect = trueButton.getRect()
    trueButtonHeight = trueButtonRect[3]
    trueButtonWidth = trueButtonRect[2]  # get width
    # True button goes at the lower-right; false button (if any) lower-left.
    xPos = dialogLeft + dialogWidth - trueButtonWidth - INSET
    buttonsY = dialogTop + dialogHeight - trueButtonHeight - 20
    if showFalseButton:
        falseButton.setLoc((dialogLeft + INSET, buttonsY))
    trueButton.setLoc((xPos, buttonsY))
    #print('In dialogYesNo')
    #print('theRect is', theRect)
    #print('frameRect is', frameRect)
    # 6 - Loop forever
    while True:
        # 7 - Check for and handle events
        for event in pygame.event.get():
            if (event.type == QUIT) or \
                    ((event.type == KEYDOWN) and (event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            if showFalseButton:
                if falseButton.handleEvent(event):
                    return False
            if trueButton.handleEvent(event):
                return True
        # 8 - Do any "per frame" actions
        # 9 - Clear the screen area before drawing it again
        pygame.draw.rect(theWindow, backgroundColor, theRect)
        pygame.draw.rect(theWindow, DIALOG_BLACK, frameRect, 1)
        # 10 - Draw the screen elements
        promptText.draw()
        if showFalseButton:
            falseButton.draw()
        trueButton.draw()
        # 11 - Update the screen
        pygame.display.update()
        # 12 - Slow things down a bit
        #clock.tick(FRAMES_PER_SECOND)  # no need for this
def customYesNoDialog(theWindow, oDialogImage, oPromptText, oTrueButton, oFalseButton):
    """Puts up a custom two-button modal dialog (typically Yes/No or OK/Cancel)

    It can also be used to put up a single-button alert dialog (with a typical
    OK button) by passing None or the empty string for oFalseButton.

    Parameters:
    | theWindow - the window to draw in
    | oDialogImage - an Image object (from pygwidgets) with the background of the dialog box
    | oPromptText - a TextDisplay object (from pygwidgets) containing the prompt to display
    | oTrueButton - a CustomButton object (from pygwidgets) representing True or OK, etc.
    | oFalseButton - a CustomButton object (from pygwidgets) representing False or Cancel, etc.
    | Note: If oFalseButton is None or the empty string, the false button will not be drawn
    | This way, you can present an "alert" box with only an 'OK' button
    Returns:
    | trueOrFalse - True means true button was pressed, False means false button was pressed
    """
    drawFalseButton = not ((oFalseButton is None) or (oFalseButton == ''))

    # Modal loop: runs until one of the buttons is activated or the user quits.
    while True:
        # Handle events: quit/Escape terminates the program; buttons return.
        for event in pygame.event.get():
            if (event.type == QUIT) or \
                    ((event.type == KEYDOWN) and (event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            if drawFalseButton and oFalseButton.handleEvent(event):
                return False
            if oTrueButton.handleEvent(event):
                return True

        # Redraw all dialog elements each frame.
        oDialogImage.draw()
        oPromptText.draw()
        if drawFalseButton:
            oFalseButton.draw()
        oTrueButton.draw()
        pygame.display.update()
def textAnswerDialog(theWindow, theRect, prompt, trueButtonText='OK',
                     falseButtonText='Cancel', backgroundColor=DIALOG_BACKGROUND_COLOR):
    """Puts up a text-based two-button answerable modal dialog (typically Yes/No or OK/Cancel)

    Parameters:
    | theWindow - the window to draw in
    | theRect - the rectangle of the dialog box in the application window
    | prompt - prompt (title) string to be displayed in the dialog box
    Optional keyword parameters:
    | trueButtonText - text on the True button (defaults to 'OK')
    | falseButtonText - text on the False button (defaults to 'Cancel')
    | backgroundColor - rgb background color for the dialog box (defaults to (0, 200, 200))
    Returns:
    | trueOrFalse - True means true button was pressed, False means false button was pressed
    | userText - if above is True, then this contains the text that the user typed.
    """
    dialogLeft = theRect[0]
    dialogTop = theRect[1]
    dialogWidth = theRect[2]
    dialogHeight = theRect[3]
    # One-pixel-inset frame drawn as the dialog border.
    frameRect = pygame.Rect(dialogLeft + 1, dialogTop + 1, dialogWidth - 2, dialogHeight - 2)
    INSET = 30  # inset buttons from the edges of the dialog box

    promptText = pygwidgets.DisplayText(theWindow, (dialogLeft, dialogTop + 30), prompt,
                                        fontSize=24, width=dialogWidth, justified='center')
    inputWidth = dialogWidth - (2 * INSET)
    inputText = pygwidgets.InputText(theWindow, (dialogLeft + INSET, dialogTop + 80),
                                     width=inputWidth, initialFocus=True)

    # Create buttons at a dummy location, then position them once their size is known.
    falseButton = pygwidgets.TextButton(theWindow, (0, 0), falseButtonText)
    trueButton = pygwidgets.TextButton(theWindow, (0, 0), trueButtonText)
    trueButtonRect = trueButton.getRect()
    trueButtonHeight = trueButtonRect[3]
    trueButtonWidth = trueButtonRect[2]
    xPos = dialogLeft + dialogWidth - trueButtonWidth - INSET
    buttonsY = dialogTop + dialogHeight - trueButtonHeight - 20
    falseButton.setLoc((dialogLeft + INSET, buttonsY))
    trueButton.setLoc((xPos, buttonsY))

    # Modal loop: runs until a button (or Enter in the input field) resolves it.
    while True:
        for event in pygame.event.get():
            if (event.type == QUIT) or \
                    ((event.type == KEYDOWN) and (event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            # Pressing Enter in the input field counts as confirming.
            if inputText.handleEvent(event):
                return True, inputText.getValue()
            if trueButton.handleEvent(event):
                return True, inputText.getValue()
            if falseButton.handleEvent(event):
                return False, None

        pygame.draw.rect(theWindow, backgroundColor, theRect)
        # BUGFIX: the border was drawn on theRect, overdrawing the dialog edge;
        # draw it on the inset frameRect (as dialogYesNo does). frameRect was
        # previously computed but never used.
        pygame.draw.rect(theWindow, DIALOG_BLACK, frameRect, 1)

        promptText.draw()
        inputText.draw()
        falseButton.draw()
        trueButton.draw()
        pygame.display.update()
def customAnswerDialog(theWindow, oDialogImage, oPromptText, oAnswerText, oTrueButton, oFalseButton):
    """Puts up a custom two-button modal dialog (typically Yes/No or OK/Cancel)
    with a text entry field.

    Parameters:
    | theWindow - the window to draw in
    | oDialogImage - an Image object (from pygwidgets) containing the background of the dialog box
    | oPromptText - a TextDisplay object (from pygwidgets) containing the prompt to display
    | oAnswerText - an InputDisplay object (from pygwidgets) where the user types their answer
    | oTrueButton - a CustomButton object (from pygwidgets) representing True or OK, etc.
    | oFalseButton - a CustomButton object (from pygwidgets) representing False or Cancel, etc.
    | Note: If oFalseButton is None or the empty string, the false button will not be drawn
    | (consistent with customYesNoDialog)
    Returns:
    | trueOrFalse - True means true button was pressed, False means false button was pressed
    | userText - if trueOrFalse above is True, then this contains the text that the user typed.
    """
    # Generalized for consistency with customYesNoDialog: false button is optional.
    # (Also removed an unused local that fetched the dialog image rect.)
    showFalseButton = not ((oFalseButton is None) or (oFalseButton == ''))

    # Modal loop: runs until a button (or Enter in the input field) resolves it.
    while True:
        for event in pygame.event.get():
            if (event.type == QUIT) or \
                    ((event.type == KEYDOWN) and (event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            # Pressing Enter in the input field counts as confirming.
            if oAnswerText.handleEvent(event):
                return True, oAnswerText.getValue()
            if oTrueButton.handleEvent(event):
                return True, oAnswerText.getValue()
            if showFalseButton and oFalseButton.handleEvent(event):
                return False, None

        # Redraw all dialog elements each frame.
        oDialogImage.draw()
        oAnswerText.draw()
        oPromptText.draw()
        if showFalseButton:
            oFalseButton.draw()
        oTrueButton.draw()
        pygame.display.update()
#
# File input output functions
#
# Originally: FileReadWrite.py
import os
# Functions for checking if a file exists, read from a file, write to a file
def fileExists(filePath):
    """Check if a file at a given path exists

    Parameters:
    | filePath - a path to a file (typically a relative path)
    Returns:
    | trueOrFalse - True if the file exists, False if the file does not exist
    """
    return os.path.exists(filePath)
def writeFile(filePath, textToWrite):
    """Writes a string to a file

    The text can contain newline characters which will indicate separate lines

    Parameters:
    | filePath - a path to a file (typically a relative path)
    | textToWrite - a string to be written out
    """
    # Context manager guarantees the file is closed even if the write raises.
    with open(filePath, 'w') as fileHandle:
        fileHandle.write(textToWrite)
def readFile(filePath):
    """Read the contents of a text file into a string

    Parameters:
    | filePath - a path to a file (typically a relative path)
    Returns:
    | textRead - a string containing the contents of the file
    | Note: If the file does not exist, you will get an error message printed
    | and the function will return the empty string
    """
    # EAFP: attempt the open directly instead of a check-then-open, which is
    # race-prone. The context manager guarantees the file is closed.
    try:
        with open(filePath, 'r') as fileHandle:
            return fileHandle.read()
    except FileNotFoundError:
        print('The file, ' + filePath + ' does not exist - cannot read it.')
        return ''
# Functions for opening a file, writing & reading a line at a time, and closing the file
def openFileForWriting(filePath):
    """Opens a file for writing

    Parameters:
    | filePath - a path to a file (typically a relative path)
    Returns:
    | fileHandle - a file handle for the file that was opened
    | (this should be used in subsequent calls to writeALine and closeFile)
    """
    return open(filePath, 'w')
def writeALine(fileHandle, lineToWrite):
    """Writes a line of text to the already opened file

    Parameters:
    | fileHandle - a fileHandle to an already opened file (from openFileForWriting)
    | lineToWrite - a line of text to be written out
    """
    # Append the line terminator so each call produces one file line.
    fileHandle.write(lineToWrite + '\n')
def openFileForReading(filePath):
    """Opens a file for reading

    Parameters:
    | filePath - a path to a file (typically a relative path)
    Returns:
    | fileHandle - a file handle for the file that was opened
    | (this should be used in subsequent calls to readALine and closeFile)
    | Note: If the file does not exist, an error message is printed and the
    | empty string is returned instead of a file handle (kept for backward
    | compatibility with existing callers).
    """
    # EAFP: attempt the open directly instead of a check-then-open, which is
    # race-prone. (Also fixed the "sutsequent" typo in the docstring.)
    try:
        return open(filePath, 'r')
    except FileNotFoundError:
        print('The file, ' + filePath + ' does not exist - cannot read it.')
        return ''
def readALine(fileHandle):
    """Reads the next line of text from the already opened file

    (The previous docstring incorrectly said this function *writes* a line.)

    Parameters:
    | fileHandle - a fileHandle to an already opened file (from openFileForReading)
    Returns:
    | lineOrFalse - if a line is available, returns the next line of text in the file
    | Otherwise, returns False to indicate end of file has been reached.
    """
    theLine = fileHandle.readline()
    # readline() returns the empty string only at end of file; return
    # something distinguishable from a blank line: False.
    # Callers can detect it with: if returnedValue is False:  # reached EOF
    if theLine == '':
        return False
    # Strip a trailing newline character, if present.
    if theLine.endswith('\n'):
        theLine = theLine.rstrip('\n')
    return theLine
def closeFile(fileHandle):
    """Close a file that was opened earlier with openFileForWriting or openFileForReading

    Parameter:
    | fileHandle - a handle to an already opened file
    """
    # Closing flushes any buffered writes and releases the OS file handle.
    fileHandle.close()
|
import boto3
import os
import random, string
from dotenv import load_dotenv
# Load AWS credential/region settings from the local .env file into
# the process environment so os.getenv can read them below.
load_dotenv(".env")
# Module-level DynamoDB resource shared by the classes below.
# NOTE(review): if ACCESS_KEY_ID / ACCESS_SECRET_KEY / REGION are unset,
# os.getenv returns None and boto3 falls back to its default credential
# chain -- confirm that is intended.
dynamodb = boto3.resource("dynamodb",
    aws_access_key_id= os.getenv("ACCESS_KEY_ID"),
    aws_secret_access_key= os.getenv("ACCESS_SECRET_KEY"),
    region_name= os.getenv("REGION")
    #aws_session_token= keys.ACCESS_SESSION_TOKEN
    )
from boto3.dynamodb.conditions import Key, Attr
class User_info:
    """Holds a new user's registration data and can persist it to DynamoDB."""

    def __init__(self, name, email, password):
        self.name = name
        self.email = email
        self.password = password
        # Random 10-character alphanumeric suffix makes the id unique per user.
        suffix = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 10))
        self.id = name + suffix

    def insert_item(self):
        """Insert this user as a new item in the 'userdata' table.

        NOTE(review): the password is stored in plain text -- confirm whether
        it should be hashed before storage.
        """
        dynamodb.Table('userdata').put_item(
            Item={
                'name': self.name,
                'email': self.email,
                'password': self.password,
                'id': self.id,
            }
        )
class Check_info:
    """Verifies a login attempt against the 'userdata' DynamoDB table."""

    def __init__(self, email, password):
        self.email = email
        self.password = password
        self._list = []

    def check_user(self):
        """Query by email and compare passwords, appending the outcome.

        Extends the internal result list with [checked, name, user_id]:
        checked is True on a match; otherwise False with name and id None.
        NOTE(review): passwords are compared in plain text -- confirm whether
        hashed comparison is intended.
        """
        table = dynamodb.Table('userdata')
        response = table.query(
            KeyConditionExpression=Key('email').eq(self.email)
        )
        items = response['Items']
        if items and self.password == items[0]['password']:
            self._list.extend([True, items[0]['name'], items[0]['id']])
        else:
            self._list.extend([False, None, None])

    def get_list(self):
        """Return the accumulated [checked, name, user_id] result list."""
        return self._list
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Any, Collection, NewType, Sequence, Union, overload
from .basic import to_str
__all__ = ["Party", "to_party", "to_parties"]
Party = NewType("Party", str)
def to_party(o: "Any") -> "Party":
    """Coerce *o* to a :class:`Party` (a ``NewType`` over ``str``) via :func:`to_str`."""
    return Party(to_str(o))
@overload
def to_parties(__object: None) -> None:
    ...


@overload
def to_parties(
    __object: "Union[str, Party, Collection[str], Collection[Party]]",
) -> "Sequence[Party]":
    ...


def to_parties(
    __object: "Union[None, str, Party, Collection[str], Collection[Party]]",
) -> "Union[None, Sequence[Party]]":
    """
    Return the specified object as a collection of parties.

    :param __object:
        The object to convert to a set of parties.
    :return:
        ``None`` if ``__object`` is ``None``; otherwise a tuple of
        :class:`Party` values (a single string becomes a one-element tuple).
    """
    if __object is None:
        return None
    elif isinstance(__object, str):
        # A bare string is one party, not a collection of characters.
        return (Party(__object),)
    else:
        return tuple(Party(p) for p in __object)
|
class Base_Model(object):
    """Common base class providing percent-change helpers for models."""

    # Class-level default placeholders available to subclasses.
    default_float = None
    default_int = None

    def __init__(self):
        return

    def to_percent_with_diff(self, initial_value: float, difference: float) -> float:
        """Percent change implied by adding *difference* to *initial_value* (2 decimals)."""
        new_value = initial_value + difference
        return self.to_percent(initial_value, new_value)

    def to_percent(self, initial_value: float, new_value: float) -> float:
        """Percent change from *initial_value* to *new_value* (2 decimals).

        Raises ZeroDivisionError when initial_value is 0.
        """
        delta = new_value - initial_value
        return round(delta / initial_value * 100, 2)
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
url = "http://pythonscraping.com/pages/page3.html"
html = urlopen(url)
obj = BeautifulSoup(html, "lxml")
def main():
    """Find and print all gift-image tags (../img/gifts/img*.jpg) on the parsed page."""
    # BUGFIX: the pattern used "\/" inside a normal string literal, which is
    # not a valid Python escape (DeprecationWarning / future SyntaxError).
    # A raw string fixes that; '/' needs no escaping in a regex, so the
    # compiled pattern is unchanged.
    images = obj.findAll("img", {"src": re.compile(r"\.\./img/gifts/img.*\.jpg")})
    print(images)
    for image in images:
        print(image["src"])
    return None
if __name__ == "__main__":
main()
|
__author__ = 'QC1'
from main.page.android.andr_pe_index import *
class ActivityLogout():
    """Drives the logout flow from the Android index page object."""

    def do_logout(self, driver):
        """Build the index page object for *driver* and tap its logout control."""
        page = PageIndex(driver)
        print("Logging out. . .")
        page.tap_logout()
|
# Copyright (C) 2012-2013 Claudio Guarnieri.
# Copyright (C) 2014-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import random
import re
import logging
import threading
from lib.common.abstracts import Auxiliary
from lib.common.defines import (
KERNEL32, USER32, WM_GETTEXT, WM_GETTEXTLENGTH, WM_CLOSE, BM_CLICK,
EnumWindowsProc, EnumChildProc, create_unicode_buffer
)
log = logging.getLogger(__name__)
RESOLUTION = {
"x": USER32.GetSystemMetrics(0),
"y": USER32.GetSystemMetrics(1)
}
def click(hwnd):
    """Bring the window to the foreground and send it a BM_CLICK message."""
    USER32.SetForegroundWindow(hwnd)
    # Give the window a moment to actually reach the foreground.
    KERNEL32.Sleep(1000)
    USER32.SendMessageW(hwnd, BM_CLICK, 0, 0)
def foreach_child(hwnd, lparam):
    """EnumChildProc callback: click child windows that are buttons with an
    "affirmative" label (OK/Yes/Next/...), unless the label is blacklisted.

    Exact-match labels in buttons_complete take precedence and stop further
    matching for this window. Always returns True (truthy) so that child
    enumeration continues.
    """
    # List of partial buttons labels to click.
    buttons = [
        "yes", "oui",
        "ok",
        "i accept",
        "next", "suivant",
        "new", "nouveau",
        "install", "installer",
        "file", "fichier",
        "run", "start", "marrer", "cuter",
        "extract",
        "i agree", "accepte",
        "enable", "activer", "accord", "valider",
        "don't send", "ne pas envoyer",
        "don't save",
        "continue", "continuer",
        "personal", "personnel",
        "scan", "scanner",
        "unzip", "dezip",
        "open", "ouvrir",
        "close the program",
        "execute", "executer",
        "launch", "lancer",
        "save", "sauvegarder",
        "download", "load", "charger",
        "end", "fin", "terminer",
        "later",
        "finish",
        "end",
        "allow access",
        "remind me later",
        "save", "sauvegarder"
    ]

    # List of complete button texts to click. These take precedence.
    buttons_complete = [
        "&Ja",  # E.g., Dutch Office Word 2013.
    ]

    # List of buttons labels to not click.
    dontclick = [
        "don't run",
        "i do not accept"
    ]

    classname = create_unicode_buffer(50)
    USER32.GetClassNameW(hwnd, classname, 50)

    # Check if the class of the child is button.
    if "button" in classname.value.lower():
        # Get the text of the button (WM_GETTEXTLENGTH excludes the NUL).
        length = USER32.SendMessageW(hwnd, WM_GETTEXTLENGTH, 0, 0)
        text = create_unicode_buffer(length + 1)
        USER32.SendMessageW(hwnd, WM_GETTEXT, length + 1, text)

        # Exact matches take precedence; click and stop examining this window.
        if text.value in buttons_complete:
            log.info("Found button %r, clicking it" % text.value)
            click(hwnd)
            return True

        # Check if the button is set as "clickable" and click it.
        # '&' marks keyboard accelerators in Win32 labels; strip before matching.
        textval = text.value.replace("&", "").lower()
        for button in buttons:
            if button in textval:
                # for/else: the else runs only if no blacklisted label matched.
                for btn in dontclick:
                    if btn in textval:
                        break
                else:
                    log.info("Found button %r, clicking it" % text.value)
                    click(hwnd)

    # Recursively search for childs (USER32.EnumChildWindows).
    return True
# Callback procedure invoked for every enumerated window.
# Purpose is to close any office window
def get_office_window(hwnd, lparam):
    """EnumWindowsProc callback: close any visible Microsoft Office window.

    Always returns True so that window enumeration continues.
    """
    if USER32.IsWindowVisible(hwnd):
        text = create_unicode_buffer(1024)
        USER32.GetWindowTextW(hwnd, text, 1024)
        # TODO Would " - Microsoft (Word|Excel|PowerPoint)$" be better?
        if re.search("- (Microsoft|Word|Excel|PowerPoint)", text.value):
            # WM_CLOSE asks the window to close gracefully (no force-kill).
            USER32.SendNotifyMessageW(hwnd, WM_CLOSE, None, None)
            log.info("Closed Office window.")
    return True
# Callback procedure invoked for every enumerated window.
def foreach_window(hwnd, lparam):
    """EnumWindowsProc callback invoked for every enumerated top-level window.

    Always returns True so that window enumeration continues.
    """
    # If the window is visible, enumerate its child objects, looking
    # for buttons (see foreach_child).
    if USER32.IsWindowVisible(hwnd):
        USER32.EnumChildWindows(hwnd, EnumChildProc(foreach_child), 0)
    return True
def move_mouse():
    """Move the cursor to a random on-screen position to simulate activity."""
    # Originally used USER32.mouse_event(0x8000, x, y, 0, None), but that was
    # not picked up via GetCursorPos, so SetCursorPos is used instead. This
    # really moves the cursor, which might cause unintended activity on the
    # desktop; making this feature optional could be worthwhile.
    xTarget = random.randint(0, RESOLUTION["x"])
    yTarget = random.randint(0, RESOLUTION["y"])
    USER32.SetCursorPos(xTarget, yTarget)
def click_mouse():
    """Click the left mouse button at the top-middle of the screen."""
    # BUGFIX: '/' yields a float in Python 3 and ctypes rejects floats for
    # SetCursorPos's int parameters; use integer division instead.
    USER32.SetCursorPos(RESOLUTION["x"] // 2, 0)
    # Mouse left-button down.
    USER32.mouse_event(2, 0, 0, 0, None)
    KERNEL32.Sleep(50)
    # Mouse left-button up.
    USER32.mouse_event(4, 0, 0, 0, None)
class Human(threading.Thread, Auxiliary):
    """Human after all

    Auxiliary thread that simulates user activity while the analysis runs:
    moves/clicks the mouse, clicks through dialog buttons, and periodically
    closes Office windows. Features are toggled via the "human",
    "human.move_mouse", "human.click_mouse" and "human.click_buttons" options.
    """

    def __init__(self, options=None, analyzer=None):
        # BUGFIX: the default was a mutable dict ({}), which is shared across
        # all instances; use None and build a fresh dict per instance.
        if options is None:
            options = {}
        threading.Thread.__init__(self)
        Auxiliary.__init__(self, options, analyzer)
        self.do_run = True

    def stop(self):
        """Ask the run() loop to exit after its current iteration."""
        self.do_run = False

    def run(self):
        """Main loop: perform one round of simulated activity per second."""
        seconds = 0

        # Global disable flag: "human" turns every feature on or off at once.
        if "human" in self.options:
            self.do_move_mouse = int(self.options["human"])
            self.do_click_mouse = int(self.options["human"])
            self.do_click_buttons = int(self.options["human"])
        else:
            self.do_move_mouse = True
            self.do_click_mouse = True
            self.do_click_buttons = True

        # Per-feature enable or disable flag (overrides the global one).
        if "human.move_mouse" in self.options:
            self.do_move_mouse = int(self.options["human.move_mouse"])
        if "human.click_mouse" in self.options:
            self.do_click_mouse = int(self.options["human.click_mouse"])
        if "human.click_buttons" in self.options:
            self.do_click_buttons = int(self.options["human.click_buttons"])

        while self.do_run:
            # Once a minute, close any open Office windows.
            if seconds and not seconds % 60:
                USER32.EnumWindows(EnumWindowsProc(get_office_window), 0)

            if self.do_click_mouse:
                click_mouse()
            if self.do_move_mouse:
                move_mouse()
            if self.do_click_buttons:
                USER32.EnumWindows(EnumWindowsProc(foreach_window), 0)

            KERNEL32.Sleep(1000)
            seconds += 1
|
import sys
import os
import subprocess
sys.path.insert(0, 'scripts')
import experiments as exp
import fam
def run_concasteroid(dataset, subst_model, is_dna, cores, additional_args=None):
    """Launch the concasteroid pipeline script as a subprocess.

    Parameters:
    | dataset - path to the dataset to analyze
    | subst_model - substitution model name, passed through to the script
    | is_dna - truthy for DNA data (passed as "1"), falsy for amino acids ("0")
    | cores - number of cores to use (converted to str)
    | additional_args - optional list of extra command-line arguments

    Raises subprocess.CalledProcessError if the launched script exits non-zero.
    """
    # BUGFIX: the default was a mutable list ([]), which is shared across
    # calls; use None and normalize inside the function.
    if additional_args is None:
        additional_args = []
    command = [
        exp.python(),
        os.path.join(exp.scripts_root, "asteroid/launch_concasteroid.py"),
        dataset,
        subst_model,
        "1" if is_dna else "0",
        str(cores),
    ]
    command.extend(additional_args)
    print("-> Running " + " ".join(command))
    subprocess.check_call(command)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.