import csv
import matplotlib.pyplot as plt
my_file = csv.DictReader(open('train.csv'))
def clean_data(datapoint, average):
age = datapoint[0]
pclass = datapoint[1]
cleaned_data = []
if (age == ""):
age = average
else:
age = float(age)
pclass = int(pclass)
cleaned_data.append(age)
cleaned_data.append(pclass)
return cleaned_data
information = []
outcomes = []
for line in my_file:
datapoint = [line['Age'], line['Pclass']]
cleaned_data = clean_data(datapoint, 29)
information.append(cleaned_data)
outcomes.append(line['Survived'])
print(information)
ages = []
pclasses = []
for datapoint in information:
    print(datapoint)
ages.append(datapoint[0])
pclasses.append(datapoint[1])
colors = []
for outcome in outcomes:
if (int(outcome[0]) == 0):
colors.append('r')
else:
colors.append('g')
plt.scatter(ages, pclasses, c=colors)
plt.show()
|
def pretty(text):
    """Strip Markdown code fences and newlines so the text is easy to copy via Wox."""
    return text.replace("```", "").replace("\n", "")
if __name__ == "__main__":
    print(pretty("""`1111`
1234"""))
|
import nltk
import csv
from numpy import double
from random import seed
from random import randint
seed(1)
#--------------------------------------------------------------------------------------------------------------------
def return_random_number(begin, end):
return randint(begin, end)
class Synonym:
def __init__(self, first_word, second_word, input_similarity, input_frequency):
self.first_word = first_word
self.second_word = second_word
self.similarity = input_similarity
self.frequency = input_frequency
#--------------------------------------------------------------------------------------------------------------------
input_address = 'Corpus\\Synonyms.csv'
synonym_list = []
with open(input_address) as input_file:
input_data = csv.reader(input_file, delimiter=',')
for row in input_data:
first_word = row[0]
second_word = row[1]
similarity = double(row[2])
frequency = int(row[3])
synonym_pair = Synonym(first_word, second_word, similarity, frequency)
synonym_list.append(synonym_pair)
max_replace = 3
input_address = 'Dataset\\i2b2.tsv'
output_text = 'text' + '\t' + 'label' + '\n'
num_perturbed_samples = 0
with open(input_address) as input_file:
input_data = csv.reader(input_file, delimiter='\t')
line_num = 0
for row in input_data:
if (line_num > 0):
print(row[0], '\t', row[1])
is_sample_perturbed = False
sample_text = row[0]
sample_label = row[1]
sample_tokenized = nltk.word_tokenize(sample_text)
word_replaced = False
perturbed_sample = sample_text
candidate_synonym = []
can_be_replaced_list = []
for i in range(0, len(synonym_list)):
if (synonym_list[i].first_word in sample_tokenized):
candidate_synonym.append(synonym_list[i])
if (synonym_list[i].first_word not in can_be_replaced_list):
can_be_replaced_list.append(synonym_list[i].first_word)
elif (synonym_list[i].second_word in sample_tokenized):
candidate_synonym.append(synonym_list[i])
if (synonym_list[i].second_word not in can_be_replaced_list):
can_be_replaced_list.append(synonym_list[i].second_word)
if (len(candidate_synonym) > 0):
print('Words that can be replaced:', can_be_replaced_list)
unique_words = len(can_be_replaced_list)
num_perturbed_words = 0
index = 0
                # Guard on `index`, not num_perturbed_words: index advances even when a
                # token cannot be located for replacement, so it is what can run past
                # the end of can_be_replaced_list.
                while (num_perturbed_words < max_replace and index < unique_words):
possible_replacement = []
for i in range(0, len(candidate_synonym)):
if (candidate_synonym[i].first_word == can_be_replaced_list[index] or candidate_synonym[i].second_word == can_be_replaced_list[index]):
possible_replacement.append(candidate_synonym[i])
temp_list = possible_replacement
possible_replacement = []
for i in range(0, len(temp_list)):
repeat = int(temp_list[i].similarity * 100)
for j in range(0, repeat):
possible_replacement.append(temp_list[i])
random_candidate = return_random_number(0, len(possible_replacement)-1)
original_word = ''
new_word = ''
if (possible_replacement[random_candidate].first_word == can_be_replaced_list[index]):
original_word = possible_replacement[random_candidate].first_word
new_word = possible_replacement[random_candidate].second_word
elif (possible_replacement[random_candidate].second_word == can_be_replaced_list[index]):
original_word = possible_replacement[random_candidate].second_word
new_word = possible_replacement[random_candidate].first_word
print(original_word, 'is replaced by', new_word)
perturbed_sample_tokenized = nltk.word_tokenize(perturbed_sample)
replacement_position = -1
for i in range(0, len(perturbed_sample_tokenized)):
if (original_word == perturbed_sample_tokenized[i]):
replacement_position = i
if (replacement_position > -1):
perturbed_sample = ""
for i in range(0, replacement_position):
perturbed_sample += perturbed_sample_tokenized[i] + ' '
perturbed_sample += new_word + ' '
for i in range(replacement_position+1, len(perturbed_sample_tokenized)):
perturbed_sample += perturbed_sample_tokenized[i] + ' '
word_replaced = True
num_perturbed_words += 1
index += 1
elif (len(candidate_synonym) == 0):
print('No word was replaced.')
if (word_replaced == True):
is_sample_perturbed = True
num_perturbed_samples += 1
print('Perturbed sample:', perturbed_sample)
if (is_sample_perturbed == True):
output_text += perturbed_sample + '\t' + sample_label + '\n'
print('----------------------------------------------------------')
line_num += 1
print('\nPerturbed Samples:', num_perturbed_samples)
output_file = open('Dataset\\i2b2-perturbed-word-replace-synonym.tsv', 'w')
output_file.write(output_text)
output_file.close()
if __name__ == '__main__':
pass
|
class Ticket(object):
    '''
    This class is the parking ticket
    '''
    __slotId = 0
    __registrationNumber = None
    __age = None
    def __init__(self, slotId, registrationNumber, age):
        '''
        Constructor
        '''
        self.__slotId = slotId
        self.__registrationNumber = registrationNumber
        self.__age = age
    def getSlotId(self):
        '''
        The method to get the slot id
        '''
        return self.__slotId
    def getRegistrationNumber(self):
        '''
        The method to get the registration number
        '''
        return self.__registrationNumber
    def getAge(self):
        '''
        The method to get the car age
        '''
        return self.__age
    def setRegistrationNumber(self, registrationNumber):
        '''
        The method to set the registration number
        '''
        self.__registrationNumber = registrationNumber
    def setAge(self, age):
        '''
        The method to set the car age
        '''
        self.__age = age
|
# raw_input() was Python 2; input() is the Python 3 equivalent.
name = input("Enter your name: ")
print('Hello ' + name)
|
class Subject:
"""
important to note that objects that inherit from this class
will have state, that will then be passed to the 'notify_observers'
method in the event that this state mutates.
"""
def __init__(self):
self.observers = set()
def register_observer(self, observer):
self.observers.add(observer)
def remove_observer(self, observer):
try:
self.observers.remove(observer)
except KeyError:
print("observer not present") # TODO change this
def notify_observers(self, **values):
"""
:param values: series of keyword arguments so that it is clear what values are being represented
:return: none
"""
for observer in self.observers:
observer.update(values)
class Observer:
def __init__(self, subject):
self.subject = subject
self.subject.register_observer(self)
def update(self, values):
"""
children of this class need to override this method.
'values' parameter will be a dictionary.
"""
pass
########################################################################################################################
# BELOW IS A CALCULATOR EXAMPLE USING THESE CONCEPTS
########################################################################################################################
class Calculator(Subject):
def __init__(self):
super().__init__()
self.state = 0
def add(self, *values):
self.state = self.state + sum(values)
self.notify_observers(state=self.state)
def subtract(self, *values):
for val in values:
self.state -= val
self.notify_observers(state=self.state)
def clear(self):
self.state = 0
self.notify_observers(state=self.state)
class Foo(Observer):
"""
for illustrative purposes, this is some class that
needs to be aware of changes in Calc's state
"""
def __init__(self, subject):
"""
remember we pass a reference to the subject so that
we can register this observer to that subject when this
observer is instantiated.
"""
super().__init__(subject)
self.state = 0
def update(self, values):
self.state = values["state"]
class CommandLineInterfaceLoop:
def __init__(self):
self.contents = Calculator()
self.observer1 = Foo(self.contents)
self.observer2 = Foo(self.contents)
self.buffer = ""
def parse_buffer(self):
nums = []
for word in self.buffer.split(" "):
try:
nums.append(int(word))
            except ValueError:
                pass  # ignore tokens that aren't integers
if "exit" in self.buffer.split(" "):
return 0
elif "add" in self.buffer.split(" "):
self.contents.add(*nums)
return 1
elif "subtract" in self.buffer.split(" "):
self.contents.subtract(*nums)
return 1
elif "clear" in self.buffer.split(" "):
self.contents.clear()
return 1
else:
return 2
def main_loop(self):
self.buffer = input("type something in lowercase to add/subtract")
result = self.parse_buffer()
if result == 0:
print("exiting calc app")
exit()
elif result == 1:
print(f"calc state is now {self.contents.state}")
print(f"obs1 state is now {self.observer1.state}")
print(f"obs2 state is now {self.observer2.state}")
self.main_loop()
elif result == 2:
print(f"error in parsing. buffer is \n{self.buffer}")
self.main_loop()
if __name__ == "__main__":
myloop = CommandLineInterfaceLoop()
myloop.main_loop()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.jvm.resolve.jvm_tool import JvmToolBase
class AvroSubsystem(JvmToolBase):
options_scope = "java-avro"
help = "Avro IDL compiler (https://avro.apache.org/)."
default_version = "1.11.0"
default_artifacts = ("org.apache.avro:avro-tools:{version}",)
default_lockfile_resource = (
"pants.backend.codegen.avro.java",
"avro-tools.default.lockfile.txt",
)
|
import numpy as np
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
        # Sigmoid activation and its derivative (written with 1.0 to keep the math in floats)
        self.activation_function = lambda x: 1.0 / (1.0 + np.exp(-x))
        self.activation_prime = lambda x: self.activation_function(x) * (1.0 - self.activation_function(x))
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0] #number of rows
        # The delta-weight arrays must match the shapes of the weight matrices
        # they update (element-wise addition/subtraction).
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape) # delta weights in shape of input_to_hidden
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape) #delta weights in shape of hidden_to_output
        # For each (X, y) pair: do a forward pass, then backpropagation,
        # accumulating the delta weights.
for X, y in zip(features, targets):
            ## final_outputs and hidden_outputs come from a single forward pass;
            ## the delta weights are cumulative, growing as each row is processed
final_outputs, hidden_outputs = self.forward_pass_train(X) # Implement the forward pass function below
            # Backpropagation: because this network has only two layers, it is enough
            # to pass the final and hidden outputs, X, y, and the running delta
            # weights; a deeper network would need more intermediate values.
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,
delta_weights_i_h, delta_weights_h_o)
# because delta accumulates, it eventually needs to be averaged.
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
''' Implement forward pass here
Arguments
---------
X: features batch
'''
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
        # hidden_outputs is already passed through the sigmoid; the output layer is linear (f(x) = x)
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer #f(x) = x
return final_outputs, hidden_outputs
def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
''' Implement backpropagation
Arguments
---------
final_outputs: output from forward pass
y: target (i.e. label) batch
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
'''
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# dimension 1,1
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = np.dot(self.weights_hidden_to_output, error) #2x1 * 1x1 = 2x1
# dimen weights_hidden_to_output 2x1
# TODO: Backpropagated error terms - Replace these values with your calculations.
output_error_term = error * 1 # f(x) = x is 1 no sigmoid activation on the output layer
# 1x1
        # hidden_outputs is already activated, so the sigmoid derivative can be
        # written directly in terms of it:
        hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
        # Delta weights are cumulative: they accumulate over the whole batch and are
        # averaged by n_records in the weight update. X here is a single record (row).
        # Weight step (input to hidden): the [:,None]/[None,:] pattern promotes the
        # 1-D arrays to column/row vectors so the outer product matches the weight shape.
        delta_weights_i_h += np.dot(X[:, None], hidden_error_term[None, :])
        # Weight step (hidden to output), using the same [:,None]/[None,:] outer-product pattern
        delta_weights_h_o += np.dot(hidden_outputs[:, None], output_error_term[None, :])
return delta_weights_i_h, delta_weights_h_o
def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
''' Update weights on gradient descent step
Arguments
---------
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
n_records: number of records
'''
# update real weights with only the average, learning rate discounted delta weights
self.weights_hidden_to_output += self.lr * delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
#########################################################
# Set your hyperparameters here
##########################################################
iterations = 5000
learning_rate = 0.6
hidden_nodes = 25
output_nodes = 1
'''
good result 2000, 0.1, 40
iterations = 2000
learning_rate = 0.1
hidden_nodes = 20
output_nodes = 1
iterations = 2000
learning_rate = 0.1
hidden_nodes = 15
output_nodes = 1
iterations = 3000
learning_rate = 0.1
hidden_nodes = 20
output_nodes = 1
iterations = 3000
learning_rate = 0.15
hidden_nodes = 30
output_nodes = 1
'''
|
n = int(input())
list1 = [int(x) for x in input().split(" ")]
count = 0
def f(l, r, x):
    # count occurrences of x in list1[l-1 .. r-1] (1-based, inclusive bounds)
    total = 0
    for k in range(l, r + 1):
        if list1[k - 1] == x:
            total += 1
    return total
for j in range(1, n + 1):
    for i in range(1, j):
        if f(1, i, list1[i - 1]) > f(j, n, list1[j - 1]):
            count += 1
print(count)
|
'''
252. Meeting Rooms
Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), determine if a person could attend all meetings.
For example,
Given [[0, 30],[5, 10],[15, 20]],
return false.
'''
# Solution: sort the intervals by start time first (the input is not sorted),
# then compare each meeting's end time with the next meeting's start time.
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def sortInterval(self,intervals2):
def merge(a,b):
c = []
while len(a) > 0 and len(b) > 0:
if a[0].start > b[0].start:
c.append(b[0])
b.remove(b[0])
else:
c.append(a[0])
a.remove(a[0])
if len(a) == 0:
c = c + b
else:
c = c + a
return c
if len(intervals2) == 1 or len(intervals2) == 0:
return intervals2
        mid = int(len(intervals2) / 2)
a = self.sortInterval(intervals2[:mid])
b = self.sortInterval(intervals2[mid:])
return merge(a,b)
def canAttendMeetings(self, intervals):
"""
:type intervals: List[Interval]
:rtype: bool
"""
# sort the input using MergeSort Algo
intervals = self.sortInterval(intervals)
for i in range(1,len(intervals)):
if intervals[i-1].end > intervals[i].start:
return False
return True
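# A minimal alternative sketch (not the original author's approach): the same check
# using Python's built-in sort instead of the hand-rolled merge sort above. It
# assumes the same Interval objects with .start and .end attributes.
def can_attend_meetings_simple(intervals):
    intervals = sorted(intervals, key=lambda iv: iv.start)
    return all(a.end <= b.start for a, b in zip(intervals, intervals[1:]))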
|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
from jspider.manager import Manager
__author__ = "golden"
__date__ = '2018/6/4'
if __name__ == '__main__':
manager = Manager()
spider = manager.setup_spider('qb')
spider.run_forever = False
spider.run()
# manager.add_spider(spider)
# manager.run_forever()
|
import numpy as np
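# Scratch worksheet for ideal-gas power-cycle problems. Note that names such as
# T_1, T_2, k, CR are deliberately reused and overwritten from one problem to the
# next, so only the last assignment of each survives; nothing is printed.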
a_pressure = 101325
a_temperature = 279
e_temperature = 294
k = 1.4
C_v = 718
eta_t = 0.40
V = (1.5*0.001)/5
V_ans = (1-eta_t)**((k-1)**-1)*V
T_1 = a_temperature
T_4 = e_temperature
T_2 = ((1-eta_t)**-1)*T_1
T_3 = (T_4*T_2)/T_1
q_in = C_v*(T_3-T_2)
q_out = C_v*(T_4-T_1)
T_1 = np.array([17+273.15, 23+273.15])
T_2 = np.array([575+273.15, 425+273.15])
k = 1.4
CR=((((1-(T_1/T_2))-1)*-1)**((k-1)**-1))**-1
k = np.array([1.44, 1.47])
eta_t = np.array([0.45, 0.5])
CR = ((1-eta_t)**((k-1)**-1))**-1
CR = np.array([10.5, 7.5])
p_1 = np.array([80, 85])*1000
T_1 = np.array([21+273.15, 26+273.15])
T_3 = np.array([1175+273.15, 1200+273.15])
k = 1.4
C_p = 1004
C_v = C_p/k
T_2 = (((1/CR)**(k-1))**-1)*T_1
q_in = C_v*(T_3-T_2)
T_4 = (T_3*T_1)/T_2
q_out = C_v*(T_4-T_1)
eta_t = 1-T_1/T_2
p_2 = np.array([93000, 93300])
T_2 = np.array([275, 285])
comp = np.array([8, 8.25])
p_4 = np.array([721680, 746633.25])
T_4 = np.array([1440, 1470])
C_p = 1000
m_dot = 3
k = 1.4
T_1 = ((comp**((k-1)/k))**-1)*T_2
T_8 = ((comp**((k-1)/k))**-1)*T_4
T_3 = ((comp**((k-1)/k)))*T_2
W_23 = m_dot*C_p*(T_3-T_2)
W_48 = m_dot*C_p*(T_4-T_8)
W_net = W_48-W_23
|
__author__ = "Narwhala"
import time
def consumer(name):
    print('%s is here, ready to eat baozi!' % name)
    while True:
        baozi = yield
        print('The %s baozi arrived and was eaten by %s' % (baozi, name))
# c = consumer('Narwhala')
# c.__next__()
# c.send('pork filling')
def producer(name):
    c1 = consumer('A')  # calling consumer() only creates a generator; nothing runs yet
    c2 = consumer('B')
    c1.__next__()  # __next__() runs up to the first yield (the greeting prints), then pauses
    c2.__next__()
    print('%s is starting to make baozi!' % name)
    for i in range(10):
        time.sleep(1)
        print('----------------------------')
        print('%s made two pork-filled baozi!' % name)
        c1.send('pork filling')
        c2.send('pork filling')
producer('hsj')
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imutils
import sys
def plot_img(img):
if len(img.shape) == 3:
plt.imshow(img)
else:
plt.imshow(img, cmap='gray', vmin=0, vmax=255)
def read_rgb(fname):
img = cv2.imread(fname, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def rgb2gray(image):
    # inputs from read_rgb are RGB, so use the RGB flag (BGR2GRAY would swap the channel weights)
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    return gray
def resize(img):
    return imutils.resize(img, height=300)
def gray_to_bw(gray, thr=128):
thresh, im_bw = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY )
# im_bw = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
return im_bw
def canny_edge(gray):
edged = cv2.Canny(gray, 20, 100)
return edged
def show(img):
plt.figure(figsize=(10,8))
plt.imshow(img, cmap='gray')
plt.show()
def check_pixel(pixel):
THR = 50
r,g,b = pixel
if np.abs(r-g) < THR and np.abs(r-b) < THR and np.abs(g-b) < THR :
return True
return False
def _persp_transform(img, s_points):
"""Transform perspective from start points to target points."""
# Euclidean distance - calculate maximum height and width
height = max(np.linalg.norm(s_points[0] - s_points[1]),
np.linalg.norm(s_points[2] - s_points[3]))
width = max(np.linalg.norm(s_points[1] - s_points[2]),
np.linalg.norm(s_points[3] - s_points[0]))
# Create target points
t_points = np.array([[0, 0],
[0, height],
[width, height],
[width, 0]], np.float32)
# getPerspectiveTransform() needs float32
if s_points.dtype != np.float32:
s_points = s_points.astype(np.float32)
M = cv2.getPerspectiveTransform(s_points, t_points)
return cv2.warpPerspective(img, M, (int(width), int(height)))
def save_image(fname, img):
cv2.imwrite(fname, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
def run(path):
img = read_rgb(path)
sp = img.shape
img = cv2.resize(img, (int(sp[1]/2), int(sp[0]/2)))
sp = img.shape
_mask = np.zeros((sp[0], sp[1]))
img_b = img[:,:,0]
img_g = img[:,:,1]
img_r = img[:,:,2]
img_sub = np.abs(img_r.astype(int) - img_b.astype(int))
img_sub = img_sub.astype(np.uint8)
bw = gray_to_bw(img_sub, 50)
kernel = np.ones((15,15),np.uint8)
    # (named "erosion" in the original, though this is a dilation followed by inversion)
    img_erosion = cv2.dilate(bw, kernel, iterations=1)
    img_erosion = 255 - img_erosion
    # cv2.findContours returns 3 values in OpenCV 3 and 2 in OpenCV 4;
    # taking the last two keeps this working on either version.
    contours, hierarchy = cv2.findContours(img_erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
max_area = -1
max_contour = None
for cnt in contours:
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
max_contour = cnt
epsilon = 0.1*cv2.arcLength(max_contour,True)
approx = cv2.approxPolyDP(max_contour,epsilon,True)
sp = img_erosion.shape
print (sp)
print (approx)
list_point = []
for x in approx:
list_point.append((x[0][0]/sp[1], x[0][1]/sp[0]))
# list_point = np.array(list_point)
img = read_rgb(path)
_sp = img.shape
list_4_points = [(int(e[0]*_sp[1]), int( e[1]*_sp[0])) for e in list_point]
list_4_points = np.array(list_4_points)
_per = _persp_transform(img, list_4_points)
cv2.imwrite("crop.png", _per)
_sp = _per.shape
bar_img = _per[0:int(_sp[0]*0.12), int(_sp[1]*0.48):int(_sp[1]*0.85) ]
cv2.imwrite("bar.png", bar_img)
# return list_point
return _per
if __name__ == "__main__":
res = run(sys.argv[1])
|
import os
import sys
import subprocess
import shutil
import fam
sys.path.insert(0, 'scripts')
sys.path.insert(0, 'tools/raxml/')
import experiments as exp
import time
import saved_metrics
import run_raxml_supportvalues as raxml
import sequence_model
def run_pargenes(datadir, pargenes_dir, subst_model, starting_trees, cores):
raxml_command = ""
run_modeltest = (subst_model == "bestAA" or subst_model == "bestNT")
if (not run_modeltest):
raxml_command +="--model " + sequence_model.get_raxml_model(subst_model) + " --blopt nr_safe"
command = []
command.append(exp.python())
command.append(exp.pargenes_script_debug)
command.append("-a")
command.append(os.path.join(datadir, "alignments"))
command.append("-o")
command.append(pargenes_dir)
command.append("-c")
command.append(str(cores))
command.append("-s")
    command.append(str(starting_trees // 2))  # integer division: tree counts must stay ints
    command.append("-p")
    command.append(str(starting_trees - starting_trees // 2))
if (len(raxml_command) > 0):
command.append("-R")
command.append(raxml_command)
if (run_modeltest):
command.append("-m")
if (subst_model == "bestAA"):
command.append("-d")
command.append("aa")
command.append("--continue")
try:
subprocess.check_call(command, stdout = sys.stdout)
    except subprocess.CalledProcessError:
command[0] = exp.python()
print(" ".join(command))
subprocess.check_call(command, stdout = sys.stdout)
def export_pargenes_trees(pargenes_dir, subst_model, samples, datadir):
families_dir = os.path.join(datadir, "families")
# tca scores
concatenated_dir = os.path.join(pargenes_dir, "concatenated_bootstraps")
if (os.path.isdir(concatenated_dir)):
for concatenation in os.listdir(concatenated_dir):
family = "_".join(concatenation.split("_")[:-1]) # remove everything after the last
src = os.path.join(concatenated_dir, concatenation)
dest = fam.get_plausible_trees(datadir, samples, subst_model, family)
shutil.copyfile(src, dest)
def run_pargenes_and_extract_trees(datadir, subst_model, samples, cores, pargenes_dir = "plausible", extract_trees = True, restart = False):
    # The body of this function is truncated in the source. Below is a hedged sketch
    # of the likely flow, based only on the helpers defined above (the argument
    # mapping for run_pargenes is an assumption).
    run_pargenes(datadir, pargenes_dir, subst_model, samples, cores)
    if (extract_trees):
        export_pargenes_trees(pargenes_dir, subst_model, samples, datadir)
|
# You are given the array paths, where paths[i] = [cityAi, cityBi]
# means there exists a direct path going from cityAi to cityBi.
# Return the destination city, that is, the city without any path
# outgoing to another city.
#
# It is guaranteed that the graph of paths forms a line without
# any loop, therefore, there will be exactly one destination city.
class Solution:
def destCity(self, paths):
        paths_unpkd = sum(paths, [])  # flatten [[a, b], ...] into one list of city names
return [city for par in paths for city in par
if paths_unpkd.count(city) == 1 and par.index(city) != 0][0]
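# A simpler O(n) sketch (not the original author's method): the destination is the
# only city that never appears as a departure, so a set difference finds it directly.
def dest_city_set(paths):
    departures = {a for a, b in paths}
    return next(b for a, b in paths if b not in departures)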
if __name__ == "__main__":
test_input = [["London", "New York"], ["New York", "Lima"],
["Lima", "Sao Paulo"]]
    print(Solution().destCity(test_input))
|
import cx_Freeze
executables = [cx_Freeze.Executable("slither.py")]
cx_Freeze.setup(
name = "Slither",
options = {"build_exe":{"packages":["pygame"], "include_files":["apple30px.png", "snakehead20px.png"]}},
description = "Slither Game",
executables = executables
)
|
from bs4 import BeautifulSoup
import requests
import os
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
def log_in():
return s.post(url, data=values, verify=False).content
def routine():
source = log_in()
tree = BeautifulSoup(source, 'html.parser')
# Iterate through all current courses (which have 'id' 1)
for t in tree.findAll('div', {'id': '1'}):
# Find course links and titles
courses = t.findAll('div', {'class': 'course_title'})
links = [c.findChildren('a')[0]['href'] for c in courses]
titles = [c.findChildren('a')[0]['title'] for c in courses]
# Create course folders if they do not already exist
for title in titles:
dir_ = directory + '/' + title
if not os.path.exists(dir_):
os.makedirs(dir_)
os.makedirs(dir_ + '/' + 'Labs')
os.makedirs(dir_ + '/' + 'Assignments')
# Check for files that are already downloaded
files = []
for root, subdirs, f in os.walk(directory):
files.append(f)
    all_files = []
    for file_list in files:
        all_files.extend(file_list)  # map() is lazy in Python 3, so the original map(...) never ran
# Iterate through course contents and download any new material
for link, title in zip(links, titles):
chtml = s.get(link, verify=False).content
ctree = BeautifulSoup(chtml, 'html.parser')
# Check for all resources
for t in ctree.findAll('li', {'class': 'activity resource modtype_resource'}):
r_image = t.findChildren('img')[0]['src']
# File extension
if r_image == powerpoint:
r_type = '.pptx'
elif r_image == pdf:
r_type = '.pdf'
elif r_image == word:
r_type = '.docx'
else:
continue
# File link
r_link = t.findChildren('a')[0]['href']
# File name
r_title = t.findAll('span', {'class': 'instancename'})[0].text + r_type
            # Move on if the file already exists
if r_title in all_files:
continue
# Some pattern matching to classify files
if 'lab' in r_title.lower():
r_title = directory + '/' + title + '/Labs/' + r_title
elif 'assignment' in r_title.lower():
r_title = directory + '/' + title + '/Assignments/' + r_title
else:
r_title = directory + '/' + title + '/' + r_title
# If pdf, need to do some extra work to get file link
if r_type == '.pdf':
page = s.get(r_link, verify=False).content
page_tree = BeautifulSoup(page, 'html.parser')
r_link = page_tree.findAll('object', {'type': 'application/pdf'})[0]['data']
            print(r_title)
# Download file now
response = s.get(r_link, stream=True, verify=False)
with open(r_title, "wb") as handle:
for data in tqdm(response.iter_content()):
handle.write(data)
# Create a persistent global session
s = requests.session()
# Load configurations
import config as cfg
directory = cfg.directory
url = cfg.resources['url']
values = {'username': cfg.login['user'],
'password': cfg.login['password']}
powerpoint = cfg.resources['powerpoint']
pdf = cfg.resources['pdf']
word = cfg.resources['word']
routine()
|
#I pledge my honor that I have abided by the Stevens Honor System.
#Zachary Jones
# HW 5 Problem 1
def recursive_square(lst):  # renamed parameter: "list" shadows the built-in
    if not lst:
        return []
    return [lst[0] ** 2] + recursive_square(lst[1:])
numbers_list = [2, 4, 6, 8, 10, 11]
print('Squared entries: ' + str(recursive_square(numbers_list)))
|
"""A place to make 'leftover' plots for the thesis."""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('white')
from astropy.constants import c, h, k_B
c, h, k_B = c.value, h.value, k_B.value
def plot_SED():
# These l's are just the exponent for the wavelength
d = 3.315 * 10**18 # distance to hd157587, in m
r_star, r_disk = 6.957 * 10**8, 10**12 # Solar radius and 100 AU, in m
t_star, t_disk = 6000, 50
def planck_nu(l, temp):
# l is wavelength in m
nu = c/l
top = 2. * h * nu**3 * c**-2
bot_exp = h*nu/ (k_B * temp)
B_nu = top / ((np.exp(bot_exp) - 1.))
return B_nu
l_range = np.logspace(-8, -2, 1000)
nfn_star = planck_nu(l_range, t_star) * (c/l_range * 1e26 * np.pi * (r_star/d)**2)
nfn_disk = planck_nu(l_range, t_disk) * (c/l_range * 1e26 * np.pi * (r_disk/d)**2)
nfn_total = nfn_disk + nfn_star
fig, ax = plt.subplots(figsize=(8, 4))
ax.loglog(l_range, nfn_total, '-k', lw=4, label='Observed SED')
ax.loglog(l_range, nfn_disk, linestyle=":", color='red', lw=2, label='Disk Contribution')
ax.loglog(l_range, nfn_star, linestyle=":", color='orange', lw=2, label='Stellar Contribution')
ax.set_ylim(1e10, 6e14)
ax.set_xlim(10**(-7.1), 10**(-2.5))
def format_func(value, tick_number):
N = value * 1e3
if N < 1:
i = 1
while round(N, i) == 0:
i += 1
N = round(N, i)
else:
N = int(N)
return N
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax.set_yticklabels([])
ax.set_ylabel(r"$\nu F_{\nu}$ (Jy Hz)", weight='bold')
ax.set_xlabel("Wavelength (mm)", weight='bold')
plt.legend(loc='best')
sns.despine()
plt.savefig('/Volumes/disks/jonas/Thesis/Figures/example_SED.pdf')
plt.show()
plot_SED()
# The End
|
import chainer
import chainer.functions as F
from chainer import testing
import numpy as np
from onnx_chainer.testing import input_generator
from tests.helper import ONNXModelTest
@testing.parameterize(
{'op_name': 'average_pooling_2d',
'in_shape': (1, 3, 6, 6), 'args': [2, 1, 0], 'cover_all': None},
{'op_name': 'average_pooling_2d', 'condition': 'pad1',
'in_shape': (1, 3, 6, 6), 'args': [3, 2, 1], 'cover_all': None},
{'op_name': 'average_pooling_nd',
'in_shape': (1, 3, 6, 6, 6), 'args': [2, 1, 1], 'cover_all': None},
{'op_name': 'max_pooling_2d',
'in_shape': (1, 3, 6, 6), 'args': [2, 1, 1], 'cover_all': False},
{'op_name': 'max_pooling_2d', 'condition': 'coverall',
'in_shape': (1, 3, 6, 5), 'args': [3, (2, 1), 1], 'cover_all': True},
{'op_name': 'max_pooling_nd',
'in_shape': (1, 3, 6, 6, 6), 'args': [2, 1, 1], 'cover_all': False},
{'op_name': 'max_pooling_nd', 'condition': 'coverall',
'in_shape': (1, 3, 6, 5, 4), 'args': [3, 2, 1], 'cover_all': True},
{'op_name': 'unpooling_2d',
'in_shape': (1, 3, 6, 6), 'args': [3, None, 0], 'cover_all': False},
)
class TestPoolings(ONNXModelTest):
def setUp(self):
ops = getattr(F, self.op_name)
self.model = Model(ops, self.args, self.cover_all)
self.x = input_generator.increasing(*self.in_shape)
def test_output(self):
name = self.op_name
if hasattr(self, 'condition'):
name += '_' + self.condition
self.expect(self.model, self.x, name=name,
expected_num_initializers=0)
class Model(chainer.Chain):
def __init__(self, ops, args, cover_all):
super(Model, self).__init__()
self.ops = ops
self.args = args
self.cover_all = cover_all
def __call__(self, x):
if self.cover_all is not None:
return self.ops(*([x] + self.args), cover_all=self.cover_all)
else:
return self.ops(*([x] + self.args))
class TestROIPooling2D(ONNXModelTest):
def setUp(self):
# these parameters are referenced from chainer test
in_shape = (3, 3, 12, 8)
self.x = input_generator.positive_increasing(*in_shape)
        # In the Chainer test, x is shuffled and a normalize-like conversion is
        # applied; those operations are skipped here. If x includes negative
        # values, the output does not match onnxruntime's. You can reproduce
        # the issue by changing `positive_increasing` to `increasing`.
self.rois = np.array([
[0, 1, 1, 6, 6],
[2, 6, 2, 7, 11],
[1, 3, 1, 5, 10],
[0, 3, 3, 3, 3]], dtype=np.float32)
kwargs = {
'outh': 3,
'outw': 7,
'spatial_scale': 0.6
}
class Model(chainer.Chain):
def __init__(self, kwargs):
super(Model, self).__init__()
self.kwargs = kwargs
def __call__(self, x, rois):
return F.roi_pooling_2d(x, rois, **self.kwargs)
self.model = Model(kwargs)
def test_output(self):
with testing.assert_warns(UserWarning):
self.expect(self.model, [self.x, self.rois])
|
import psycopg2
import yaml
import os
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(DIR_PATH, '..', 'config.yaml')
with open(CONFIG_PATH) as conf:
try:
        config = yaml.safe_load(conf)  # safe_load avoids yaml.load's unsafe default behavior
except yaml.YAMLError as exc:
print('Error in config file: {0}'.format(exc))
conn = psycopg2.connect('dbname={0} user={1}'.format(config['db']['name'],config['db']['user']))
cur = conn.cursor()
def get_conn():
return conn
def get_cursor():
return cur
def get_db_name():
return config['db']['name']
def get_tables():
return config['db']['tables']
def commit():
conn.commit()
MISSING = object()
def get_column(column_name, table_name, criteria = MISSING, limit = MISSING):
    # Returns a list containing the entries in a single column of a table
if '.' not in table_name:
query = 'Select {0} from "{1}"'.format(column_name,table_name)
else:
query = "Select {0} from {1}".format(column_name,table_name)
    if criteria is not MISSING:
        query += ' WHERE {0}'.format(criteria)
    if limit is not MISSING:
        query += ' LIMIT {0}'.format(limit)
cur.execute(query)
return list(sum( cur.fetchall(), ()))
def get_symbols_list(criteria = MISSING):
    # Interpolating the MISSING sentinel into SQL was a bug; only add the
    # WHERE clause when a real criteria value is supplied.
    if criteria is MISSING:
        symbols = get_column('symbol', 'Stocks')
    else:
        symbols = get_column('symbol', 'Stocks', "symbol>'{0}'".format(criteria))
    return sorted(symbols)
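# Hypothetical usage, assuming a "Stocks" table with a "symbol" column:
#   get_column('symbol', 'Stocks', criteria="symbol > 'AAPL'", limit=10)
#   get_symbols_list()          # every symbol, sorted
#   get_symbols_list('AAPL')    # symbols strictly greater than 'AAPL'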
|
from django.db import models
class Post(models.Model):
title = models.CharField(max_length=128)
content = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
publish = models.DateTimeField(
auto_now_add=False,
auto_now=False,
null=True,
blank=True
)
def get_absolute_url(self):
        return f'/posts/{self.id}'  # leading slash makes the URL site-absolute
@property
def elastic_score(self):
return 0.75
|
"""
Submodule for basic MPI environment discovery
"""
from __future__ import annotations
__all__ = [
"rank",
"size"
]
def rank() -> int:
"""
Returns the MPI rank of the process
"""
def size() -> int:
"""
Returns the MPI size (no. of processes) of the run
"""
|
from .db import db
from .loan import Loan
|
from flask import Flask, render_template, request, url_for, redirect
from passlib.handlers.sha2_crypt import sha256_crypt
import psycopg2, time
hostname = 'localhost'
username = 'postgres'
password = 'admin'
database = 'PongGame'
myConnection = psycopg2.connect( host=hostname, user=username, password=password, dbname=database )
myConnection.close()
conn_string = "host='%s' dbname='%s' user='%s' password='%s' port='%i'" \
% (hostname, database, username, password, 5432)
class valores:
origen = 0
destino = 0
usuarioActual=""
app = Flask(__name__)
@app.route('/')
def homepage():
return render_template("main.html")
@app.route('/dashboard/')
def dashboard():
if valores.usuarioActual != "":
return render_template("dashboard.html")
else:
return redirect(url_for('login'))
@app.errorhandler(404)
def page_not_found(e):
    return render_template("404.html"), 404
@app.errorhandler(405)
def method_not_found(e):
    return render_template("405.html"), 405
@app.errorhandler(500)
def programmer_error(e):
    return render_template("500.html", error=e), 500
@app.route('/login/', methods=['GET', 'POST'])
def login():
error = ''
conn = psycopg2.connect(conn_string)
conn2 = psycopg2.connect(conn_string)
try:
if request.method == "POST":
attempted_username = request.form['username']
attempted_password = request.form['password']
cursor = conn.cursor()
cursor2 = conn2.cursor()
salida = ""
salida2 = ""
cursor.execute("SELECT nombre FROM usuario where nombre = (%s)", [request.form['username']])
cursor2.execute("SELECT password FROM usuario where nombre = (%s)", [request.form['username']])
for row in cursor:
salida += str(row[0])
for row in cursor2:
salida2 += str(row[0])
if attempted_username == salida and sha256_crypt.verify(attempted_password, salida2):
valores.usuarioActual = salida
return redirect((url_for('dashboard')))
else:
error = "Invalid Credentials. Please try again"
return render_template("login.html", error = error)
except Exception as e:
return render_template("login.html", error = error)
@app.route('/register/', methods=['GET', 'POST'])
def registro():
error = ''
conn = psycopg2.connect(conn_string)
salida = ''
try:
if request.method == "POST":
username = request.form['username']
email = request.form['email']
password = request.form['password']
            passwordEncrypt = sha256_crypt.hash(password)  # passlib deprecated .encrypt() in favor of .hash()
confirm = request.form['confirm']
cursor = conn.cursor()
if password == confirm:
cursor.execute("INSERT into usuario (nombre, email, password) values (%s, %s, %s)",
[str(username), str(email), str(passwordEncrypt)])
conn.commit()
cursor.execute("SELECT nombre FROM usuario where nombre = (%s)", [request.form['username']])
for row in cursor:
salida += str(row[0])
valores.usuarioActual = salida
return redirect((url_for('dashboard')))
else:
error = "Password does not match each other, please try again"
return render_template("registro.html", error=error)
except Exception as e:
return render_template("registro.html", error=error)
@app.route('/jugar/', methods=['GET', 'POST'])
def jugar():
if valores.usuarioActual != "":
return render_template('pong.html')
else:
return redirect(url_for('login'))
if __name__ == "__main__":
app.run(debug=True, port=8000, host='0.0.0.0')
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
tboolean_dd.py
=================
Usage::
tboolean-
tboolean-dd- # see the output of this script doing the conversion into NCSG
tboolean-dd # visualize resulting polygonization and raytrace
#. FIXED: solid-0:PYREX poly-cylinder not matching raytrace, NCylinder SDF was ignoring center.z and bbox was wrong
#. FIXED: solid-2:CATHODE restricting to just the inner observe the correct union of zsphere shape : but it disappears from the other side ?
   this weirdness was due to using CSG sub-objects that are not solids (i.e. not closed geometry);
   endcaps to close sub-objects are required when using CSG
#. FIXED: doing all together, are missing a translate transform for BOTTOM, the tranOffset needed passing in to csg_intersect_part
#. ISSUE: CATHODE is 0.05 mm thick diff of zspheres, this causes difficulties
* finding the surface, needs manual seeding and extreme resolution (4000)
* extreme resolution 4000, did not run into memory problems only due to continuation failure
* without continuation failure even resolution ~200 runs into memory issues,
and in any case that kinda resolution produces far too many tris
* IM continuation fails to stay with the surface, only producing a very small mesh patch
#. ISSUE: BOTTOM is 1 mm thick diff of zspheres, similar issue to CATHODE but x20 less bad
* IM succeeds with resolution 150, but too many tris to be of much use
* DCS nominal 7, coarse 7/6 produces a whacky partial mesh with "Moire lace pattern"
* DCS nominal 8, coarse 7 : slow and produces too many tris 670198, they are disqualified for being beyond bbox
"""
import numpy as np
import logging
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_main
from opticks.analytic.treebase import Tree
from opticks.analytic.polyconfig import PolyConfig
from opticks.analytic.csg import CSG
from opticks.ana.pmt.ddbase import Dddb
from opticks.ana.pmt.ncsgconverter import NCSGConverter
if __name__ == '__main__':
args = opticks_main()
g = Dddb.parse(args.apmtddpath)
lvn = "lvPmtHemi"
lv = g.logvol_(lvn)
tr = Tree(lv)
nn = tr.num_nodes()
container = CSG("box")
container.boundary = args.container
container.meta.update(PolyConfig("CONTAINER").meta)
objs = []
objs.append(container)
ii = range(nn)
for i in ii:
node = tr.get(i)
lv = node.lv
lvn = lv.name
pc = PolyConfig(lv.shortname)
log.info("\ntranslating .............lvn %s ....node %r " % (lvn, node) )
obj = NCSGConverter.ConvertLV( lv )
obj.boundary = args.testobject
obj.meta.update(pc.meta)
obj.meta.update(gpuoffset="-200,0,0") # shift raytrace only
obj._translate += np.array([200, (i-2)*200, 0], dtype=np.float32 )
# solid parade in Y (left-right) and shift everything down in Z
# translate applied to root nodes, gets propagated
# down to gtransforms on the primitives at serialization
log.info("obj.translate: %s " % (obj.translate) )
log.info("obj.transform: \n%s " % (obj.transform) )
obj.analyse() # labelling etc..
objs.append(obj)
CSG.Dump(obj)
pass
CSG.Serialize(objs, args.csgpath, outmeta=True )
# outmeta: stdout communicates to tboolean-- THAT MEANS NO OTHER "print" use logging instead
|
import numpy as np
import matplotlib.pyplot as plt
#import gif
"""
surface plot
INPUTS:
- ax : axis to draw figure on
- x : numpy array corresponding to ROWS of Z (displayed on x-axis)
x[0] corresponds to Z[0,:] and x[end] corresponds to Z[end,:]
- y : numpy array corresponding to COLUMNS of Z (displayed on y-axis)
y[0] corresponds to Z[:,0] and y[end] corresponds to Z[:,end]
- Z : image to plot
- clim : color limits for image; default: [min(Z), max(Z)]
"""
def plotsurface(ax, x, y, Z, clim=None):
x = x.flatten()
y = y.flatten()
deltax = x[1]-x[0]
deltay = y[1]-y[0]
extent = (np.min(x)+deltax/2,
np.max(x)-deltax/2,
np.min(y)+deltay/2,
np.max(y)-deltay/2)
    if clim is None:
clim = [np.min(Z), np.max(Z)]
im = ax.imshow(np.transpose(Z),
origin='lower',
extent=extent,
vmin=clim[0],
vmax=clim[1])
return im
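# A minimal usage sketch with hypothetical data: display a 2-D Gaussian bump.
#   fig, ax = plt.subplots()
#   x = np.linspace(-2, 2, 100)
#   y = np.linspace(-1, 1, 50)
#   Z = np.exp(-(x[:, None]**2 + y[None, :]**2))  # rows follow x, columns follow y
#   plotsurface(ax, x, y, Z)
#   plt.show()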
"""
plotExplanation - plot explanation created by GCE.explain().
Rows in output figure correspond to samples (first dimension of Xhats);
columns correspond to latent values in sweep.
:param Xhats: result from GCE.explain()
:param yhats: result from GCE.explain()
:param save_path: if provided, will export to {<save_path>_latentdimX.svg}
"""
def plotExplanation(Xhats, yhats, save_path=None):
cols = [[0.047,0.482,0.863],[1.000,0.761,0.039],[0.561,0.788,0.227]]
border_size = 3
(nsamp,z_dim,nz_sweep,nrows,ncols,nchans) = Xhats.shape
for latent_dim in range(z_dim):
fig, axs = plt.subplots(nsamp, nz_sweep)
for isamp in range(nsamp):
for iz in range(nz_sweep):
img = Xhats[isamp,latent_dim,iz,:,:,0].squeeze()
yhat = int(yhats[isamp,latent_dim,iz])
img_bordered = np.tile(np.expand_dims(np.array(cols[yhat]),(0,1)),
(nrows+2*border_size,ncols+2*border_size,1))
img_bordered[border_size:-border_size,border_size:-border_size,:] = \
np.tile(np.expand_dims(img,2),(1,1,3))
axs[isamp,iz].imshow(img_bordered, interpolation='nearest')
axs[isamp,iz].axis('off')
axs[0,round(nz_sweep/2)-1].set_title('Sweep latent dimension %d' % (latent_dim+1))
if save_path is not None:
plt.savefig('./%slatentdim%d.svg' % (save_path,latent_dim+1), bbox_inches=0)
plt.close(fig)
def outline_mask(ax, mask, bounds=(0,1,0,1), color=(0,0,0,0.25)):
# https://stackoverflow.com/questions/24539296/outline-a-region-in-a-graph
x0, x1, y0, y1 = bounds
    # a vertical line segment is needed when pixels next to each other horizontally
    # belong to different groups (one is part of the mask, the other isn't);
# after this ver_seg has two arrays, one for row coordinates, the other for column coordinates
ver_seg = np.where(mask[:,1:] != mask[:,:-1])
# the same is repeated for horizontal segments
hor_seg = np.where(mask[1:,:] != mask[:-1,:])
    # if we have a horizontal segment at 7,2, it means that it must be drawn between pixels
    # (2,7) and (2,8), i.e. from (2,8)..(3,8)
    # in order to draw a discontinuous line, we add NaNs in between segments
l = []
for p in zip(*hor_seg):
l.append((p[1], p[0]+1))
l.append((p[1]+1, p[0]+1))
l.append((np.nan,np.nan))
# and the same for vertical segments
for p in zip(*ver_seg):
l.append((p[1]+1, p[0]))
l.append((p[1]+1, p[0]+1))
l.append((np.nan, np.nan))
# now we transform the list into a numpy array of Nx2 shape
segments = np.array(l)
# now we need to know something about the image which is shown
# at this point let's assume it has extents (x0, y0)..(x1,y1) on the axis
# drawn with origin='lower'
# with this information we can rescale our points
segments[:,0] = x0 + (x1-x0) * segments[:,0] / mask.shape[1]
segments[:,1] = y0 + (y1-y0) * segments[:,1] / mask.shape[0]
    # and now all that's left to do is plot it
ax.plot(segments[:,0], segments[:,1], color=color, linewidth=1)
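# A minimal usage sketch with hypothetical data: outline a circular region on an image.
#   fig, ax = plt.subplots()
#   yy, xx = np.mgrid[0:64, 0:64]
#   mask = (xx - 32)**2 + (yy - 32)**2 < 15**2
#   ax.imshow(mask, origin='lower', extent=(0, 1, 0, 1))
#   outline_mask(ax, mask)
#   plt.show()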
|
from flexp.flexp.core import (
setup,
describe,
name,
static,
backup_files,
backup_sources,
get_static_file,
get_file_path,
get_file,
set_metadata,
disable,
close,
)
|
from flask import Flask, request, render_template
from flask_cors import cross_origin
import sklearn
import pickle
import pandas as pd
import nltk
nltk.download('stopwords')
import re
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import os
stemmer=PorterStemmer()
app=Flask(__name__)
IMAGE_FOLDER=os.path.join('static','img_pool')
app.config['UPLOAD_FOLDER']=IMAGE_FOLDER
with open('fake_news_predictor.pkl','rb') as f1:
model=pickle.load(f1)
with open('tfidf1.pkl','rb') as f2:
tf1=pickle.load(f2)
@app.route("/")
@cross_origin()
def home():
return render_template("home.html")
@app.route("/predict",methods=["GET","POST"])
@cross_origin()
def predict():
if request.method=="POST":
word_list=[]
news=(request.form["News"])
words=re.sub('[^a-zA-Z]',' ',news)
words=words.lower().split()
words=[stemmer.stem(word) for word in words if word not in set(stopwords.words('english'))]
words=' '.join(words)
word_list.append(words)
        # Reuse the fitted vectorizer so the idf weights learned at training time are
        # kept; re-fitting a new TfidfVectorizer on a single document would flatten them.
        Embedded_News = tf1.transform(word_list).toarray()
prediction=model.predict(Embedded_News)
output=""
if prediction==0:
output="Fake"
img_filename=os.path.join(app.config['UPLOAD_FOLDER'],'Sad_Emoji.png')
else:
output="Real"
img_filename=os.path.join(app.config['UPLOAD_FOLDER'],'Smiling_Emoji.png')
return render_template('home.html',prediction_text=f'This is a {output} News',image=img_filename)
return render_template("home.html")
if __name__=="__main__":
app.run(debug=True)
|
import sys
from collections import defaultdict
from lib.intcode import Machine
if len(sys.argv) == 1 or sys.argv[1] == '-v':
print('Input filename:')
f=str(sys.stdin.readline()).strip()
else: f = sys.argv[1]
verbose = sys.argv[-1] == '-v'
for l in open(f):
mreset = [int(x) for x in l.strip().split(',')]
class Robot:
def __init__(self, m):
self.__cpu = Machine(m)
'''
{current, min, max}
'''
self.__x, self.__y = (0, 0, 0), (0, 0, 0)
self.__panels = defaultdict(lambda: '.')
self.__color = 0
'''
< : 0
^ : 1
> : 2
v : 3
'''
self.__direction = 1
if verbose: self.__cpu.toggle_verbose()
def start(self, start_color):
if start_color: self.__panels[(0, 0)] = '#'
while not self.__cpu.halted():
self.__cpu.run(1 if self.__panels[(self.__x[0], self.__y[0])] == '#' else 0)
self.paint(self.__cpu.output())
self.turn(self.__cpu.output())
self.move()
def display(self):
_, minx, maxx = self.__x
_, miny, maxy = self.__y
grid = [[' ']*(maxx+1) for _ in range(maxy+1)]
for (x, y), v in self.__panels.items():
if x < 0 or y < 0: break
if v == '#': grid[y][x] = v
for l in grid: print(''.join(l))
print('\n', '--------------------------------------\n', 'COUNT', len(self.__panels), '\n\n')
def turn(self, lr):
if verbose: print('FROM ', ['<', '^', '>', 'v'][self.__direction]*4, lr)
if lr == 0: self.left()
else: self.right()
if verbose: print('TURNED ', ['<', '^', '>', 'v'][self.__direction]*4)
def left(self):
self.__direction += (3 if self.__direction == 0 else -1)
return self.__direction
def right(self):
self.__direction += (-3 if self.__direction == 3 else 1)
return self.__direction
def move(self):
x, minx, maxx = self.__x
y, miny, maxy = self.__y
# <- left
if self.__direction == 0:
if verbose: print('MOV <----', x)
x -= 1
minx = min(minx, x)
if verbose: print('x:', x)
# -> right
elif self.__direction == 2:
if verbose: print('MOV ---->', x)
x += 1
maxx = max(maxx, x)
if verbose: print('x:', x)
# -> up
elif self.__direction == 1:
if verbose: print('MOV ^^^^^^', y)
y -= 1
miny = min(miny, y)
if verbose: print('y:', y)
# -> down
elif self.__direction == 3:
if verbose: print('MOV vvvvvv', y)
y += 1
maxy = max(maxy, y)
if verbose: print('y:', y)
self.__x, self.__y = (x, minx, maxx), (y, miny, maxy)
def paint(self, color = None):
if color != None: self.__color = color
self.__panels[(self.__x[0], self.__y[0])] = ('.' if self.__color == 0 else '#')
'''
Solution 1
'''
print('Solution 1 ----------------------------', '\n')
r = Robot(mreset[:])
r.start(0)
r.display()
'''
Solution 2
'''
print('Solution 2 ----------------------------', '\n')
r = Robot(mreset[:])
r.start(1)
r.display()
|
from flask_restful import fields
class Fields:
def timestampedmodel_fields(self):
return {
}
def timestampedmodel_fields_min(self):
return {
}
def product_fields(self):
return {
"id": fields.Integer,
"name": fields.String,
"brand": fields.String,
"description": fields.String,
"barcode": fields.String,
"quantity": fields.Integer,
"units": fields.String,
"buying_price": fields.Integer,
"selling_price": fields.Integer,
"stock": fields.Nested(self.stock_fields_min()),
"sales": fields.Nested(self.sale_fields_min()),
"categories": fields.Nested(self.category_fields_min())
}
def product_fields_min(self):
return {
"id": fields.Integer,
"name": fields.String,
"brand": fields.String,
"description": fields.String,
"barcode": fields.String,
"quantity": fields.Integer,
"units": fields.String,
"buying_price": fields.Integer,
"selling_price": fields.Integer,
}
def category_fields(self):
return {
"id": fields.Integer,
"name": fields.String,
}
def category_fields_min(self):
return {
"id": fields.Integer,
"name": fields.String,
}
def stock_fields(self):
return {
"id": fields.Integer,
"quantity": fields.Integer,
"product": fields.Nested(self.product_fields_min()),
}
def stock_fields_min(self):
return {
"id": fields.Integer,
"quantity": fields.Integer,
}
def sale_fields(self):
return {
"id": fields.Integer,
"created_at": fields.String,
"quantity": fields.Integer,
"buying_price": fields.Integer,
"selling_price": fields.Integer,
"product": fields.Nested(self.product_fields_min()),
"sale_group": fields.Nested(self.sale_group_fields_min()),
"session": fields.Nested(self.session_fields_min()),
}
def sale_fields_min(self):
return {
"id": fields.Integer,
"created_at": fields.String,
"quantity": fields.Integer,
"buying_price": fields.Integer,
"selling_price": fields.Integer,
"product": fields.Nested(self.product_fields_min()),
}
def sale_group_fields(self):
return {
"id": fields.Integer,
"created_at": fields.String,
"amount": fields.Integer,
"paid": fields.Integer,
"user": fields.Nested(self.user_fields()),
"sales": fields.Nested(self.sale_fields_min()),
}
def sale_group_fields_min(self):
return {
"id": fields.Integer,
"amount": fields.Integer,
"paid": fields.Integer,
}
def session_fields(self):
return {
"id": fields.Integer,
"start_time": fields.String,
"stop_time": fields.String,
"user": fields.Nested(self.user_fields_min()),
"sales": fields.Nested(self.sale_fields_min()),
}
def session_fields_min(self):
return {
"id": fields.Integer,
"start_time": fields.String,
"stop_time": fields.String,
}
def user_fields(self):
return {
"id": fields.Integer,
"username": fields.String,
# "password": fields.String,
# "recovery_password": fields.String,
"is_active": fields.Boolean,
"token": fields.String,
"profile": fields.Nested(self.profile_fields_min()),
"sessions": fields.Nested(self.session_fields_min()),
"roles": fields.List(fields.String),
}
def user_fields_min(self):
return {
"id": fields.Integer,
# "username": fields.String,
# "password": fields.String,
"recovery_password": fields.String,
"is_active": fields.Boolean,
}
def profile_fields(self):
return {
"id": fields.Integer,
"name": fields.String,
"email": fields.String,
"telephone": fields.String,
"user": fields.Nested(self.user_fields_min()),
}
def profile_fields_min(self):
return {
"id": fields.Integer,
"name": fields.String,
"email": fields.String,
"telephone": fields.String,
}
|
# Generated by Django 3.0.6 on 2020-06-15 19:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('LaF', '0022_auto_20200615_2105'),
]
operations = [
migrations.AlterField(
model_name='find',
name='PIN_code',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='find',
name='aadhaar_no',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='find',
name='mobile_no',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='lost',
name='PIN_code',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='lost',
name='aadhaar_no',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lost',
name='mobile_no',
field=models.IntegerField(null=True),
),
]
|
# -*- coding: utf-8 -*-
from app.models import PasswordToken, User
from app.tests import dbfixture, UserTokenData, PasswordTokenData, UserData
from app.tests.models import ModelTestCase
from web import config
class TestPasswordToken(ModelTestCase):
def setUp(self):
super(TestPasswordToken, self).setUp()
self.data = dbfixture.data(UserTokenData, PasswordTokenData)
self.data.setup()
def tearDown(self):
self.data.teardown()
def test_all(self):
all_password_tokens = PasswordToken.all()
self.assertEqual(len(all_password_tokens), 2)
        for token in all_password_tokens:
            self.assertIsInstance(token, PasswordToken)
def test_get(self):
password_token_expired = config.orm.query(PasswordToken).join(PasswordToken.user).filter(User.email == "nico@gmail.com").one() #@UndefinedVariable
password_token_active = config.orm.query(PasswordToken).join(PasswordToken.user).filter(User.email == "jo@gmail.com").one() #@UndefinedVariable
        self.assertEqual(password_token_expired.token, "goB9Z7fhsUrjXHDi")
        self.assertEqual(password_token_expired.user, UserData.nico)
        self.assertEqual(password_token_active.token, "xYCPayfPCPEPCPaL")
        self.assertEqual(password_token_active.user, UserData.jo)
def test_get_token(self):
        # These tests work because a PasswordTokenData has a similar structure to a PasswordToken.
        # When PasswordToken.__eq__ is called, it compares the fields without caring about the parameters' actual types.
self.assertIsNone(PasswordToken.get_token(None))
self.assertIsNone(PasswordToken.get_token(""))
self.assertIsNone(PasswordToken.get_token("invalid_token"))
self.assertIsNone(PasswordToken.get_token("znc9TNqpajeN2nEH"))
self.assertIsNone(PasswordToken.get_token("xjRp67wh3HdjEI6I"))
        self.assertEqual(PasswordToken.get_token("goB9Z7fhsUrjXHDi"), PasswordTokenData.password_token_expired)
        self.assertEqual(PasswordToken.get_token("xYCPayfPCPEPCPaL"), PasswordTokenData.password_token_active)
|
def read_morse_to_plaintext_dictionary():
with open('morse.txt', 'r') as lines:
return dict([ tuple(reversed(line.strip().split(' '))) for line in lines ])
def read_plaintext_to_morse_dictionary():
with open('morse.txt', 'r') as lines:
return dict([ tuple(line.strip().split(' ')) for line in lines ])
def decode_morse(code):
"""
Decodes the given code to plaintext.
"""
morse = read_morse_to_plaintext_dictionary()
return "".join([ morse[token] for token in code.split(' ') if len(token) > 0 ])
def encode_morse(plaintext):
"""
Translates given plaintext to morse.
"""
morse = read_plaintext_to_morse_dictionary()
return " ".join([ morse[char] for char in plaintext ])
|
from django.db import models
from django.utils import timezone
from accounts.models.client import Client
from accounts.models.supplier import Supplier
class Project(models.Model): ## TODO: change on_delete
class Meta:
verbose_name = 'project'
verbose_name_plural = 'projects'
client = models.ForeignKey(Client, on_delete=models.CASCADE)
suppliers = models.ManyToManyField(Supplier, blank=True)
date = models.DateTimeField()
is_accepted = models.BooleanField(default=False)
accepted_date = models.DateTimeField(null=True, blank=True)
## estimated time what type?
''' Technical Sheet is has project as foreign key (for one to many relation) '''
## other fields
def __str__(self):
return 'project_' + str( self.id )
|
from typing import List, Union
from .property import Property
from graph_db.engine.types import INVALID_ID
from .label import Label
class Node:
""" Node in a Graph. """
    def __init__(self,
                 label: Label,
                 id: int = INVALID_ID,
                 properties: List[Property] = None,
                 used: bool = True):
        self._id = id
        self._label = label
        self._used = used
        # Avoid the shared-mutable-default pitfall: give each node its own list.
        self._properties = properties if properties is not None else []
        self._relationships = []
        self._init_properties()
def _init_properties(self):
for i in range(len(self._properties) - 1):
self._properties[i].set_next_property(self._properties[i + 1])
def set_id(self, id: int):
self._id = id
def get_id(self) -> int:
return self._id
def set_label(self, label: Label):
self._label = label
def get_label(self) -> Label:
return self._label
def add_property(self, prop: Property):
if self._properties:
self.get_last_property().set_next_property(prop)
self._properties.append(prop)
def get_properties(self) -> List[Property]:
return self._properties
def get_first_property(self) -> Union[Property, None]:
return self._properties[0] if self._properties else None
def get_last_property(self) -> Union[Property, None]:
return self._properties[-1] if self._properties else None
def add_relationship(self, rel):
assert self == rel.get_start_node() or self == rel.get_end_node()
self._relationships.append(rel)
def remove_relationship(self, rel):
assert rel in self._relationships
self._relationships.remove(rel)
def get_relationships(self):
return self._relationships
def get_first_relationship(self):
return self._relationships[0] if self._relationships else None
def get_last_relationship(self):
return self._relationships[-1] if self._relationships else None
def set_used(self, used: bool):
self._used = used
def is_used(self) -> bool:
return self._used
def __str__(self) -> str:
properties_str = " ".join(map(str, self._properties)) if self._properties else None
return f'Node #{self._id} = {{' \
f'label: {self._label}, ' \
f'properties: {properties_str}, ' \
f'used: {self._used}' \
f'}}'
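# Minimal usage sketch (assumes Label and Property expose the constructors used here):
#   node = Node(Label("Person"), id=1)
#   node.add_property(Property("name", "Ada"))
#   print(node.get_label(), node.get_first_property())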
|
import logging
import fmcapi
def test__application_productivity(fmc):
logging.info("Testing ApplicationProductivity class.")
obj1 = fmcapi.ApplicationProductivities(fmc=fmc)
logging.info("All ApplicationProductivities -- >")
result = obj1.get()
logging.info(result)
logging.info(f"Total items: {len(result['items'])}")
logging.info("\n")
del obj1
obj1 = fmcapi.ApplicationProductivities(fmc=fmc, name="Very Low")
logging.info("One ApplicationProductivity -- >")
logging.info(obj1.get())
logging.info("Testing ApplicationProductivity class done.\n")
|
import urllib.request
import json
import dml
import prov.model
import datetime
import uuid
import csv
import numpy
import statistics as stats
# from alyu_sharontj_yuxiao_yzhang11.Util.Util import *
class Constraint_Solver(dml.Algorithm):
contributor = 'alyu_sharontj_yuxiao_yzhang11'
reads = ['alyu_sharontj_yuxiao_yzhang11.garden',
'alyu_sharontj_yuxiao_yzhang11.education',
'alyu_sharontj_yuxiao_yzhang11.Fire_Hospital_vs_Rent',
'alyu_sharontj_yuxiao_yzhang11.average_rent_zip',
'alyu_sharontj_yuxiao_yzhang11.education_trans_avg',
'alyu_sharontj_yuxiao_yzhang11.correlation']
writes = ['alyu_sharontj_yuxiao_yzhang11.Result']
@staticmethod
def execute(trial=False):
'''Retrieve some data sets (not using the API here for the sake of simplicity).'''
startTime = datetime.datetime.now()
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('alyu_sharontj_yuxiao_yzhang11', 'alyu_sharontj_yuxiao_yzhang11')
def union(R, S):
return R + S
def difference(R, S):
return [t for t in R if t not in S]
def intersect(R, S):
return [t for t in R if t in S]
def project(R, p):
return [p(t) for t in R]
def select(R, s):
return [t for t in R if s(t)]
def product(R, S):
return [(t, u) for t in R for u in S]
def aggregate(R, f):
keys = {r[0] for r in R}
return [(key, f([v for (k, v) in R if k == key])) for key in keys]
def map(f, R):
return [t for (k, v) in R for t in f(k, v)]
def reduce(f, R):
keys = {k for (k, v) in R}
return [f(k1, [v for (k2, v) in R if k1 == k2]) for k1 in keys]
'''get rent = (zipcode,rent) from db.alyu_sharontj_yuxiao_yzhang11.average_rent_zip'''
rentinfo = []
rentdb = repo['alyu_sharontj_yuxiao_yzhang11.average_rent_zip']
rentcur = rentdb.find()
for info in rentcur:
zipcode= info['Zip']
rent = info['Average']
rentinfo.append((zipcode, rent))
rentdict = dict(rentinfo)
# print("rent info:"+str(rentinfo))
'''get number of schools = (zipcode,education_count) from db.alyu_sharontj_yuxiao_yzhang11.education_rent'''
schoolinfo = []
edudb = repo['alyu_sharontj_yuxiao_yzhang11.education']
educur = edudb.find()
for info in educur:
edu_id = info['properties']['SchoolId']
if edu_id != "0":
address = info['properties']['Address']
edu_zip = address[-5:]
schoolinfo.append((edu_zip, 1))
eduinfo = aggregate(schoolinfo, sum)
edudict = dict(eduinfo)
'''get fire_hospital = (zipcode,Fire_Hospital_vs_Rent) from db.alyu_sharontj_yuxiao_yzhang11.Fire_Hospital_vs_Rent'''
fireinfo = []
fire_hos_db = repo['alyu_sharontj_yuxiao_yzhang11.Fire_Hospital_vs_Rent']
fire_hos_cur = fire_hos_db.find()
for info in fire_hos_cur:
zipcode = info['Zipcode']
fire_hos_rate = info['fire/hospital']
fireinfo.append((zipcode, fire_hos_rate))
firedict = dict(fireinfo)
'''get number of garden = (zipcode,garden_count) from db.alyu_sharontj_yuxiao_yzhang11.garden_vs_rent'''
gardeninfo = []
gardendb = repo['alyu_sharontj_yuxiao_yzhang11.garden_vs_rent']
gardencur = gardendb.find()
for info in gardencur:
zipcode = info['Zip']
garden_count = info['garden_count']
# print(str(zipcode)+","+ str(garden_count))
gardeninfo.append((zipcode, garden_count))
gardendict = dict(gardeninfo)
'''get average number of transportation = (zipcode,trans_avg) from db.alyu_sharontj_yuxiao_yzhang11.education_trans_avg'''
transinfo = []
transdb = repo['alyu_sharontj_yuxiao_yzhang11.education_trans_avg']
transcur = transdb.find()
for info in transcur:
zipcode = info['zip']
trans_avg = info['trans_avg']
transinfo.append((zipcode,trans_avg))
transdict = dict(transinfo)
'''find mean, std of each list'''
def get_boundary(info):
value_list = list(info.values())
mean = stats.mean(value_list)
# print(str(mean))
std = stats.stdev(value_list)
# print(str(std))
low = mean-3*std
high = mean + 3*std
return low, high
zipcode_list = ["02110","02210","02132","02109","02199","02108","02113", "02116","02163","02136","02111","02129", "02114", \
"02131", "02118", "02130", "02127", "02135", "02126", "02125", "02215", "02134", "02122", "02128", "02115",\
"02124", "02120", "02119", "02121"]
'''get correlation coefficience'''
weightinfo = []
weightinfo.append(('rent', 0.5))
corrdb = repo['alyu_sharontj_yuxiao_yzhang11.correlation']
corrcur = corrdb.find()
for info in corrcur:
factor = info['name']
weight = info['weight']
weightinfo.append((factor,weight))
weights = []
weight_rent = dict(weightinfo)
weight_edu = {"edu_rent": 0.4, "rent": 0.22, "fire/hospital_rent": 0.18, "trans_rent":0.12, "garden_rent": 0.08}
weight_safety = {"fire/hospital_rent": 0.4, "rent": 0.22, "edu_rent": 0.18, "trans_rent":0.12, "garden_rent": 0.08}
weight_trans = {"trans_rent": 0.4, "rent": 0.22, "edu_rent": 0.18, "fire/hospital_rent":0.12, "garden_rent": 0.08}
weight_facility = {"garden_rent": 0.4, "rent": 0.22, "edu_rent": 0.18, "fire/hospital_rent":0.12, "trans_rent": 0.08}
weights.append(weight_rent)
weights.append(weight_edu)
weights.append(weight_safety)
weights.append(weight_trans)
weights.append(weight_facility)
# print(weights)
def normalize(value, low, high):
return float((value-low)/(high-low))
def getscore(z, dict, factor, weightlist):
if(z in dict.keys()):
low,high = get_boundary(dict)
if(dict[z] <= high and dict[z] >= low):
# print("original"+str(dict[z]))
n = normalize(dict[z], low, high) * 100
# print("normal"+str(n))
score2 = n * weightlist[factor]
else:
score2 = 0
else:
score2 = 0
return score2
results = []
for zipcode in zipcode_list:
# print("weightlist" + str(weightlist))
scorelist = []
for weightlist in weights:
# print('rent')
rentscore = getscore(zipcode, rentdict, 'rent', weightlist)
# print('edu')
eduscore = getscore(zipcode, edudict, 'edu_rent', weightlist)
# print('fire')
firescore = getscore(zipcode, firedict, 'fire/hospital_rent', weightlist)
# print('garden')
gardenscore = getscore(zipcode, gardendict, 'garden_rent', weightlist)
transscore = getscore(zipcode, transdict, 'trans_rent', weightlist)
score = rentscore + firescore + eduscore + gardenscore + transscore
scorelist.append(score)
results.append((zipcode, scorelist))
repo.dropCollection("Result")
repo.createCollection("Result")
for k, v in results:
# normV = normalize(v,low,high) * 100
oneline = {'Zipcode': k, 'rent': v[0],'education': v[1],'safety': v[2],'transportation': v[3],'facility': v[4]}
# print(oneline)
repo['alyu_sharontj_yuxiao_yzhang11.Result'].insert_one(oneline)
endTime = datetime.datetime.now()
return {"start": startTime, "end": endTime}
@staticmethod
def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):
'''
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
'''
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('alyu_sharontj_yuxiao_yzhang11', 'alyu_sharontj_yuxiao_yzhang11')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.
doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.
doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
this_script = doc.agent('alg:alyu_sharontj_yuxiao_yzhang11#Constraint_Solver',
{ prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})
rent_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#average_rent_zip',
{prov.model.PROV_LABEL:'average_rent_zip',
prov.model.PROV_TYPE:'ont:DataSet'})
garden_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#garden',
{prov.model.PROV_LABEL:'garden',
prov.model.PROV_TYPE:'ont:DataSet'})
education_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#education',
{prov.model.PROV_LABEL:'education',
prov.model.PROV_TYPE:'ont:DataSet'})
firehospital_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#Fire_Hospital_vs_Rent',
{prov.model.PROV_LABEL: 'Fire_Hospital_vs_Rent',
prov.model.PROV_TYPE: 'ont:DataSet'})
correlation_input = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#correlation',
{prov.model.PROV_LABEL: 'correlation',
prov.model.PROV_TYPE: 'ont:DataSet'})
this_run = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)#, 'ont:Query':'?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'})
output = doc.entity('dat:alyu_sharontj_yuxiao_yzhang11#Result',
{ prov.model.PROV_LABEL:'Result', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAssociatedWith(this_run, this_script)
doc.used(this_run, garden_input, startTime)
doc.used(this_run, education_input, startTime)
doc.used(this_run, rent_input, startTime)
doc.used(this_run, firehospital_input, startTime)
doc.used(this_run, correlation_input, startTime)
doc.wasAttributedTo(output, this_script)
doc.wasGeneratedBy(output, this_run, endTime)
doc.wasDerivedFrom(output, garden_input, this_run, this_run, this_run)
doc.wasDerivedFrom(output, education_input, this_run, this_run, this_run)
doc.wasDerivedFrom(output, rent_input, this_run, this_run, this_run)
doc.wasDerivedFrom(output, firehospital_input, this_run, this_run, this_run)
doc.wasDerivedFrom(output, correlation_input, this_run, this_run, this_run)
repo.logout()
return doc
# Constraint_Solver.execute()
# doc = Constraint_Solver.provenance()
# print(doc.get_provn())
# print(json.dumps(json.loads(doc.serialize()), indent=4))
# eof
|
import logging
from textwrap import dedent
import numpy as np
from PIL import Image
logger = logging.getLogger("osmo_camera.rgb.convert")
# Constant used to convert from 0-1 RGB values to 0-255
MAX_COLOR_VALUE = 255
def to_bgr(rgb_image):
""" Converts an `RGB image` to a `BGR image`
Args:
rgb_image: An `RGB image`
Returns:
A `BGR image`
"""
# https://www.scivision.co/numpy-image-bgr-to-rgb/
bgr_image = rgb_image[..., ::-1]
return bgr_image
def to_PIL(rgb_image):
""" Converts an `RGB image` with 0-1 RGB float values to PIL image object.
Args:
rgb_image: An `RGB image`
Returns:
A PIL image object.
"""
# Count the number of items which will not convert nicely to uint8 and will be truncated
count_out_of_range = (rgb_image > 1).sum() + (rgb_image < 0).sum()
if count_out_of_range > 0:
logger.warning(
dedent(
f"""\
Found {count_out_of_range} items outside acceptable value range of 0-1.
Values greater than 1 will be truncated to the maximum output value of {MAX_COLOR_VALUE}
in the converted image.
Values less than 0 will be truncated to 0 in the converted image.\
"""
)
)
rgb_image = np.clip(rgb_image, 0, 1)
return Image.fromarray((rgb_image * MAX_COLOR_VALUE).astype("uint8"))
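# Minimal usage sketch: convert a random 0-1 RGB array (illustrative only).
if __name__ == "__main__":
    demo_rgb = np.random.rand(4, 4, 3)
    print(to_bgr(demo_rgb).shape)  # (4, 4, 3), channels reversed
    print(to_PIL(demo_rgb).size)   # (4, 4)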
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from credentials.models import SshKeyPair
from archives.policies import CdimageArchivePolicy
from archives.archivers import SshArchiver
POLICIES = {"cdimage": CdimageArchivePolicy}
ARCHIVERS = {"ssh": SshArchiver}
@python_2_unicode_compatible
class Archive(models.Model):
name = models.CharField(max_length=64)
host = models.CharField(max_length=64)
policy = models.CharField(
max_length=64, choices=[(p, p) for p in POLICIES.keys()])
basedir = models.CharField(max_length=128)
username = models.CharField(max_length=64)
ssh_credentials = models.ForeignKey(SshKeyPair)
transport = models.CharField(
max_length=64, choices=[(p, p) for p in ARCHIVERS.keys()])
def __str__(self):
return self.name
def get_policy(self):
"""
Returns a class to be used as the archive name generation policy for
this archive or None.
"""
return POLICIES.get(self.policy)
def get_archiver(self):
"""
Returns a class to be used to archive files for this archive or None.
"""
return ARCHIVERS.get(self.transport)
|
# -*- coding: utf-8 -*-
# Create your models here.
from __future__ import unicode_literals
from django.utils import timezone
from django.contrib.gis.geos import Point
from django.contrib.gis.db import models
from django.contrib.gis import geos
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
class Event(models.Model):
name = models.CharField(
max_length=100,
blank=True,
verbose_name="name"
)
time = models.DateTimeField()
description = models.CharField(
max_length=255,
blank=True,
verbose_name="description"
)
location = models.PointField(
verbose_name="event location",
blank=True,
null=True
)
owner = models.ForeignKey(
User,
verbose_name="owner",
on_delete=models.CASCADE,
default=1
)
created = models.DateTimeField(
auto_now_add=True
)
modified = models.DateTimeField(
auto_now=True
)
def __str__(self):
return "{}, ({}), at {} ... cr={}, mod={}" \
.format(self.name, self.time, self.location, self.created, self.modified)
class Attendees(models.Model):
attendee = models.ForeignKey(
User,
verbose_name="attendee",
on_delete=models.CASCADE,
default=1
)
event = models.ForeignKey(
Event,
verbose_name="event",
on_delete=models.CASCADE,
default=1
)
created = models.DateTimeField(
auto_now_add=True
)
modified = models.DateTimeField(
auto_now=True
)
    def __str__(self):
        return "{} attending {}".format(self.attendee.first_name, self.event.name)
|
from django.contrib import admin
from .models import Profile, Neighborhood , Post, Business
#
# Register your models here.
admin.site.register(Neighborhood)
admin.site.register(Post)
admin.site.register(Business)
admin.site.register(Profile)
|
import os
import sys
sys.path.insert(0, 'scripts')
import experiments as exp
def get_model(subst_model):
return subst_model.split("+")[0]
def get_gamma_rates(subst_model):
if ("G" in subst_model.split("+")):
return 4
else:
return 1
def is_invariant_sites(subst_model):
return "+I" in subst_model
def is_dna(subst_model):
dna_models = ["JC", "GTR"]
return get_model(subst_model) in dna_models
def get_phyldog_model(subst_model):
return get_model(subst_model)
def get_raxml_model(subst_model):
subst_model = subst_model.replace("+I", "+IC") # empirical invariant sites
res = get_model(subst_model)
if (res == "POISSON"):
res = "PROTGTR{"
res += "/".join(["1"] * 190)
res += "}"
res += "+FE"
rest = subst_model.split("+")[1:]
for r in rest:
res += "+" + r
return res
def get_mrbayes_preset_line(subst_model):
if (get_model(subst_model) == "POISSON"):
return "\tprset aamodelpr=fixed(poisson);\n"
if (get_model(subst_model) == "LG"):
return "\tprset aamodelpr=fixed(lg);\n"
elif(get_model(subst_model) == "DAYHOFF"):
return "\tprset aamodelpr=fixed(dayhoff);\n"
elif(get_model(subst_model) == "WAG"):
return "\tprset aamodelpr=fixed(wag);\n"
elif(get_model(subst_model) == "JC"):
return "\tprset statefreqpr=fixed(equal);\n"
else:
return ""
def get_deleterious_model(subst_model):
if (get_model(subst_model) == "LG"):
return "LG"
elif(get_model(subst_model) == "WAG"):
return "WAG"
elif(get_model(subst_model) == "JTT"):
return "JTT"
elif(get_model(subst_model) == "POISSON"):
return "UNIFORMAA"
else:
return "JC69"
def get_mrbayes_lset_line(subst_model):
model = get_model(subst_model)
line = "\t"
rates = get_gamma_rates(subst_model)
line += "lset "
if (model == "GTR"):
line += "nst=6"
elif (model == "JC"):
line += "nst=2"
if (rates == 1):
if (is_invariant_sites(subst_model)):
line += " rates=propinv"
else:
line += " rates=equal"
else:
if (is_invariant_sites(subst_model)):
line += " rates=invgamma ngammacat=" + str(rates)
else:
line += " rates=gamma ngammacat=" + str(rates)
line += ";\n"
return line
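# For example, get_mrbayes_lset_line("GTR+G+I") returns "\tlset nst=6 rates=invgamma ngammacat=4;\n".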
def extract_raxml_model(raxml_model_file):
res = lambda:0
line = open(raxml_model_file).readlines()[0]
split = line.split("+")
str1 = split[0]
str2 = split[1]
res.model = str1[0:str1.find("{")]
res.rates = str1[str1.find("{")+1:str1.find("}")].split("/")
res.frequencies = str2[str2.find("{")+1:str2.find("}")].split("/")
return res
def build_default_dna_model():
res = lambda:0
res.model = "GTR"
res.rates = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
res.frequencies = [0.25, 0.25, 0.25, 0.25]
return res
def model_to_seqgen_cmd(model_obj):
cmd = []
cmd.append("-m")
cmd.append(model_obj.model)
cmd.append("-r")
cmd.extend([str(i) for i in model_obj.rates])
cmd.append("-f")
cmd.extend([str(i) for i in model_obj.frequencies])
return cmd
def get_model_samples(sample_name):
samples = []
if (sample_name == "dnadefault"):
samples.append(build_default_dna_model())
elif (sample_name == "dna4"):
for sample_file in os.listdir(exp.dna4_model_samples):
if (sample_file.endswith("bestModel")):
sample_path = os.path.join(exp.dna4_model_samples, sample_file)
samples.append(extract_raxml_model(sample_path))
else:
print("invalid sample name")
return None
return samples
def get_model_sample_names():
return ["dnadefault", "dna4"]
if (__name__ == "__main__"):
if (len(sys.argv) != 2):
print("Syntax: python sequence_model.py model_file")
exit(1)
    model = extract_raxml_model(sys.argv[1])
print("model: " + str(model.model))
print("rates: " + str(model.rates))
print("frequencies: " + str(model.frequencies))
seqgen_cmd = model_to_seqgen_cmd(model)
print(" ".join(seqgen_cmd))
|
import json
class BufferedWriter:
    def __init__(self, out_folder, out_name, table_name_key=None, ext='.json', count_limit=25):
# out_folder is /a/b
# out_name is with extention 'somefile.json'
# ext='.json'
# count_limit=25
self.folder = out_folder # no slash at end
self.out_file_name = out_name
#self.root_name = out_name # include extention
        if table_name_key is None:
            self.table_name_key = out_name.replace(ext, '')  # remove extension; assume the filename is the table name
        else:
            self.table_name_key = table_name_key
self.buffer = []
self.file_count = 0
self.limit = count_limit
self.item_count = 0
self.word_counts = {} # dict of words with a counter {'the' : 1, 'data': 2}
def formatOutFileName(self):
#print('folder: ', self.folder, ' file_count: ', self.file_count, ' out_file_name: ', self.out_file_name)
return '{}/{}.{}'.format(self.folder, self.file_count, self.out_file_name)
def write(self, item):
self.buffer.append(item)
if len(self.buffer) == self.limit:
# with open('{}/{}.{}'.format(self.folder, self.file_count, self.root_name), 'w') as f:
with open(self.formatOutFileName(), 'w') as f:
# recode the key with the id before writing
#final = {'documents': self.buffer}
final = {self.table_name_key: self.buffer}
f.write(json.dumps(final))
#f.write(json.dumps(self.buffer))
self.file_count += 1
self.buffer=[]
def flush(self):
if len(self.buffer) > 0:
#with open('{}/{}.{}'.format(self.folder, self.file_count, self.root_name), 'w') as f:
with open(self.formatOutFileName(), 'w') as f:
# recode the key with the id before writing
#final = {'documents': self.buffer}
#final = {self.root_name: self.buffer}
final = {self.table_name_key: self.buffer}
f.write(json.dumps(final))
# f.write(json.dumps(self.buffer))
self.file_count = 0
self.buffer=[]
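# Minimal usage sketch (hypothetical folder and file names):
#   writer = BufferedWriter('/tmp/out', 'records.json', count_limit=2)
#   for item in [{'id': 1}, {'id': 2}, {'id': 3}]:
#       writer.write(item)   # writes /tmp/out/0.records.json once two items are buffered
#   writer.flush()           # writes the remaining item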
|
import socket # importing the socket module.
HOST = "127.0.0.1" # specifying the host's address.
PORT = 65432 # specifying the port to be used in the communication between the server and the client.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # creating an object that calls on the socket method from module socket.
server_socket.bind((HOST, PORT)) # using the created method to bind the host and port to the socket.
server_socket.listen() # using the created method to listen for any incoming connection on the specified port.
conn, addr = server_socket.accept() # using the created method to accept a connection when one is established.
with conn:
    print("connected by", addr)
    while True:
        data = conn.recv(1024)
        if not data:  # an empty read means the client closed the connection
            break
        print("\nReceived", data, "from client!")
        print("\nSending back data...")
        conn.sendall(b"Hello fam!")
server_socket.close()
# research the "with" function
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="flare",
version="0.1.5",
author="Jacob Pettit",
author_email="jfpettit@gmail.com",
    description="Simple implementations of reinforcement learning algorithms.",
long_description=long_description,
url="https://github.com/jfpettit/flare",
install_requires=[
"numpy",
"torch",
"gym[box2d]",
"scipy",
"roboschool",
"pybullet",
"termcolor",
"joblib",
"tensorboard",
"mpi4py",
],
)
|
import re
from collections import Counter
# VOWELS = list('aeiou')
VOWELS = 'aeiou'
def get_word_max_vowels(text):
"""Get the case insensitive word in text that has most vowels.
Return a tuple of the matching word and the vowel count, e.g.
('object-oriented', 6)"""
vowel_count = {}
for word in text.lower().split(' '):
        # re.subn returns a (new_string, count) tuple; index 1 is the number of (non-overlapping) substitutions
vowel_count[word] = re.subn(f'[{VOWELS}]', '', word)[1]
return Counter(vowel_count).most_common()[0]
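# Example usage (illustrative sentence):
if __name__ == "__main__":
    print(get_word_max_vowels("I love object-oriented programming"))  # ('object-oriented', 6)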
|
import pytreebank
import nltk
import itertools
from numpy import array
import numpy as np
SENTENCE_START_TOKEN = 'eos'
UNKNOWN_TOKEN = 'unk'
def word2index(sentences, vocabulary_size):
tokenized_sentences = [nltk.word_tokenize(sent) for sent in sentences]
# Count the word frequencies
word_freq = nltk.FreqDist(itertools.chain(*tokenized_sentences))
#print "Found %d unique words tokens." % len(word_freq.items())
# Get the most common words and build index_to_word and word_to_index vectors
vocab = word_freq.most_common(vocabulary_size-2)
index_to_word = [x[0] for x in vocab]
index_to_word.append(UNKNOWN_TOKEN)
index_to_word.append(SENTENCE_START_TOKEN)
word_to_index = dict([(w, i) for i, w in enumerate(index_to_word)])
return (word_to_index, index_to_word)
def word2vec(sent, word2index_dict):
    '''
    Convert a sentence into a vector of word indices.
    :param sent: input sentence
    :param word2index_dict: dict mapping words to their indices
    :return: tuple of (index vector of the sentence, sample-weight vector)
    '''
sent = "%s %s" % (SENTENCE_START_TOKEN, sent)
words_in_sent = [x for x in nltk.word_tokenize(sent)]
i = len(words_in_sent)
array_sent=[0]*i
sample_weight = [0]*i
for j in range(i):
if words_in_sent[j].lower() not in word2index_dict.keys():
words_in_sent[j] = UNKNOWN_TOKEN
array_sent[j] = (word2index_dict[words_in_sent[j].lower()])
sample_weight[j] = 1
array_sent = np.asarray(array_sent)
return ((array_sent),array(sample_weight))
def demo_tree():
small_trees = pytreebank.import_tree_corpus('./trees/dev.txt')
small_trees = small_trees[:100]
label = []
sentences = []
tree = small_trees[6]
for l, sent in tree.to_labeled_lines():
label.append(l)
sentences.append(sent)
print(l, sent)
print('breakpoint')
def preprocess_full(vocabulary_size):
trees = pytreebank.load_sst('trees')
trees_train = trees["train"]
trees_dev = trees["dev"]
trees_test = trees["test"]
def preprocess(vocabulary_size):
# trees = pytreebank.load_sst('trees')
# trees_train = trees["train"]
# trees_dev = trees["dev"]
# trees_test = trees["test"]
small_trees = pytreebank.import_tree_corpus('./trees/train.txt')
label = []
sentences = []
for tree in small_trees:
l, sent = tree.to_labeled_lines()[0]
label.append(l)
sentences.append(sent)
#for tree in small_trees:
# for l, sent in tree.to_labeled_lines():
# label.append(l)
# sentences.append(sent)
label = np.asarray(label)
word_to_index, index_to_word = word2index(sentences,vocabulary_size)
train_x = []
for sent in sentences:
x, _ = word2vec(sent,word_to_index)
train_x.append(x)
return (train_x, label)
if __name__ == "__main__":
preprocess(4000)
|
# coding:utf-8
number = 0
while number <= 10:
number += 1
if number * 3 >= 10:
break
print(number)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Vladimir Shurygin. All rights reserved.
import uvicorn
from app.server import get_app
app = get_app()
if __name__ == "__main__":
uvicorn.run("run:app", host="127.0.0.1", port=8080, log_level="info", reload=True)
|
if __name__ == '__main__':
n = int(input())
ans = []
for _ in range(n):
op, *line = input().split()
val = list(map(int, line))
if(op =="remove"):
ans.remove(val[0])
elif(op=="pop"):
del ans[len(ans)-1]
elif(op=="reverse"):
ans.reverse()
elif(op=="append"):
ans.append(val[0])
elif(op=="print"):
print(ans)
elif(op=="sort"):
for i in range(0,len(ans)-1):
for j in range(i+1,len(ans)):
if(ans[i]>ans[j]):
temp = ans[i]
ans[i] = ans[j]
ans[j] = temp
else:
ans.insert(val[0],val[1])
|
'''
Created on Dec 8, 2015
@author: jj1745
'''
class Restaurant(object):
'''
The restaurant object, where each restaurant is determined by its unique camis_id
'''
def __init__(self, camis_id):
'''
Constructor
'''
self.id = camis_id
def test_grades(self, grade_list):
'''
This is the helper function that determines the trend of the grades
if the ending grade is better return 1; if the beginning grade is better, return -1; else return 0
'''
score_book = {'A':3, 'B':2, 'C':1}
init_grade = grade_list[0]
final_grade = grade_list[-1]
# get the transformed grade
init_score = score_book[init_grade]
final_score = score_book[final_grade]
if final_score > init_score:
return 1
elif final_score < init_score:
return -1
else:
return 0
def test_restaurant_grades(self, df):
'''
given the whole dataframe, compute the trend based on camis_id
'''
restaurant_data = df[df['CAMIS'] == self.id]
        restaurant_data = restaurant_data.sort_values('DATE')
grade_list = restaurant_data['GRADE'].tolist()
return self.test_grades(grade_list)
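# Minimal usage sketch (assumes a pandas DataFrame with CAMIS, DATE and GRADE columns;
# the id below is hypothetical):
#   r = Restaurant(41234567)
#   trend = r.test_restaurant_grades(df)  # 1 improving, -1 declining, 0 unchanged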
|
"""
Insertion sort implementation
Useful for when the list is known to be
nearly/mostly sorted
"""
numbers = [3,53,65,1,321,54,76,43,2,4,66]
# O(n) best case, O(n^2) generally
def insertionSort(array):
length = len(array)
for x in range(length):
value = array[x]
j = x-1
while j >= 0 and value < array[j] :
array[j + 1] = array[j]
j -= 1
array[j + 1] = value
insertionSort(numbers)
print(numbers)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basketball', '0032_auto_20160124_1912'),
]
operations = [
migrations.AddField(
model_name='statline',
name='fastbreak_points',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='statline',
name='fastbreaks',
field=models.PositiveIntegerField(default=0),
),
]
|
import sys
import copy
from requests.exceptions import HTTPError
from biigle import Api
# Enter your user email address here.
email = ''
# Enter your API token here.
token = ''
# ID of the volume to process.
volume_id = 0
# ID of the label to attach to new annotations.
label_id = 0
# Number of grid rows for each image.
rows = 3
# Number of grid columns for each image.
columns = 3
# Assume that all images of the volume have the same dimension.
assume_same_dimension = True
api = Api(email, token)
# Get the available annotation shapes.
# https://biigle.de/doc/api/index.html#api-Shapes-IndexShapes
shapes = api.get('shapes').json()
shapes = {s['name']: s['id'] for s in shapes}
# Get the list of image IDs that belong to the volume.
# https://biigle.de/doc/api/index.html#api-Volumes-IndexVolumeImages
image_ids = api.get('volumes/{}/images'.format(volume_id)).json()
rectangle_width = 0
rectangle_height = 0
post_data = {
'shape_id': shapes['Rectangle'],
'label_id': label_id,
'confidence': 1,
'points': [],
}
total = len(image_ids) * rows * columns
current = 0
can_batch_create = True
try:
api.post('annotations')
except HTTPError:
can_batch_create = False
batch_size = 100
batch = []
for image_id in image_ids:
if not assume_same_dimension or not rectangle_width or not rectangle_height:
# Get detailed information on an image, including width and height in pixels.
# https://biigle.de/doc/api/index.html#api-Images-ShowImages
image_info = api.get('images/{}'.format(image_id)).json()
width, height = image_info['attrs']['width'], image_info['attrs']['height']
rectangle_width = width // columns
rectangle_height = height // rows
for row in range(rows):
y = row * rectangle_height
for column in range(columns):
x = column * rectangle_width
post_data['points'] = [
x, y,
x + rectangle_width, y,
x + rectangle_width, y + rectangle_height,
x, y + rectangle_height,
]
if can_batch_create:
post_data['image_id'] = image_id
batch.append(copy.copy(post_data))
if len(batch) == batch_size:
api.post('annotations', json=batch)
batch = []
else:
# Create a new rectangle annotation for the image.
# https://biigle.de/doc/api/index.html#api-Annotations-StoreImageAnnotations
api.post('images/{}/annotations'.format(image_id), json=post_data)
current += 1
print('Created {} of {}'.format(current, total))
if can_batch_create and len(batch) > 0:
api.post('annotations', json=batch)
|
# Compile the proto files
# protoc object_detection/protos/*.proto --python_out=.
# Add Slim to the PYTHONPATH
# export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim
# Run the installation test
# python3 object_detection/builders/model_builder_test.py
|
import sys,os
from catalog.models import Category, Item, Brand, Specification, UserReview
from django.core.exceptions import ObjectDoesNotExist
from memcached_utils import cache_categories as cache_categories
from django.core.cache import cache
from django.template import RequestContext
from django.shortcuts import render_to_response
def get_items(request, slug=None):
    ####### categories
    selected_cat = None
    if slug is not None:
        try:
            selected_cat = Category.objects.get(slug=slug)
        except ObjectDoesNotExist:
            raise ValueError("unknown category slug: {}".format(slug))
    # Load the cached category list, rebuilding the cache on a miss
    # (cache_categories is assumed to return the cached Category objects).
    cats = cache.get('cats')
    if cats is None:
        cats = cache_categories()
    #######################
    # Restrict items to the selected category, or search all cached categories.
    cats_filter = [selected_cat.id] if selected_cat else [c.id for c in cats]
    items = Item.objects.filter(category__id__in=cats_filter)
    # Collect the brands present in the matching items.
    brands = None
    if selected_cat:
        brands_ids = {}
        for it in items:
            brands_ids[it.brand] = True
        brands = list(brands_ids.keys())
    # Resolve an optional brand filter from the query string.
    selected_brand = None
    if 'brand' in request.GET:
        try:
            selected_brand = Brand.objects.get(id=request.GET['brand'])
        except (ObjectDoesNotExist, ValueError):
            pass
    # Resolve an optional free-text search query.
    query_search = request.GET.get('q', '')
    # Build the final filter and apply it.
    filter_context = {}
    if cats_filter:
        filter_context['category__id__in'] = cats_filter
    if selected_brand is not None:
        filter_context['brand__id__exact'] = selected_brand.id
    if query_search != '':
        filter_context['name__icontains'] = query_search
    items = Item.objects.filter(**filter_context)
    context = {
        'cats': cats,
        'selected_cat': selected_cat,
        'selected_brand': selected_brand,
        'brands': brands,
        'items': items,
    }
    context = RequestContext(request, context)
    return render_to_response('catalog/items.html', context)
|
"""Console script for fundamentals_of_data_science."""
import os
import sys
import click
from rich import traceback
WD = os.path.dirname(__file__)
@click.command()
def main(args=None):
"""Console script for fundamentals_of_data_science."""
read_included_file('test.txt')
click.echo("Replace this message by putting your code into "
"fundamentals_of_data_science.cli.main")
click.echo("See click documentation at https://click.palletsprojects.com/")
return 0
def read_included_file(filename):
"""
DELETE ME
This method solely demonstrates how to use and read files, which are automatically included in the distributed python package.
@param filename: Name of the file to read, which has to be in a folder, that is included in the python package as specified in setup.py
"""
print("function is called")
with open(f"{WD}/files/{filename}") as f:
content = f.readlines()
print(content)
if __name__ == "__main__":
traceback.install()
sys.exit(main()) # pragma: no cover
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
## Version 0.2
desc={
"geral" : "-RISD",
"pri" : "-P",
"back" : "-B",
"con" : "-C",
"as" : "2860:",
"palha":"0000"
}
vrfs={
"1":"VPN_RIS",
"2":"VPN_RIS_WIFI_UTENTES",
"3":"VPN_SICAD",
"4":"VPN_CH_CHLO",
"5":"VPN_CH_CHTV",
"6":"VPN_CH_CHBV"
}
vrfs_rds={
"VPN_RIS":"11268",
"VPN_RIS_WIFI_UTENTES":"11422",
"VPN_SICAD":"xxxxx",
"VPN_CH_CHLO":"xxxxx",
"VPN_CH_CHTV":"xxxxx",
"VPN_CH_CHBV":"xxxxx"
}
text={
"isp" : "ISP --> ",
"sid" : "Site ID --> ",
"vnr" : "Nr VRF --> ",
"tps" : "Tipo de Site --> ",
"c_mse" : "MSE --> ",
"menu2" : "\n 1 - Principal \n 2 - Backup \n 3 - Contigencia \n",
"menu1" : "\n 1 - Optimus \n 2 - ZON \n 3 - NOS NGN \n",
"c_vrf" : "Escolhe VRF --> ",
}
vrf_texts={
"list_text" : "\nEscolhe uma VRF da lista.",
"cir_text" : "\nEscolher VRF adequada ao Circuito!",
"site_text" : "\nEscolhe tipo de site correcto!\n",
}
erips_text={
"ep" : "ERIP P : ",
"ep1id" : "Erip Id : ",
"ep2" : "ERIP B : ",
"ep2id" : "Erip2 Id : "
}
VRFS_FILE_DIR = r"C:\Users\Admin\Desktop\vrfs.txt"  # raw string keeps the backslashes literal
##### List of PE IDs, SOME RECORDS
pe_ids={
"ZXC1ERIP001":"0521",
"VVC1ERIP002":"3402",
"ANV1ERIP001":"3403",
"ATY1ERIP002":"0534",
"TYG1ERIP001":"0405",
"HFG1ERIP002":"0436",
"DSV1ERIP001":"0207",
"ASS1ERIP002":"0038"
}
#### End of list
|
def banner_text(text: str = " ", width: int = 80) -> None:
"""
Center the entered text and border with asterisks
:param text: the string to be printed
:param width: the width around the entered text
:return: nothing is returned
"""
screen_width = width
if len(text) > screen_width - 4:
raise ValueError("String {0} is larger than specified width {1}"
.format(text, screen_width))
if text == "*":
print("*" * screen_width)
else:
output_string = "**{0}**".format(text.center(screen_width - 4))
print(output_string)
banner_text("*", 60)
banner_text("*", 60)
banner_text("Testing", 60)
banner_text("*", 60)
banner_text(width=60)
banner_text("looooooooooooooooooooooooong teeeeeeeeeeeeeeeext", 60)
banner_text("*", 60)
banner_text("BANNER TEXT FUNCTION", 60)
banner_text("*", 60)
banner_text("*", 60)
|
# !/usr/bin/env python
# Object : project for Optimisation 2016 AIC
# Author : Herilalaina, Xiyu ZHANG
# Date : Wed Oct 12 23:36:30 CEST 2016
# Email : zacharie.france@gmail.com
# ==============================================================================
import numpy as np
def paretoRank(objectives):
"""This function is aiming at calculating pareto-ranks for all elements of o
bjectives. It means that we find each paretofront one by one.
The number in ranks array means that the point in objectives belongs to which
level of paretofront. e.g. [1 1 2 2 1 1 3 4 1] means there five points is in
first level of paretofront. Example:
@objectives: matrix
[[5 1]
[2 3]
[4 3]
[5 5]
[4 1]
[7 3]
[6 4]
[4 5]
[6 2]
[3 2]
[5 3]
[3 4]]
    @return : one-dimensional np.array of type int
[2 1 2 4 1 4 4 3 3 1 3 2]
"""
objectives = np.array(objectives)
nPop = objectives.shape[0]
    ranks = np.zeros((nPop), dtype=int)  # np.int was removed from NumPy; use the builtin
# print "----------Initial ranks----------"
# print ranks
nPV = 1
    popInd = np.ones((nPop), dtype=bool)
# print "----------Initial popInd----------"
# print popInd
# If all elements in popInd are False, it returns False.
# If not, it returns True.
while np.any(popInd):
# get next paretofront
frontInd = np.copy(popInd)
frontInd[popInd] = paretofront(objectives[popInd, :])
# print "----------frontInd-{0}----------".format(nPV)
# print frontInd
ranks[frontInd] = nPV
# print "----------ranks-{0}----------".format(nPV)
# print ranks
# remove ones of next paretofront from select-vector
popInd = np.logical_xor(popInd, frontInd)
# print "----------popInd-{0}----------".format(nPV)
# print popInd
nPV = nPV + 1
return ranks
def paretofront(myArray):
"""This functions returns one array in which there are only False or ture elements.
Besides, this returned array tell us which points are located on the border of
paretofront.
!Attention!: this function assume that we need min object funtion on first
dimension and min objective funtion on the second dimension. If not, you need modify this
function by yourself. Meanwhile, this function is only used to calculate bi-objective
funtions problem.
@myArray: it is objectives matrix or a part of objectives matrix. It depends which
level of paretofront that we are calculating now. For the first loop, it will be
exactly all objectives.
@point_front_index: Array of index signs all points that are located on the border of
paretofront."""
# Sort by increased order on first dimension
newMyArray = myArray[myArray[:, 0].argsort()]
# print "newMyArray:\n", newMyArray
# Add first row to pareto_frontier
pareto_frontier = newMyArray[0:1, :]
# Test next row against the last row in pareto_frontier
for row in newMyArray[1:, :]:
if row[0] == pareto_frontier[-1][0]:
if row[1] < pareto_frontier[-1][1]:
pareto_frontier[-1,:] = row
else: # row[0] > pareto_frontier[-1][0] because we sorted before
if row[1] < pareto_frontier[-1][1]:
pareto_frontier = np.concatenate( (pareto_frontier, [row]) )
# print "pareto_frontier:\n", pareto_frontier
# Initialise to False for all
    point_front_index = np.zeros((myArray.shape[0]), dtype=bool)
# look which point that is located on the border of paretofront.
for i in np.arange(myArray.shape[0]):
for j in pareto_frontier:
if np.array_equal(myArray[i], j):
point_front_index[i] = True
# print "pareto_frontier is listed as followed:\n", pareto_frontier
return point_front_index
def test():
"""You maybe need this function to test paretoRank is correct or not."""
objectives = np.array([[2, 3], [3, 2], [3, 4], [4, 1], [
4, 3], [4, 5], [5, 1], [5, 3], [5, 5], [6, 2],[6, 4], [7,3 ]])
np.random.shuffle(objectives)
print("----------initial objectives looks like----------")
print(objectives)
print("----------final ranks looks like----------")
print(paretoRank(objectives))
# if __name__ == '__main__':
# test()
|
from router_solver import *
import compilador.objects.semantic_table
from compilador.objects.semantic_table import *
# QUADRUPLE CLASS
# Object that stores the operands, the operator, and the result of an expression or instruction
class Quadruple(object):
    def __init__(self, operator, operand_1, operand_2, result_id):
        self.operator = operator  # Operator or instruction of the quadruple
        self.operand_1 = operand_1  # First operand
        self.operand_2 = operand_2  # Second operand
        self.result_id = result_id  # Operand where the result is assigned
        self.scope = None
    # Operators of arithmetic expressions
def __is_operator(symbol):
return symbol in [
"+",
"-",
"*",
"/",
"%",
"(",
")",
">",
"<",
"=",
"|",
"&",
"!",
]
    # Names of assignment and compound-assignment operators
def __is_assignment(operator):
return operator in [
"EQ",
"ADDEQ",
"SUBEQ",
"MULEQ",
"DIVEQ",
"MODEQ",
]
    # Takes a string expression as input and splits it into symbols
def __divide_expression(expression):
exp = []
operand = ""
i = 0
while i < len(expression):
symbol = expression[i]
if Quadruple.__is_operator(symbol):
if len(operand):
exp.append(operand)
if symbol in ["<", ">", "=", "!"] and expression[i + 1] == "=":
symbol += "="
i += 1
elif symbol in ["|", "&"] and expression[i + 1] == symbol:
symbol += symbol
i += 1
elif symbol in ["+", "-", "*", "/", "%"] and expression[i + 1] == "=":
symbol += "="
i += 1
exp.append(symbol)
operand = ""
else:
operand += expression[i]
i += 1
if len(operand):
exp.append(operand)
return exp
    # Removes the parentheses from the operator stack
def __sub_stack_from_parentheses(stack):
if "(" in stack:
stack.reverse()
index = stack.index("(")
sub_stack = stack[:index]
stack.reverse()
return sub_stack
return stack
    # Checks whether a * / % operator is in the stack
def __another_op_mdr_in_stack(stack_operators):
sub_stack_operators = Quadruple.__sub_stack_from_parentheses(stack_operators)
return any(item in ["MUL", "DIV", "MOD"] for item in sub_stack_operators)
    # Checks whether another + - operator is in the stack
def __another_op_as_in_stack(stack_operators):
sub_stack_operators = Quadruple.__sub_stack_from_parentheses(stack_operators)
return any(item in ["ADD", "SUB"] for item in sub_stack_operators)
    # Checks whether another + - * / % operator is in the stack
def __another_op_as_mdr_in_stack(stack_operators):
sub_stack_operators = Quadruple.__sub_stack_from_parentheses(stack_operators)
return any(
item in ["MUL", "DIV", "MOD", "ADD", "SUB"] for item in sub_stack_operators
)
    # Checks whether another + - * / % > < >= <= operator is in the stack
def __another_op_as_mdr_comp_in_stack(stack_operators):
sub_stack_operators = Quadruple.__sub_stack_from_parentheses(stack_operators)
return any(
item in ["MUL", "DIV", "MOD", "ADD", "SUB", "GT", "LT", "GTE", "LTE"]
for item in sub_stack_operators
)
    # Checks whether a ! operator is in the stack
def __a_not_in_stack(stack_operators):
sub_stack_operators = Quadruple.__sub_stack_from_parentheses(stack_operators)
return "NOT" in sub_stack_operators
    # Checks whether any kind of operator is in the stack
def __any_op_in_stack(stack_operators):
sub_stack_operators = Quadruple.__sub_stack_from_parentheses(stack_operators)
return True if len(sub_stack_operators) else False
    # Type consideration when evaluating a NOT expression
def __not_consideration(stack_types):
return "BOOL" # if stack_types[-1] == "BOOL" else "ERROR"
    # Sends the operand types and the operator to the semantic table to validate compatibility
def __type_consideration(stack_types, stack_operators):
return SemanticTable.considerate(
stack_types[-2], stack_operators[-1], stack_types[-1]
)
    # Builds the quadruple object from the stack data
def __generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
):
result_id = "T" + str(result_quadruple_id)
consideration = Quadruple.__type_consideration(stack_types, stack_operators)
operator = Symbol(
stack_operators[-1],
SemanticTable.clasify_symbol_op(stack_operators.pop()),
stack_scopes[-2],
)
operand_1 = Symbol(stack_values[-2], stack_types[-2], stack_scopes[-3])
operand_2 = Symbol(stack_values[-1], stack_types[-1], stack_scopes[-1])
quad_result = Symbol(result_id, consideration, stack_scopes[-2])
q = Quadruple(operator, operand_1, operand_2, quad_result)
del stack_types[-2:]
del stack_values[-2:]
del stack_scopes[-3:]
stack_types.append(consideration)
resulting_quads.append(q)
stack_values.append(result_id)
stack_scopes.append(q.result_id.scope)
    # Builds the quadruple for a NOT (!) expression
def __generate_not_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
):
result_id = "T" + str(result_quadruple_id)
consideration = Quadruple.__not_consideration(stack_types)
operator = Symbol(stack_operators.pop(), "not", stack_scopes[-2])
value = Symbol(stack_values.pop(), stack_types.pop(), stack_scopes.pop())
quad_result = Symbol(result_id, consideration, stack_scopes.pop())
q = Quadruple(operator, value, None, quad_result)
stack_types.append(consideration)
resulting_quads.append(q)
stack_values.append(result_id)
stack_scopes.append(q.result_id.scope)
    # Builds the quadruple for an assignment (EQ =) expression
def __generate_assignment_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
):
consideration = Quadruple.__type_consideration(stack_types, stack_operators)
operator = Symbol(stack_operators.pop(), "assignment", stack_scopes[-2])
value = Symbol(stack_values.pop(), stack_types[-1], stack_scopes[-1])
quad_result = Symbol(stack_values.pop(), stack_types[-1], stack_scopes[-3])
stack_types.append(consideration)
q = Quadruple(operator, value, None, quad_result)
resulting_quads.append(q)
    # Unpacks a symbol's data, inspects the stacks, and acts on what it finds
def evaluate_symbol(
symbol,
stack_values,
stack_operators,
stack_types,
stack_scopes,
resulting_quads,
result_quadruple_id,
):
if symbol.address_flag:
s_type = symbol.address_flag
else:
s_type = symbol.type
s_name = symbol.name
s_scope = symbol.scope
# is it is a ! operator
if s_type == "not":
stack_operators.append("NOT")
stack_scopes.append(s_scope)
# is an assignment or an assignment operator
elif s_type in ["assignment", "assignment_operation"]:
stack_operators.append(s_name)
stack_scopes.append(s_scope)
# is a value
elif s_type in SemanticTable.types:
stack_values.append(s_name)
stack_types.append(s_type)
stack_scopes.append(s_scope)
if Quadruple.__a_not_in_stack(stack_operators):
Quadruple.__generate_not_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if resulting_quads[-1].result_id.type == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# is an operator
elif s_type in ["operation", "comparison", "matching"]:
# Multiplication, Divition and Residue cases
if s_name in ["MUL", "DIV", "MOD"]:
# There is another operator of multiplication, division or residue
if Quadruple.__another_op_mdr_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# Addition and substraction cases
elif s_name in ["ADD", "SUB"]:
# There is another operator on the stack
if Quadruple.__another_op_as_mdr_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# There is another operator of sum or addition
if Quadruple.__another_op_as_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# Comparison operators case
elif s_name in ["GT", "LT", "GTE", "LTE"]:
# There is another mathematical and comparison operator on the stack
if Quadruple.__another_op_as_mdr_comp_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# There is another mathematical operator in stack
if Quadruple.__another_op_as_mdr_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# There is another operator of sum or addition
if Quadruple.__another_op_as_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# matching operators case
elif s_name in ["BEQ", "BNEQ", "OR", "AND"]:
# There is any another operator on the stack
if Quadruple.__any_op_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# There is another mathematical and comparison operator on the stack
if Quadruple.__another_op_as_mdr_comp_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# There is another mathematical operator in stack
if Quadruple.__another_op_as_mdr_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# There is another operator of sum or addition
if Quadruple.__another_op_as_in_stack(stack_operators):
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
stack_operators.append(s_name)
stack_scopes.append(s_scope)
# is a parentheses
elif s_type == "parentheses":
# When a ( arrives
if s_name == "OP":
stack_values.append("(")
stack_operators.append("(")
# When a ) arrives
elif s_name == "CP":
            # case when there is just one value inside parentheses, e.g. A + (B):
            # the "(" sits directly on top of the operator stack
            if stack_operators[-1] == "(":
                stack_values.pop(-2)
                stack_operators.pop()
else:
while stack_operators[-1] != "(":
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
stack_operators.pop()
stack_values.pop(-2)
# is an unknown character
else:
return "ERROR: type {} not found".format(s_type)
return result_quadruple_id
    # Receives the expression's list of symbols,
    # then evaluates the expression and generates the quadruples
def arithmetic_expression(expression, result_quadruple_id):
stack_values = [] # ["A", "B"]
stack_operators = [] # ["ADD"]
stack_types = [] # ["INT", "FLT"]
stack_scopes = []
resulting_quads = []
for symbol in Quadruple.format_expression(expression):
result_quadruple_id = Quadruple.evaluate_symbol(
symbol,
stack_values,
stack_operators,
stack_types,
stack_scopes,
resulting_quads,
result_quadruple_id,
)
if type(result_quadruple_id) != int:
return result_quadruple_id
while len(stack_operators):
if Quadruple.__is_assignment(stack_operators[-1]):
Quadruple.__generate_assignment_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
else:
Quadruple.__generate_quadruple(
stack_values,
stack_operators,
result_quadruple_id,
stack_types,
stack_scopes,
resulting_quads,
)
if stack_types[-1] == "ERROR":
return "ERROR: non-compatible types"
result_quadruple_id += 1
# resulting_quads.append(result_quadruple_id)
return resulting_quads
    # If it receives a string expression, it converts it into symbols
    # NOTE: symbols are always expected,
    # but this is validated to avoid errors
def format_expression(expression):
response = []
if type(expression) == str:
expression = expression.replace(" ", "")
expression = Quadruple.__divide_expression(expression)
for symbol in expression:
s_type = type(symbol)
if s_type == Symbol:
response.append(symbol)
elif s_type == str:
operators = {
"+": Symbol("ADD", "operation"),
"-": Symbol("SUB", "operation"),
"*": Symbol("MUL", "operation"),
"/": Symbol("DIV", "operation"),
"%": Symbol("MOD", "operation"),
"(": Symbol("OP", "parentheses"),
")": Symbol("CP", "parentheses"),
"!": Symbol("NOT", "not"),
"=": Symbol("EQ", "assignment"),
"<": Symbol("LT", "comparison"),
">": Symbol("GT", "comparison"),
"<=": Symbol("LTE", "comparison"),
">=": Symbol("GTE", "comparison"),
"==": Symbol("BEQ", "matching"),
"!=": Symbol("BNEQ", "matching"),
"||": Symbol("OR", "matching"),
"+=": Symbol("ADDEQ", "assignment_operation"),
"-=": Symbol("SUBEQ", "assignment_operation"),
"*=": Symbol("MULEQ", "assignment_operation"),
"/=": Symbol("DIVEQ", "assignment_operation"),
"%=": Symbol("MODEQ", "assignment_operation"),
}
response.append(operators.get(symbol, Symbol(symbol, "FLT")))
return response
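    # Illustrative sketch (assumes plain FLT operands are acceptable to SemanticTable):
    #   quads = Quadruple.arithmetic_expression("a=b+c*2", 0)
    #   for q in quads:
    #       q.print_quad()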
    # Prints the quadruple
def print_quad(self):
if type(self.operator) == Symbol:
print("OPERATOR: ")
self.operator.print_symbol()
print()
else:
print("OPERATOR: \n{}\n".format(self.operator))
if type(self.operand_1) == Symbol:
print("OPERAND_1: ")
self.operand_1.print_symbol()
print()
else:
print("OPERAND_1: \n{}\n".format(self.operand_1))
if type(self.operand_2) == Symbol:
print("OPERAND_2: ")
self.operand_2.print_symbol()
print()
else:
print("OPERAND_2: \n{}\n".format(self.operand_2))
if type(self.result_id) == Symbol:
print("RESULT_ID:")
self.result_id.print_symbol()
print()
else:
print("RESULT_ID: \n{}\n".format(self.result_id))
    def print_quads(quads, header=None):
        if header:
            print("----------------{}-----------------".format(header))
        # arithmetic_expression returns a list of quadruples, so iterate directly
        for q in quads:
            q.print_quad()
        print("------------------------------------")
|
import logging
import os.path
from poap.controller import BasicWorkerThread, ThreadController
from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.strategy import SRBFStrategy
from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant
from pySOT.optimization_problems.optimization_problem import OptimizationProblem
import numpy as np
class GenericProblem(OptimizationProblem):
def __init__(self, dim, objective, scale):
self.dim = dim
self.min = 0
self.minimum = np.zeros(dim)
self.lb = -scale * np.ones(dim)
self.ub = scale * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.objective = objective
self.info = str(dim) + "-dimensional objective function " + objective.__name__
def eval(self, x):
"""Evaluate the objective x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
d = float(self.dim)
return self.objective([x[0],x[1],x[2]])[0]
def pysot_cube(objective=None,scale=None, n_trials=50):
    if False:  # file logging disabled; flip to True to enable
if not os.path.exists("./logfiles"):
os.makedirs("logfiles")
if os.path.exists("./logfiles/example_simple.log"):
os.remove("./logfiles/example_simple.log")
logging.basicConfig(filename="./logfiles/example_simple.log", level=logging.INFO)
num_threads = 2
max_evals = n_trials
gp = GenericProblem(dim=3, objective=objective,scale=scale)
rbf = RBFInterpolant(dim=3, lb=np.array([-scale,-scale,-scale]), ub=np.array([scale,scale,scale]), kernel=CubicKernel(), tail=LinearTail(3))
slhd = SymmetricLatinHypercube(dim=3, num_pts=2 * (3 + 1))
# Create a strategy and a controller
controller = ThreadController()
controller.strategy = SRBFStrategy(
max_evals=max_evals, opt_prob=gp, exp_design=slhd, surrogate=rbf, asynchronous=True
)
# Launch the threads and give them access to the objective function
for _ in range(num_threads):
worker = BasicWorkerThread(controller, gp.eval)
controller.launch_worker(worker)
# Run the optimization strategy
result = controller.run()
return result.value
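# Example usage (a sketch; the objective is assumed to take a length-3 list
# and return a 1-element sequence, matching GenericProblem.eval above):
# def sphere(xs):
#     return [sum(x * x for x in xs)]
# best_value = pysot_cube(objective=sphere, scale=5.0, n_trials=30)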
|
from selenium import webdriver
import time
driver = webdriver.Firefox(executable_path='C:\\Users\\Ольга\\Downloads\\питон\\geckodriver.exe')
driver.get("https://www.avito.ru")
element = driver.find_element_by_id("search")
element.send_keys('модем роутер') # type the query ("modem router") into the search box
elemen = driver.find_element_by_class_name("suggest_item")
elemen.click()
knop = driver.find_element_by_class_name("main-select-2pf7p.main-location-3j9by") # open the city selector
knop.click()
cities = driver.find_element_by_class_name("suggest-input-3p8yi") # the city input field
cities.send_keys('Барнаул')
time.sleep(5)
cities.click()
new = driver.find_element_by_class_name("suggest-suggest_content-3ZSEd")
new.click()
act = driver.find_element_by_class_name("button-button-2Fo5k.button-size-m-7jtw4.button-primary-1RhOG") # the "Find" button
act.click()
driver.quit()
|
import re  # needed for re.search below

# `data` is assumed to be built earlier (parsing not shown): it maps each bag
# color to a list of content strings such as "2 muted yellow bags".
ok_bags = ["shiny gold"]
# Repeat this process until no new ok bags are found
while True:
new_bags = []
for bag_color, contents in data.items():
#print(bag_color.upper(), end=": ")
for each in contents:
# Find the position of the number at the start so you can get rid of it
number = re.search("^[0-9]+ ", each)
if number is not None:
inner_bag = each[number.end():] # Just get the text from the list
if inner_bag in ok_bags and bag_color not in ok_bags:
new_bags.append(bag_color)
#print("Found " + inner_bag, end=" ")
#print("\n")
print("New bags found this iteration: " + str(new_bags))
    if len(new_bags) > 0:
        ok_bags.extend(new_bags)  # record them, or the loop never terminates
        print("Please continue")
    else:
        break
print("Bags that can eventually contain a shiny gold bag:", len(ok_bags) - 1)
|
import tkinter, os
import tkinter.messagebox
from tkinter.filedialog import askopenfilename
from tkinter.messagebox import showinfo
import pyperclip, re
import hashlib
import subprocess
import multiprocessing
import threading
import signal
import time, sys
window = tkinter.Tk()
window.title("Compare hashes")
window.minsize(300, 200)
thread = None
file_labels =[]
clip_labels = []
script_labels = []
def hash_file(filename):
BUF_SIZE = 65536 # read stuff in 64kb chunks
hasher = hashlib.sha1()
with open(filename, 'rb') as file:
        buf = file.read(BUF_SIZE)  # read the first chunk, not the whole file
while len(buf) > 0:
hasher.update(buf)
buf = file.read(BUF_SIZE)
hasz = hasher.hexdigest()
return hasz
def select_files():
file_path = askopenfilename(initialdir="./", title = "Choose a file.")
if file_path:
hash_str = hash_file(file_path)
file_name = file_path.split("/")[-1]
add_label(topFrame, 0, file_labels, file_name, hash_str)
if file_labels:
button_choose.config(text=("Add another"))
def add_label(frame, col, labels_list, name, hashh="", end="no"):
labels_list.append(tkinter.Label(frame,text=name+"\n"+hashh))
if end == "no":
labels_list[-1].grid(column = col, row=len(labels_list))
elif end == "yes":
labels_list[-1].grid(column = col, row=len(labels_list)+1)
def paste_clip():
clip = pyperclip.paste()
regex = re.compile(r"[A-Fa-f0-9]{40,}|[A-Za-z0-9]{40,}")
for line in clip.splitlines():
if regex.search(line):
match = regex.search(line).group()
remaining_str = line.replace(match, "")
add_label(topFrame, 1, clip_labels, remaining_str, match)
else:
showinfo("Warning","Hash not found in \n {}".format(line))
def script():
a = subprocess.run('adb shell df -H /dev/block/mmcblk0p65', capture_output=True, shell=True, text = True)
if a.returncode != 0:
label_script.config(text=(a.stderr))
else:
available_space = a.stdout.split()[next(i for i in reversed(range(len(a.stdout.split()))) if "G" in a.stdout.split()[i])]
if int(available_space[:-1]) >= 4:
script_commands = {'boot_a':'adb shell dd if=/dev/block/mmcblk0p12 of=/sdcard/boot_a count=65536 bs=1024',
'system_a':'adb shell dd if=/dev/block/mmcblk0p14 of=/sdcard/system_a count=3145728 bs=1024',
'vendor_a':'adb shell dd if=/dev/block/mmcblk0p16 of=/sdcard/vendor_a count=1048576 bs=1024' }
for label, command in script_commands.items():
proc= subprocess.run(command, capture_output=True, shell=True, text = True)
add_label(topFrame, 2, script_labels, f"{label}: ", proc.stderr[proc.stderr.index("transferred"):proc.stderr.index("(")-1], end="yes")
time.sleep(2)
reset_script_labels()
sha_commands = {"boot_a": 'adb shell sha256sum /sdcard/boot_a',
"system_a":'adb shell sha256sum /sdcard/system_a',
"vendor_a":'adb shell sha256sum /sdcard/vendor_a' }
for label, command in sha_commands.items():
process = subprocess.run(command, capture_output=True, shell=True, text = True)
add_label(topFrame, 2, script_labels, label, process.stdout.split()[0], end="no")
rm = subprocess.run('adb shell rm /sdcard/boot_a /sdcard/system_a /sdcard/vendor_a', shell=True)
if rm.returncode != 0:
label_script.config(text=(rm.stderr))
else:
label_script.config(text=("Not enough space, failed"))
button_copy.grid(column=3, row=0)
def run_script():
print(threading.active_count())
if threading.active_count() > 1:
showinfo("Warning", "Script is still runing!")
else:
reset_script_labels()
label_script.config(text=("Please wait..."))
global thread
thread = threading.Thread(target = script)
thread.start()
def reset():
print(threading.active_count())
[flab.destroy() for flab in file_labels ]
del file_labels[:]
[clab.destroy() for clab in clip_labels ]
del clip_labels[:]
    button_choose.config(text=("Choose file"))
button_copy.grid_forget()
if threading.active_count() > 1:
showinfo("Warning", "Script is still runing!")
else:
reset_script_labels()
def reset_script_labels():
label_script.config(text=(""))
[slab.destroy() for slab in script_labels ]
del script_labels[:]
def compare():
all_labels = [*file_labels, *clip_labels, *script_labels]
if len(all_labels) < 2:
showinfo("Warning","No hashes to compare.")
return
else:
hashes = [x.cget("text").split("\n")[-1] for x in all_labels]
hashes_match = " ".join([x for n, x in enumerate(hashes) if x in hashes[:n]])
for label in all_labels:
if label.cget("text").split("\n")[-1] in hashes_match:
label.config(fg="green")
else:
label.config(fg="red")
def copy_to_clipboard():
test = []
for label in script_labels:
test.append(label.cget("text").split())
test = [item for sublist in test for item in sublist]
pyperclip.copy("\n".join(test[1::2]))
def on_closing():
if threading.active_count() > 1:
showinfo("Warning", "Script is still runing!")
else:
window.destroy()
window.protocol("WM_DELETE_WINDOW", on_closing)
topFrame = tkinter.Frame(window)
topFrame.pack()
label_script = tkinter.Label(topFrame)
label_script.grid(column=2, row=1)
button_choose = tkinter.Button(topFrame, text ="Choose file", command = select_files)
button_choose.grid(column=0, row=0, padx= (0,20), pady=(10, 10))
button_clipboard = tkinter.Button(topFrame, text="Paste Clipboard", command = paste_clip)
button_clipboard.grid(column=1, row=0, padx= (0,20), pady=(10, 10))
button_script = tkinter.Button(topFrame, text="Run script", command = run_script)
button_script.grid(column=2, row=0, padx= (0,20), pady=(10, 10))
button_copy = tkinter.Button(topFrame, text="copy", command= copy_to_clipboard, bg = "SpringGreen2")
bottomFrame = tkinter.Frame(window)
bottomFrame.pack(side="bottom")
compare_button = tkinter.Button(bottomFrame, text ="Compare", command = compare, bg="SkyBlue3")
compare_button.grid(column=2, row=3,columnspan=2,padx=(100, 0), pady=(10, 10))
reset_button = tkinter.Button(bottomFrame, text ="Reset", command = reset, bg = "OrangeRed2")
reset_button.grid(column=0, row=3, padx = (10,0))
window.mainloop()
|
"""
kumquat application
"""
import typing
import logging
import inspect
import uvicorn
from kumquat.context import env_var
from kumquat.response import (
TextResponse,
JsonResponse,
SimpleResponse,
TemplateResponse,
HTMLResponse,
)
from kumquat.route import Route, Router
from kumquat.request import Request
from kumquat.exceptions import KumquatException
from kumquat._types import Method, Scope, Receive, Send
from kumquat.utils import BackgroundTask
try:
from pyngrok import ngrok
except ImportError:
ngrok = None
logger = logging.getLogger(__name__)
RouteFunc = typing.Callable[[Request, SimpleResponse], typing.Any]
def _dispatch_simple_response(
data: SimpleResponse, status_code: int, response: SimpleResponse
) -> SimpleResponse:
data.custom_headers = response.custom_headers
data.status_code = status_code
return data
def _dispatch_factory(
data: typing.Any,
status_code: int,
response: SimpleResponse,
response_class: typing.Type[SimpleResponse],
) -> typing.Callable:
return response_class(
data, headers=response.custom_headers, status_code=status_code
)
def _dispatch_lambda_factory(
response_class: typing.Type[SimpleResponse],
) -> typing.Callable:
return lambda *args: _dispatch_factory(*args, response_class=response_class)
_DISPATCH_TYPES = {
SimpleResponse: _dispatch_simple_response,
HTMLResponse: _dispatch_simple_response,
TemplateResponse: _dispatch_simple_response,
str: _dispatch_lambda_factory(TextResponse),
dict: _dispatch_lambda_factory(JsonResponse),
}
def _process_route_result(
route_result: typing.Any, response: SimpleResponse
) -> typing.Union[
SimpleResponse, TextResponse, JsonResponse, TemplateResponse, HTMLResponse,
]:
status_code = response.status_code
if isinstance(route_result, tuple):
data = route_result[0]
status_code = route_result[1]
else:
data = route_result
result: typing.Optional[typing.Callable] = _DISPATCH_TYPES.get(type(data))
if result is not None:
return result(data, status_code, response)
return TextResponse(
str(data), status_code=status_code, headers=response.custom_headers,
)
class Kumquat:
"""
kumquat web application
"""
def __init__(self, templates_path: str = "templates/"):
self.router = Router()
self.middleware_stack: typing.List[
typing.Callable[[Request, SimpleResponse], typing.Any]
] = []
env_var.set(templates_path)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
request = Request(scope, receive)
_response = SimpleResponse(b"")
path_dict, current_route = self.router.get_route(request.path, request.method)
request.path_dict = path_dict
response = await self._prepare_response(request, _response, current_route)
await self._call_middleware_stack(request, response)
await response(scope, receive, send)
@staticmethod
async def _prepare_response(
request: Request,
response: SimpleResponse,
current_route: typing.Optional[Route],
) -> SimpleResponse:
if current_route is None:
return TextResponse("Not Found", status_code=404)
if request.method not in current_route.methods:
return TextResponse("Method Not Allowed", status_code=405)
route_result: typing.Any = await current_route.func(request, response)
return _process_route_result(route_result, response)
async def _call_middleware_stack(
self, request: Request, response: SimpleResponse
) -> None:
for middleware_func in self.middleware_stack:
if inspect.iscoroutinefunction(middleware_func):
await middleware_func(request, response)
elif inspect.isfunction(middleware_func):
await (BackgroundTask(middleware_func, request, response))()
def create_route(
self, path: str, func: RouteFunc, methods: typing.Tuple[Method],
) -> typing.Optional[typing.NoReturn]:
"""
create any method route for app
:param path:
:param func:
:param methods:
:return:
"""
route = Route(path, func, methods=methods)
route_func_arg_count = route.func.__code__.co_argcount
if route_func_arg_count != 2:
raise KumquatException(
f"function <<{func.__name__}>> must take strictly 2 args"
)
self.router.add_route(route)
return None
def create_middleware(self, func: RouteFunc) -> None:
self.middleware_stack.append(func)
def middleware(self) -> typing.Callable:
"""
decorator for creating middleware
:return:
"""
def decorator(func: RouteFunc) -> typing.Callable:
self.create_middleware(func)
return func
return decorator
def get(self, path: str):
"""
decorator for creating get route
:param path:
:return:
"""
def decorator(func: RouteFunc) -> typing.Callable:
self.create_route(path, func, methods=(Method("GET"),))
return func
return decorator
def post(self, path: str):
"""
decorator for creating post route
:param path:
:return:
"""
def decorator(func: RouteFunc) -> typing.Callable:
self.create_route(path, func, methods=(Method("POST"),))
return func
return decorator
def route(self, path: str, methods: typing.Tuple[Method]):
"""
decorator for creating any method route
:param path:
:param methods:
:return:
"""
def decorator(func: RouteFunc) -> typing.Callable:
self.create_route(path, func, methods=methods)
return func
return decorator
def index(self):
"""
decorator for creating index route (path = '/')
:return:
"""
def decorator(func: RouteFunc) -> typing.Callable:
self.create_route("/", func, methods=(Method("GET"),))
return func
return decorator
def run(self, host: str = "127.0.0.1", port: int = 8000, log_level: str = "info"):
"""
start application with uvicorn
:param host:
:param port:
:param log_level:
:return:
"""
uvicorn.run(self, host=host, port=port, log_level=log_level)
def ngrok_run(self, port: int = 8000):
if ngrok is None:
raise ImportError(
"For this method you have to install pyngrok - pip install pyngrok"
)
public_url = ngrok.connect(port=port)
print(f"Server started on {public_url}")
self.run(port=port)
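# Example usage (a minimal sketch; a route function must take exactly
# (request, response), and the return type picks the response class:
# str -> TextResponse, dict -> JsonResponse, (data, code) sets the status):
#
# app = Kumquat()
#
# @app.get("/hello")
# def hello(request, response):
#     return "Hello, world!"
#
# @app.index()
# def home(request, response):
#     return {"status": "ok"}, 200
#
# app.run(port=8000)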
|
def beach(sentence):
    word = sentence.lower()
counter = 0
while word:
if word[:3] == 'sun':
counter += 1
if word[:4] == 'fish':
counter += 1
if word[:4] == 'sand':
counter += 1
if word[:5] == 'water':
counter += 1
word = word[1:]
return counter
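# e.g. beach("The sun and the sand") == 2: the sliding scan counts every
# occurrence of 'sun', 'fish', 'sand' and 'water' as a substring.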
sentence = input()
print(beach(sentence))
|
def knapsack_dp(wt,val,W,n):
t=[[-1 for j in range(W+1)] for i in range(n+1)]
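    # t[i][j] = best value achievable using the first i items with capacity j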
#Base Condition Initialization
for i in range(n+1):
for j in range(W+1):
if i==0 or j==0:
t[i][j] = 0
#Recursive Case
for i in range(1,n+1):
for j in range(1,W+1):
            if wt[i-1]<=j:  # compare against the current capacity j, not W
                t[i][j] = max(val[i-1]+t[i-1][j-wt[i-1]],t[i-1][j])
            else:
                t[i][j] = t[i-1][j]
return t[n][W]
W = 6
wt = [1,2,3,6]
val = [1,2,4,6]
n=4
print(knapsack_dp(wt,val,W,n))  # expected: 7 (items with weights 1+2+3, values 1+2+4)
|
# Generated by Django 2.1.7 on 2019-03-15 15:33
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('contest', '0007_submission_time'),
]
operations = [
migrations.AlterField(
model_name='submission',
name='time',
field=models.DateTimeField(
default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=utc)),
),
]
|
from max_number_occurence import generated_list
from decorators import run_time_decorator
def unique_elements_in_list(array):
unique_elements = [i for i in set(array)]
return unique_elements
list1 = generated_list(10000)
list2 = generated_list(10000)
def common_elments_in_lists(first_list, second_list):
common_list = []
for i in first_list:
if i in second_list and i not in common_list:
common_list.append(i)
return common_list
def common_elments_in_lists_set(first_list, second_list):
common_list = set()
set_list = set(second_list)
for i in first_list:
if i in set_list and i not in common_list:
common_list.add(i)
return common_list
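# Converting second_list to a set makes each membership test O(1), so this
# version is roughly O(n), versus the O(n*m) nested scan above.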
@run_time_decorator(30)
def run_common_elements_in_list(list1, list2):
common_elments_in_lists(list1, list2)
@run_time_decorator(30)
def run_common_elements_in_list_set(list1, list2):
common_elments_in_lists_set(list1, list2)
if __name__ == '__main__':
    print(unique_elements_in_list([1, 2, 3, 3, 4, 4]))
common_elments_in_lists(list1, list2)
common_elments_in_lists_set(list1, list2)
run_common_elements_in_list(list1, list2)
run_common_elements_in_list_set(list1, list2)
|
from tuneup.horserace import latex_horse_race
from tuneup.trivariatesingleobjectivesolvers.trivariateboxsolvers import GOOD_SOLVERS, sigopt_cube
from tuneup.trivariateobjectives.trivariateboxobjectives import OBJECTIVES
from pprint import pprint
import random
def race_specification(debug:bool):
solvers = GOOD_SOLVERS + [sigopt_cube]
objectives = OBJECTIVES
objective_thinning = 3 # e.g. if 3 we use every 3rd objective, on average.
max_thresholds = 5 if debug else 20
n_outer_repeat = 1000 if not debug else 5
n_threshold_repeat = 5 if not debug else 1 # Number of times to call each solver when setting scoring scale
n_trials = 50 if not debug else 10 # Number of evaluations of the objective function
n_inner_repeat = 100 if not debug else 2 # Number of times to run the horse race
max_objectives = 2 if debug else 10
objectives = dict(([(k, v) for k, v in objectives.items() if random.choice(range(objective_thinning))==0][:max_objectives]))
threshold_trials = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512][:max_thresholds]
spec = {'objectives': objectives,
'solvers': solvers,
'threshold_trials': threshold_trials,
'n_outer_repeat': n_outer_repeat,
'n_threshold_repeat': n_threshold_repeat,
'n_trials': n_trials,
'n_inner_repeat': n_inner_repeat,
'solvers_for_thresholds': GOOD_SOLVERS}
return spec
if __name__=='__main__':
spec = race_specification(debug=False)
pprint(spec)
latex_horse_race(**spec)
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import unittest
from ask_sdk_model import RequestEnvelope, Context, User, Device
from ask_sdk_model.interfaces.system import SystemState
from ask_sdk_core.exceptions import PersistenceException
from ask_sdk_dynamodb.partition_keygen import (
user_id_partition_keygen, device_id_partition_keygen)
class TestPartitionKeyGenerators(unittest.TestCase):
def setUp(self):
self.request_envelope = RequestEnvelope()
self.context = Context()
self.system = SystemState()
self.user = User()
self.device = Device()
def test_valid_user_id_partition_keygen(self):
self.user.user_id = "123"
self.system.user = self.user
self.context.system = self.system
self.request_envelope.context = self.context
assert user_id_partition_keygen(self.request_envelope) == "123", (
"User Id Partition Key Generation retrieved wrong user id from "
"valid request envelope")
def test_user_id_partition_keygen_raise_error_when_request_envelope_null(self):
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=None)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null request envelope is provided")
def test_user_id_partition_keygen_raise_error_when_context_null(self):
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null context provided in request envelope")
def test_user_id_partition_keygen_raise_error_when_system_null(self):
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null system provided in context of "
"request envelope")
def test_user_id_partition_keygen_raise_error_when_user_null(self):
self.context.system = self.system
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null user provided in context.system of "
"request envelope")
def test_valid_device_id_partition_keygen(self):
self.device.device_id = "123"
self.system.device = self.device
self.context.system = self.system
self.request_envelope.context = self.context
assert device_id_partition_keygen(self.request_envelope) == "123", (
"Device Id Partition Key Generation retrieved wrong device id "
"from valid request envelope")
def test_device_id_partition_keygen_raise_error_when_request_envelope_null(self):
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=None)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null request envelope is provided")
def test_device_id_partition_keygen_raise_error_when_context_null(self):
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null context provided in request envelope")
def test_device_id_partition_keygen_raise_error_when_system_null(self):
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null system provided in context of "
"request envelope")
def test_device_id_partition_keygen_raise_error_when_device_null(self):
self.context.system = self.system
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null device provided in context.system of "
"request envelope")
def tearDown(self):
self.request_envelope = None
self.context = None
self.system = None
self.user = None
self.device = None
|
class Decision(object):
    def __init__(self, exits=None):
        # Avoid a shared mutable default argument: give each instance its own dict.
        self.exits = exits if exits is not None else {}
        self.template = 'decision'
|
"""
1.Logical Operators
//- Floor operator- division that results into a whole number
%- modlus
**- exponent
2.Comparision operators
>,<,==,!=,
"""
# x=34
# y=65
# print(x+y)
# print(x/y)
# print(x%y)
# print(x**y)
# print(y//x)
# print(y/x)
# x=int(input("Enter a number:"))
# y=int(input("Enter a number:"))
# z= x+y
# print(z)
# a=12
# b=13
# print("a > b", a>b)
# print("a < b", a<b)
# print("a == b", a==b)
# print("a != b", a!=b)
#LOGICAL OPERATORS
"""
A B AND OR
true True True True
True False false true
False True false true
False False False False
"""
m= True
n=False
print("M and N", m and n)
print("M or N", m or n)
print(" not M is", not m)
""" ASSIGNMENT OPERATIONS
=
+= Add and assign
-= Subtract and assign
"""
c= 190
print(c)
c+=40 #c=c+40
print(c)
c-=50 #c=c-50
print(c)
c*=4 #c= c*4
print(c)
#Membership Operators
#identity operators
#bit operators
#Arithmetic
x= int(input("Enter a Number"))
y=int(input("Enter a Second Number"))
z=x+y
print("Sum is:",z)
|
from spack import *
import re
import os, sys
from glob import glob
import fnmatch
class FwliteToolConf(Package):
url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
depends_on('scram')
depends_on('gmake-toolfile')
if sys.platform == 'darwin':
depends_on('cfe-bindings')
else:
depends_on('llvm-lib-toolfile')
depends_on('gcc-compiler-toolfile')
depends_on('root-toolfile')
depends_on('intel-tbb-toolfile')
depends_on('tinyxml-toolfile')
depends_on('tinyxml2-toolfile')
depends_on('clhep-toolfile')
depends_on('md5-toolfile')
depends_on('python-toolfile')
depends_on('vdt-toolfile')
depends_on('boost-toolfile')
depends_on('libsigcpp-toolfile')
depends_on('xrootd-toolfile')
depends_on('cppunit-toolfile')
depends_on('xerces-c-toolfile')
depends_on('expat-toolfile')
depends_on('sqlite-toolfile')
depends_on('bzip2-toolfile')
depends_on('gsl-toolfile')
depends_on('hepmc-toolfile')
depends_on('libpng-toolfile')
depends_on('giflib-toolfile')
depends_on('openssl-toolfile')
depends_on('pcre-toolfile')
depends_on('zlib-toolfile')
depends_on('xz-toolfile')
depends_on('libtiff-toolfile')
depends_on('libjpeg-turbo-toolfile')
depends_on('libxml2-toolfile')
depends_on('fireworks-geometry-toolfile')
depends_on('uuid-toolfile')
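    # install() collects the scram tool XML files from each dependency's
    # etc/scram.d directory and copies them into tools/selected.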
def install(self, spec, prefix):
with working_dir(prefix, create=True):
mkdirp('tools/selected')
mkdirp('tools/available')
for dep in spec.dependencies():
xmlfiles = glob(join_path(dep.prefix.etc, 'scram.d', '*.xml'))
for xmlfile in xmlfiles:
install(xmlfile, 'tools/selected')
|
import writeBack
import alu
import cache
import issue
import fetch
import simulator
import memory
from helpers import SetUp
global_cycle = 0
class simClass:
#instruction
# R = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
#
# postMemBuff = [-1, -1]
# postALUBuff = [-1, -1]
# preMemBuff = [-1, -1]
# preALUBuff = [-1, -1]
# preIssueBuff = [-1, -1, -1, -1]
def __init__(self, instructions, opcode, opcodeStr, dataval,
address, arg1, arg2, arg3, arg1Str, arg2Str, arg3Str,
numInstructs, destReg, src1Reg, src2Reg ):
self.instruction = instructions
self.opcode = opcode
self.dataval = dataval
self.address = address
self.numInstructions = numInstructs
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.arg1Str = arg1Str
self.arg2Str = arg2Str
self.arg3Str = arg3Str
self.destReg = destReg
self.src1Reg = src1Reg
self.src2Reg = src2Reg
self.opcodeStr = opcodeStr
self.PC = 96
self.cycle = 1
self.cycleList = [0]
self.R = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.postMemBuff = [-1, -1] #first num is value, second is instr index
self.postALUBuff = [-1, -1] #first num is value, second is instr index
self.preMemBuff = [-1, -1]
self.preALUBuff = [-1, -1]
self.preIssueBuff = [-1, -1, -1, -1]
#TESTING end
# self.preIssueBuff = [0, 1, 2, 3]
self.WB = writeBack.WriteBack(self.R, self.postMemBuff, self.postALUBuff, destReg)
#self.WB = WriteBack(self.R, self.postMemBuff, self.postALUBuff, destReg)
self.cache = cache.Cache(numInstructs, instructions, dataval, address)
self.ALU = alu.ALU(self.R, self.postALUBuff, self.preALUBuff, opcodeStr, arg1, arg2, arg3)
#self.cache = cache.Cache(numInstructs, instructions, dataval, address)
#order important? had cache after alu but slide has this order (cache then alu)
self.MEM = memory.Memory(self.R, self.postMemBuff, self.preMemBuff, opcodeStr, arg1, arg2,
arg3, dataval, address, self.numInstructions, self.cache, self.cycleList)
self.issue = issue.Issue(self.preIssueBuff, self.preMemBuff, self.preALUBuff,
instructions, opcode, opcodeStr, dataval, address, arg1, arg2, arg3,
numInstructs, destReg, src1Reg, src2Reg)
self.fetch = fetch.Fetch(self.preIssueBuff, instructions, opcode, opcodeStr, dataval, address, arg1, arg2, arg3,
numInstructs, destReg, src1Reg, src2Reg)
self.outputFileName = SetUp.get_output_filename()
#print(self.cache)
def run(self):
go = True
while go:
# self.WB.run()
# self.ALU.run()
# #self.MEM.run()
# self.issue.run()
# #go = self.fetch.run()
#
# self.printState()
# self.cycle += 1
# print(f"cycle:{self.cycle}")
#for testing purposes
#update buffers from WB
# [self.R, self.postMemBuff, self.postALUBuff] = self.WB.run()
#self.WB.run()
[self.R, self.postMemBuff, self.postALUBuff, self.destReg] = self.WB.run()
#update Buffers from ALU
[self.preALUBuff, self.postALUBuff] = self.ALU.run()
[self.preMemBuff, self.postMemBuff] = self.MEM.run()
[self.preMemBuff, self.preALUBuff, self.preIssueBuff] = self.issue.run()
self.preIssueBuff = self.fetch.run()
# send new buffers values to WB
#uncommented originally and line 106
self.WB = writeBack.WriteBack(self.R, self.postMemBuff, self.postALUBuff, self.destReg)
self.printState()
self.cycle += 1
if self.cycle > 22:
go = False
def printState(self):
"""
print(f"Cycle:{self.cycle}")
#
print("Pre-Issue Buffer:")
print(f"\tEntry 0:\t{self.opcodeStr[self.preIssueBuff[0]]}"
f"{self.arg1Str[self.preIssueBuff[0]]}{self.arg2Str[self.preIssueBuff[0]]}"
f"{self.arg3Str[self.preIssueBuff[0]]}")
#outFile.write("\tEntry 0:\t" + self.opcodeStr[self.preIssueBuff[0]] + self.arg1Str[self.preIssueBuff[0]] +
# self.arg2Str[self.preIssueBuff[0]] + self.arg3Str[self.preIssueBuff[0]] + "\n")
#
print(f"\tEntry 1:\t{self.opcodeStr[self.preIssueBuff[1]]}"
f"{self.arg1Str[self.preIssueBuff[1]]}{self.arg2Str[self.preIssueBuff[1]]}"
f"{self.arg3Str[self.preIssueBuff[1]]}")
#
print(f"\tEntry 2:\t{self.opcodeStr[self.preIssueBuff[2]]}"
f"{self.arg1Str[self.preIssueBuff[2]]}{self.arg2Str[self.preIssueBuff[2]]}"
f"{self.arg3Str[self.preIssueBuff[2]]}")
#
print(f"\tEntry 3:\t{self.opcodeStr[self.preIssueBuff[3]]}"
f"{self.arg1Str[self.preIssueBuff[3]]}{self.arg2Str[self.preIssueBuff[3]]}"
f"{self.arg3Str[self.preIssueBuff[3]]}")
#
#going into ALU
print("Pre_ALU Queue:")
print(f"\tEntry 0:\t{self.opcodeStr[self.preALUBuff[0]]}"
f"{self.arg1Str[self.preALUBuff[0]]}{self.arg2Str[self.preALUBuff[0]]}"
f"{self.arg3Str[self.preALUBuff[0]]}")
#
#going into ALU
print(f"\tEntry 1:\t{self.opcodeStr[self.preALUBuff[1]]}"
f"{self.arg1Str[self.preALUBuff[1]]}{self.arg2Str[self.preALUBuff[1]]}"
f"{self.arg3Str[self.preALUBuff[1]]}")
#
        # #going into WB: should self.postALUBuff be [0] or [1]?
print("Post_ALU Queue:")
print(f"\tEntry 0:\t{self.opcodeStr[self.postALUBuff[1]]}"
f"{self.arg1Str[self.postALUBuff[1]]}{self.arg2Str[self.postALUBuff[1]]}"
f"{self.arg3Str[self.postALUBuff[1]]}")
#
# # print(f"\tEntry 1:\t{self.opcodeStr[self.preALUBuff[1]]}"
# # f"{self.arg1Str[self.preALUBuff[1]]}{self.arg2Str[self.preALUBuff[1]]}"
# # f"{self.arg3Str[self.preALUBuff[1]]}")
#NEED TO ADD TO outWrite section also
print("Pre_MEM Queue:")
print(f"\tEntry 0:\t{self.opcodeStr[self.preMemBuff[0]]}"
f"{self.arg1Str[self.preMemBuff[0]]}{self.arg2Str[self.preMemBuff[0]]}"
f"{self.arg3Str[self.preMemBuff[0]]}")
print(f"\tEntry 1:\t{self.opcodeStr[self.preMemBuff[1]]}"
f"{self.arg1Str[self.preMemBuff[1]]}{self.arg2Str[self.preMemBuff[1]]}"
f"{self.arg3Str[self.preMemBuff[1]]}")
#
#print(f"(simClass187)self.postMemBuff = {self.postMemBuff}")
#
print("Post_MEM Queue:") #index number in postMemBuff[1]
print(f"\tEntry 0:\t{self.opcodeStr[self.postMemBuff[1]]}"
f"{self.arg1Str[self.postMemBuff[1]]}{self.arg2Str[self.postMemBuff[1]]}"
f"{self.arg3Str[self.postMemBuff[1]]}")
print(f"self.R{self.R}")
#
LRU = 0
#
#
#
# # return [True, self.cacheSets[setNum][(self.lruBit[setNum] + 1) % 2][dataWord + 3]]
#
#
print("Cache")
print(f"Set 0: LRU = {self.cache.lruBit[0]}")
print(f"\tEntry 0:{self.fetch.cache.cacheSets[0][0]}")
#print(f"\tEntry 0:{self.fetch.cache.}")
print(f"\tEntry 1:{self.fetch.cache.cacheSets[0][1]}")
print(f"Set 1: LRU = {self.cache.lruBit[1]}")
print(f"\tEntry 0:{self.fetch.cache.cacheSets[1][0]}")
print(f"\tEntry 1:{self.fetch.cache.cacheSets[1][1]}")
print(f"Set 2: LRU = {self.cache.lruBit[2]}")
print(f"\tEntry 0:{self.fetch.cache.cacheSets[2][0]}")
print(f"\tEntry 1:{self.fetch.cache.cacheSets[2][1]}")
print(f"Set 3: LRU = {self.cache.lruBit[3]}")
print(f"\tEntry 0:{self.fetch.cache.cacheSets[3][0]}")
print(f"\tEntry 1:{self.fetch.cache.cacheSets[3][1]}")
# print(f"\tEntry 0:[({self.fetch.cache.cacheSets[0]},{self.fetch.cache.cacheSets[1]},"
# f"{self.fetch.cache.cacheSets[2]})<{self.fetch.cache.cacheSets[3]},"
# f"{self.fetch.cache.cacheSets[0]}>]")
#outFile.write("Cache\n")
#outFile.write("Set 0: LRU = " + self.cache.lruBit[0])
#outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[0][0]))
#outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[0][1]))
#outFile.write("\nSet 1: LRU = " + self.cache.lruBit[1])
#outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[1][0]))
#outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[1][1]))
#outFile.write("\nSet 2: LRU = " + self.cache.lruBit[2])
#outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[2][0]))
#outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[2][1]))
#outFile.write("\nSet 3: LRU = " + self.cache.lruBit[3])
#outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[3][0]))
#outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[3][1]))
print(f"\ndata:\n")
for i in range(len(self.dataval)):
if (i % 8 == 0 and i!= 0) or i == len(self.dataval):
print(f" ")
if i % 8 == 0:
print(f"{self.address[i + self.numInstructions]} : {self.dataval[i]}", end = "")
if i % 8 != 0:
print(f"\t {self.dataval[i]}", end ="" )
print(f"")
"""
"""
outFile.write("\ndata:\n")
outStr = ""
for i in range(len(self.dataval)):
# ****
# print("Entering SECOND if loop")
if (i % 8 == 0 and i != 0 or i == len(self.dataval)):
outFile.write(outStr + "\n")
if i % 8 == 0:
outStr = str(self.address[i + self.numInstructions]) + \
":" + str(self.dataval[i])
# print(outStr)
if (i % 8 != 0):
outStr = outStr + "\t" + str(self.dataval[i])
# print(outStr)
outFile.write(outStr + "\n")
"""
#testing
#print(f"{range(len(self.instruction))}")
outputFileName = SetUp.get_output_filename()
with open(outputFileName + "_pipeline.txt", 'a') as outFile:
breakChanger = []
#ALSO LINE ~378 for undo of this
for i in range(len(self.instruction)):
if self.opcodeStr[i] == "BREAK":
self.opcodeStr[i] = ""
outFile.write("---------------------\n")
outFile.write(
"cycle:" + str(self.cycle) + "\t" + "\n")
outFile.write("Pre-Issue Buffer:\n")
outFile.write("\tEntry 0:\t" + self.opcodeStr[self.preIssueBuff[0]] + self.arg1Str[self.preIssueBuff[0]] +
self.arg2Str[self.preIssueBuff[0]] + self.arg3Str[self.preIssueBuff[0]] + "\n")
outFile.write("\tEntry 1:\t" + self.opcodeStr[self.preIssueBuff[1]] + self.arg1Str[self.preIssueBuff[1]] +
self.arg2Str[self.preIssueBuff[1]]+ self.arg3Str[self.preIssueBuff[1]] + "\n")
outFile.write("\tEntry 2:\t" + self.opcodeStr[self.preIssueBuff[2]] + self.arg1Str[self.preIssueBuff[2]] +
self.arg2Str[self.preIssueBuff[2]] + self.arg3Str[self.preIssueBuff[2]] + "\n")
outFile.write("\tEntry 3:\t" + self.opcodeStr[self.preIssueBuff[3]] + self.arg1Str[self.preIssueBuff[3]] +
self.arg2Str[self.preIssueBuff[3]] + self.arg3Str[self.preIssueBuff[3]] + "\n")
outFile.write("Pre_ALU Queue:\n")
outFile.write("\tEntry 0:\t" + self.opcodeStr[self.preALUBuff[0]] + self.arg1Str[self.preALUBuff[0]] +
self.arg2Str[self.preALUBuff[0]] + self.arg3Str[self.preALUBuff[0]] + "\n")
outFile.write("\tEntry 1:\t" + self.opcodeStr[self.preALUBuff[1]] + self.arg1Str[self.preALUBuff[1]] +
self.arg2Str[self.preALUBuff[1]] + self.arg3Str[self.preALUBuff[1]] + "\n")
outFile.write("Post_ALU Queue:\n")
outFile.write("\tEntry 0:\t" + self.opcodeStr[self.postALUBuff[1]] + self.arg1Str[self.postALUBuff[1]] +
self.arg2Str[self.postALUBuff[1]] + self.arg3Str[self.postALUBuff[1]] + "\n")
#outFile.write("\n")
outFile.write("Pre_MEM Queue:\n")
outFile.write("\tEntry 0:\t" + self.opcodeStr[self.preMemBuff[0]] + self.arg1Str[self.preMemBuff[0]] +
self.arg2Str[self.preMemBuff[0]] + self.arg3Str[self.preMemBuff[0]] + "\n")
outFile.write("\tEntry 1:\t" + self.opcodeStr[self.preMemBuff[1]] + self.arg1Str[self.preMemBuff[1]] +
self.arg2Str[self.preMemBuff[1]] + self.arg3Str[self.preMemBuff[1]] + "\n")
outFile.write("Post_MEM Queue:\n") #index in postMemBuff[0]
outFile.write("\tEntry 0:\t" + self.opcodeStr[self.postMemBuff[1]] + self.arg1Str[self.postMemBuff[1]] +
self.arg2Str[self.postMemBuff[1]] + self.arg3Str[self.postMemBuff[1]] + "\n")
# NEED TO ADD TO outWrite section also
#print("Pre_MEM Queue:")
#print(f"\tEntry 0:\t{self.opcodeStr[self.preMemBuff[0]]}"
# f"{self.arg1Str[self.preMemBuff[0]]}{self.arg2Str[self.preMemBuff[0]]}"
# f"{self.arg3Str[self.preMemBuff[0]]}")
#print(f"\tEntry 1:\t{self.opcodeStr[self.preMemBuff[1]]}"
# f"{self.arg1Str[self.preMemBuff[1]]}{self.arg2Str[self.preMemBuff[1]]}"
# f"{self.arg3Str[self.preMemBuff[1]]}")
#print("Post_MEM Queue:")
#print(f"\tEntry 0:\t{self.opcodeStr[self.postMemBuff[0]]}"
# f"{self.arg1Str[self.postMemBuff[0]]}{self.arg2Str[self.postMemBuff[0]]}"
# f"{self.arg3Str[self.postMemBuff[0]]}")
outFile.write("registers:\n")
outStr = "r00:"
for i in range(0, 8):
outStr = outStr + "\t" + str(self.R[i])
outFile.write(outStr + "\n")
outStr = "r08: "
for i in range(0, 8):
outStr = outStr + "\t" + str(self.R[i + 8])
outFile.write(outStr + "\n")
outStr = "r16: "
for i in range(0, 8):
outStr = outStr + "\t" + str(self.R[i + 16])
outFile.write(outStr + "\n")
outStr = "r24: "
for i in range(0, 8):
outStr = outStr + "\t" + str(self.R[i + 24])
outFile.write(outStr + "\n")
outFile.write("Cache\n")
outFile.write("Set 0: LRU = " + str(self.cache.lruBit[0]))
outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[0][0]))
outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[0][1]))
outFile.write("\nSet 1: LRU = " + str(self.cache.lruBit[1]))
outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[1][0]))
outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[1][1]))
outFile.write("\nSet 2: LRU = " + str(self.cache.lruBit[2]))
outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[2][0]))
outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[2][1]))
outFile.write("\nSet 3: LRU = " + str(self.cache.lruBit[3]))
outFile.write("\n\tEntry 0:" + str(self.fetch.cache.cacheSets[3][0]))
outFile.write("\n\tEntry 1:" + str(self.fetch.cache.cacheSets[3][1]))
# print("Cache")
# print(f"Set 0: LRU = {LRU}")
# print(f"\tEntry 0:{self.fetch.cache.cacheSets[0][0]}")
# print(f"\tEntry 1:{self.fetch.cache.cacheSets[0][1]}")
outFile.write("\ndata:\n")
outStr = ""
for i in range(len(self.dataval)):
# ****
# print("Entering SECOND if loop")
if (i % 8 == 0 and i != 0 or i == len(self.dataval)):
outFile.write(outStr + "\n")
if i % 8 == 0:
outStr = str(self.address[i + self.numInstructions]) + \
":" + str(self.dataval[i])
# print(outStr)
if (i % 8 != 0):
outStr = outStr + "\t" + str(self.dataval[i])
# print(outStr)
outFile.write(outStr + "\n")
outFile.close()
#ALSO line ~265 first part of this
for i in range(len(self.instruction)):
if self.opcodeStr[i] == "":
self.opcodeStr[i] = "BREAK"
|
from django import forms
from django.forms import modelformset_factory
from . import models
class NewTicketForm(forms.ModelForm):
class Meta:
model = models.Ticket
fields = ['title', 'category', 'text']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        for field in self.fields:
            self.fields[field].widget.attrs.update({'class': 'form-control'})
self.fields['title'].label = "عنوان"
self.fields['category'].label = "دسته بندی"
self.fields['text'].label = "متن مشکل"
class MessageForm(forms.ModelForm):
class Meta:
model = models.Messages
fields = ['body']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['body'].widget.attrs.update({'class': 'form-control', 'rows':5})
self.fields['body'].label = "پیام"
EditTicketFormSet = modelformset_factory(models.Ticket, fields=('close',), extra=0)
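# Example (a sketch, assuming a view context): the formset renders one 'close'
# field per existing ticket and, with extra=0, no blank extras:
# formset = EditTicketFormSet(queryset=models.Ticket.objects.all())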
|
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super(DecoderRNN, self).__init__()
        # Embedding layer that turns words into a vector of a specified size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
# Setting dropout
dropout = 0 if num_layers == 1 else 0.2
# LSTM network
self.lstm = nn.LSTM(input_size=embed_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True, dropout=dropout)
# FC layer (hidden_size -> vocab_size)
self.hid2pred = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
# create embedded word vectors for each word in a sentence
embeddings = self.word_embeddings(captions[:, :-1])
# Concatenate the features and caption inputs
inputs = torch.cat((features.unsqueeze(1), embeddings), 1)
        # Run the inputs (captions + CNN embedding) through the LSTM
lstm_out, _ = self.lstm(inputs, None)
# Use linear layer to "reshape" dims to vocab_size and make preds
out = self.hid2pred(lstm_out)
return out
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
words = []
embeddings = inputs
hidden_state = None
idx = 0
while (idx != 1) and len(words) < max_len:
lstm_out, hidden_state = self.lstm(embeddings, hidden_state)
out = self.hid2pred(lstm_out)
_, idx = torch.topk(out.squeeze(), k=1)
words.append(idx.cpu().item())
embeddings = self.word_embeddings(idx).unsqueeze(1)
return words
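if __name__ == "__main__":
    # Smoke test (a sketch: the sizes are arbitrary assumptions, and
    # constructing EncoderCNN downloads pretrained ResNet-50 weights on first use).
    encoder = EncoderCNN(embed_size=256)
    decoder = DecoderRNN(embed_size=256, hidden_size=512, vocab_size=1000)
    images = torch.randn(4, 3, 224, 224)        # a fake image batch
    captions = torch.randint(0, 1000, (4, 12))  # fake caption token ids
    features = encoder(images)                  # -> (4, 256)
    outputs = decoder(features, captions)       # -> (4, 12, 1000)
    print(features.shape, outputs.shape)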
|
# Generated by Django 3.2.3 on 2021-06-10 05:47
from django.db import migrations, models
import django.db.models.deletion
import pizza_app.models
class Migration(migrations.Migration):
dependencies = [
('pizza_app', '0004_auto_20210610_0105'),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
],
),
migrations.CreateModel(
name='Comuna',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comunas', to='pizza_app.city')),
],
),
migrations.AddField(
model_name='city',
name='region',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cities', to='pizza_app.region'),
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
('street', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
('number', models.IntegerField()),
('appartment', models.CharField(default=None, max_length=15)),
('floor', models.CharField(default=None, max_length=15)),
('comments', models.CharField(max_length=45, validators=[pizza_app.models.ValidarLongitudMinima])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('comuna', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='pizza_app.comuna')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_addresses', to='pizza_app.user')),
],
),
]
|
import math
import matplotlib.pyplot as pp
import sys
if (len(sys.argv) < 2):
print('no data file\n')
exit()
fp = open(sys.argv[1],'r')
line = fp.readline()
gdiff = []
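# gdiff collects per-line deltas (dx, dy, dz); when dx != 0 the line is also
# printed normalized by dx (1, dy/dx, dz/dx) alongside the raw deltas.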
values = fp.readline().strip().split(',')
xp = float(values[0])
yp = float(values[1])
zp = float(values[2])
line = fp.readline().strip()
while line:
values = line.split(',')
x = float(values[0])
y = float(values[1])
z = float(values[2])
gdiff.append([x-xp, y-yp, z-zp])
if ( (x-xp) != 0 ):
print('{:5.2f} {:5.2f} {:5.2f} {:5.2f} {:5.2f} {:5.2f}'.format(1,round((y-yp)/(x-xp),2), round((z-zp)/(x-xp),2), round(x-xp,2),round(y-yp,2),round(z-zp,2)))
elif ((x-xp)==0 and (y-yp)==0 and (z-zp)==0):
print(round(x,2),round(y,2),round(z,2))
else:
print(round(x-xp,2), round(y-yp,2), round(z-zp,2))
xp = x
yp = y
zp = z
line = fp.readline().strip()
|
"""
Copyright 2019 Sangkug Lym
Copyright 2019 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch.nn as nn
# from resnet32_flat import ResNet32
# from feature_size_cifar import *
# from scripts_util import *
LARGE = 99999999999999
PFLOPS = 1000 *1000 *1000 *1000 *1000 /2 # MUL + ADD
MFLOPS = 1000 *1000 /2 # MUL + ADD
WORD = 2
PB = 1024*1024*1024*1024*1024
MB = 1024*1024
reconf_int = 10
mini_batch = 128
epochs = 200
cifar = 50000. / mini_batch
imgnet = 1281167. / mini_batch
iters_per_epoch = cifar
bn = True
# inf_list = glob.glob(args.in_dir)
log_tot = {'coeff':[], 'train_cost':[], 'bn_cost':[], 'best_acc':[], 'inf_cost':[]}
""" Calcuate below metrics from the network architecture
# 1. training cost
# 2. inference cost
# 3. memory accesses in BN layers
# 4. activation size
# 5. model size
# 6. output channels
"""
def getTrainingCost(model, arch, gating=False, base=False, verbose=True):
fmap = cifar_feature_size[arch]
layer_size_dict = {}
module_list = [m for m in model._modules['module'].modules()][1:]
print('Calculating FLOPS')
for name, module in zip(model._modules['module']._modules, module_list):
if 'conv' in name or 'fc' in name:
size_to_add = list(module.weight.shape)
if 'fc' in name:
size_to_add.reverse()
layer_size_dict[name] = size_to_add
print(name , '-->', layer_size_dict[name])
train_cost_acc, bn_cost_acc, inf_cost_acc = 0, 0, 0
out_act, out_chs_tot, model_size = 0, 0, 0
    # Iterate the extracted sizes directly; reassigning `arch` to this dict
    # would break the `arch == 'resnet50'` checks below.
    for lyr_name, lyr in layer_size_dict.items():
out_chs = 0
if base:
dims = len(lyr)
if dims == 4:
k, c, r, s = lyr[0], lyr[1], lyr[2], lyr[3]
pad = 0 if (lyr[2] == 1) else 2
out_chs = k
if arch == 'resnet50' and lyr_name == 'conv1':
pad = 3
else:
k, c = lyr[0], lyr[1]
#print("base, {}, {}, {}, {}, {}, {}".format(lyr_name, c, k, r, s, pad))
else:
if lyr['cfg'] == None:
continue
dims = len(lyr['cfg'])
if gating:
c = lyr['cfg'][1] if (lyr['gt'][1] == None) else lyr['gt'][1]
k = lyr['cfg'][0] if (lyr['gt'][0] == None) else lyr['gt'][0]
out_chs = k
else:
c = lyr['cfg'][1]
k = lyr['cfg'][0]
out_chs = k
if dims == 4:
r, s = lyr['cfg'][2], lyr['cfg'][3]
pad = 0 if (r == 1) else 2
if arch == 'resnet50' and lyr_name == 'conv1':
pad = 3
if dims == 4:
# Inference cost = (CRS)(K)(PQ)
inf_cost = (c * r * s) * k * (fmap[lyr_name][1]**2)
# Train cost = Forward + WGRAD + DGRAD
# = (CRS)(K)(NPQ) + (CRS)(NPQ)(K) + (NHW)(KRS)(C)
# = (N x inference_cost x2) + (NHW)(KRS)(C)
train_cost = mini_batch *(2*inf_cost + ((fmap[lyr_name][0]+pad)**2) * (k * r * s) * c)
#train_cost = mini_batch *(2*inf_cost + fmap[lyr_name][0]**2 * (k * r * s) * c)
# BN cost = (3 x NKPQ) + (5 x NKPQ)
bn_cost = 8 * (mini_batch * k * (fmap[lyr_name][1]**2))
model_size += float(r*s*c*k*WORD)/MB
if bn:
model_size += float(k*WORD)/MB
else:
# Forward: NCK
inf_cost = k * c
train_cost = k * c * mini_batch * 3
bn_cost = 0
model_size += float(c*k*WORD)/MB
model_size += float(k*WORD)/MB
if 'fc' not in lyr_name:
out_act += out_chs * (fmap[lyr_name][1]**2)
out_chs_tot += out_chs
if verbose:
if 'conv' in lyr_name:
print_name = lyr_name.split('conv')[1]
else:
print_name = lyr_name
print("{}, {}, {}, {}".format(print_name, train_cost, inf_cost, out_chs))
inf_cost_acc += inf_cost
train_cost_acc += train_cost
bn_cost_acc += bn_cost
#print("{}, {}, {}, {}, {}, {}, {}".format(lyr_name, c, k, r, s, pad, inf_cost))
train_cost_acc *= iters_per_epoch
bn_cost_acc *= iters_per_epoch
if verbose:
print("===================")
return train_cost_acc, bn_cost_acc, inf_cost_acc, out_act, out_chs_tot, model_size
# model = ResNet32()
# print(getTrainingCost(model, 'resnet32_flat', base=True))
# # Training iterations before compression
# train_cost_base, bn_cost_base, inf_cost_base, out_act_base, out_chs_base, model_size_base = getTrainingCost(model, 'resnet32_flat', base=True)
#feature_size_cifar.py
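# Each map below pairs a conv layer name with its (input, output) spatial
# feature-map size; e.g. (32, 16) marks a stride-2 layer taking a 32x32 input
# down to a 16x16 output.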
alexnet = {}
alexnet['conv1'] = (32, 32)
alexnet['conv2'] = (8, 8)
alexnet['conv3'], alexnet['conv4'], alexnet['conv5'] = (4, 4), (4, 4), (4, 4)
vgg8 = {}
vgg8['conv1'] = (32, 32)
vgg8['conv2'] = (16, 16)
vgg8['conv3'] = (8, 8)
vgg8['conv4'] = (4, 4)
vgg8['conv5'] = (2, 2)
vgg11 = {}
vgg11['conv1'] = (32, 32)
vgg11['conv2'] = (16, 16)
vgg11['conv3'], vgg11['conv4'] = (8, 8), (8, 8)
vgg11['conv5'], vgg11['conv6'] = (4, 4), (4, 4)
vgg11['conv7'], vgg11['conv8'] = (2, 2), (2, 2)
vgg13 = {}
vgg13['conv1'], vgg13['conv2'] = (32, 32), (32, 32)
vgg13['conv3'], vgg13['conv4'] = (16, 16), (16, 16)
vgg13['conv5'], vgg13['conv6'] = (8, 8), (8, 8)
vgg13['conv7'], vgg13['conv8'] = (4, 4), (4, 4)
vgg13['conv9'], vgg13['conv10'] = (2, 2), (2, 2)
# ResNet20
resnet20 = dict.fromkeys(['conv'+str(i) for i in range(1,22)])
resnet20.update(dict.fromkeys(['conv'+str(i) for i in range(1,8)], (32,32)))
resnet20.update(dict.fromkeys(['conv8', 'conv10'], (32,16)))
resnet20['conv9'] = (16,16)
resnet20.update(dict.fromkeys(['conv'+str(i) for i in range(11,15)], (16,16)))
resnet20.update(dict.fromkeys(['conv15', 'conv17'], (16,8)))
resnet20['conv16'] = (8,8)
resnet20.update(dict.fromkeys(['conv'+str(i) for i in range(18,22)], (8,8)))
# ResNet32
resnet32 = dict.fromkeys(['conv'+str(i) for i in range(1,34)])
resnet32.update(dict.fromkeys(['conv'+str(i) for i in range(1,12)], (32,32)))
resnet32.update(dict.fromkeys(['conv12', 'conv14'], (32,16)))
resnet32['conv13'] = (16,16)
resnet32.update(dict.fromkeys(['conv'+str(i) for i in range(15,23)], (16,16)))
resnet32.update(dict.fromkeys(['conv23', 'conv25'], (16,8)))
resnet32['conv24'] = (8,8)
resnet32.update(dict.fromkeys(['conv'+str(i) for i in range(26,34)], (8,8)))
# ResNet32_BT
resnet32_bt = dict.fromkeys(['conv'+str(i) for i in range(1,50)])
resnet32_bt.update(dict.fromkeys(['conv'+str(i) for i in range(1,19)], (32,32)))
resnet32_bt.update(dict.fromkeys(['conv19', 'conv21'], (32,16)))
resnet32_bt['conv20'] = (16,16)
resnet32_bt.update(dict.fromkeys(['conv'+str(i) for i in range(22,35)], (16,16)))
resnet32_bt.update(dict.fromkeys(['conv35', 'conv37'], (16,8)))
resnet32_bt['conv36'] = (8,8)
resnet32_bt.update(dict.fromkeys(['conv'+str(i) for i in range(38,50)], (8,8)))
# ResNet50_BT
resnet50_bt = dict.fromkeys(['conv'+str(i) for i in range(1,77)])
resnet50_bt.update(dict.fromkeys(['conv'+str(i) for i in range(1,28)], (32,32)))
resnet50_bt.update(dict.fromkeys(['conv28', 'conv30'], (32,16)))
resnet50_bt['conv29'] = (16,16)
resnet50_bt.update(dict.fromkeys(['conv'+str(i) for i in range(31,53)], (16,16)))
resnet50_bt.update(dict.fromkeys(['conv53', 'conv55'], (16,8)))
resnet50_bt['conv54'] = (8,8)
resnet50_bt.update(dict.fromkeys(['conv'+str(i) for i in range(56,77)], (8,8)))
# ResNet56_BT
resnet56_bt = dict.fromkeys(['conv'+str(i) for i in range(1,86)])
resnet56_bt.update(dict.fromkeys(['conv'+str(i) for i in range(1,31)], (32,32)))
resnet56_bt.update(dict.fromkeys(['conv31', 'conv33'], (32,16)))
resnet56_bt['conv32'] = (16,16)
resnet56_bt.update(dict.fromkeys(['conv'+str(i) for i in range(34,59)], (16,16)))
resnet56_bt.update(dict.fromkeys(['conv59', 'conv61'], (16,8)))
resnet56_bt['conv60'] = (8,8)
resnet56_bt.update(dict.fromkeys(['conv'+str(i) for i in range(62,86)], (8,8)))
######### ImageNet data #########
# ResNet50
resnet50 = dict.fromkeys(['conv'+str(i) for i in range(1,54)])
resnet50['conv1'] = (224,112)
resnet50.update(dict.fromkeys(['conv'+str(i) for i in range(2,13)], (56,56)))
resnet50.update(dict.fromkeys(['conv13', 'conv15'], (56,28)))
resnet50['conv14'] = (28,28)
resnet50.update(dict.fromkeys(['conv'+str(i) for i in range(16,26)], (28,28)))
resnet50.update(dict.fromkeys(['conv26', 'conv28'], (28,14)))
resnet50['conv27'] = (14,14)
resnet50.update(dict.fromkeys(['conv'+str(i) for i in range(29,45)], (14,14)))
resnet50.update(dict.fromkeys(['conv45', 'conv47'], (14,7)))
resnet50['conv46'] = (7,7)
resnet50.update(dict.fromkeys(['conv'+str(i) for i in range(48,54)], (7,7)))
# MobileNet (224)
mobilenet = dict.fromkeys(['conv'+str(i) for i in range(1, 28)])
mobilenet['conv1'] = (224,112)
mobilenet.update(dict.fromkeys(['conv'+str(i) for i in range(2,4)], (112,112)))
mobilenet['conv4'] = (112,56)
mobilenet.update(dict.fromkeys(['conv'+str(i) for i in range(5,8)], (56,56)))
mobilenet['conv8'] = (56,28)
mobilenet.update(dict.fromkeys(['conv'+str(i) for i in range(9,12)], (28,28)))
mobilenet['conv12'] = (28,14)
mobilenet.update(dict.fromkeys(['conv'+str(i) for i in range(13,24)], (14,14)))
mobilenet['conv24'] = (14,7)
mobilenet.update(dict.fromkeys(['conv'+str(i) for i in range(25,28)], (7,7)))
vgg16 = {}
vgg16['conv1'], vgg16['conv2'] = (224, 224), (224, 224)
vgg16['conv3'], vgg16['conv4'] = (112, 112), (112, 112)
vgg16['conv5'], vgg16['conv6'], vgg16['conv7'] = (56, 56), (56, 56), (56, 56)
vgg16['conv8'], vgg16['conv9'], vgg16['conv10'] = (28, 28), (28, 28), (28, 28)
vgg16['conv11'], vgg16['conv12'], vgg16['conv13'] = (14, 14), (14, 14), (14, 14)
cifar_feature_size = {
'alexnet' :alexnet,
'vgg8' :vgg8,
'vgg8_bn_flat' :vgg8,
'vgg11' :vgg11,
'vgg11_bn_flat' :vgg11,
'vgg13' :vgg13,
'vgg13_bn_flat' :vgg13,
'resnet20_flat' :resnet20,
'resnet32_flat' :resnet32,
'resnet32_bt_flat' :resnet32_bt,
'resnet32_bt_flat_temp' :resnet32_bt,
'resnet50_bt_flat' :resnet50_bt,
'resnet56_bt_flat' :resnet56_bt,
}
imagenet_feature_size = {
'resnet50' :resnet50,
'resnet50_flat' :resnet50,
'resnet50_flat_01' :resnet50,
'mobilenet' :mobilenet,
'vgg16_flat' :vgg16,
}
#scripts_util.py
# Base architecture
alexnet = {}
alexnet['conv1'] = [64,3,11,11]
alexnet['conv2'] = [192,64,5,5]
alexnet['conv3'] = [384,192,3,3]
alexnet['conv4'] = [256,384,3,3]
alexnet['conv5'] = [256,256,3,3]
alexnet['fc'] = [100,256]
vgg8 = {}
vgg8['conv1'] = [64,3,3,3]
vgg8['conv2'] = [128,64,3,3]
vgg8['conv3'] = [256,128,3,3]
vgg8['conv4'] = [512,256,3,3]
vgg8['conv5'] = [512,512,3,3]
vgg8['fc'] = [100,512]
vgg11 = {}
vgg11['conv1'] = [64,3,3,3]
vgg11['conv2'] = [128,64,3,3]
vgg11['conv3'] = [256,128,3,3]
vgg11['conv4'] = [256,256,3,3]
vgg11['conv5'] = [512,256,3,3]
vgg11['conv6'] = [512,512,3,3]
vgg11['conv7'] = [512,512,3,3]
vgg11['conv8'] = [512,512,3,3]
vgg11['fc'] = [100,512]
vgg13 = {}
vgg13['conv1'] = [64,3,3,3]
vgg13['conv2'] = [64,64,3,3]
vgg13['conv3'] = [128,64,3,3]
vgg13['conv4'] = [128,128,3,3]
vgg13['conv5'] = [256,128,3,3]
vgg13['conv6'] = [256,256,3,3]
vgg13['conv7'] = [512,256,3,3]
vgg13['conv8'] = [512,512,3,3]
vgg13['conv9'] = [512,512,3,3]
vgg13['conv10'] = [512,512,3,3]
vgg13['fc'] = [100,512]
resnet20 = {
'conv1':[16,3,3,3], 'conv2':[16,16,3,3], 'conv3':[16,16,3,3], 'conv4':[16,16,3,3],
'conv5':[16,16,3,3], 'conv6':[16,16,3,3], 'conv7':[16,16,3,3], 'conv8':[32,16,3,3],
'conv9':[32,32,3,3], 'conv10':[32,16,3,3], 'conv11':[32,32,3,3], 'conv12':[32,32,3,3],
'conv13':[32,32,1,1], 'conv14':[32,32,3,3], 'conv15':[64,32,3,3], 'conv16':[64,64,3,3],
'conv17':[64,32,1,1], 'conv18':[64,64,3,3], 'conv19':[64,64,3,3], 'conv20':[64,64,3,3],
'conv21':[64,64,3,3], 'fc':[100,64]
}
resnet32 = {
'conv1':[16,3,3,3], 'conv2':[16,16,3,3], 'conv3':[16,16,3,3], 'conv4':[16,16,3,3],
'conv5':[16,16,3,3], 'conv6':[16,16,3,3], 'conv7':[16,16,3,3], 'conv8':[16,16,3,3],
'conv9':[16,16,3,3], 'conv10':[16,16,3,3], 'conv11':[16,16,3,3], 'conv12':[32,16,3,3],
'conv13':[32,32,3,3], 'conv14':[32,16,1,1], 'conv15':[32,32,3,3], 'conv16':[32,32,3,3],
'conv17':[32,32,3,3], 'conv18':[32,32,3,3], 'conv19':[32,32,3,3], 'conv20':[32,32,3,3],
'conv21':[32,32,3,3], 'conv22':[32,32,3,3], 'conv23':[64,32,3,3], 'conv24':[64,64,3,3],
'conv25':[64,32,1,1], 'conv26':[64,64,3,3], 'conv27':[64,64,3,3], 'conv28':[64,64,3,3],
'conv29':[64,64,3,3], 'conv30':[64,64,3,3], 'conv31':[64,64,3,3], 'conv32':[64,64,3,3],
'conv33':[64,64,3,3], 'fc':[100,64]
}
resnet32_bt = {
'conv1':[16,3,3,3],
'conv2':[16,16,1,1], 'conv3':[16,16,3,3], 'conv4':[64,16,1,1], 'conv5':[64,16,1,1],
'conv6':[16,64,1,1], 'conv7':[16,16,3,3], 'conv8':[64,16,1,1],
'conv9':[16,64,1,1], 'conv10':[16,16,3,3], 'conv11':[64,16,1,1],
'conv12':[16,64,1,1], 'conv13':[16,16,3,3], 'conv14':[64,16,1,1],
'conv15':[16,64,1,1], 'conv16':[16,16,3,3], 'conv17':[64,16,1,1],
'conv18':[32,64,1,1], 'conv19':[32,32,3,3], 'conv20':[128,32,1,1], 'conv21':[128,64,1,1],
'conv22':[32,128,1,1], 'conv23':[32,32,3,3], 'conv24':[128,32,1,1],
'conv25':[32,128,1,1], 'conv26':[32,32,3,3], 'conv27':[128,32,1,1],
'conv28':[32,128,1,1], 'conv29':[32,32,3,3], 'conv30':[128,32,1,1],
'conv31':[32,128,1,1], 'conv32':[32,32,3,3], 'conv33':[128,32,1,1],
'conv34':[64,128,1,1], 'conv35':[64,64,3,3], 'conv36':[256,64,1,1], 'conv37':[256,128,1,1],
'conv38':[64,256,1,1], 'conv39':[64,64,3,3], 'conv40':[256,64,1,1],
'conv41':[64,256,1,1], 'conv42':[64,64,3,3], 'conv43':[256,64,1,1],
'conv44':[64,256,1,1], 'conv45':[64,64,3,3], 'conv46':[256,64,1,1],
'conv47':[64,256,1,1], 'conv48':[64,64,3,3], 'conv49':[256,64,1,1], 'fc':[100,256]
}
resnet50_bt = {
'conv1':[16,3,3,3],
'conv2':[16,16,1,1], 'conv3':[16,16,3,3], 'conv4':[64,16,1,1], 'conv5':[64,16,1,1],
'conv6':[16,64,1,1], 'conv7':[16,16,3,3], 'conv8':[64,16,1,1],
'conv9':[16,64,1,1], 'conv10':[16,16,3,3], 'conv11':[64,16,1,1],
'conv12':[16,64,1,1], 'conv13':[16,16,3,3], 'conv14':[64,16,1,1],
'conv15':[16,64,1,1], 'conv16':[16,16,3,3], 'conv17':[64,16,1,1],
'conv18':[16,64,1,1], 'conv19':[16,16,3,3], 'conv20':[64,16,1,1],
'conv21':[16,64,1,1], 'conv22':[16,16,3,3], 'conv23':[64,16,1,1],
'conv24':[16,64,1,1], 'conv25':[16,16,3,3], 'conv26':[64,16,1,1],
'conv27':[32,64,1,1], 'conv28':[32,32,3,3], 'conv29':[128,32,1,1], 'conv30':[128,64,1,1],
'conv31':[32,128,1,1], 'conv32':[32,32,3,3], 'conv33':[128,32,1,1],
'conv34':[32,128,1,1], 'conv35':[32,32,3,3], 'conv36':[128,32,1,1],
'conv37':[32,128,1,1], 'conv38':[32,32,3,3], 'conv39':[128,32,1,1],
'conv40':[32,128,1,1], 'conv41':[32,32,3,3], 'conv42':[128,32,1,1],
'conv43':[32,128,1,1], 'conv44':[32,32,3,3], 'conv45':[128,32,1,1],
'conv46':[32,128,1,1], 'conv47':[32,32,3,3], 'conv48':[128,32,1,1],
'conv49':[32,128,1,1], 'conv50':[32,32,3,3], 'conv51':[128,32,1,1],
'conv52':[64,128,1,1], 'conv53':[64,64,3,3], 'conv54':[256,64,1,1], 'conv55':[256,128,1,1],
'conv56':[64,256,1,1], 'conv57':[64,64,3,3], 'conv58':[256,64,1,1],
'conv59':[64,256,1,1], 'conv60':[64,64,3,3], 'conv61':[256,64,1,1],
'conv62':[64,256,1,1], 'conv63':[64,64,3,3], 'conv64':[256,64,1,1],
'conv65':[64,256,1,1], 'conv66':[64,64,3,3], 'conv67':[256,64,1,1],
'conv68':[64,256,1,1], 'conv69':[64,64,3,3], 'conv70':[256,64,1,1],
'conv71':[64,256,1,1], 'conv72':[64,64,3,3], 'conv73':[256,64,1,1],
'conv74':[64,256,1,1], 'conv75':[64,64,3,3], 'conv76':[256,64,1,1], 'fc':[100,256]
}
############## ImageNet ###############
resnet50 = {
'conv1':[64,3,7,7],
'conv2':[64,64,1,1], 'conv3':[64,64,3,3], 'conv4':[256,64,1,1], 'conv5':[256,64,1,1],
'conv6':[64,256,1,1], 'conv7':[64,64,3,3], 'conv8':[256,64,1,1],
'conv9':[64,256,1,1], 'conv10':[64,64,3,3], 'conv11':[256,64,1,1],
'conv12':[128,256,1,1], 'conv13':[128,128,3,3], 'conv14':[512,128,1,1], 'conv15':[512,256,1,1],
'conv16':[128,512,1,1], 'conv17':[128,128,3,3], 'conv18':[512,128,1,1],
'conv19':[128,512,1,1], 'conv20':[128,128,3,3], 'conv21':[512,128,1,1],
'conv22':[128,512,1,1], 'conv23':[128,128,3,3], 'conv24':[512,128,1,1],
'conv25':[256,512,1,1], 'conv26':[256,256,3,3], 'conv27':[1024,256,1,1], 'conv28':[1024,512,1,1],
'conv29':[256,1024,1,1], 'conv30':[256,256,3,3], 'conv31':[1024,256,1,1],
'conv32':[256,1024,1,1], 'conv33':[256,256,3,3], 'conv34':[1024,256,1,1],
'conv35':[256,1024,1,1], 'conv36':[256,256,3,3], 'conv37':[1024,256,1,1],
'conv38':[256,1024,1,1], 'conv39':[256,256,3,3], 'conv40':[1024,256,1,1],
'conv41':[256,1024,1,1], 'conv42':[256,256,3,3], 'conv43':[1024,256,1,1],
'conv44':[512,1024,1,1], 'conv45':[512,512,3,3], 'conv46':[2048,512,1,1], 'conv47':[2048,1024,1,1],
'conv48':[512,2048,1,1], 'conv49':[512,512,3,3], 'conv50':[2048,512,1,1],
'conv51':[512,2048,1,1], 'conv52':[512,512,3,3], 'conv53':[2048,512,1,1],
'fc':[1000,2048]
}
mobilenet = {
'conv1':[32, 3, 3, 3], 'conv2':[32, 1, 3, 3], 'conv3':[64, 32, 1, 1],
'conv4':[64, 1, 3, 3], 'conv5':[128, 64, 1, 1], 'conv6':[128, 1, 3, 3],
'conv7':[128, 128, 1, 1], 'conv8':[128, 1, 3, 3], 'conv9':[256, 128, 1, 1],
'conv10':[256, 1, 3, 3], 'conv11':[256, 256, 1, 1], 'conv12':[256, 1, 3, 3],
'conv13':[512, 256, 1, 1], 'conv14':[512, 1, 3, 3], 'conv15':[512, 512, 1, 1],
'conv16':[512, 1, 3, 3], 'conv17':[512, 512, 1, 1], 'conv18':[512, 1, 3, 3],
'conv19':[512, 512, 1, 1], 'conv20':[512, 1, 3, 3], 'conv21':[512, 512, 1, 1],
'conv22':[512, 1, 3, 3], 'conv23':[512, 512, 1, 1], 'conv24':[512, 1, 3, 3],
'conv25':[1024, 512, 1, 1], 'conv26':[1024, 1, 3, 3], 'conv27':[1024, 1024, 1, 1],
'fc':[1000, 1024],
}
base_archs = {
'alexnet' :alexnet,
'vgg8' :vgg8,
'vgg11' :vgg11,
'vgg13' :vgg13,
'resnet20_flat' :resnet20,
'resnet32_flat' :resnet32,
'resnet32_bt_flat' :resnet32_bt,
'resnet32_bt_flat_temp' :resnet32_bt,
'resnet50_bt_flat' :resnet50_bt,
'resnet50' :resnet50,
'mobilenet' :mobilenet,
}
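# Hypothetical helper (not part of the original tables): estimate the raw
# weight count of an architecture from the [out_ch, in_ch, kh, kw] shapes
# above (the 2-element 'fc' shapes work with the same product).
def count_weights(arch):
    total = 0
    for shape in arch.values():
        n = 1
        for dim in shape:
            n *= dim
        total += n
    return total

# e.g. count_weights(base_archs['resnet20_flat'])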
|
import numpy as np
# 1. Training data
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15,16,17,18,19,20])
y_test = np.array([11,12,13,14,15,16,17,18,19,20])
x3= np.array([101, 102, 103, 104, 105, 106])
x4= np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])
x5= np.array(range(30, 50))
# Columns take priority; rows are ignored
# 2. Build the model
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(30000, input_shape=(1,), activation='relu'))
#model.add(Dense(12))
model.add(Dense(4))
model.add(Dense(40))
model.add(Dense(666))
model.add(Dense(55))
model.add(Dense(60))
model.add(Dense(55))
model.add(Dense(10000))
model.add(Dense(55))
model.add(Dense(80))
model.add(Dense(10000))
model.add(Dense(5))
model.add(Dense(1))
#model.summary()
# model.summary()  # params: with Dense(5) -> Dense(3), (5 input weights + 1 bias) x 3
# 3. Train
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
#model.fit(x, y, epochs=100, batch_size=11)
model.fit(x_train, y_train, epochs=220, batch_size=1)
# 4. Evaluate and predict
# loss, acc = model.evaluate(x_test, y_test, batch_size=3)
# print('acc:', acc)
print(x5)
y_predict = model.predict(x_test)
print(y_predict)
# Compute RMSE
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print("RMSE: ", RMSE(y_test, y_predict))
# Compute R2
from sklearn.metrics import r2_score
r2_y_predict = r2_score(y_test, y_predict)
print("R2: ", r2_y_predict)
|
### Summary
# This module grabs an image from the screen and funnels
# it to the processing module
# -------------------------------------------------
from mss import mss # python3 -m pip install -U --user mss
import cv2 # pip install opencv-python
from PIL import Image # python3 -m pip install Pillow
import numpy as np
import time
from modules import module_process
from modules.config import *
import requests
def returnFrame(settings: dict, sct) -> np.ndarray:
    # Take an image of the screen region described by settings
sct_img = sct.grab(settings)
img = Image.frombytes('RGB', (sct_img.size.width, sct_img.size.height), sct_img.rgb)
frame = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return frame
def grabScreen(xResolution: int, yResolution: int):
keep_grabbing = False
settings = {"top": int(0.08 * yResolution) + adjust_y, "left":int(xResolution * 0.18) + adjust_x, "width":int(xResolution * 0.7), "height":int(0.25 * yResolution), "mon": monitor_number}
first_time = True
x_crop = int(xResolution * 0.18)
y_crop = int(yResolution * 0.08)
with mss() as sct:
while True:
frame = returnFrame(settings, sct)
try:
                # Crop image to fit only "voting ended" and "who's the imposter?"
cropped_frame = frame[10:(y_crop + y_extend_crop), int(x_crop/2 - x_extend_crop + 80):-int(x_crop/2 + x_extend_crop)].copy()
if debug_mode:
cv2.imshow('Test', np.array(frame)) #output screen, for testing only
cv2.imshow('Test Cropped', np.array(cropped_frame)) #output screen, for testing only
if cv2.waitKey(25) & 0xFF == ord('q'): #Press Q on debug windows to exit
cv2.destroyAllWindows()
break
except Exception as e:
print(f"{e}\n[*] Looks like your x_extend_crop or y_extend_crop values are way too high")
exit()
if first_time:
print("[*] Screen grab ready.\n")
first_time = False
#Process image
found = module_process.processDiscussion(cropped_frame)
            if found == 1: # If discussion or voting end found, you don't need to process ending
module_process.processEnding(frame)
elif found == 3:
start_time = time.time()
while True:
see_if_ended = returnFrame(settings, sct)
ended = module_process.processEnding(see_if_ended)
if ended:
break
elif time.time() - start_time >= 6 + delay_voting:
requests.get(f"http://{address}:{port}/mute")
break
else: pass
if __name__ == "__main__":
print("[*] Please run start.py: ")
exit()
|
#
# Solver settings object
#################################
import numpy as np
from .. DREAMException import DREAMException
from . ToleranceSettings import ToleranceSettings
from . Preconditioner import Preconditioner
LINEAR_IMPLICIT = 1
NONLINEAR = 2
LINEAR_SOLVER_LU = 1
LINEAR_SOLVER_MUMPS = 2
LINEAR_SOLVER_MKL = 3
LINEAR_SOLVER_SUPERLU = 4
LINEAR_SOLVER_GMRES = 5
class Solver:
def __init__(self, ttype=LINEAR_IMPLICIT, linsolv=LINEAR_SOLVER_LU, maxiter=100, verbose=False):
"""
Constructor.
"""
self.setType(ttype)
self.debug_printmatrixinfo = False
self.debug_printjacobianinfo = False
self.debug_savejacobian = False
self.debug_savesolution = False
self.debug_savematrix = False
self.debug_savenumericaljacobian = False
self.debug_saverhs = False
self.debug_saveresidual = False
self.debug_savesystem = False
self.debug_timestep = 0
self.debug_iteration = 1
self.backupsolver = None
self.tolerance = ToleranceSettings()
self.preconditioner = Preconditioner()
self.setOption(linsolv=linsolv, maxiter=maxiter, verbose=verbose)
def setDebug(self, printmatrixinfo=False, printjacobianinfo=False, savejacobian=False,
savesolution=False, savematrix=False, savenumericaljacobian=False, saverhs=False,
saveresidual=False, savesystem=False, timestep=0, iteration=1):
"""
Enable output of debug information.
:param int timestep: Index of time step to generate debug info for. If ``0``, debug info is generated in every (iteration of every) time step.
        :param bool savesystem: Save full equation system as a DREAMOutput file in the most recent iteration/time step.
LINEAR SOLVER
:param bool printmatrixinfo: If ``True``, calls ``PrintInfo()`` on the linear operator matrix.
:param bool savematrix: If ``True``, saves the linear operator matrix using a PETSc viewer.
:param bool saverhs: If ``True``, saves the right-hand side vector to a ``.mat`` file.
NON-LINEAR SOLVER
:param bool printjacobianinfo: If ``True``, calls ``PrintInfo()`` on the jacobian matrix.
:param bool savejacobian: If ``True``, saves the jacobian matrix using a PETSc viewer.
:param bool savesolution: If ``True``, saves the solution vector to a ``.mat`` file.
:param bool savenumericaljacobian: If ``True``, evaluates the jacobian matrix numerically and saves it using a PETSc viewer.
:param bool saveresidual: If ``True``, saves the residual vector to a ``.mat`` file.
:param int iteration: Index of iteration to save debug info for. If ``0``, saves in all iterations. If ``timestep`` is ``0``, this parameter is always ignored.
"""
self.debug_printmatrixinfo = printmatrixinfo
self.debug_printjacobianinfo = printjacobianinfo
self.debug_savejacobian = savejacobian
self.debug_savesolution = savesolution
self.debug_savematrix = savematrix
self.debug_savenumericaljacobian = savenumericaljacobian
self.debug_saverhs = saverhs
self.debug_saveresidual = saveresidual
self.debug_savesystem = savesystem
self.debug_timestep = timestep
self.debug_iteration = iteration
def setBackupSolver(self, backup):
"""
Set the backup linear solver to use in case the main linear
solver fails. Set to ``None`` to disable (default).
"""
self.backupsolver = backup
def setLinearSolver(self, linsolv):
"""
Set the linear solver to use.
"""
self.linsolv = linsolv
def setMaxIterations(self, maxiter):
"""
Set maximum number of allowed nonlinear iterations.
"""
self.setOption(maxiter=maxiter)
def setTolerance(self, reltol):
"""
Set relative tolerance for nonlinear solve.
"""
print("WARNING: The 'Solver.setTolerance()' method is deprecated. Please use 'Solver.tolerance.set(reltol=...)' instead.")
self.tolerance.set(reltol=reltol)
def setVerbose(self, verbose):
"""
If 'True', generates excessive output during nonlinear solve.
"""
self.setOption(verbose=verbose)
def setOption(self, linsolv=None, maxiter=None, verbose=None):
"""
Sets a solver option.
"""
if linsolv is not None:
self.linsolv = linsolv
if maxiter is not None:
self.maxiter = maxiter
if verbose is not None:
self.verbose = verbose
self.verifySettings()
def setType(self, ttype):
"""
Specifies which type of solver to use (either ``LINEAR_IMPLICIT``
or ``NONLINEAR``).
"""
if ttype == LINEAR_IMPLICIT:
self.type = ttype
elif ttype == NONLINEAR:
self.type = ttype
else:
raise DREAMException("Solver: Unrecognized solver type: {}.".format(ttype))
def fromdict(self, data):
"""
Load settings from the given dictionary.
"""
def scal(v):
if type(v) == np.ndarray: return v[0]
else: return v
self.type = int(scal(data['type']))
self.linsolv = int(data['linsolv'])
self.maxiter = int(data['maxiter'])
self.verbose = bool(data['verbose'])
if 'tolerance' in data:
self.tolerance.fromdict(data['tolerance'])
if 'preconditioner' in data:
self.preconditioner.fromdict(data['preconditioner'])
if 'backupsolver' in data:
self.backupsolver = int(data['backupsolver'])
if 'debug' in data:
flags = ['printmatrixinfo', 'printjacobianinfo', 'savejacobian', 'savesolution', 'savematrix', 'savenumericaljacobian', 'saverhs', 'saveresidual', 'savesystem']
for f in flags:
if f in data['debug']:
setattr(self, 'debug_{}'.format(f), bool(data['debug'][f]))
if 'timestep' in data['debug']:
self.debug_timestep = int(data['debug']['timestep'])
if 'iteration' in data['debug']:
self.debug_iteration = int(data['debug']['iteration'])
self.verifySettings()
def todict(self, verify=True):
"""
Returns a Python dictionary containing all settings of
this Solver object.
"""
if verify:
self.verifySettings()
data = {
'type': self.type,
'linsolv': self.linsolv,
'maxiter': self.maxiter,
'verbose': self.verbose
}
data['preconditioner'] = self.preconditioner.todict()
if self.type == LINEAR_IMPLICIT:
data['debug'] = {
'printmatrixinfo': self.debug_printmatrixinfo,
'savematrix': self.debug_savematrix,
'saverhs': self.debug_saverhs,
'savesystem': self.debug_savesystem,
'timestep': self.debug_timestep
}
elif self.type == NONLINEAR:
data['tolerance'] = self.tolerance.todict()
data['debug'] = {
'printjacobianinfo': self.debug_printjacobianinfo,
'savejacobian': self.debug_savejacobian,
'savesolution': self.debug_savesolution,
'savenumericaljacobian': self.debug_savenumericaljacobian,
'saveresidual': self.debug_saveresidual,
'savesystem': self.debug_savesystem,
'timestep': self.debug_timestep,
'iteration': self.debug_iteration
}
if self.backupsolver is not None:
data['backupsolver'] = self.backupsolver
return data
def verifySettings(self):
"""
Verifies that the settings of this object are consistent.
"""
if self.type == LINEAR_IMPLICIT:
self.verifyLinearSolverSettings()
if type(self.debug_printmatrixinfo) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_printmatrixinfo': {}. Expected boolean.".format(type(self.debug_printmatrixinfo)))
elif type(self.debug_savematrix) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_savematrix': {}. Expected boolean.".format(type(self.debug_savematrix)))
elif type(self.debug_saverhs) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_saverhs': {}. Expected boolean.".format(type(self.debug_saverhs)))
elif type(self.debug_timestep) != int:
raise DREAMException("Solver: Invalid type of parameter 'debug_timestep': {}. Expected integer.".format(type(self.debug_timestep)))
elif self.type == NONLINEAR:
if type(self.maxiter) != int:
raise DREAMException("Solver: Invalid type of parameter 'maxiter': {}. Expected integer.".format(type(self.maxiter)))
elif type(self.verbose) != bool:
raise DREAMException("Solver: Invalid type of parameter 'verbose': {}. Expected boolean.".format(type(self.verbose)))
if type(self.debug_printjacobianinfo) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_printjacobianinfo': {}. Expected boolean.".format(type(self.debug_printjacobianinfo)))
elif type(self.debug_savejacobian) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_savejacobian': {}. Expected boolean.".format(type(self.debug_savejacobian)))
elif type(self.debug_savesolution) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_savesolution': {}. Expected boolean.".format(type(self.debug_savesolution)))
elif type(self.debug_saverhs) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_saverhs': {}. Expected boolean.".format(type(self.debug_saverhs)))
elif type(self.debug_saveresidual) != bool:
raise DREAMException("Solver: Invalid type of parameter 'debug_saveresidual': {}. Expected boolean.".format(type(self.debug_saveresidual)))
elif type(self.debug_timestep) != int:
raise DREAMException("Solver: Invalid type of parameter 'debug_timestep': {}. Expected integer.".format(type(self.debug_timestep)))
elif type(self.debug_iteration) != int:
raise DREAMException("Solver: Invalid type of parameter 'debug_iteration': {}. Expected boolean.".format(type(self.debug_iteration)))
self.tolerance.verifySettings()
self.verifyLinearSolverSettings()
else:
raise DREAMException("Solver: Unrecognized solver type: {}.".format(self.type))
self.preconditioner.verifySettings()
def verifyLinearSolverSettings(self):
"""
Verifies the settings for the linear solver (which is used
by both the 'LINEAR_IMPLICIT' and 'NONLINEAR' solvers).
"""
solv = [LINEAR_SOLVER_LU, LINEAR_SOLVER_MUMPS, LINEAR_SOLVER_MKL, LINEAR_SOLVER_SUPERLU, LINEAR_SOLVER_GMRES]
if self.linsolv not in solv:
raise DREAMException("Solver: Unrecognized linear solver type: {}.".format(self.linsolv))
elif self.backupsolver is not None and self.backupsolver not in solv:
raise DREAMException("Solver: Unrecognized backup linear solver type: {}.".format(self.backupsolver))
|
import typing
from starlette.responses import HTMLResponse
from cbv import WebSocketBase
from temp_router import TempRouter
router = TempRouter()
class WebSocketTest(
WebSocketBase,
path="/ws",
router=router
):
async def on_receive(self, data: typing.Any) -> None:
await self.websocket.send_text(f"Message text was: {data}")
html = """
<!DOCTYPE html>
<html>
<head>
<title>Chat</title>
</head>
<body>
<h1>WebSocket Chat</h1>
<form action="" onsubmit="sendMessage(event)">
<input type="text" id="messageText" autocomplete="off"/>
<button>Send</button>
</form>
<ul id='messages'>
</ul>
<script>
var ws = new WebSocket("ws://localhost:8888/ws");
ws.onmessage = function(event) {
var messages = document.getElementById('messages')
var message = document.createElement('li')
var content = document.createTextNode(event.data)
message.appendChild(content)
messages.appendChild(message)
};
function sendMessage(event) {
var input = document.getElementById("messageText")
ws.send(input.value)
input.value = ''
event.preventDefault()
}
</script>
</body>
</html>
"""
@router.get("/")
async def get():
return HTMLResponse(html)
if __name__ == '__main__':
from fastapi import FastAPI
app = FastAPI()
app.include_router(router)
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=8888)
|
"""Console script to launch Colorgorical.
Colorgorical can be launched through the console as either a terminal
application or as a web application built on top of a Tornado server.
"""
import argparse
from src.makeSamples import MakeSamples
import src.server as server
desc = "Colorgorical is a color palette design assistance tools to make\
aesthetically pleasing and legible categorical color palettes for\
information visualization."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--server", action="store_true",
help="Flag marking that Colorgorical should be launched as a web server.")
parser.add_argument("--makeSamples", action="store_true",
help="Flag to create samples of 66 unique Colorgorical settings output to `samples/`.")
parser.add_argument("--port",
help="The port to start the Colorgorical server on", default=8888)
args = parser.parse_args()
if args.server:
s = server.ColorgoricalServer()
portNumber = args.port if args.port else 8888
s.start(port=portNumber)
elif args.makeSamples:
ms = MakeSamples()
    if not ms.savedResultsExist():
        print('Making palettes')
ms.make()
else:
ms.loadPalettes()
# To use Helvetica, follow the instructions below. Makes the charts look
# a _lot_ better.
# http://blog.olgabotvinnik.com/blog/2012/11/15/2012-11-15-how-to-set-helvetica-as-the-default-sans-serif-font-in/
ms.savePlots()
ms.writeTex()
else:
    print('Did you mean to run ``python run.py --server``?')
|
from django.core.exceptions import ImproperlyConfigured
def dependency_ordered(test_databases, dependencies):
"""Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
while test_databases:
changed = False
deferred = []
while test_databases:
signature, aliases = test_databases.pop()
dependencies_satisfied = True
for alias in aliases:
if alias in dependencies:
if all(a in resolved_databases for a in dependencies[alias]):
# all dependencies for this alias are satisfied
dependencies.pop(alias)
resolved_databases.add(alias)
else:
dependencies_satisfied = False
else:
resolved_databases.add(alias)
if dependencies_satisfied:
ordered_test_databases.append((signature, aliases))
changed = True
else:
deferred.append((signature, aliases))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
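# Illustrative call (hypothetical aliases): 'other' depends on 'default',
# so the 'default' entry is ordered first.
#
#   ordered = dependency_ordered(
#       [('sig_other', ['other']), ('sig_default', ['default'])],
#       {'other': ['default']},
#   )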
|
import os
import tempfile
from settings import settings
from office365.runtime.auth.client_credential import ClientCredential
from office365.sharepoint.files.file import File
root_site_url = settings.get('url')
client_credentials = ClientCredential(settings.get('client_credentials').get('client_id'),
settings.get('client_credentials').get('client_secret'))
abs_file_url = "{site_url}sites/team/Shared Documents/big_buck_bunny.mp4".format(site_url=root_site_url)
with tempfile.TemporaryDirectory() as local_path:
file_name = os.path.basename(abs_file_url)
with open(os.path.join(local_path, file_name), 'wb') as local_file:
file = File.from_url(abs_file_url).with_credentials(client_credentials).download(local_file).execute_query()
print("'{0}' file has been downloaded into {1}".format(file.serverRelativeUrl, local_file.name))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from scipy import cluster
import numpy as np
import sys
# importlib.reload(sys)
import pandas as pd
from scipy.cluster import hierarchy
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pandas.core.frame import DataFrame
# filename = "admin2.csv"
filename = sys.argv[1]
# Cut height: optional second CLI argument; defaults to 42
heights = sys.argv[2] if len(sys.argv) > 2 else "none"
if heights == "none":
    heights = 42
else:
    heights = float(heights)
# Load the data source
case_train = np.genfromtxt("{}".format(filename), delimiter=',',encoding="utf-8")
keywords = pd.read_csv("{}".format(filename), nrows=0,encoding="utf-8")
keywords = keywords.columns
keywords = list(keywords)
# Remove the first row and first column (if there is a header row)
case_train = np.delete(case_train,0,axis=0)
case_train1=np.array(case_train)
# Transpose the matrix into a
a = case_train1.T
global list3
global list4
global list5
global list6
global keywords2
list3 = []
list4 = []
list5 = []
list6 = []
keywords2 = []
for i in a:
for k in a:
c = [i,k]
        # Take the element-wise minimum of the two rows (minimum of the A/B intersection)
ik = np.min(c, axis=0)
        # Sum the compared result (frequency with which A and B co-occur)
sum_two = ik.sum()
list3.append(sum_two)
n = len(a)
# Assemble the similarity values into a similarity-coefficient matrix
data_two = [list3[i:i + n] for i in range(0, len(list3), n)]
# Build the pairwise word-frequency co-occurrence matrix
data_two = DataFrame(data_two,index=keywords,columns=keywords)
# Generate the clustering dendrogram
fig = plt.figure(figsize=(30, 25),dpi=80)
Z = hierarchy.linkage(data_two, method='ward', metric='euclidean')
hierarchy.dendrogram(Z, orientation='right',
show_leaf_counts=False,
leaf_font_size=15.,
labels=data_two.index)
# 1) Create the canvas and set its properties
# Set the cutting precision (tree-cut height)
label = cluster.hierarchy.cut_tree(Z, height=heights)
label = label.reshape(label.size, )
for u in keywords:
num = u.encode('gbk')
# num = u.encode('utf-8')
keywords2.append(num)
list6.append(keywords2)
list6.append(label)
list6 = np.array(list6)
list6 = list6.T
n =len(list6)
# Return different clustered words depending on the threshold
label2 = list(set(label))
# global list7
list7 = [[] for i in range(len(label2))]
cluster_list = [[] for i in range(len(label2))]
for i in label2:
for k in range(n):
if int(list6[:][k][1]) == int(i):
list7[label2.index(i)].append(list6[:][k])
for a in range(len(list7)):
for j in list7[a]:
cluster_list[a].append(j[0])
# num = "number".encode('utf-8')
# cluster_list = str(cluster_list)
# cluster_lists = cluster_list.encode("gbk")
print(cluster_list)
# Save the clustering dendrogram
picname = filename.replace(".csv","")
picname = picname+"_clusters.png"
plt.savefig(picname)
# plt.show()
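# Example invocation (hypothetical script name):
#   python cooccurrence_cluster.py keywords.csv 42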
|
from zope.interface import Interface
class IEscoAuction(Interface):
""" Esco Auction """
|
__author__ = 'Justin'
# DESCRIPTION:
# This function returns a max speed based on Open Street Map (OSM) naming conventions
# Each OSM edgetype has a given max speed in mph by convention
#
def getdefaultspeed(string):
unknownspeed = 30
restrictedspeed = 0.01
defaultspeeds = {
'motorway':70,
'trunk':60,
'primary':55,
'secondary':55,
'tertiary':50,
'unclassified':40,
'residential':25,
'living_street':15,
'motorway_link': 40,
'trunk_link': 35,
'primary_link': 35,
'secondary_link':25,
'tertiary_link': 20,
'service': 12,
'track':restrictedspeed,
'cycleway':restrictedspeed,
'footway':restrictedspeed,
'rail':restrictedspeed,
'steps':restrictedspeed,
}
speed = defaultspeeds.get(string,unknownspeed)
return speed
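# Examples, using the conventions above: unknown way types fall back to
# 30 mph and restricted types are effectively impassable.
#   getdefaultspeed('primary')   # -> 55
#   getdefaultspeed('footway')   # -> 0.01
#   getdefaultspeed('unmapped')  # -> 30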
|
from openpyxl import Workbook
import openpyxl as pyxl
def grade_point_from_letter(letter):
    # Map a letter grade to its grade point (only "S" is defined here)
    l_s_map = {"S": 9}
    return l_s_map.get(letter)

wb = pyxl.load_workbook("student.xlsx")
sheet = wb.active
for row in sheet.iter_rows(min_row=3, min_col=2, max_row=4, max_col=3):
    if row:
        data = [c.value for c in row]
|
# for adafruit circuit playground express
# flash w or w/o buzz morse for "lame" or "SOS"
"""
The dot duration is the basic unit of time measurement in code transmission.
The duration of a dash is three times the duration of a dot.
Each dot or dash is followed by a short silence, equal to the dot duration.
The letters of a word are separated by a space equal to three dots (one dash),
and the words are separated by a space equal to seven dots. (from Wikipedia)
some code from AdaFruit examples
updated with new library - cpx
"""
import time
from adafruit_circuitplayground.express import cpx
CODE = {'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.'
}
DOT = 0.05 # 50 ms
DASH = 3 * DOT
GAP = 2 * DOT # needed if we've already waited one dot
SPACE = 4 * DOT # would be 7 but will have already waited one DOT and one GAP
RED = (255, 0, 0)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# prep neopixels
cpx.pixels.brightness = 0.05 # control overall brightness here
# could have done it with the color settings
cpx.pixels.fill(GREEN)
time.sleep(2)
cpx.pixels.fill(BLACK)
TONE = 1047
word = "LAME" # set initial condition
color = BLUE
while True:
# toggle between words; will set to whichever button pressed most recently
if cpx.button_a:
word = 'LAME'
color = BLUE
elif cpx.button_b:
word = 'SOS'
color = RED
for letter in word:
for c in CODE[letter]:
if c == '.':
cpx.pixels.fill(color)
if not cpx.switch: # want tone if switch to right, which is 'False'
cpx.play_tone(TONE, DOT)
time.sleep(DOT)
cpx.pixels.fill(BLACK)
time.sleep(DOT)
else: # must be dash
cpx.pixels.fill(color)
if not cpx.switch:
cpx.play_tone(TONE, DASH)
time.sleep(DASH)
cpx.pixels.fill(BLACK)
time.sleep(DOT)
time.sleep(GAP) # wait additional GAP ms between characters
time.sleep(SPACE) # wait additional SPACE ms for word spacing
|
class Solution(object):
def remove_element_v1(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
if not nums:
return nums
for i in range(len(nums)-1, 0, -1):
if nums[i] == val:
print(nums)
# del nums[i]
print(f"index: {i}")
nums.pop(i)
# nums.remove(nums[i])
if nums[0] == val:
nums.pop(0)
return nums
    def remove_element_v2(self, nums, val):
        """
        Remove all occurrences of val in place. Iterate over a copy:
        calling remove() while iterating over the same list skips
        elements (e.g. [3, 3] with val=3 would leave one 3 behind).
        """
        for num in nums[:]:
            if num == val:
                nums.remove(num)
        return nums
nums = [3, 3]
val = 3
obj = Solution()
result = obj.remove_element_v2(nums, val)
print(f"result {result}")
|
import requests
from celery import chain, shared_task
from .models import Currency
@shared_task
def parse_private():
url = 'https://api.privatbank.ua/p24api/pubinfo?json&exchange&coursid=5'
response = requests.get(url)
currency = response.json()
return currency
@shared_task
def save_currency_to_model(currency):
instance = Currency()
instance.content = currency
instance.save()
@shared_task
def common_task():
chain(
parse_private.s() | save_currency_to_model.s()
)()
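# Usage sketch (assumes a configured Celery broker and worker):
#   common_task.delay()  # enqueue the fetch-and-store chain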
|
import unittest
from katas.kyu_5.airport_arrivals_departures_1 import flap_display
class FlapDisplayTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(flap_display(['CAT'], [[1, 13, 27]]), ['DOG'])
def test_equal_2(self):
self.assertEqual(
flap_display(['HELLO '], [[15, 49, 50, 48, 43, 13]]), ['WORLD!'])
def test_equal_3(self):
self.assertEqual(flap_display(['CODE'], [[20, 20, 28, 0]]), ['WARS'])
|
from fastapi_scaffolding.main import get_app
app = get_app()
def test_heartbeat(test_client) -> None:
response = test_client.get('/api/health/heartbeat')
assert response.status_code == 200
assert response.json() == {"is_alive": True}
def test_default_route(test_client) -> None:
response = test_client.get('/')
assert response.status_code == 404
|
# albus.exceptions
class AlbusError(Exception):
    def __init__(self, message, inner=None, detail=None):
        super().__init__(message)
        self.message = message
        self.inner = inner
        self.detail = detail
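# Usage sketch (hypothetical call site): wrap a lower-level error with context.
#
#   try:
#       parse(record)
#   except ValueError as exc:
#       raise AlbusError("Invalid record", inner=exc, detail={"field": "id"})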
|