text stringlengths 8 6.05M |
|---|
import sys
import os
from selenium import webdriver
# Default wait (seconds) when locating page elements.
ELEMENT_WAIT_TIMEOUT = 15


def get_config():
    """Return the parsed project configuration mapping.

    The import is deliberately local (lazy) so that importing this module
    does not require utils.config at import time.
    """
    from utils.config import Config
    return Config().configuration


# Default wait (seconds) for HTTP requests.
REQUESTS_WAIT_TIMEOUT = 15
# Base URL of the environment under test, read from the project config.
ROOT_URL = get_config()["environments"]["url"]
def get_driver():
    """Create a Chrome WebDriver using the bundled platform-specific chromedriver.

    The driver binary is expected under drivers/<sys.platform>/chromedriver.

    NOTE(review): the ``executable_path`` keyword was deprecated in Selenium 4
    (replaced by ``Service``); confirm the pinned selenium version before
    upgrading dependencies.
    """
    executable_path = os.path.join('drivers', sys.platform, 'chromedriver')
    if sys.platform == 'win32':
        # Windows driver binaries carry the .exe suffix.
        executable_path += '.exe'
    driver = webdriver.Chrome(executable_path=executable_path)
    return driver
def take_screenshot(driver, name=None):
    """Save a PNG screenshot under screenshots/<platform>/<browser>/<version>/.

    When *name* is omitted, the calling function's name is used as the file
    name (via the CPython-private sys._getframe; inspect.stack()[1][3] is the
    portable equivalent).

    NOTE(review): the 'platform' and 'version' capability keys are legacy
    (pre-W3C) names — confirm they exist for the driver/browser in use.
    """
    platform = driver.capabilities['platform']
    browser = driver.capabilities['browserName']
    browser_version = driver.capabilities['version']
    file_name = 'screenshots/{}/{}/{}/{}.png'.format(
        platform,
        browser.replace(' ', ''),
        browser_version,
        name if name else sys._getframe().f_back.f_code.co_name)  # inspect.stack()[1][3]
    # Create the target directory tree if it does not exist yet.
    os.makedirs(os.path.split(file_name)[0], exist_ok=True)
    with open(file_name, 'wb') as screenshot_file:
        screenshot_file.write(driver.get_screenshot_as_png())
|
from flask import Blueprint, jsonify, request
from videoblog import logger, docs
from videoblog.schemas import VideoSchema
from videoblog.models import Video
from flask_apispec import use_kwargs, marshal_with
from flask_jwt_extended import jwt_required, get_jwt_identity
from videoblog.base_view import BaseView
from videoblog.utils import upload_file
videos = Blueprint("videos", __name__)
class ListView(BaseView):
    """Class-based view returning the full (unfiltered) list of videos."""

    @marshal_with(VideoSchema(many=True))
    def get(self):
        """Return all videos, or a 400 JSON message on failure."""
        try:
            videos = Video.get_list()
        except Exception as e:
            # Bug fix: the original logged f"user:{user_id} ..." here, but
            # this handler never defines user_id, so the except path itself
            # raised a NameError instead of returning the 400 response.
            logger.warning(f"tutorials - read action failed with errors: {e}")
            return {"message": str(e)}, 400
        return videos
@videos.route("/tutorials", methods=["GET"])
@jwt_required
@marshal_with(VideoSchema(many=True))
def get_list():
try:
user_id = get_jwt_identity()
videos = Video.get_user_list(user_id=user_id)
except Exception as e:
logger.warning(
f"user:{user_id} tutorials - read action failed with errors: {e}"
)
return {"message": str(e)}, 400
return videos
@videos.route("/tutorials", methods=["POST"])
@jwt_required
@use_kwargs(VideoSchema)
@marshal_with(VideoSchema)
def update_list(**kwargs):
files = request.files.getlist("media[]")
if not files:
return "", 204
try:
user_id = get_jwt_identity()
new_one = Video(user_id=user_id, **kwargs)
new_one.save()
upload_file(files[0], user_id, new_one.id)
if len(files) > 1:
upload_file(files[1], user_id, new_one.id, field="cover")
except Exception as e:
logger.warning(
f"user:{user_id} tutorials - create action failed with errors: {e}"
)
return {"message": str(e)}, 400
return new_one
@videos.route("/tutorials/<int:tutorial_id>", methods=["PUT"])
@jwt_required
@use_kwargs(VideoSchema)
@marshal_with(VideoSchema)
def update_tutorial(tutorial_id, **kwargs):
try:
user_id = get_jwt_identity()
item = Video.get(tutorial_id, user_id)
item.update(**kwargs)
except Exception as e:
logger.warning(
f"user:{user_id} tutorials: {tutorial_id} - update action failed with errors: {e}"
)
return {"message": str(e)}, 400
return item
@videos.route("/tutorials/<int:tutorial_id>", methods=["DELETE"])
@jwt_required
@marshal_with(VideoSchema)
def delete_tutorial(tutorial_id):
try:
user_id = get_jwt_identity()
item = Video.get(tutorial_id, user_id)
item.delete()
except Exception as e:
logger.warning(
f"user:{user_id} tutorials: {tutorial_id} - delete action failed with errors: {e}"
)
return {"message": str(e)}, 400
return "", 204
@videos.errorhandler(422)
def error_handler(err):
    """Translate webargs 422 validation errors into a 400 JSON response."""
    headers = err.data.get("headers", None)
    messages = err.data.get("messages", ["Invalid request"])
    # Fixed typo in the log message ("Ivalid" -> "Invalid").
    logger.warning(f"Invalid input params: {messages}")
    if headers:
        return jsonify({"message": messages}), 400, headers
    return jsonify({"message": messages}), 400
# Register the route handlers with the apispec documentation generator.
docs.register(get_list, blueprint="videos")
# docs.register(update_list, blueprint='videos')
# docs.register(update_tutorial, blueprint='videos')
docs.register(delete_tutorial, blueprint="videos")
ListView.register(videos, docs, "/main", "listview")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 14:10:00 2020
@author: kevin
!!! MAJOR ERROR: Our custom trained language cannot be used as it results in an error with Tesseract v5
It works with v4 but v5 will of course be the future
This script uses tesseract's conf value during ocr processing and writes the metrics to the dedicated directory
Tesseract returns a conf value for every string it finds in every block
Three files are saved: one shows the avg conf value per block (tesseract finds multiple blocks of text in every image)
the second other file shows the avg conf value per image (avg over all strings which were found)
and the third file shows the avg conf value per configuration of psm and image preprocessing
"""
import pytesseract
from pytesseract import Output
import cv2
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import os
import csv
import utils
import datetime
import pandas as pd
import time
import datetime
import image_preprocessing
# Installing pytesseract is a bit tricky:
# 1. pip install pytesseract in the anaconda env
# 2. download and run the 64-bit installer from: https://github.com/UB-Mannheim/tesseract/wiki
# 3. add the German language as described here: https://github.com/UB-Mannheim/tesseract/wiki/Install-additional-language-and-script-models
# 4. run the command line further down in this script
# Path to the local tesseract binary (Windows install, UB-Mannheim build).
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# initialize logger
log = utils.set_logging()
# Wall-clock start timestamp, echoed in the final summary log line.
starttime = '{:%Y-%m-%d-%H%M%S}'.format(datetime.datetime.now())
# function to apply ocr on images
def handle_image(path, file, custom_oem_psm_config, rgb, gray, blur, thres, resc, inv, l):
    """Run tesseract OCR on one image and aggregate confidence metrics.

    Parameters
    ----------
    path : str -- full path of the image file
    file : str -- file name, used only for error logging
    custom_oem_psm_config : str -- tesseract '--oem ... --psm ...' config
    rgb, gray, blur, thres, resc, inv : bool -- preprocessing switches
    l : str -- tesseract language spec (e.g. 'deu+eng')

    Returns
    -------
    tuple : (status, results_per_block, conf_of_image, blocks_of_image,
             runtime_ocr, no_of_data, sum_of_conf); on any error the status
             is False and every other element is None.
    """
    try:
        image = cv2.imread(path)
        starttime_ocr = datetime.datetime.now()
        # IMAGE PREPROCESSING
        if rgb:
            image = image_preprocessing.bgr_to_rgb(image)
        if gray:
            image = image_preprocessing.grayscaling(image)
        if blur:
            image = image_preprocessing.blurring(image)
        if thres:
            image = image_preprocessing.thresholding(image)
        if resc:
            image = image_preprocessing.rescaling(image)
        if inv:
            image = image_preprocessing.inverting(image)
        data = pytesseract.image_to_data(image, lang=l, config=custom_oem_psm_config, output_type=Output.DATAFRAME)
        endtime_ocr = datetime.datetime.now()
        runtime_ocr = endtime_ocr - starttime_ocr
        runtime_ocr = round(runtime_ocr.total_seconds(),0)
        # remove all rows with no confidence value (-1)...
        data = data[data.conf != -1]
        # ...and empty string as text
        # NOTE(review): Series.replace(' ', '') replaces cells that equal a
        # single space; it is NOT per-character .str.replace. The next line
        # then filters literal one-space cells again. Confirm the intended
        # filter (possibly data.text.str.strip() != '').
        data = data[data.text.replace(' ','') != '']
        data = data[data.text != ' ']
        ### get confidence and text per block (tesseract detects multiple blocks of text per image)
        # get text per block
        texts_per_block = data.groupby('block_num')['text'].apply(list)
        # get mean confidence value per block
        confs_per_block = data.groupby(['block_num'])['conf'].mean()
        # join texts and confs
        results_per_block = pd.merge(confs_per_block, texts_per_block, left_on='block_num', right_index=True)
        ### get one confidence value (mean of all confidence values) and whole text per image
        # get mean confidence value of image
        conf_of_image = round(data['conf'].mean(),1)
        # get number of blocks
        blocks_of_image = len(results_per_block['conf'])
        # log.info('Number of blocks found: ' + str(blocks_of_image))
        # log.info('Avg conf value: ' + str(conf_of_image))
        # log.info('Runtime: ' + str(runtime_ocr))
        # log.info('')
        # number of found strings has to be returned for aggregations on config level
        no_of_data = len(data['conf'])
        sum_of_conf = data['conf'].sum()
        # in case of everything ran smoothly
        image_status = True
        return image_status, results_per_block, conf_of_image, blocks_of_image, runtime_ocr, no_of_data, sum_of_conf
    except Exception as e:
        log.error('There was a problem handling the image ' + str(file) + ': ' + str(e))
        return False, None, None, None, None, None, None
log.info('Inizialize lists for image data')
imageBlockRows = []  # list to save data on text block level
imageRows = []  # list to save data on image level (aggregated over blocks in image)
configRows = []  # list to save data on config level (aggregated over blocks found during config was in use)
# Machine-specific input directory containing the test images.
directory = 'C:/Users/kevin/OneDrive/Studium/4_WiSe20_21/1_W3-WM/app_data/test_images'
directory_name = 'test_images'
listOfFiles = os.listdir(directory)
total_images = len(listOfFiles)
# initializing config which was identified best with script 'ocr_tesseract_metrics_config'
oem = 1
psm = 3
bgr_to_rgb = True
grayscaling = False
blurring = True
thresholding = False
rescaling = False
inverting = False
# initializing languages for comparison
# earlier analysis showed that fast tessdata works best on our images
# that is why we used fast traineddata file with lang=deu as base for our training which resulted in lang=cust
langs = ['num+deu+eng','deu+eng']
# One full OCR pass over every test image for each language configuration.
for l in langs:
    custom_oem_psm_config = r'--oem {} --psm {}'.format(oem,psm)
    full_config = ' --lang {}'.format(l)
    log.info('===' + full_config)
    # initializing variables for avg conf value on config level and image counter for nice logs
    total_data = 0
    total_conf = 0
    total_blocks = 0
    total_runtime = 0
    image_counter_for_log = 0
    success_images = 0
    for file in listOfFiles:
        image_counter_for_log = image_counter_for_log + 1
        completePath = os.path.join(directory, file).replace('\\','/')
        log.info('Start handling image ' + str(image_counter_for_log) + ' of ' + str(total_images) + ': ' + str(file))
        image_status, results_per_block, conf_of_image, blocks_of_image, runtime_ocr, no_of_data, sum_of_conf = handle_image(completePath, file, custom_oem_psm_config, bgr_to_rgb, grayscaling, blurring, thresholding, rescaling, inverting, l)
        # handle information per block
        try:  # iterrows will not work if handle_image runs into exception
            for index, row in results_per_block.iterrows():
                blockRow = [l, directory_name, file, round(row[0],1), row[1]]
                imageBlockRows.append(blockRow)
        except:
            # NOTE(review): bare except also hides unexpected errors, not only
            # the None returned after a failed handle_image call.
            blockRow = [l, directory_name, file, None, None]
            imageBlockRows.append(blockRow)
        # handle information per image
        imageRow = [l, directory_name, file, blocks_of_image, conf_of_image, runtime_ocr]
        imageRows.append(imageRow)
        # add additional data lines and conf values in total variables to avg later on config level
        if image_status:
            total_data += no_of_data
            total_blocks += blocks_of_image
            total_conf += sum_of_conf
            total_runtime += runtime_ocr
            success_images += 1
    # Aggregate the per-config averages once all images were processed.
    if success_images > 0:
        avg_conf = round(total_conf/total_data,1)
        avg_runtime = round(total_runtime/success_images,1)
        configRow = [l, success_images, total_blocks, avg_conf, avg_runtime]
        configRows.append(configRow)
    else:
        avg_conf = None
        avg_runtime = None
        configRow = [l, None, None, None, None]
        configRows.append(configRow)
    log.info('=== End of' + full_config)
    log.info('=== Images ' + str(success_images) + ' / Blocks ' + str(total_blocks) + ' / Avg_Conf ' + str(avg_conf) + ' / Avg_Runtime ' + str(avg_runtime))
    log.info('')
# Write the collected metrics to three CSV files next to this script.
file_dir = os.path.dirname(os.path.abspath(__file__))
metrics_folder = 'metrics'
metrics_path = os.path.join(file_dir, metrics_folder)
if not os.path.exists(metrics_path):
    os.makedirs(metrics_path)
# write block information to csv
# csvfilename = str(timestamp) + '_metrics_per_block' + '.csv'
csvfilename = 'metrics_per_block' + '.csv'
csv_path = os.path.join(metrics_path, csvfilename)
log.info('Write list with block data to file ' + str(csvfilename))
columns = ['LANG', 'DIRECTORY', 'FILE', 'CONF', 'TEXT']
with open(csv_path, 'w', encoding='utf-8', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(columns)
    csvwriter.writerows(imageBlockRows)
# write image information to csv
# csvfilename = str(timestamp) + '_metrics_per_image' + '.csv'
csvfilename = 'metrics_per_image' + '.csv'
csv_path = os.path.join(metrics_path, csvfilename)
log.info('Write list with image data to file ' + str(csvfilename))
columns = ['LANG', 'DIRECTORY', 'FILE', 'BLOCKS', 'CONF', 'RUNTIME']
with open(csv_path, 'w', encoding='utf-8', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(columns)
    csvwriter.writerows(imageRows)
# write config information to csv
# csvfilename = str(timestamp) + '_metrics_per_config' + '.csv'
csvfilename = 'metrics_per_config' + '.csv'
csv_path = os.path.join(metrics_path, csvfilename)
log.info('Write list with config data to file ' + str(csvfilename))
columns = ['LANG', 'IMAGES', 'BLOCKS', 'CONF', 'RUNTIME']
with open(csv_path, 'w', encoding='utf-8', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(columns)
    csvwriter.writerows(configRows)
endtime = '{:%Y-%m-%d-%H%M%S}'.format(datetime.datetime.now())
log.info('All done / ' + 'started ' + str(starttime) + ' / finished ' + str(endtime))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 09:51:22 2019
@author: charlie
"""
import socket
import string
import time
# Connection settings for the IRC server.
SERVER = "127.0.0.1"
PORT = 6667
CHANNEL = "#test"
# Single module-level socket shared by all helpers below.
IRCSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect():
    """Open the TCP connection to the configured IRC server."""
    IRCSocket.connect((SERVER, PORT))
    print('Connected to ', SERVER, ':', PORT)
def join():
    """Join the channel named by the CHANNEL constant.

    Consistency fix: the original hardcoded "JOIN #test" and left the
    module-level CHANNEL constant unused; the bytes sent are identical.
    """
    IRCSocket.send("JOIN {}\n".format(CHANNEL).encode())
def listen():
    """Poll the socket forever, printing any chunk that contains "MSG".

    NOTE(review): recv(1024) can return partial or multiple IRC lines per
    read, and an empty buffer (server closed the connection) is not handled
    -- the loop would then spin on empty reads. Confirm whether that matters.
    """
    while (True):
        time.sleep(5)  # throttle polling to one read every 5 seconds
        buffer = IRCSocket.recv(1024)
        message = buffer.decode()
        if ("MSG" in message):
            print("received-message")
            print(message)
# send login data (customizable)
def login():
    """Send the IRC USER/NICK registration lines for this bot."""
    IRCSocket.send("USER bot networksbot server :bot\r\n".encode())
    IRCSocket.send("NICK Bot\r\n".encode())
    # send_data("NICK " + nickname)
# Script entry sequence: connect, register, join, then block in listen().
connect()
login()
join()
listen()
|
# Identifier of the "featurelets" menu entry.
MENU_ID = 'featurelets'
|
# Read three integer side lengths, classify the triangle, and report whether
# it is a right triangle. Sorting makes c the largest side, which both the
# triangle inequality and the Pythagorean check below rely on.
l = list(map(int, input().split(' ')))
l.sort()
a, b, c = l
if a + b <= c:
    # Triangle inequality violated: not a valid triangle.
    print("Invalido")
else:
    if a == b and b == c:
        t = "Valido-Equilatero"
    elif a == b or b == c:
        t = "Valido-Isoceles"
    else:
        t = "Valido-Escaleno"
    # Right-triangle test is exact because the sides are integers.
    if (a ** 2) + (b ** 2) == c ** 2:
        r = 'S'
    else:
        r = 'N'
    print(t)
    print("Retangulo:", r)
from Features import Features
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
import logging
import os
import sys
import datetime
class Classifier(object):
    """Benchmark harness for logo classification.

    Trains and evaluates several sklearn classifiers over a grid of
    bag-of-visual-words parameters and logs confusion-matrix statistics.

    NOTE(review): all the work happens inside __init__ as a flat script and
    the instance keeps no state; the data paths are machine-specific.
    """

    def __init__(self):
        # (algorithm, variant) pairs to benchmark; the variant is a kernel
        # name, criterion, neighbour count, or tree count depending on algo.
        tests = [("svm", "linear"), ("svm", "poly"), ("svm", "rbf"), ("svm", "sigmoid"), ("svm", "svc"),
                 ("bayes", "gaussian"), ("bayes", "bernoulli"), ("knn", 11), ("knn", 53),
                 ("knn", 101), ("dtree", "gini"), ("dtree", "entropy"), ("rforest", 10), ("rforest", 100),
                 ("rforest", 1000), ("rforest", 10000)
                 ]
        # ("bayes", "multinomial"), cannot have negative values
        # To use it train the data with data_standardization = False
        # ----------------- SETTINGS -----------------
        generate_data = False
        data_standardization = True
        k_values = [10000]            # k-means cluster counts
        iterations = [10, 20, 50]     # k-means iteration counts
        attempts = [10, 20, 50]       # k-means restart counts
        max_voc_size = [10000, 20000, 50000, 100000]  # vocabulary sizes
        train_dir = os.path.abspath("C:/git/Logos-Recognition-for-Webshop-Services/logorec/resources/images/train")
        test_dir = os.path.abspath("C:/git/Logos-Recognition-for-Webshop-Services/logorec/resources/images/test")
        # Starting logging
        logging.basicConfig(filename='data.log', filemode='w', level=logging.INFO)
        if generate_data:
            feature = Features("data", train_dir, os.listdir(train_dir)[0])
            logging.warning("Generating data for train and test ...")
            # Generate data for each logo types (e.g. MasterCard vs Other, Visa vs Other, etc.)
            for k in k_values:
                for a in attempts:
                    for i in iterations:
                        for s in max_voc_size:
                            logging.info("Number of cluster (kmeans): " + str(k))
                            logging.info("Voc Size: " + str(s))
                            logging.info("Number of iterations: " + str(i))
                            logging.info("Number of attempts: " + str(a))
                            start_time = datetime.datetime.now()
                            feature.train(k, data_standardization, False, s, i, a)
                            logging.info("Time train: " + str(datetime.datetime.now() - start_time))
            logging.warning("Generation ended.")
        logging.warning("Start test ...")
        for algo in tests:
            for k in k_values:
                for a in attempts:
                    for it in iterations:
                        for s in max_voc_size:
                            # Confusion matrix quality values initialisation
                            tpr = 0
                            tnr = 0
                            ppv = 0
                            npv = 0
                            acc = 0
                            # Number of total classes (Visa vs Other, MasterCard vs Other, etc.)
                            classes = 0
                            # Time initialisation
                            start_time = datetime.datetime.now()
                            # Loop for each logo types (e.g. MasterCard vs Other, Visa vs Other, etc.)
                            for dir_name in os.listdir(train_dir):
                                # CLASSIFIER
                                if algo[0] == "svm":
                                    # SVC
                                    if algo[1] == "svc":
                                        classifier = svm.LinearSVC()
                                    else:
                                        classifier = svm.SVC(kernel=algo[1])
                                if algo[0] == "bayes":
                                    # Bayes
                                    if algo[1] == "multinomial":
                                        classifier = MultinomialNB()
                                    elif algo[1] == "gaussian":
                                        classifier = GaussianNB()
                                    else:
                                        classifier = BernoulliNB()
                                if algo[0] == "dtree":
                                    # Decision tree
                                    if algo[1] == "gini":
                                        classifier = tree.DecisionTreeClassifier()
                                    else:
                                        classifier = tree.DecisionTreeClassifier(criterion="entropy")
                                if algo[0] == "knn":
                                    # knn (variant is the neighbour count)
                                    classifier = KNeighborsClassifier(algo[1])
                                if algo[0] == "rforest":
                                    # Random Forest (variant is the tree count)
                                    classifier = RandomForestClassifier(algo[1])
                                # lead train data
                                feature = Features("data", train_dir, dir_name)
                                classifier.fit(feature.get_train_histograms(k), feature.images_class)
                                # Prediction generation
                                # ----------------- TO PRINT CLASSES NUMBERS USE THIS TWO LINES -----------------
                                feature = Features("data", test_dir, dir_name)
                                solutions = classifier.predict(feature.test(k, data_standardization, s, it, a))
                                # Confusion matrix values initialisation
                                tp = 0
                                tn = 0
                                fn = 0
                                fp = 0
                                # Confusion matrix values computation
                                # NOTE(review): class label 1 is counted as the
                                # negative class (tn/fp) here -- confirm labeling.
                                for i in range(len(solutions)):
                                    if int(solutions[i]) == int(feature.images_class[i]):
                                        if feature.images_class[i] == 1:
                                            tn += 1
                                        else:
                                            tp += 1
                                    else:
                                        if feature.images_class[i] == 1:
                                            fp += 1
                                        else:
                                            fn += 1
                                # Confusion matrix values computation
                                try:
                                    # temp values because can happen division by zero
                                    t_tpr = tp / (tp + fn)
                                    t_tnr = tn / (tn + fp)
                                    t_ppv = tp / (tp + fp)
                                    t_npv = tn / (tn + fn)
                                    t_acc = (tp + tn) / (tp + tn + fp + fn)
                                except ZeroDivisionError:
                                    exc_type, exc_obj, exc_tb = sys.exc_info()
                                    logging.error(
                                        "Division by zero for " + str(algo[0]) + ": " + str(
                                            algo[1]) + " with: " + dir_name + " at line: " + str(exc_tb.tb_lineno))
                                else:
                                    # Accumulate only when every rate was computable.
                                    tpr += t_tpr
                                    tnr += t_tnr
                                    ppv += t_ppv
                                    npv += t_npv
                                    acc += t_acc
                                    classes += 1
                            # Print information
                            try:
                                logging.info("Algorithm: " + str(algo[0]) + ": " + str(algo[1]))
                                logging.info("Number of cluster (kmeans): " + str(k))
                                logging.info("Voc size: " + str(s))
                                logging.info("Number of iterations: " + str(it))
                                logging.info("Number of attempts: " + str(a))
                                logging.info("Time: " + str(datetime.datetime.now() - start_time))
                                logging.info("Sensitivity: %.1f" % ((tpr / classes) * 100))
                                logging.info("Specificty: %.1f" % ((tnr / classes) * 100))
                                logging.info("Precision: %.1f" % ((ppv / classes) * 100))
                                logging.info("Negative Predictive Value: %.1f" % ((npv / classes) * 100))
                                logging.info("Accuracy: %.1f" % ((acc / classes) * 100))
                                logging.info("_______________________________")
                            except ZeroDivisionError:
                                # classes == 0: every per-class computation failed.
                                logging.error("Error")
                                logging.info("_______________________________")
def main():
    """Entry point: constructing Classifier runs the whole benchmark.

    The instance itself carries no state, so the unused local binding from
    the original was dropped.
    """
    Classifier()
if __name__ == "__main__":
main()
|
import argparse
def parse_args():
    """Build and parse the command-line arguments for a training run.

    Fix: the defaults for --learning-rate-decay-factor, --weight-decay and
    --momentum were string literals ('0.1', '0.0', '0.9') on type=float
    options; argparse happens to coerce string defaults, but the values are
    now proper floats so the defaults no longer rely on that quirk.

    Returns:
        argparse.Namespace with all training hyperparameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--n-trials',
        type=int, default=1,
        help='Number of trials'
    )
    parser.add_argument(
        '--n-modules',
        type=int, default=1,
        help='Number of modules'
    )
    parser.add_argument(
        '--early-stop', default=False,
        action='store_true', help='Whether to use a validation set for early stopping'
    )
    parser.add_argument(
        '--dataset', type=str,
        default='MNIST',
        choices=['MNIST', 'Fashion-MNIST', 'EMNIST', 'CIFAR-10', 'CIFAR-100',
                 'SVHN', 'STL10'],
        help='Dataset to use'
    )
    parser.add_argument(
        '--augment-data', default=False,
        action='store_true', help='Whether to augment training data'
    )
    parser.add_argument(
        '--activation', type=str,
        default='relu',
        choices=['relu', 'sigmoid'],
        help='Activation function'
    )
    parser.add_argument(
        '--network-type', type=str,
        default='fc',
        choices=['fc', 'conv', 'densenet'],
        help='Type of neural network'
    )
    parser.add_argument(
        '--hidden-nodes', nargs='+',
        type=int, default=[5],
        help='List of number of hidden nodes per layer of FC network'
    )
    parser.add_argument(
        '--conv-final-layer', type=str,
        default='avg',
        choices=['avg', 'fc'],
        help='Specifies whether the convolutional part of the network is ' + \
             'followed by average pooling or a fully connected layer'
    )
    parser.add_argument(
        '--filters', nargs='+',
        type=int, default=[5],
        help='List of number of filters per layer of convolutional network'
    )
    parser.add_argument(
        '--kernels', nargs='+',
        type=int, default=[3],
        help='List of kernel sizes per layer of convolutional network. ' + \
             'Must be of length 1 or equal the length of the filters arg'
    )
    parser.add_argument(
        '--strides', nargs='+',
        type=int, default=[1],
        help='List of strides per layer of convolutional network. ' + \
             'Must be of length 1 or equal the length of the filters arg'
    )
    parser.add_argument(
        '--dilations', nargs='+',
        type=int, default=[1],
        help='List of dilations per layer of convolutional network. ' + \
             'Must be of length 1 or equal the length of the filters arg'
    )
    parser.add_argument(
        '--densenet-depth', type=int,
        default=100, help='DenseNet depth. Should be 3n+4 for some n > 1'
    )
    parser.add_argument(
        '--densenet-k', type=int,
        default=12, help='DenseNet growth rate'
    )
    parser.add_argument(
        '--densenet-reduction', type=float,
        default=0.5, help='DenseNet reduction factor. Between 0 and 1.'
    )
    parser.add_argument(
        '--densenet-bottleneck', default=False,
        action='store_true', help='Use bottleneck layers in DenseNet'
    )
    parser.add_argument(
        '--lambda-values', nargs='+',
        type=float, required=True,
        help='List of lambda values to use'
    )
    parser.add_argument(
        '--batch-size', type=int,
        default=100, help='Batch size'
    )
    parser.add_argument(
        '--epochs', type=int,
        default=1, help='Number of epochs'
    )
    parser.add_argument(
        '--learning-rates', nargs='+',
        type=float, default=[0.02],
        help='Learning rate for SGD. Single value or one per lambda value'
    )
    parser.add_argument(
        '--learning-rate-decay-milestones', nargs='+',
        type=int, default=[],
        help='Sorted list of epochs at which to decay learning rate. Default = [], no learning rate decay.'
    )
    parser.add_argument(
        '--learning-rate-decay-factor', type=float,
        default=0.1, help='Learning rate decay factor'
    )
    parser.add_argument(
        '--weight-decay', type=float,
        default=0.0, help='Weight decay'
    )
    parser.add_argument(
        '--momentum', type=float,
        default=0.9, help='Momentum for SGD'
    )
    parser.add_argument(
        '--use-nesterov', default=False,
        action='store_true', help='Use Nesterov momentum'
    )
    parser.add_argument(
        '--seed', type=int,
        default=1230, help='Seed for Torch'
    )
    parser.add_argument(
        '--cpu', default=False,
        action='store_true', help="Don't use CUDA"
    )
    parser.add_argument(
        '--debug', default=False,
        action='store_true', help='Print layer output shapes then exit'
    )
    parser.add_argument(
        '--output-directory', type=str,
        required=True, help='Directory where results are stored'
    )
    return parser.parse_args()
|
def LCS(x, y, s=None, t=None):
    """Return the length of the longest common subsequence of s[:x] and t[:y].

    Backward compatible: when s/t are omitted they default to the module
    globals X and Y, exactly as the original recursive version did. The
    plain recursion (exponential time) was replaced by a bottom-up DP that
    runs in O(x*y) time and O(y) space.
    """
    if s is None:
        s = X
    if t is None:
        t = Y
    prev = [0] * (y + 1)
    for i in range(1, x + 1):
        curr = [0] * (y + 1)
        for j in range(1, y + 1):
            if s[i - 1] == t[j - 1]:
                curr[j] = prev[j - 1] + 1
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[y]
# Read the two input strings and print the length of their LCS.
X = input()
Y = input()
# Idiom fix: len(X) instead of calling the dunder X.__len__() directly.
x = len(X)
y = len(Y)
print(LCS(x, y))
|
# For each test case, upper-case the first character, leaving the rest as-is
# (str.capitalize is not used because it would lower-case the remainder).
for _ in range(int(input())):
    tc = input()
    # Robustness fix: an empty line would raise IndexError on tc[0].
    print(tc[0].upper() + tc[1:] if tc else tc)
|
class Node(object):
    """Singly linked list node holding *data* and a *next* pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None  # tail by default; linked up by the caller
def length(node):
    """Return the number of nodes reachable from *node* (0 for None)."""
    total = 0
    cursor = node
    while cursor is not None:
        total += 1
        cursor = cursor.next
    return total
def count(node, data):
    """Return how many nodes in the list starting at *node* hold *data*."""
    hits = 0
    cursor = node
    while cursor is not None:
        if cursor.data == data:
            hits += 1
        cursor = cursor.next
    return hits
|
# Absolute path to the owner's people JSON file.
# NOTE(review): machine-specific Windows path into a "Secrets" folder --
# consider loading it from an environment variable or config file instead.
ppl_json_file_path = "C://Users//ericw//CodingProjects//0 - Secrets//ppl//Owner - Eric Sang.json"
|
# Auto-appended snapshot log: each entry is [timestamp, {item: checked}].
# NOTE: the same names are reassigned repeatedly, so only the LAST
# Checklist01 / Checklist02 value survives when this module is imported.
Checklist01 = [
    '2021-08-02 13:14:59.844443',
    {'position00': False, 'position01': False, 'position02': False, 'position10': False, 'position11': False,
     'position12': False, 'position20': False, 'position21': False, 'position22': False}]
Checklist02 = [
    '2021-08-02 13:15:02.508831',
    {'position00': False, 'position01': False, 'position10': False, 'position11': False}]
Checklist01 = [
    '2021-08-02 13:29:37.789477',
    {'position00': False, 'position01': False, 'position02': False, 'position10': False, 'position11': False,
     'position12': False, 'position20': False, 'position21': False, 'position22': False}]
Checklist01 = [
    '2021-08-02 13:29:38.404153',
    {'position00': False, 'position01': False, 'position02': False, 'position10': False, 'position11': False,
     'position12': False, 'position20': False, 'position21': False, 'position22': False}]
Checklist02 = [
    '2021-08-02 13:29:48.185322',
    {'position00': False, 'position01': False, 'position10': False, 'position11': False}]
Checklist02 = [
    '2021-08-02 13:29:49.274467',
    {'position00': False, 'position01': False, 'position10': False, 'position11': False}]
Checklist01 = [
    '2021-08-02 16:21:48.919663',
    {'position00': True, 'position01': True, 'position02': True, 'position10': True, 'position11': True,
     'position12': True, 'position20': True, 'position21': True, 'position22': True}]
Checklist01 = [
    '2021-08-02 17:08:23.218046',
    {'aviv': True, 'daniel': True, 'bobo': True, 'koko': True, 'shishi': True, 'haya': True}]
Checklist01 = [
    '2021-08-02 17:09:08.770968',
    {'aviv': True, 'daniel': True, 'bobo': True, 'koko': True, 'shishi': True, 'haya': True}]
Checklist01 = [
    '2021-08-02 17:09:10.053006',
    {'aviv': False, 'daniel': False, 'bobo': False, 'koko': False, 'shishi': False, 'haya': False}]
Checklist01 = [
    '2021-08-02 17:34:01.170533',
    {'aviv': True, 'daniel': True, 'bobo': True, 'koko': True, 'shishi': True, 'haya': True, 'hello': True,
     'this': True, 'is': True, 'cool': True, 1: 'daniel', 2: 'daniel'}]
Checklist01 = [
    '2021-08-02 17:35:10.243821',
    {'aviv': True, 'daniel': True, 'bobo': True, 'koko': True, 'shishi': True, 'haya': True, 'hello': True,
     'this': True, 'is': True, 'cool': True, 1: 'shishi', 2: 'bobo'}]
Checklist02 = [
    '2021-08-04 12:54:44.186900',
    {'position00': True, 'position01': True, 'position10': True, 'position11': True}]
Checklist01 = [
    '2021-08-08 04:31:46.869688',
    {'aviv': False, 'daniel': False, 'bobo': False, 'koko': False, 'shishi': False, 'haya': False, 'hello': False, 'this': False, 'is': False, 'cool': False, 1: '', 2: ''}]
|
from room import Room
word_part_list1 = ["動詞編", "名詞編", "形容詞編", "副詞・その他", "すべて"]
word_part_list2 = ["動詞編", "名詞編", "形容詞編", "副詞", "すべて"]
word_part_list3 = ["動詞編", "名詞編", "形容詞編", "すべて"]
def generate_rooms():
    """Build the game stages: parts 1-3 with 9, 8 and 4 sections each."""
    stage_specs = [(1, 9), (2, 8), (3, 4)]
    return [Part(num, sections) for num, sections in stage_specs]
class Part:
    """One stage of the game, composed of *section_num* numbered sections."""

    def __init__(self, number, section_num):
        self.number = number
        # Section addresses are prefixed with this part's number, e.g. "1,".
        self.sections = [Section(i + 1, str(number) + ",") for i in range(section_num)]
class Section:
    """A numbered section whose word-part menu depends on part and section number."""

    def __init__(self, number, address):
        self.number = number
        # Part 1, sections 1, 2 and 4 get the five-entry menu with "副詞・その他";
        # part 1 section 3 gets the variant with plain "副詞" (adverbs);
        # every other section gets the four-entry menu without adverbs.
        if address == "1," and (number <= 2 or number == 4):
            self.word_parts = [SectionPart(word_part_list1[i], address + str(number) + ",") for i in
                               range(len(word_part_list1))]
        elif address == "1," and number == 3:
            self.word_parts = [SectionPart(word_part_list2[i], address + str(number) + ",") for i in
                               range(len(word_part_list2))]
        else:
            self.word_parts = [SectionPart(word_part_list3[i], address + str(number) + ",") for i in
                               range(len(word_part_list3))]
class SectionPart:
    """A part-of-speech subsection; knows its comma-joined address and owns rooms."""

    def __init__(self, part_name, address):
        self.part_name = part_name
        # Extend the parent address with this part's name, e.g. "1,2,動詞編,".
        self.address = "{}{},".format(address, part_name)
        self.rooms = []

    def create_room(self, name, max_num):
        """Create a Room under this section part, register it, and return it."""
        new_room = Room(name, self.address, max_num)
        self.rooms.append(new_room)
        return new_room
|
def calc(s):
num = ''.join(str(ord(a)) for a in s)
num2 = num.replace('7', '1')
return abs(sum(int(b) for b in num) - sum(int(c) for c in num2))
|
# caller.py
import receiver
print("caller_haha")
print(__name__)
def test():
    """Demo function showing this module's functions are callable."""
    print("caller_test can be called!")
def caller_print():
    """Print this module's identity."""
    print("I'm caller.py")
# Run the demo calls only when executed directly, not when imported.
if __name__ == '__main__':
    caller_print()
    test()
import sys
import numpy as np
import astropy.modeling.fitting
from matplotlib import pyplot as plt
import seaborn as sns
from scipy.interpolate import interp1d
import equation6
import conic_parameters
import theta_ratio_fit
sys.path.append('../conic-projection')
from conproj_utils import Conic
# Grid of anisotropy (xi; None = isotropic) and momentum-ratio (beta) values.
XI_LIST = [None, 1.0, 0.8, 0.4]
BETA_LIST = [0.1, 0.01, 0.0001]
nxi, nbeta = len(XI_LIST), len(BETA_LIST)
ntheta = 100
theta = np.linspace(0.0, np.pi, ntheta)
# Output figure is named after this script.
figfilename = sys.argv[0].replace('.py', '.pdf')
sns.set_style('whitegrid')
sns.set_color_codes('dark')
NROWS = 2
fig, axes = plt.subplots(nxi, nbeta, sharex=True, sharey=True)
xmin, xmax = -5.0, 2.1
ymin, ymax = -0.1, 7.0
# xmin, xmax = -7.0, 4.1
# ymin, ymax = -0.1, 11.0
ytop = ymin + 0.98*(ymax - ymin)
xright = xmin + 0.98*(xmax - xmin)
whitebox = {'edgecolor': 'none', 'facecolor': 'white',
            'alpha': 0.7, 'boxstyle': 'round,pad=0.1'}
# x-data for tail asymptote
xa = np.linspace(xmin, xmax, 2)
# Set up fitter for fitting the tail
# NOTE(review): NROWS, xa, fit, x_crw/y_crw and x_tail/y_tail appear unused
# below -- possibly leftovers from an earlier version; confirm before removal.
fit = astropy.modeling.fitting.LevMarLSQFitter()
for j, xi in enumerate(XI_LIST):
    for i, beta in enumerate(BETA_LIST[::-1]):
        ax = axes[j, i]
        # The exact solution to the shell
        if xi is None:
            shell = equation6.Shell(innertype='isotropic', beta=beta)
        else:
            shell = equation6.Shell(innertype='anisotropic', beta=beta, xi=xi)
        R, theta1 = shell.radius(theta, full=True)
        ratio = theta1/theta
        R_crw = R/shell.R0
        x_crw = R_crw*np.cos(theta)
        y_crw = R_crw*np.sin(theta)
        # Fit to head and analytic fit to fit to tail
        ht = conic_parameters.HeadTail(beta, xi=xi,
                                       xmin=0.0, method='analytic fit')
        # And calculate Cartesian arrays for the shapes
        x_head = ht.x_head(ht.t_h)
        y_head = ht.y_head(ht.t_h)
        x_tail = ht.x_tail(ht.t_t)
        y_tail = ht.y_tail(ht.t_t)
        # Work in the ratios theta_1 / theta ...
        # ... first for the head ...
        theta_head = np.arctan2(y_head, x_head)
        theta1_head = np.arctan2(y_head, ht.D - x_head)
        ratio_head_func = interp1d(theta_head, theta1_head/theta_head,
                                   fill_value='extrapolate')
        ratio_head = ratio_head_func(theta)
        # ... and then for the tail
        model = theta_ratio_fit.hyperbola_ratio(ht.a_t, x0=ht.x0_t,
                                                tau=np.tan(ht.theta_t),
                                                D=ht.D)
        # Fractional residual in theta_1 is same as fractional
        # residual in the ratio
        resid_head = (ratio_head - ratio)/ratio
        resid_tail = (model(theta) - ratio)/ratio
        # Heuristic to find the switch-over point between the head and the tail
        m = (theta < 0.2) | (resid_head**2 < resid_tail**2)
        thm = np.min(theta[~m])
        m[theta > thm] = False
        ax.plot(np.degrees(theta[~m][:-1]), resid_tail[~m][:-1],
                c='r', label='Tail fit')
        ax.plot(np.degrees(theta[:-1]), resid_tail[:-1],
                c='r', lw=1, ls=':', label=None)
        ax.plot(np.degrees(theta[m]), resid_head[m],
                c='g', label='Head fit')
        ax.plot(np.degrees(theta), resid_head,
                c='g', lw=1, ls=':', label=None)
        ax.axvline(np.degrees(shell.th_infty), lw=0.5,
                   c='k', ls='--')
        if xi is None:
            text = r'Isotropic'
        else:
            text = r'Anisotropic, $\xi = {:.1f}$'.format(xi)
        text += '\n' + r'$\beta = {:.4f}$'.format(beta)
        ax.text(0.02, 0.98, text,
                ha='left', va='top', transform=ax.transAxes,
                bbox=whitebox, fontsize='small')
# Put legend on upper left panel only
axes[0, 0].legend(fontsize='x-small', frameon=True)
# Put axis labels on lower left panel only
axes[-1, 0].set(
    xlim=[0.0, 180.0], ylim=[-0.25, 0.25],
    xticks=[0, 30, 60, 90, 120, 150, 180],
    yticks=[-0.2, -0.1, 0.0, 0.1, 0.2],
    xlabel=r'$\theta$, degrees', ylabel=r'$\Delta \theta_1 / \theta_{1}$',
)
fig.set_size_inches(2*nbeta, 1.5*nxi)
fig.tight_layout()
fig.savefig(figfilename)
print(figfilename, end='')
|
# demo02_dtype.py numpy的数据类型
import numpy as np
# Sample records: (name, three scores, age).
data=[('zs', [90, 80, 85], 15),
      ('ls', [92, 81, 83], 16),
      ('ww', [95, 85, 95], 15)]
# Way 1: specify dtype as a comma-separated format string at creation time.
ary = np.array(data, dtype='U2, 3int32, int32')
print(ary[0])
print(ary['f0'])  # default auto-generated field names are f0, f1, ...
# Way 2: dtype as a list of (name, type, shape) tuples.
ary = np.array(data, dtype=[('name', 'str', 2),
                            ('scores', 'int32', 3),
                            ('age', 'int32', 1)])
print(ary)
print(ary['age'])
# Way 3: dtype as a dict of parallel 'names' and 'formats' lists.
ary = np.array(data, dtype={
    'names':['name', 'scores', 'age'],
    'formats':['U2', '3int32', 'float64']
})
print(ary[1]['age'])
print(ary['age'].mean())  # ndarray.mean() returns the average
# Datetime data: cast date strings to numpy datetime64 with day precision.
f = np.array(['2011', '2012-01-01',
              '2013-01-01 01:01:01','2011-02-01'])
dates = f.astype('M8[D]')
print(dates, dates.dtype)
# Subtracting datetime64 values yields a timedelta64.
d = dates[0] - dates[-1]
print(d, type(d))
def read_file(file, Dict):
    """Parse lines of the form "<name> <int>" from an open file into Dict.

    Each non-blank line contributes one entry: the first whitespace-separated
    token is the key, the second (parsed as int) is the value.

    Improvements over the original: iterates the file object directly instead
    of a manual readline loop, and skips blank lines (the original raised
    IndexError on them).
    """
    for line in file:
        sub_strings = line.split()
        if not sub_strings:
            continue  # tolerate blank lines (e.g. a trailing newline)
        Dict[sub_strings[0]] = int(sub_strings[1])
# Load reference prices per ticker from stock.txt.
# `with` guarantees the files are closed even if read_file raises
# (the original leaked the handles on a parse error).
stock_dict = {}
with open('stock.txt', 'r') as f1:
    read_file(f1, stock_dict)

# Ask for the user's holdings file (ticker -> share count).
input_file = input('보유 주식 파일을 입력하시오 : ')
myDict = {}
with open(input_file, 'r') as f2:
    read_file(f2, myDict)

# Convert share counts into holding value per ticker
# (raises KeyError if a held ticker is missing from stock.txt, as before).
for i in myDict.keys():
    myDict[i] = myDict[i] * stock_dict[i]

# Total holdings and tickers sorted by value, descending.
total = sum(myDict.values())
Sorted_dict = sorted(myDict, key=lambda k: myDict[k], reverse=True)
print('총 보유금액 : ', total)
print('보유금액 순 종목명 : ', Sorted_dict)
|
from aws_cdk import (
aws_ec2 as ec2,
aws_ecr as ecr,
aws_codecommit as codecommit,
core
)
class DevTools(core.Construct):
    """CDK construct bundling the workshop's dev tooling: a CodeCommit
    source repository and an ECR docker repository, both named flask-app."""
    @property
    def code_repo(self):
        # The CodeCommit repository created by this construct.
        return self._code_repo
    @property
    def ecr_repo(self):
        # The ECR repository created by this construct.
        return self._ecr_repo
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)
        ### CodeCommit - code repo
        self._code_repo = codecommit.Repository(
            self, "Repository",
            repository_name="flask-app",
            description="CodeCommit repo for the workshop")
        ### ECR - docker repo
        # DESTROY removal policy: the repo is deleted with the stack
        # (convenient for a workshop; not what you want in production).
        self._ecr_repo = ecr.Repository(
            self, "ECR",
            repository_name="flask-app",
            removal_policy=core.RemovalPolicy.DESTROY
        )
'''
Chapter 3, Exercise 9
Input: pocket_number, int, 0 -36
Process: Determine what color the roulette pocket number is, by using boolean
    and logical operators to determine if the pocket number is odd.
    The color may be either black or red
Output: Color of the roulette pocket number entered, print(string)
    ('The color of the roulette pocket is red', for example)
'''


def pocket_color(pocket_number):
    """Return the roulette color ('green', 'red', 'black') for a pocket.

    Returns None for numbers outside 0-36 so the caller can report the error.
    """
    if pocket_number == 0:
        return "green"
    # 1-10 and 19-28: even pockets are black, odd pockets are red
    if 1 <= pocket_number <= 10 or 19 <= pocket_number <= 28:
        return "black" if pocket_number % 2 == 0 else "red"
    # 11-18 and 29-36: the parity/color mapping flips
    if 11 <= pocket_number <= 18 or 29 <= pocket_number <= 36:
        return "red" if pocket_number % 2 == 0 else "black"
    return None


if __name__ == "__main__":
    # Guarding the I/O lets this module be imported (and tested) safely.
    pocket_number = int(input("Enter a pocket number:"))
    color = pocket_color(pocket_number)
    if color is None:
        print("Please enter a number, 0 - 36")
    else:
        print("The color of the pocket is {}.".format(color))
# create and define variables (disabled for now — they require console input)
"""name = input("What is your name?: ")
country = input("What country are you from?: ")
age = int(input("How old are you?: "))
hourly_wage = int(input("What is your hourly wage?: "))
satisfied = input("Are you satisfied?: ")
daily_wage = hourly_wage * 8"""

# print variables (disabled; they depend on the inputs above)
#print("My name is: " + name)
#print("I am from " + country)
#print("I am " + str(age) + " years old")
#print("I make $" + str(hourly_wage) + " per hour")
#print("I am satisfied: " + satisfied)
#print("I make $" + str(daily_wage) + " a day")

# conditionals
"""
x = 1
if (x == 1):
    print("is 1")
else:
    print("no")
"""

# list - ordered, mutable collection
numbers = [1, 2, 3, 4, 5]
# set - unordered, all values are different
s = {1, 2, 3}
# tuple - can't change once set
t = (1, 2, 3)

# loops - print every list element on its own line
for value in numbers:
    print(value)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import gzip
import sys
import os
import psycopg2
import time
class Main():
    """Reads gzipped daily market-cap CSV dumps from ./stock_data and loads
    every row into PostgreSQL via the insert_tb_marcap_stock procedure."""
    def __init__(self):
        print("실행할 메인 클래스")
        file_list = self.get_file_dir()
        print(file_list)
        # Wall-clock start; used by write_file_to_db for progress messages.
        self.start = time.time()
        #self.write_file_to_db(file_list[0])
        for f in file_list:
            self.write_file_to_db(f)
    def get_file_dir(self):
        """Chdir into <cwd>\\stock_data and return its file listing."""
        print("gz파일가져오기")
        #print(os.getcwd())  # get the current working directory
        #print(os.listdir(os.getcwd()))  # list files in the current directory
        #print(os.listdir(os.getcwd()))
        tmp_dir = os.getcwd()
        # NOTE(review): '\s' is not an escape sequence so this literal works,
        # but it is Windows-only path joining — os.path.join would be safer.
        self.stock_data_dir =tmp_dir+"\stock_data"
        #print(tmp_dir)
        os.chdir(self.stock_data_dir)  # change the working directory
        #print(os.getcwd())  # get the current working directory
        #print(os.listdir(os.getcwd()))
        return os.listdir(os.getcwd())
    def write_file_to_db(self,file_name):
        """Parse one gzipped CSV and insert each row into PostgreSQL.

        Logs context (row counter, last parsed row) and re-raises on the
        first parse or database error.
        """
        tmp = self.stock_data_dir+"\\"+file_name
        print("gz파일읽기 %s" % tmp)
        file_data = []
        try:
            with gzip.open(tmp, 'rb') as f:
                cnt=0;
                for line in f:
                    # The first line is a header and must be skipped.
                    if cnt==0:
                        cnt+=1
                        continue
                    row_data=[]
                    tmp=line.decode('utf-8')  # fetch the file line by line
                    arr_tmp=tmp.split(",");
                    Code= arr_tmp[0].strip()[1:-1]  # stock code; strip one '"' from each side
                    Name= arr_tmp[1].strip()[1:-1]  # stock name
                    # Some records (e.g. code 015545) carry an empty "" closing price.
                    if arr_tmp[2].strip().replace('""',''):  # parse only when non-empty
                        Close= float(arr_tmp[2])  # closing price
                        Close= int(Close)
                    else:
                        Close= 0
                    Changes= int(arr_tmp[3])  # change vs previous day
                    ChagesRatio = float(arr_tmp[4])  # change ratio vs previous day
                    Volume = int(arr_tmp[5])  # trading volume
                    Amount = int(arr_tmp[6])  # trading amount
                    Open = int(arr_tmp[7])  # opening price
                    High = int(arr_tmp[8])  # daily high
                    Low = int(arr_tmp[9])  # daily low
                    Marcap = int(arr_tmp[10])  # market cap (million KRW)
                    MarcapRatio = arr_tmp[11].strip().replace('"','')
                    if MarcapRatio:
                        MarcapRatio = float(MarcapRatio)  # market-cap share (%)
                    else:
                        MarcapRatio= 0
                    Stocks = int(arr_tmp[12])  # number of listed shares
                    ForeignShares = arr_tmp[13].strip().replace('"',"")  # shares held by foreigners
                    if ForeignShares:
                        ForeignShares = float(ForeignShares)  # shares held by foreigners
                        ForeignShares = int(ForeignShares)
                    else:
                        ForeignShares= 0
                    ForeignRatio = arr_tmp[14].strip().replace('"',"")  # foreign ownership ratio (%)
                    if ForeignRatio:
                        ForeignRatio = float(ForeignRatio)  # foreign ownership ratio (%)
                    else:
                        ForeignRatio = 0
                    Rank = float(arr_tmp[15])  # market-cap rank (that day)
                    Rank = int(Rank)
                    Date = arr_tmp[16].strip()[1:-1].replace("-","")  # date (DatetimeIndex)
                    #print(Date)
                    row_data.append(Code)
                    row_data.append(Name)
                    row_data.append(Close)
                    row_data.append(Changes)
                    row_data.append(ChagesRatio)
                    row_data.append(Volume)
                    row_data.append(Amount)
                    row_data.append(Open)
                    row_data.append(High)
                    row_data.append(Low)
                    row_data.append(Marcap)
                    row_data.append(MarcapRatio)
                    row_data.append(Stocks)
                    row_data.append(ForeignShares)
                    row_data.append(ForeignRatio)
                    row_data.append(Rank)
                    row_data.append(Date)
                    cnt+=1
                    # if cnt==10:
                    #     break
                    file_data.append(row_data)
                print(len(file_data))
                total_cnt=len(file_data)
        except (Exception, psycopg2.DatabaseError) as error:
            # Log enough context to find the offending row, then re-raise.
            print(cnt)
            print(len(file_data))
            print(file_data[len(file_data)-1])
            print(error)
            raise error
        # NOTE(review): hard-coded credentials — move to config/env in real use.
        conn_str ="host='localhost' dbname='stockweb' user='postgres' password='pwd'"
        conn = psycopg2.connect(conn_str)
        print(conn)
        cur = conn.cursor()
        print(cur)
        cnt=0;
        try:
            for d in file_data:
                # Stored procedure does the actual insert; committed per row.
                cur.execute('CALL insert_tb_marcap_stock('
                            '%s' #[0]stock_cd
                            ',%s' #[1]stock_nm
                            ',%s' #[2]cls_amt
                            ',%s' #[3]changes_amt
                            ',%s' #[4]p_changes_rt
                            ',%s' #[5]p_trade_qty
                            ',%s' #[6]p_trade_amt
                            ',%s' #[7]p_start_amt
                            ',%s' #[8]p_high_amt
                            ',%s' #[9]p_low_amt
                            ',%s' #[10]p_total_mrkt_amt
                            ',%s' #[11]p_total_mrkt_amt_rt
                            ',%s' #[12]p_stock_cnt
                            ',%s' #[13]p_frgn_cnt
                            ',%s' #[14]p_frgn_rt
                            ',%s' #[15]p_rnk
                            ',%s' #[16]stock_dt
                            ')'
                            '', (
                    d[0],
                    d[1],
                    d[2], #p_cls_amt
                    d[3], #p_changes_amt
                    d[4], #p_changes_rt
                    d[5], #p_trade_qty
                    d[6], #p_trade_amt
                    d[7], #p_start_amt
                    d[8], #p_high_amt
                    d[9], #p_low_amt
                    d[10], #p_total_mrkt_amt
                    d[11], #p_total_mrkt_amt_rt
                    d[12], #p_stock_cnt
                    d[13], #p_frgn_cnt
                    d[14], #p_frgn_rt
                    d[15], #p_rnk
                    d[16]
                    )
                )
                conn.commit()
                cnt+=1
                # Progress report every 10000 rows and near the end of the file.
                if (cnt % 10000) == 0:
                    print("[%s]진행율 %s / %s 진행되고 있습니다(진행시간: %s)." % (file_name,cnt, total_cnt,(time.time()-self.start)))
                elif total_cnt<=(cnt+100):
                    print("[%s]진행율 %s / %s 진행되고 있습니다(진행시간: %s)." % (file_name,cnt, total_cnt,(time.time()-self.start)))
        except (Exception, psycopg2.DatabaseError) as error:
            print(file_data[cnt-1])
            print(error)
            cur.close()
            conn.close()
            raise error
        finally:
            if conn is not None:
                conn.close()
        # NOTE(review): `finally` above already closed the connection, so these
        # closes are redundant (psycopg2 tolerates a double close).
        cur.close()
        conn.close()
if __name__== "__main__":
    # Running as a script kicks off the full load in Main.__init__.
    Main()
# pip install psycopg2  # library for connecting to PostgreSQL
|
# Linux example for external capture trigger on econsystems FSCAM_CU135
# By Taylor Alexander, MIT License. Please enjoy, expand, and share.
# Run as root or add a udev rule to give user appropriate rights.
# Note: Hook the hardware trigger input up to an arduino or other device
# with a pin toggling on and off at 10hz (for example).
# Connect a webcam viewer such as "cheese" to view the camera, and then
# run this program in a terminal. You will see the webcam stream slow to 10hz.
import hid # pip3 install hidapi
import time
# --- HID protocol constants for the e-con Systems FSCAM_CU135 camera ---
# (the original listing defined CAMERA_CONTROL_FSCAM_CU135 and
#  GET_LED_CONTROL_FSCAM_CU135 twice with identical values; deduplicated here)
CAMERA_CONTROL_FSCAM_CU135 = 0x95   # camera-control report id (first packet byte)
BUFFER_LENGTH = 65                  # HID report size in bytes
# Generic status codes returned in byte 6 of a response
SET_FAIL = 0x00
SET_SUCCESS = 0x01
GET_FAIL = 0x00
GET_SUCCESS = 0x01
# Get/Set opcodes (second packet byte)
GET_SCENE_MODE_FSCAM_CU135 = 0x01
SET_SCENE_MODE_FSCAM_CU135 = 0x02
GET_SPECIAL_EFFECT_MODE_FSCAM_CU135 = 0x03
SET_SPECIAL_EFFECT_MODE_FSCAM_CU135 = 0x04
GET_DENOISE_CONTROL_FSCAM_CU135 = 0x05
SET_DENOISE_CONTROL_FSCAM_CU135 = 0x06
GET_HDR_MODE_FSCAM_CU135 = 0x09
SET_HDR_MODE_FSCAM_CU135 = 0x0A
GET_Q_FACTOR_FSCAM_CU135 = 0x0B
SET_Q_FACTOR_FSCAM_CU135 = 0x0C
GET_STREAM_MODE_FSCAM_CU135 = 0x0D
SET_STREAM_MODE_FSCAM_CU135 = 0x0E
GET_ORIENTATION_FSCAM_CU135 = 0x10
SET_ORIENTATION_FSCAM_CU135 = 0x11
GET_LED_CONTROL_FSCAM_CU135 = 0x26
SET_LED_CONTROL_FSCAM_CU135 = 0x27
GET_FLICKER_DETECTION_FSCAM_CU135 = 0x28
SET_FLICKER_DETECTION_FSCAM_CU135 = 0x29
# Feature enable/disable flags for SET_LED_CONTROL
ENABLE_LED_CONTROL_FSCAM_CU135 = 0x01
DISABLE_LED_CONTROL_FSCAM_CU135 = 0x00
ENABLE_POWERON_CONTROL_FSCAM_CU135 = 0x01
DISABLE_POWERON_CONTROL_FSCAM_CU135 = 0x00
ENABLE_STREAMING_CONTROL_FSCAM_CU135 = 0x01
DISABLE_STREAMING_CONTROL_FSCAM_CU135 = 0x00
ENABLE_TRIGGERACK_CONTROL_FSCAM_CU135 = 0x01
DISABLE_TRIGGERACK_CONTROL_FSCAM_CU135 = 0x00
# Stream modes for SET_STREAM_MODE
STREAM_MASTER_CONTINUOUS = 0x00
STREAM_MASTER_ONDEMAND = 0x01
STREAM_SOFTWARE_TRIGGER = 0x02
STREAM_HARDWARE_TRIGGER = 0x03
# Frame-grab opcodes
GRAB_PREVIEW_FRAME = 0x1A
GRAB_STILL_FRAME = 0x1E
STORE_FRAME = 0x1B
QUERY_NEXT_FRAME = 0x01
STORE_PREV_FRAME = 0x01
STORE_STILL_FRAME = 0x02
# Example snippett from https://www.ontrak.net/pythonhidapi.htm
VENDOR_ID = 0x2560
PRODUCT_ID = 0xC1D4
# print(hid.enumerate(VENDOR_ID, PRODUCT_ID))
# enumerate USB devices
def run_hid_process():
    """Switch both HID interfaces of the FSCAM_CU135 into hardware-trigger
    stream mode, then continually request the next frame forever.

    Intended to run in a terminal (Ctrl-C to stop) while a viewer such as
    "cheese" shows the stream; assumes exactly two matching HID paths exist.
    """
    paths = []
    for d in hid.enumerate(VENDOR_ID, PRODUCT_ID):
        # Interface 2 is the vendor control interface on this camera.
        if int(d['interface_number']) == 2:
            paths.append(d['path'])
            print(d['path'])
        # keys = list(d.keys())
        # keys.sort()
        # print(d['path'])
        # for key in keys:
        #     print("%s : %s" % (key, d[key]))
        # print()
    #
    # device = hid.device()
    # # device.open(VENDOR_ID, PRODUCT_ID)
    #
    #
    # print(dir(device))
    # print(device)
    #
    # device.close()
    #
    # import sys
    # sys.exit()
    devices = [hid.device(), hid.device()]
    # devices = [hid.device()]
    devices[0].open_path(paths[0])
    devices[1].open_path(paths[1])
    for device in devices:
        # device = hid.device()
        # device.open(VENDOR_ID, PRODUCT_ID)
        print('Connected to ecam {}\n'.format(PRODUCT_ID))
        timeout = 0
        # First just read LED Control status, as a test.
        g_out_packet_buf = [0, 0]
        g_out_packet_buf[0] = CAMERA_CONTROL_FSCAM_CU135
        g_out_packet_buf[1] = GET_LED_CONTROL_FSCAM_CU135
        device.write(g_out_packet_buf)
        time.sleep(0.5)
        data = device.read(BUFFER_LENGTH, timeout)
        print(data)
        # if data[6]==GET_SUCCESS:
        if data[0] == CAMERA_CONTROL_FSCAM_CU135 and data[1]==GET_LED_CONTROL_FSCAM_CU135 and data[6]==GET_SUCCESS:
            # Response layout: bytes 2-5 carry the four LED-control flags.
            ledstatus=data[2]
            powerctl=data[3]
            stream=data[4]
            trigger=data[5]
            print("ledstatus {}, powerctl {}, stream {}, trigger {}".format(ledstatus, powerctl, stream, trigger))
        else:
            print("GET_FAILED")
        # Now set LED control to indicate when hardware trigger has activated.
        g_out_packet_buf = [0, 0, 0, 0, 0, 0]
        g_out_packet_buf[0] = CAMERA_CONTROL_FSCAM_CU135 # /* set camera control code */
        g_out_packet_buf[1] = SET_LED_CONTROL_FSCAM_CU135 # /* set led control code */
        g_out_packet_buf[2] = ENABLE_LED_CONTROL_FSCAM_CU135
        g_out_packet_buf[3] = DISABLE_STREAMING_CONTROL_FSCAM_CU135
        g_out_packet_buf[4] = ENABLE_TRIGGERACK_CONTROL_FSCAM_CU135
        g_out_packet_buf[5] = DISABLE_POWERON_CONTROL_FSCAM_CU135
        device.write(g_out_packet_buf)
        time.sleep(0.5)
        data = device.read(BUFFER_LENGTH, timeout)
        #print(data)
        # Finally set trigger control.
        # NOTE(review): this packet fills indices 1..3 (leaving byte 0 as the
        # HID report id), while the packets above used indices starting at 0 —
        # confirm which framing the firmware actually expects.
        g_out_packet_buf = [0, 0, 0, 0]
        g_out_packet_buf[1] = CAMERA_CONTROL_FSCAM_CU135 # /* set camera control code */
        g_out_packet_buf[2] = SET_STREAM_MODE_FSCAM_CU135 # /* set stream mode code */
        g_out_packet_buf[3] = STREAM_HARDWARE_TRIGGER # /* actual stream mode */
        # g_out_packet_buf[3] = STREAM_MASTER_CONTINUOUS # NOTE: Uncomment this to select auto trigger.
        device.write(g_out_packet_buf)
        time.sleep(2)
        data = device.read(BUFFER_LENGTH, timeout)
        if data[0] == CAMERA_CONTROL_FSCAM_CU135 and data[1]==SET_STREAM_MODE_FSCAM_CU135 and data[6]==SET_SUCCESS:
            print("SUCCESS")
        else:
            print("FAILED")
    time.sleep(2)
    print("RUNNING HID FRAME GRAB")
    # Infinite polling loop: reuses the last g_out_packet_buf from above.
    while True:
        sample_time = time.time()
        for device in devices:
            # In hardware trigger mode we must continually request the next frame.
            g_out_packet_buf[1] = CAMERA_CONTROL_FSCAM_CU135 # // camera control id
            g_out_packet_buf[2] = GRAB_PREVIEW_FRAME # // query frame
            g_out_packet_buf[3] = QUERY_NEXT_FRAME # // query next frame - 0x01 , query prev frame - 0x02
            device.write(g_out_packet_buf)
            time.sleep(0.001)
            # data = device.read(BUFFER_LENGTH, timeout)
            # if data[6] == GET_SUCCESS and device == devices[0]:
            #     print(time.time() - sample_time)
            #     sample_time = time.time()
            #print(time.time())
        time.sleep(0.001)
if __name__ == "__main__":
    run_hid_process()
|
from math import sqrt
limit = 10000000
t = [n*(n+1)/2 for n in xrange(1, limit)]
p = set(n*(3*n-1)/2 for n in xrange(1, limit))
h = set(n*(2*n-1) for n in xrange(1, limit))
isti = [x for x in t if x in p and x in h]
print (-1 + sqrt(1+8*isti[2]))/2
|
def celsius_to_fahrenheit(c):
    """Convert a temperature from Celsius to Fahrenheit."""
    # Same arithmetic as the original (c*18/10 + 32 == c*9/5 + 32).
    return c * 9 / 5 + 32


if __name__ == "__main__":
    # float() instead of eval(): eval executes arbitrary code typed by the
    # user (a security hole) and accepted non-numeric expressions.
    c = float(input("Enter a celsius"))
    print(celsius_to_fahrenheit(c))
|
import pickle
import time
from .rcversion import VersionList, VERSION_FILE
from .path import relative
class VimrcVersionController:
    """Manages named snapshots ("versions") of a vimrc file, persisted as a
    pickled VersionList in the version file.

    All public methods return 0 on success and 1 on error, printing a
    human-readable status line either way.
    """

    def __init__(self, vimrc_path: str):
        self.__vimrc_path = vimrc_path

    def __try_load_version_list(self) -> VersionList:
        """Load the pickled VersionList, or None if the version file is missing."""
        # `with` closes the handle even if unpickling fails
        # (the original leaked the file object).
        try:
            with open(relative(VERSION_FILE), 'rb') as f:
                return pickle.load(f)
        except FileNotFoundError:
            return None

    def __update_version_list(self, version_list: VersionList):
        """Persist version_list to the version file."""
        with open(relative(VERSION_FILE), 'wb') as f:
            pickle.dump(version_list, f)

    def new_version(self, name: str) -> int:
        """Snapshot the current vimrc content under `name`."""
        vlist = self.__try_load_version_list()
        if vlist is None:
            vlist = VersionList()
        with open(self.__vimrc_path) as f:
            vlist.new_version(name, f.read())
        self.__update_version_list(vlist)
        print('[A] new version \'%s\'' % name)
        return 0

    def change_version(self, name: str) -> int:
        """Overwrite the vimrc with the content of version `name`."""
        vlist = self.__try_load_version_list()
        if vlist is None:
            print('[E] version \'%s\' not found! (version file not found)' % name)
            return 1
        target = vlist.get_version(name)
        if target is None:
            print('[E] version \'%s\' not found!' % name)
            return 1
        with open(self.__vimrc_path, 'w') as f:
            f.write(target.content)
        print('[A] successfully change vimrc version to \'%s\'' % name)
        return 0

    def remove_version(self, name: str):
        """Delete version `name` from the version list."""
        vlist = self.__try_load_version_list()
        if vlist is None:
            print('[E] version file not found!')
            return 1
        b = vlist.remove_version(name)
        if b:
            print('[A] version \'%s\' deleted' % name)
            self.__update_version_list(vlist)
        else:
            print('[E] version \'%s\' not found' % name)
            return 1
        return 0

    def list_version(self):
        """Print every stored version name."""
        vlist = self.__try_load_version_list()
        if vlist is None:
            print('[e] version file not found!')
            return 1
        print('list all version:')
        for b in vlist:
            print('\t%s' % b)
        # Explicit success code for consistency with the other methods
        # (the original implicitly returned None here).
        return 0

    def now_vimrc_version(self):
        """Print which stored version(s) match the current vimrc content."""
        vlist = self.__try_load_version_list()
        if vlist is None:
            print('[e] version file not found!')
            return 1
        with open(self.__vimrc_path) as f:
            now_vimrc_content = f.read()
        not_matched = True
        for b in vlist:
            if b.content == now_vimrc_content:
                not_matched = False
                print('now vimrc version match:')
                print('\t%s' % b)
        if not_matched:
            print('[E] no version match.')
            return 1
        return 0
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pickle
import os
# Module-level load of the training data (the __main__ block below re-reads it).
df = pd.read_csv ("titanic.csv")
def pre_process (df):
    """Feature-engineer a Titanic dataframe for the logistic-regression model.

    Adds a numeric 'title' column extracted from the Name, drops identifier
    columns, encodes Sex, turns Cabin into a has-no-cabin flag, one-hot
    encodes the remaining categoricals, mean-fills Age, and binarizes Fare
    around its mean. Returns the transformed dataframe.
    """
    def get_title(name):
        # "Braund, Mr. Owen Harris" -> "Mr".
        # Bug fix: the second split must be on '.' (the original split on ','
        # twice, so titles like 'Mr'/'Mrs' were never isolated and everything
        # fell through to the default bucket in title_map).
        if '.' in name:
            return name.split(',')[1].split('.')[0].strip()
        else:
            return 'Unknown'
    def title_map(title):
        # Map titles to small ordinal codes; unknown titles get 2.
        if title in ['Mr']:
            return 1
        elif title in ['Master']:
            return 3
        elif title in ['Miss', 'Ms', 'Mlle']:
            return 4
        elif title in ['Mme', 'Mrs']:
            return 5
        else:
            return 2
    df['title'] = df['Name'].apply(get_title).apply(title_map)
    df = df.drop(['PassengerId', 'Name', 'Ticket'], axis=1)
    df['Sex'] = df['Sex'].replace(['male', 'female'], [0, 1])
    df['Cabin'] = df['Cabin'].isna()
    df = pd.get_dummies(df)
    # .loc avoids pandas chained assignment (SettingWithCopyWarning and
    # silently-dropped writes on copies).
    df.loc[df['Age'].isna(), 'Age'] = df['Age'].mean()
    mf = df['Fare'].mean()
    df['Fare'] = (df['Fare'] > mf).astype(int)
    return df
def training (df):
    """Fit a logistic-regression survival model on df and persist artifacts.

    Writes dummyRow.csv (an all-zero row carrying the training feature
    columns, used later by pred() for column alignment) and the pickled
    model, then prints training accuracy, class counts and the confusion
    matrix.
    """
    df = pre_process(df)
    y = df['Survived']
    X = df.drop('Survived', axis=1)

    # One row of zeros with exactly the training columns, for test prediction.
    dummyRow = pd.DataFrame(np.zeros((1, len(X.columns))), columns=X.columns)
    dummyRow.to_csv("dummyRow.csv", index=False)

    model = LogisticRegression()
    model.fit(X, y)

    # Persist the fitted model ('wb' = binary write mode).
    with open("pickle_model.pkl", 'wb') as file:
        pickle.dump(model, file)

    print(model.score(X, y))
    yp = model.predict(X)
    print("Sur", sum(yp != 0))
    print("Not Sur", sum(yp == 0))
    print(confusion_matrix(y, yp))
def pred(ob):
    """Predict survival for a single passenger record.

    ob: a pandas Series for one passenger (including 'Survived', which is
    dropped before pre-processing). Loads dummyRow.csv and the pickled model
    written by training(); returns the model's prediction array.
    """
    d1 = ob.to_dict() #convert object into dictionary and create a df.
    df = pd.DataFrame(d1, index=[0])
    df.drop("Survived", axis=1, inplace=True) #Dropping target feature before pre-processing.
    df = pre_process(df)
    dummyrow_filename = 'dummyRow.csv'
    dummyrow_filename = os.path.dirname(__file__)+"/" + dummyrow_filename #dummyRow is used for the dummified columns (Embarked)
    df2 = pd.read_csv(dummyrow_filename) #dummyRow is all the columns during training with 1 row of 0 values.
    # Overlaying df onto the zero row aligns this record with the training
    # feature set (missing dummy columns stay 0).
    for c1 in df.columns: #Add each column from df to df2.
        df2[c1]=df[c1]
    #Load the pickled model.
    pickle_filename = "pickle_model.pkl"
    pickle_filename=os.path.dirname(__file__)+"/"+pickle_filename #__file__ is current file (TiML2). Need to specify the os path.
    with open (pickle_filename, 'rb') as file:
        model = pickle.load(file)
    pred = model.predict(df2)
    return pred
if __name__ == "__main__":
    # Entry point: retrain the model from titanic.csv when run as a script.
    df = pd.read_csv("titanic.csv")
    training(df)
    #print ("Python - This will run if the file is run as a main, and not a support file")
|
class Solution:
    def interchangeableRectangles(self, rectangles: List[List[int]]) -> int:
        """Count pairs of rectangles with the same width/height ratio.

        Groups rectangles by their ratio in lowest terms (w//g, h//g) rather
        than by float division, which can collide or diverge for large
        coordinates; also removes the leftover debug print and uses integer
        arithmetic for the pair count.
        """
        from math import gcd
        counts = {}
        for width, height in rectangles:
            g = gcd(width, height)
            counts[(width // g, height // g)] = counts.get((width // g, height // g), 0) + 1
        # v rectangles sharing a ratio form v*(v-1)//2 interchangeable pairs.
        return sum(v * (v - 1) // 2 for v in counts.values())
import logging
from datetime import datetime
import os
import pytest
import sys
from pydriller import Repository, Git
from pydriller.repository import MalformedUrl
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
# Fixture: traverse every commit of the repo path(s) passed via indirect param.
@pytest.fixture
def repo(request):
    return list(Repository(path_to_repo=request.param).traverse_commits())
# Fixture: like `repo`, but bounded by an upper date limit (`to`).
@pytest.fixture
def repo_to(request):
    path, to = request.param
    return list(Repository(path_to_repo=path, to=to).traverse_commits())
# Fixture: yields a Git handle and clears its resources afterwards.
@pytest.fixture()
def git_repo(request):
    gr = Git(request.param)
    yield gr
    gr.clear()
# It should fail when no URLs are specified
def test_no_url():
    with pytest.raises(Exception):
        list(Repository().traverse_commits())
# It should fail when URL is not a string or a List
def test_badly_formatted_repo_url():
    with pytest.raises(Exception):
        list(Repository(path_to_repo=set('repo')).traverse_commits())
# It should fail when URL is malformed
def test_malformed_url():
    with pytest.raises(MalformedUrl):
        list(Repository("https://badurl.git/").traverse_commits())
# Single local repo: all 5 commits are traversed.
@pytest.mark.parametrize('repo,expected', [
    ("test-repos/small_repo", 5)
], indirect=['repo'])
def test_simple_url(repo, expected):
    assert len(repo) == expected
# Two local repos: commit counts of both are summed (5 + 4).
@pytest.mark.parametrize('repo,expected', [
    ((["test-repos/small_repo", "test-repos/branches_merged"]), 9)
], indirect=['repo'])
def test_two_local_urls(repo, expected):
    assert len(repo) == expected
# Remote repos are bounded with `to=` so the expected counts stay stable.
@pytest.mark.parametrize('repo_to,expected', [
    (("https://github.com/ishepard/pydriller.git", datetime(2018, 10, 20)), 159)
], indirect=['repo_to'])
def test_simple_remote_url(repo_to, expected):
    assert len(repo_to) == expected
@pytest.mark.parametrize('repo_to,expected', [
    ((["https://github.com/mauricioaniche/repodriller.git",
       "https://github.com/ishepard/pydriller"], datetime(2018, 10, 20)),
     518)
], indirect=['repo_to'])
def test_two_remote_urls(repo_to, expected):
    assert len(repo_to) == expected
# The same repo listed twice is traversed twice (5 + 5).
@pytest.mark.parametrize('repo,expected', [
    ((["test-repos/small_repo", "test-repos/small_repo"]), 10)
], indirect=['repo'])
def test_2_identical_local_urls(repo, expected):
    assert len(repo) == expected
@pytest.mark.parametrize('repo_to,expected', [
    ((["test-repos/small_repo", "https://github.com/ishepard/pydriller.git"],
      datetime(2018, 10, 20)),
     164)
], indirect=['repo_to'])
def test_both_local_and_remote_urls(repo_to, expected):
    assert len(repo_to) == expected
@pytest.mark.parametrize('repo_to,expected', [
    ((["test-repos/small_repo", "https://github.com/mauricioaniche/repodriller.git",
       "test-repos/branches_merged", "https://github.com/ishepard/pydriller.git"],
      datetime(2018, 10, 20)),
     527)
], indirect=['repo_to'])
def test_both_local_and_remote_urls_list(repo_to, expected):
    assert len(repo_to) == expected
# Non-repo URLs / non-existent local paths must raise.
def test_badly_formatted_url():
    with pytest.raises(Exception):
        list(Repository(
            path_to_repo='https://github.com/ishepard.git/test')
            .traverse_commits())
    with pytest.raises(Exception):
        list(Repository(path_to_repo='test').traverse_commits())
# The histogram diff algorithm attributes moved lines differently from the
# default Myers diff; these two tests pin both behaviors on the same commit.
@pytest.mark.parametrize('git_repo', ["test-repos/histogram"], indirect=True)
def test_diff_without_histogram(git_repo):
    # without histogram
    commit = list(Repository('test-repos/histogram',
                             single="93df8676e6fab70d9677e94fd0f6b17db095e890").traverse_commits())[0]
    diff = commit.modified_files[0].diff_parsed
    assert len(diff['added']) == 11
    assert (3, ' if (path == null)') in diff['added']
    assert (5, ' log.error("Icon path is null");') in diff['added']
    assert (6, ' return null;') in diff['added']
    assert (8, '') in diff['added']
    assert (9, ' java.net.URL imgURL = GuiImporter.class.getResource(path);') in diff['added']
    assert (10, '') in diff['added']
    assert (11, ' if (imgURL == null)') in diff['added']
    assert (12, ' {') in diff['added']
    assert (14, ' return null;') in diff['added']
    assert (16, ' else') in diff['added']
    assert (17, ' return new ImageIcon(imgURL);') in diff['added']
    assert len(diff['deleted']) == 7
    assert (3, ' java.net.URL imgURL = GuiImporter.class.getResource(path);') in diff['deleted']
    assert (4, '') in diff['deleted']
    assert (5, ' if (imgURL != null)') in diff['deleted']
    assert (7, ' return new ImageIcon(imgURL);') in diff['deleted']
    assert (9, ' else') in diff['deleted']
    assert (10, ' {') in diff['deleted']
    assert (13, ' return null;') in diff['deleted']
@pytest.mark.parametrize('git_repo', ["test-repos/histogram"], indirect=True)
def test_diff_with_histogram(git_repo):
    # with histogram
    commit = list(Repository('test-repos/histogram',
                             single="93df8676e6fab70d9677e94fd0f6b17db095e890",
                             histogram_diff=True).traverse_commits())[0]
    diff = commit.modified_files[0].diff_parsed
    assert (4, ' {') in diff["added"]
    assert (5, ' log.error("Icon path is null");') in diff["added"]
    assert (6, ' return null;') in diff["added"]
    assert (7, ' }') in diff["added"]
    assert (8, '') in diff["added"]
    assert (11, ' if (imgURL == null)') in diff["added"]
    assert (12, ' {') in diff["added"]
    assert (13, ' log.error("Couldn\'t find icon: " + imgURL);') in diff["added"]
    assert (14, ' return null;') in diff["added"]
    assert (17, ' return new ImageIcon(imgURL);') in diff["added"]
    assert (6, ' {') in diff["deleted"]
    assert (7, ' return new ImageIcon(imgURL);') in diff["deleted"]
    assert (10, ' {') in diff["deleted"]
    assert (11, ' log.error("Couldn\'t find icon: " + imgURL);') in diff["deleted"]
    assert (12, ' }') in diff["deleted"]
    assert (13, ' return null;') in diff["deleted"]
def test_ignore_add_whitespaces():
    # A whitespace-only modification counts as a modified file by default...
    commit = list(Repository('test-repos/whitespace',
                             single="338a74ceae164784e216555d930210371279ba8e").traverse_commits())[0]
    assert len(commit.modified_files) == 1
    # ...but is filtered out with skip_whitespaces=True.
    commit = list(Repository('test-repos/whitespace',
                             skip_whitespaces=True,
                             single="338a74ceae164784e216555d930210371279ba8e").traverse_commits())[0]
    assert len(commit.modified_files) == 0
# Mixed commit: whitespace lines are filtered, real changes are kept.
@pytest.mark.parametrize('git_repo', ["test-repos/whitespace"], indirect=True)
def test_ignore_add_whitespaces_and_modified_normal_line(git_repo):
    commit = list(Repository('test-repos/whitespace',
                             single="52716ef1f11e07308b5df1b313aec5496d5e91ce").traverse_commits())[0]
    assert len(commit.modified_files) == 1
    parsed_normal_diff = commit.modified_files[0].diff_parsed
    commit = list(Repository('test-repos/whitespace',
                             skip_whitespaces=True,
                             single="52716ef1f11e07308b5df1b313aec5496d5e91ce").traverse_commits())[0]
    assert len(commit.modified_files) == 1
    parsed_wo_whitespaces_diff = commit.modified_files[0].diff_parsed
    assert len(parsed_normal_diff['added']) == 2
    assert len(parsed_wo_whitespaces_diff['added']) == 1
    assert len(parsed_normal_diff['deleted']) == 1
    assert len(parsed_wo_whitespaces_diff['deleted']) == 0
def test_ignore_deleted_whitespaces():
    commit = list(Repository('test-repos/whitespace',
                             single="e6e429f6b485e18fb856019d9953370fd5420b20").traverse_commits())[0]
    assert len(commit.modified_files) == 1
    commit = list(Repository('test-repos/whitespace',
                             skip_whitespaces=True,
                             single="e6e429f6b485e18fb856019d9953370fd5420b20").traverse_commits())[0]
    assert len(commit.modified_files) == 0
def test_ignore_add_whitespaces_and_changed_file():
    commit = list(Repository('test-repos/whitespace',
                             single="532068e9d64b8a86e07eea93de3a57bf9e5b4ae0").traverse_commits())[0]
    assert len(commit.modified_files) == 2
    commit = list(Repository('test-repos/whitespace',
                             skip_whitespaces=True,
                             single="532068e9d64b8a86e07eea93de3a57bf9e5b4ae0").traverse_commits())[0]
    assert len(commit.modified_files) == 1
# Cloning a remote repo into a caller-supplied directory.
def test_clone_repo_to(tmp_path):
    dt2 = datetime(2018, 10, 20)
    url = "https://github.com/ishepard/pydriller.git"
    assert len(list(Repository(
        path_to_repo=url,
        to=dt2,
        clone_repo_to=str(tmp_path)).traverse_commits())) == 159
    assert tmp_path.exists() is True
def test_clone_repo_to_not_existing():
    with pytest.raises(Exception):
        list(Repository("https://github.com/ishepard/pydriller",
                        clone_repo_to="NOTEXISTINGDIR").traverse_commits())
# Cloning twice into the same directory must work (reuse or re-clone cleanly).
def test_clone_repo_to_repeated():
    import tempfile
    tmp_path = tempfile.gettempdir()
    dt2 = datetime(2018, 10, 20)
    url = "https://github.com/ishepard/pydriller.git"
    assert len(list(Repository(
        path_to_repo=url,
        to=dt2,
        clone_repo_to=str(tmp_path)).traverse_commits())) == 159
    assert os.path.isdir(os.path.join(tmp_path, "pydriller"))
    assert len(list(Repository(
        path_to_repo=url,
        to=dt2,
        clone_repo_to=str(tmp_path)).traverse_commits())) == 159
    assert os.path.isdir(os.path.join(tmp_path, "pydriller"))
# project_name is derived from the repo path/URL for every listed repo.
def test_projectname_multiple_repos():
    repos = [
        'test-repos/files_in_directories',
        'test-repos/files_in_directories',
        'test-repos/files_in_directories'
    ]
    for commit in Repository(path_to_repo=repos).traverse_commits():
        assert commit.project_name == 'files_in_directories'
def test_projectname_multiple_repos_remote():
    repos = [
        'https://github.com/ishepard/pydriller',
        'test-repos/pydriller'
    ]
    for commit in Repository(path_to_repo=repos).traverse_commits():
        assert commit.project_name == 'pydriller'
def test_get_repo_name_from_url():
    # with .git in the middle of the name
    url_set_a = [
        "https://github.com/academicpages/academicpages.github.io",
        "https://github.com/academicpages/academicpages.github.io.git",
    ]
    url_set_b = [
        "https://github.com/ishepard/pydriller",
        "https://github.com/ishepard/pydriller.git",
    ]
    for url in url_set_a:
        assert Repository._get_repo_name_from_url(url) == "academicpages.github.io"
    for url in url_set_b:
        assert Repository._get_repo_name_from_url(url) == "pydriller"
# Temporary clones of remote repos must be deleted after traversal.
@pytest.mark.skipif(sys.version_info < (3, 8) and sys.platform == "win32", reason="requires Python3.8 or greater on Windows")
def test_deletion_remotes():
    repos = [
        'https://github.com/ishepard/pydriller',
        'https://github.com/ishepard/pydriller'
    ]
    paths = set()
    for commit in Repository(path_to_repo=repos).traverse_commits():
        paths.add(commit.project_path)
    for path in paths:
        assert os.path.exists(path) is False
# include_deleted_files surfaces commits touching a file deleted since.
def test_deleted_files():
    deleted_commits = list(
        Repository('https://github.com/ishepard/pydriller',
                   filepath='.bettercodehub.yml',
                   include_deleted_files=True).traverse_commits()
    )
    assert len(deleted_commits) > 0
|
import System.Drawing as drawing
import random
import util
import Rhino as rc
import geometry as geo
from colorsys import rgb_to_hls, hls_to_rgb
import scriptcontext as sc
import rhinoscriptsyntax as rs
#Random
def GetRandomNamedColor():
    """Randomly selects a windows color from System.Drawing.Color.

    Excludes white and black by re-drawing until another color comes up.
    The original stored a name->hex dict but only ever used the keys, so the
    hex values (dead data) were dropped in favor of a plain name tuple.

    returns:
        color (System.Drawing.Color)
    """
    cnames = (
        'aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige',
        'bisque', 'black', 'blanchedalmond', 'blue', 'blueviolet', 'brown',
        'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral',
        'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue',
        'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkkhaki',
        'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid',
        'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue',
        'darkslategray', 'darkturquoise', 'darkviolet', 'deeppink',
        'deepskyblue', 'dimgray', 'dodgerblue', 'firebrick', 'floralwhite',
        'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', 'gold',
        'goldenrod', 'gray', 'green', 'greenyellow', 'honeydew', 'hotpink',
        'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush',
        'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan',
        'lightgoldenrodyellow', 'lightgreen', 'lightgray', 'lightpink',
        'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray',
        'lightsteelblue', 'lightyellow', 'lime', 'limegreen', 'linen',
        'magenta', 'maroon', 'mediumaquamarine', 'mediumblue',
        'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue',
        'mediumspringgreen', 'mediumturquoise', 'mediumvioletred',
        'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite',
        'navy', 'oldlace', 'olive', 'olivedrab', 'orange', 'orangered',
        'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise',
        'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum',
        'powderblue', 'purple', 'red', 'rosybrown', 'royalblue',
        'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell',
        'sienna', 'silver', 'skyblue', 'slateblue', 'slategray', 'snow',
        'springgreen', 'steelblue', 'tan', 'teal', 'thistle', 'tomato',
        'turquoise', 'violet', 'wheat', 'white', 'whitesmoke', 'yellow',
        'yellowgreen')
    while True:
        color = drawing.Color.FromName(random.choice(cnames))
        # Re-draw until we get something other than pure white/black.
        if color == drawing.Color.White or color == drawing.Color.Black: continue
        return color
#Gradients
def GetGradient(number = -1):
    """Return a preset color gradient (list of hex strings) by index, or
    four distinct random named colors when number is -1.

    0 - Argon
    1 - Instagram
    2 - Fabled Sunset
    3 - Piglet
    4 - Pale RGB
    5 - White hot
    6 - Blue to Yellow
    7 - Makeup
    8 - Pink Sands
    9 - Fade to black
    10 - Zhestkov
    """
    if number == -1:
        # Draw named colors one at a time, rejecting duplicates, until we
        # hold four distinct ones (same sampling process as before).
        chosen = []
        while len(chosen) < 4:
            candidate = GetRandomNamedColor()
            if candidate not in chosen:
                chosen.append(candidate)
        return chosen
    presets = [
        ['#03001e', '#7303c0', '#ec38bc', '#fdeff9'],                # 0 - Argon
        ['#833ab4', '#fd1d1d', '#fcb045'],                           # 1 - Instagram
        ['#231557', '#44107A', '#FF1361', '#FFF800'],                # 2 - Fabled Sunset
        ['#ee9ca7', '#ffdde1'],                                      # 3 - Piglet
        ['#A8E6CE', '#DCEDC2', '#FFD3B5', '#FFAAA6', '#FF8C94'],     # 4 - Pale RGB
        ['#E1F5C4', '#EDE574', '#F9D423', '#FC913A', '#FF4E50'],     # 5 - White hot
        ['#3f51b1', '#5a55ae', '#7b5fac', '#8f6aae', '#a86aa4' , '#cc6b8e' , '#f18271', '#f3a469', '#f7c978'],  # 6 - Blue to Yellow
        ['#F7DFD4', '#EABCAC', '#E2B091', '#874E4C', '#472426'],     # 7 - Makeup
        ['#4bbcf4', '#61c0bf', '#bbded6', '#ffb6b9', '#fae3d9'],     # 8 - Pink Sands
        ['#ffffff', '#1f1f1f'],                                      # 9 - Fade to black
        ['#040005', '#580078', '#e028d4', '#ff8fd4', '#fff5fb'],     # 10 - Zhestkov
    ]
    return presets[number]
def GradientOfColors(colors, t, degree=1):
    """Sample a color along a curve interpolated through *colors*.

    parameters:
        colors [list]: list of colors or hex strings
        t (float): normalized position along the gradient (0 to 1)
        degree (int): degree of the interpolating NURBS curve
    returns:
        color
    """
    # A cubic curve needs at least four control points; degrade gracefully.
    if degree == 3 and len(colors) < 4:
        degree = 2
    # Treat each color as a point in RGB space and thread a curve through them.
    control_points = []
    for entry in colors:
        if IsHex(entry):
            red, green, blue = hex_to_rgb(entry)
        else:
            red, green, blue = entry.R, entry.G, entry.B
        control_points.append(rc.Geometry.Point3d(red, green, blue))
    curve = rc.Geometry.NurbsCurve.Create(False, degree, control_points)
    sample = curve.PointAtNormalizedLength(t)
    # The curve can overshoot the RGB cube; clamp each channel.
    red = util.Constrain(sample.X, 0, 255)
    green = util.Constrain(sample.Y, 0, 255)
    blue = util.Constrain(sample.Z, 0, 255)
    return drawing.Color.FromArgb(255, red, green, blue)
#Interpolate Colors
def ColorBetweenColors(col1, col2, t = .5):
    """Linearly interpolate between two colors.

    input:
        col1 = start color
        col2 = end color
        t = normalized position between col1 and col2
    return:
        new interpolated color
    """
    red, green, blue = (util.Remap(t, 0, 1, a, b)
                        for a, b in ((col1.R, col2.R),
                                     (col1.G, col2.G),
                                     (col1.B, col2.B)))
    return drawing.Color.FromArgb(255, red, green, blue)
#Modify Colors
def ChangeBrightness(col, adjustment = 10):
    """Shift every RGB channel of *col* by *adjustment*, clamped to 0-255.

    parameters:
        col (color)
        adjustment (number): + to brighten, - to darken
    return:
        (color)
    """
    shifted = [util.Constrain(channel + adjustment, 0, 255)
               for channel in (col.R, col.G, col.B)]
    return drawing.Color.FromArgb(255, shifted[0], shifted[1], shifted[2])
def ReddenColor(color, correctionFactor):
    """Shift *color* toward red: red is forced to 255 while green and
    blue get correctionFactor added.

    Returns None when any shifted channel (including the shifted red,
    which is otherwise discarded) falls outside 0-255.
    NOTE(review): red is hard-coded to 255 in the result while its
    shifted value is only used for the range check — looks intentional
    given the name, but confirm.
    """
    shifted = [channel + correctionFactor
               for channel in (color.R, color.G, color.B)]
    for value in shifted:
        if value < 0 or value > 255:
            return None
    return drawing.Color.FromArgb(color.A, 255, shifted[1], shifted[2])
def WalkColor(color, amount):
    """Randomly perturb each RGB channel by at most *amount*.

    parameters:
        color (color)
        amount (int or float): maximum random deviation per channel
    returns:
        color
    TODO: amount should become a random vector of fixed length so every
    step covers a guaranteed distance.
    """
    # Each channel walks independently and stays inside the 100-220 band.
    walked = [util.Constrain(channel + random.uniform(-amount, amount), 100, 220)
              for channel in (color.R, color.G, color.B)]
    return drawing.Color.FromArgb(color.A, walked[0], walked[1], walked[2])
def ChangeColorBrightness(col, factor):
    """Scale the HLS lightness of *col* by *factor*.

    parameters:
        col (color): color to adjust
        factor (float): lightness multiplier (.9 darkens, 1.1 brightens)
    returns:
        (color): new Color object
    """
    # Work in HLS so only perceived lightness changes, not the hue.
    hue, lightness, saturation = rgb_to_hls(col.R / 255.0, col.G / 255.0, col.B / 255.0)
    lightness = max(min(lightness * factor, 1.0), 0.0)
    red, green, blue = hls_to_rgb(hue, lightness, saturation)
    return drawing.Color.FromArgb(255, int(red * 255), int(green * 255), int(blue * 255))
#Utility
def hex_to_rgb(value):
    """Convert a hex color string to an RGB tuple.

    parameters:
        value (str): eg. '#3d72b4'
    returns:
        (tuple): r, g, b
    """
    digits = value.lstrip('#')
    step = len(digits) // 3
    return tuple(int(digits[start:start + step], 16)
                 for start in range(0, len(digits), step))
def IsHex(value):
    """Return True when str(value) starts with '#'.

    parameters:
        value: any object; converted with str() before the check
    returns:
        (bool)
    """
    try:
        # startswith handles the empty string, so no IndexError path is
        # needed; the original bare `except:` is narrowed to Exception.
        return str(value).startswith('#')
    except Exception:
        # str() failed on an exotic object — not a hex string then.
        return False
if __name__ == "__main__":
    # Demo: tint three Rhino document objects with a random named color,
    # then repeatedly brighten one and darken another while redrawing.
    attr0 = rc.DocObjects.ObjectAttributes()
    attr0.ColorSource = rc.DocObjects.ObjectColorSource.ColorFromObject
    attr0.ObjectColor = drawing.Color.White
    # NOTE(review): hard-coded GUIDs — these exist only in one specific
    # Rhino document; confirm before reuse.
    id0 = rs.coerceguid("fc9f0f50-68f7-483b-88ef-55c7e38c3bab")
    id1 = rs.coerceguid("e4fb7017-3d8b-4ae0-8a50-8be50ad38476")
    id2 = rs.coerceguid("dae1b0b6-0c67-4659-956d-196e38c93898")
    attr0.ObjectColor = GetRandomNamedColor()
    attr1 = attr0.Duplicate()
    attr2 = attr0.Duplicate()
    for i in range(100):
        # attr1 drifts 10% brighter, attr2 10% darker per iteration.
        attr1.ObjectColor = ChangeColorBrightness(attr1.ObjectColor, 1.1)
        attr2.ObjectColor = ChangeColorBrightness(attr2.ObjectColor, .9)
        sc.doc.Objects.ModifyAttributes(id0, attr0, True)
        sc.doc.Objects.ModifyAttributes(id1, attr1, True)
        sc.doc.Objects.ModifyAttributes(id2, attr2, True)
        sc.doc.Views.Redraw()
        rs.Sleep(10)
|
import requests
import json
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from jupextdemo.const import (CHECKIN_MESSAGE_AKS, APP_NAME_DEFAULT, APP_NAME_PLACEHOLDER,
ACR_PLACEHOLDER, RG_PLACEHOLDER, PORT_NUMBER_DEFAULT,
CLUSTER_PLACEHOLDER, RELEASE_PLACEHOLDER, RELEASE_NAME)
_git_remotes = {}
_GIT_EXE = 'git'
_HTTP_NOT_FOUND_STATUS = 404
_HTTP_SUCCESS_STATUS = 200
_HTTP_CREATED_STATUS = 201
class Files:  # pylint: disable=too-few-public-methods
    """Value object pairing a repository file path with its content."""

    def __init__(self, path, content):
        self.path = path
        self.content = content
def getLocalRepoUrl():
    """Return the origin fetch URL of the local git repo, or None.

    None is returned when there is no repository, no 'origin' remote,
    or the origin is not hosted on github.com.
    """
    local_urls = get_git_remotes()
    origin_url = None
    if local_urls is not None:
        print(local_urls)
        # .get avoids a KeyError when the repo has no 'origin' remote.
        origin_url = local_urls.get("origin(fetch)")
    return origin_url if is_github_url_candidate(origin_url) else None
def compareUrls(uri1, uri2):
    """Return True when both URLs point at the same github.com path.

    Comparison is case-insensitive; any None argument yields False
    (including both None, matching the original behavior).
    """
    if uri1 is None or uri2 is None:
        return False
    components1 = urlparse(uri1.lower())
    components2 = urlparse(uri2.lower())
    # Both must live on github.com and share the identical repo path.
    return (components1.netloc == "github.com"
            and components1.netloc == components2.netloc
            and components1.path == components2.path)
def uri_parse(url):
    """Parse *url* with urlparse (py2/py3 import resolved at module top)."""
    return urlparse(url)
def is_github_url_candidate(url):
    """Return True when *url* is non-None and hosted on github.com."""
    if url is None:
        return False
    return uri_parse(url.lower()).netloc == 'github.com'
def get_git_remotes():
    """Return {'<name>(<direction>)': url} for every git remote.

    Runs ``git remote -v`` once and memoizes the result in the
    module-level _git_remotes cache. Returns None when git is missing
    or the current directory is not a repository.
    """
    import subprocess
    import sys
    if _git_remotes:
        # Cache already populated by a previous call.
        return _git_remotes
    try:
        # Example `git remote -v` output (name, url, direction):
        # origin https://github.com/org/repo (fetch)
        # origin https://github.com/org/repo (push)
        output = subprocess.check_output(
            [_GIT_EXE, 'remote', '-v'], stderr=subprocess.STDOUT)
    except (OSError, subprocess.CalledProcessError):
        # Narrowed from the original bare BaseException catch: OSError
        # covers a missing git binary, CalledProcessError a non-repo dir.
        print("Not a repo")
        return None
    # Decode with the console encoding when one is known.
    encoding = sys.stdout.encoding
    lines = (output.decode(encoding) if encoding else output.decode()).split('\n')
    for line in lines:
        components = line.strip().split()
        if len(components) == 3:
            # components = [name, url, (fetch)|(push)]
            _git_remotes[components[0] + components[2]] = components[1]
    return _git_remotes
def get_application_json_header():
    """HTTP headers for JSON request/response bodies."""
    # Single literal instead of the original constant-folding concat.
    return {'Content-Type': 'application/json; charset=utf-8',
            'Accept': 'application/json'}
def get_application_json_header_for_preview():
    """Accept header for the GitHub Checks API preview media type."""
    headers = {'Accept': 'application/vnd.github.antiope-preview+json'}
    return headers
def get_check_runs_for_commit(repo_name, repo_owner, commmit_sha, token):
    """List the check runs for a commit, or None when the lookup fails.

    API Documentation - https://developer.github.com/v3/checks/runs/#list-check-runs-for-a-specific-ref
    """
    headers = get_application_json_header_for_preview()
    get_check_runs_url = 'https://api.github.com/repos/{owner}/{repo_id}/commits/{ref}/check-runs'.format(
        owner=repo_owner, repo_id=repo_name, ref=commmit_sha)
    print(get_check_runs_url)
    get_response = requests.get(
        url=get_check_runs_url, auth=('', token), headers=headers)
    # Use the module constant instead of a magic 200 literal.
    if get_response.status_code != _HTTP_SUCCESS_STATUS:
        print(" could not find the valid url")
        print(get_response)
        return None
    # json is imported at module level; the local re-import was dropped.
    return json.loads(get_response.text)
def get_work_flow_check_runID(repo_name, repo_owner, commmit_sha, token):
    """Return the check-run id of the GitHub Actions run for a commit.

    Polls up to 3 times, 5 seconds apart, waiting for the check run to
    be registered. Returns None when none appears in time.
    """
    check_run_found = False
    count = 0
    # BUG FIX: the original `while(not check_run_found or count > 3)`
    # never terminated when the run was missing; bound the retries.
    while not check_run_found and count < 3:
        check_runs_list_response = get_check_runs_for_commit(
            repo_name, repo_owner, commmit_sha, token)
        if check_runs_list_response and check_runs_list_response['total_count'] > 0:
            # fetch the GitHub Actions check run and its check run ID
            check_runs_list = check_runs_list_response['check_runs']
            for check_run in check_runs_list:
                if check_run['app']['slug'] == 'github-actions':
                    check_run_found = True
                    return check_run['id']
        time.sleep(5)
        count = count + 1
    return None
def get_check_run_status_and_conclusion(repo_name, repo_owner, check_run_id, token):
    """Return (status, conclusion) of a check run, or (None, None).

    API Documentation - https://developer.github.com/v3/checks/runs/#get-a-single-check-run
    """
    headers = get_application_json_header_for_preview()
    get_check_run_url = 'https://api.github.com/repos/{owner}/{repo_id}/check-runs/{checkID}'.format(
        owner=repo_owner, repo_id=repo_name, checkID=check_run_id)
    get_response = requests.get(
        url=get_check_run_url, auth=('', token), headers=headers)
    if get_response.status_code != _HTTP_SUCCESS_STATUS:
        print(" no valid status code")
        # BUG FIX: return a pair so callers' tuple unpacking cannot crash
        # (the original bare `return` yielded a single None).
        return None, None
    # Parse the response body once instead of twice.
    body = json.loads(get_response.text)
    return body['status'], body['conclusion']
def poll_workflow_status(repo_name, repo_owner, check_run_id, token):
    """Block until the check run completes; return (status, conclusion).

    Polls every 0.5s while the run reports 'queued' or 'in_progress'
    (the two states originally had byte-identical loops, now merged).
    """
    check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(
        repo_name, repo_owner, check_run_id, token)
    if check_run_status == 'completed':
        print("already completed")
    elif check_run_status in ('queued', 'in_progress'):
        # Once polling starts it only stops on 'completed', matching the
        # original loops.
        while check_run_status != 'completed':
            time.sleep(0.5)
            check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(
                repo_name, repo_owner, check_run_id, token)
            print(".")
    print('GitHub workflow completed.')
    return (check_run_status, check_run_conclusion)
def encrypt_secret(public_key, secret_value):
    """Encrypt a Unicode string using the public key.

    Seals *secret_value* with libsodium's SealedBox (the scheme GitHub
    requires for repository secrets) and base64-encodes the result.
    """
    from base64 import encodebytes
    from nacl import encoding, public
    decoded_key = public.PublicKey(
        public_key.encode("utf-8"), encoding.Base64Encoder())
    box = public.SealedBox(decoded_key)
    ciphertext = box.encrypt(secret_value.encode("utf-8"))
    return encodebytes(ciphertext).decode("utf-8")
|
from LayerProvider import *
from NeuralNet import *
import numpy as np
import matplotlib.image as mpimg
import scipy.misc
from Utils import LoadList
from Trainer import Trainer
from MainLoop import *
import glob
import scipy
from coco_utils import *
from PrepareCOCOData import VGG_preprocess
import pdb
def _load_conv_relu(net, data_dict, name, prev_key, n_filters):
    """Attach pretrained conv layer *name* plus its ReLU to *net*.

    Weights in data_dict[name] are stored (h, w, in, out) and are
    transposed to the (out, in, h, w) layout the ConvLayer expects.
    Returns the key of the new ReLU layer so the caller can chain on.
    """
    W = np.transpose(data_dict[name][0], (3, 2, 0, 1))
    b = data_dict[name][1].reshape(1, n_filters, 1, 1)
    net.layer_opts['filter_shape'] = W.shape
    net.content[name] = ConvLayer(net, net.content[prev_key])
    net.content[name].W.set_value(W)
    net.content[name].b.set_value(b)
    relu_key = 'relu' + name[len('conv'):]
    net.content[relu_key] = ReLULayer(net, net.content[name])
    return relu_key

def LoadVGG():
    """Build a VGG-16 network initialized from pretrained vgg16.npy.

    Wires the 13 conv/ReLU blocks with interleaved max-pools, then
    fc6-fc8, into a ShowTellNet. The 13 originally copy-pasted conv
    blocks are driven by one architecture table. Returns the network.
    """
    data_path = '../../data/pretrained/vgg16.npy'
    #data_path = '/home/kien/PycharmProjects/data/vgg16.npy'
    data_dict = np.load(data_path).item()
    net = ShowTellNet()
    net.net_opts['rng_seed'] = 123
    net.net_opts['rng'] = np.random.RandomState(net.net_opts['rng_seed'])
    net.layer_opts['updatable'] = False
    net.layer_opts['border_mode'] = 1
    # Hoisted from just before pool1; presumably only Pool2DLayer reads
    # pool_mode — TODO confirm ConvLayer ignores it.
    net.layer_opts['pool_mode'] = 'max'
    # (conv name, #filters) entries; a bare string marks a pooling layer.
    arch = [
        ('conv1_1', 64), ('conv1_2', 64), 'pool1',
        ('conv2_1', 128), ('conv2_2', 128), 'pool2',
        ('conv3_1', 256), ('conv3_2', 256), ('conv3_3', 256), 'pool3',
        ('conv4_1', 512), ('conv4_2', 512), ('conv4_3', 512), 'pool4',
        ('conv5_1', 512), ('conv5_2', 512), ('conv5_3', 512), 'pool5',
    ]
    prev_key = 'input_img'
    for item in arch:
        if isinstance(item, str):
            net.content[item] = Pool2DLayer(net, net.content[prev_key])
            prev_key = item
        else:
            name, n_filters = item
            prev_key = _load_conv_relu(net, data_dict, name, prev_key, n_filters)
    net.layer_opts['num_fc_node'] = 4096
    net.content['fc6'] = FCLayer(net, net.content['pool5'], (1, 512, 7, 7))
    # Reorder fc6 weights from (7, 7, 512, 4096) so the rows match the
    # channel-major flattening of pool5's output.
    W = data_dict['fc6'][0]
    W = np.reshape(W, (7, 7, 512, 4096))
    W = np.transpose(W, (2, 0, 1, 3))
    W = np.reshape(W, (7 * 7 * 512, 4096))
    net.content['fc6'].W.set_value(W)
    net.content['fc6'].b.set_value(data_dict['fc6'][1].reshape(1, 4096))
    # fc7 keeps num_fc_node = 4096 from above.
    net.content['fc7'] = FCLayer(net, net.content['fc6'], (1, 4096, 1, 1))
    net.content['fc7'].W.set_value(data_dict['fc7'][0])
    net.content['fc7'].b.set_value(data_dict['fc7'][1].reshape(1, 4096))
    net.layer_opts['num_fc_node'] = 1000
    net.content['fc8'] = FCLayer(net, net.content['fc7'], (1, 4096, 1, 1))
    net.content['fc8'].W.set_value(data_dict['fc8'][0])
    net.content['fc8'].b.set_value(data_dict['fc8'][1].reshape(1, 1000))
    return net
def npsigmoid(X):
    """Elementwise logistic sigmoid of X (scalar or ndarray)."""
    exp_neg = np.exp(-X)
    return 1.0 / (1.0 + exp_neg)
def TrimCaptions(captions, img_idx):
    """Keep a single caption per image as the training label.

    There are multiple captions for one image; the last caption seen
    for an image wins because later rows overwrite earlier ones.

    :param captions: the list of captions
    :param img_idx: corresponding image of each caption,
        e.g. captions[55] belongs to image img_idx[55]
    :return: trimmed captions, one row per image
    """
    num_samples = np.max(img_idx) + 1
    max_len = captions.shape[1]
    new_captions = np.zeros((num_samples, max_len), dtype=theano.config.floatX)
    for caption_pos, image_id in enumerate(img_idx):
        new_captions[image_id] = captions[caption_pos]
    return new_captions
def CreateData(n_word=None, null_word=0):
    """Load MSCOCO captioning data and shape it for the LSTM trainer.

    :param n_word: vocabulary size; defaults to len(vocab) when None
    :param null_word: token index treated as padding; its positions get
        a zero loss weight so padding does not contribute to the cost
    :return: (train_X, train_Y, train_weight, train_urls, num_sample,
              val_X, val_Y, val_weight, val_urls, num_val_sample,
              vocab, n_word, max_len, num_cnn_features)
    """
    # Get data from h5 file
    data_path = '../../data/MSCOCO/coco_captioning/'
    data = load_co_co(data_path)
    train_X = data['train_features']
    val_X = data['val_features']
    train_Y = data['train_captions']
    val_Y = data['val_captions']
    vocab = data['idx_to_word']
    train_idx = data['train_image_idxs']
    val_idx = data['val_image_idxs']
    train_urls = data['train_urls']
    val_urls = data['val_urls']
    max_len = train_Y.shape[1]
    if (n_word==None):
        n_word = len(vocab)
    num_sample = train_X.shape[0]
    num_val_sample = val_X.shape[0]
    num_cnn_features = train_X.shape[1]
    # Shape the data into 4D tensor
    train_X = np.reshape(train_X, (num_sample, num_cnn_features, 1, 1))
    val_X = np.reshape(val_X, (num_val_sample, num_cnn_features, 1, 1))
    # Covert captions to one-of-k vectors (row i of I is the one-hot of i).
    I = np.eye(n_word)
    I = np.asarray(I, dtype=theano.config.floatX)
    # Keep a single caption per image before the one-hot expansion.
    train_Y = TrimCaptions(train_Y, train_idx)
    val_Y = TrimCaptions(val_Y, val_idx)
    train_Y = I[np.asarray(train_Y, dtype=np.uint32)]
    val_Y = I[np.asarray(val_Y, dtype=np.uint32)]
    train_Y = np.reshape(train_Y, (num_sample, max_len, n_word, 1))
    val_Y = np.reshape(val_Y, (num_val_sample, max_len, n_word, 1))
    # Zero the loss weight wherever the shifted target is the padding
    # token, then broadcast the weights across the vocabulary axis.
    words = np.argmax(train_Y[:, 1:, :, :], axis=2)
    remove_ind = words == null_word
    train_weight = np.ones_like(words, dtype=theano.config.floatX)
    train_weight[remove_ind] = 0
    train_weight = train_weight.reshape((num_sample, max_len-1, 1, 1))
    train_weight = np.repeat(train_weight, n_word, 2)
    # Same masking for the validation targets.
    words = np.argmax(val_Y[:, 1:, :, :], axis=2)
    remove_ind = words == null_word
    val_weight = np.ones_like(words, dtype=theano.config.floatX)
    val_weight[remove_ind] = 0
    val_weight = val_weight.reshape((num_val_sample, max_len-1, 1, 1))
    val_weight = np.repeat(val_weight, n_word, 2)
    return (train_X, train_Y, train_weight, train_urls, num_sample,
            val_X, val_Y, val_weight, val_urls, num_val_sample,
            vocab, n_word, max_len, num_cnn_features)
def train():
    """Train the show-and-tell captioning LSTM on MSCOCO features.

    Resumes from the newest '<name>*.dat' snapshot when one exists,
    otherwise builds the network from scratch. Data are streamed to the
    GPU in "big batches" of 10000 because the full set does not fit.
    """
    trained_path = '../../data/trained_model/'
    # LSTM params
    n_word = 1004
    max_len = 40
    big_batch_size = np.asarray([10000], dtype=theano.config.floatX)
    # Create net
    net = ShowTellNet()
    net.name = 'ShowTellCOCO6'
    memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
    print('Memory: %.2f avail before loading numpy data' % (memory[0]/1024./1024/1024))
    train_X, train_Y, train_weight, train_urls, num_sample, \
        val_X, val_Y, val_weight, val_urls, num_val_sample, \
        vocab, n_word, max_len, num_cnn_features = CreateData(n_word)
    # BUG FIX: np.ceil returns a float and range() needs an int; the
    # original passed the float straight to range() (TypeError).
    num_big_batch = int(np.ceil(np.asarray(num_sample, dtype=theano.config.floatX)/big_batch_size))
    num_big_val_batch = int(np.ceil(np.asarray(num_val_sample, dtype=theano.config.floatX)/big_batch_size))
    num_big_epoch = 50
    snapshot_list = glob.glob(trained_path + net.name + '*.dat')
    if (len(snapshot_list) == 0):
        # Fresh run: configure the trainer and build the whole graph.
        trainer = Trainer()
        trainer.opts['batch_size'] = 400
        trainer.opts['save'] = True
        trainer.opts['save_freq'] = 2
        trainer.opts['num_sample'] = num_sample
        trainer.opts['num_val_sample'] = num_val_sample
        trainer.opts['validation'] = False
        trainer.opts['num_epoch'] = 1
        memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
        print('Memory: %.2f avail before creating network' % (memory[0]/1024./1024/1024))
        # Setting params
        net.layer_opts['updatable'] = True
        net.net_opts['l1_learning_rate'] = np.asarray(0.005, theano.config.floatX)
        net.reset_opts['min_lr'] = np.asarray(0.005, dtype=theano.config.floatX)
        net.reset_opts['max_lr'] = net.net_opts['l1_learning_rate']
        # Construct the network: the embedded image is concatenated in
        # front of the embedded caption along the time axis.
        net.layer_opts['num_emb'] = 512
        net.content['dim_swap'] = SwapDim(net, net.content['input_img'], 1, 2)
        net.content['iwe'] = WordEmbLayer(net, net.content['dim_swap'],
                                          (trainer.opts['batch_size'], 1, num_cnn_features, 1))
        net.content['we'] = WordEmbLayer(net, net.content['input_sen'],
                                         (trainer.opts['batch_size'], max_len - 1, n_word, 1))
        net.content['cat'] = Concat(net, net.content['iwe'], net.content['we'], 1)
        net.layer_opts['num_lstm_node'] = 1024
        net.content['lstm'] = LSTM(net, net.content['cat'],
                                   (trainer.opts['batch_size'], max_len - 1, net.layer_opts['num_emb'], 1))
        net.layer_opts['num_affine_node'] = n_word
        net.content['affine'] = AffineLayer(net, net.content['lstm'],
                                            (trainer.opts['batch_size'],
                                             max_len - 1,
                                             net.layer_opts['num_lstm_node'],
                                             1))
        # Drop the image time-step from the sequence before the loss.
        net.content['lstm_r'] = LSTMRemove(net, net.content['affine'], 1)
        net.layer_opts['softmax_norm_dim'] = 2
        net.layer_opts['l2_term'] = 0.0125
        net.content['cost'] = SoftmaxLogLoss(net, net.content['lstm_r'])
        memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
        print('Memory: %.2f avail after creating network' % (memory[0]/1024./1024/1024))
        net.InitLR(0.2)
        trainer.InitParams(net)
        train_update_rule = trainer.InitUpdateRule(net)
        additional_output = ['input_sen', 'lstm_r']
        e = 0
        last_big_e = 0
    else:
        snapshot_list = sorted(snapshot_list)
        print('Loading latest snapshot at %s' % snapshot_list[-1])
        net, trainer, last_big_e = LoadList(snapshot_list[-1])
        trainer.opts['save_freq'] = 3
        trainer.opts['validation'] = True
        print('Finished loading snapshot')
        # Resume with a lower learning rate than the fresh run.
        net.net_opts['l1_learning_rate'] = np.asarray(0.0008, theano.config.floatX)
        net.reset_opts['min_lr'] = np.asarray(0.0008, dtype=theano.config.floatX)
        net.reset_opts['max_lr'] = net.net_opts['l1_learning_rate']
        net.InitLR(0.2)
        trainer.InitParams(net)
        train_update_rule = trainer.InitUpdateRule(net)
        additional_output = ['input_sen', 'lstm_r']
    for big_e in range(last_big_e, num_big_epoch):
        for j in range(0, num_big_batch):
            big_batch_range = np.arange(j*big_batch_size, (j+1)*big_batch_size)
            if ((j+1)*big_batch_size > num_sample):
                # Last big batch is smaller than big_batch_size.
                big_batch_range = np.arange(j * big_batch_size, num_sample)
            trainer.opts['num_sample'] = big_batch_range.shape[0]
            big_batch_range = np.asarray(big_batch_range, dtype=np.uint32)
            memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
            print('Memory: %.2f avail before putting train data to shared' % (memory[0]/1024./1024/1024))
            train_Xj = theano.shared(train_X[big_batch_range, :, :, :])
            train_Yj = theano.shared(train_Y[big_batch_range, :, :, :])
            # Calculate hash value to avoid reading dupplicated data
            hash_weight = np.asarray([1.3**t for t in range(max_len)])
            hash_value = np.sum(np.argmax(train_Yj[0,:,:,0].eval(), axis=1)*hash_weight)
            print("Hash value: %f" % hash_value)
            train_weightj = theano.shared(train_weight[big_batch_range, :, :, :])
            memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
            print('Memory: %.2f avail after' % (memory[0]/1024./1024/1024))
            # Inputs: image features + caption shifted right by one step;
            # targets: the caption from step 1 onwards.
            net.InitTrainFunction(train_update_rule, [train_Xj, train_Yj[:, :-1, :, :]], train_Yj[:, 1:, :, :],
                                  additional_output, train_weightj)
            trainer.opts['validation'] = False
            trainer.opts['train'] = True
            main_loop = SGDRMainLoop(net, trained_path)
            main_loop.run(net, trainer, 0)
            # Release the GPU copies before loading the next big batch.
            train_Xj = None
            train_Yj = None
            train_weightj = None
            net.train_function = None
            print('Finished iteration %d of big epoch %d' % (j, big_e))
        # Validation is forced on every epoch (original condition
        # `((big_e+1)%10 == 0)` was already commented out).
        trainer.opts['validation'] = True
        if (trainer.opts['validation']):
            print('Starting validation...')
            for j in range(0, num_big_val_batch):
                big_batch_range = np.arange(j * big_batch_size, (j + 1) * big_batch_size)
                if ((j + 1) * big_batch_size > num_val_sample):
                    big_batch_range = np.arange(j * big_batch_size, num_val_sample)
                big_batch_range = np.asarray(big_batch_range, dtype=np.uint32)
                trainer.opts['num_val_sample'] = big_batch_range.shape[0]
                val_Xj = theano.shared(val_X[big_batch_range, :, :, :])
                val_Yj = theano.shared(val_Y[big_batch_range, :, :, :])
                val_weightj = theano.shared(val_weight[big_batch_range, :, :, :])
                net.InitValFunction([val_Xj, val_Yj[:, :-1, :, :]], val_Yj[:, 1:, :, :],
                                    additional_output, val_weightj)
                trainer.opts['validation'] = True
                trainer.opts['train'] = False
                main_loop = SGDRMainLoop(net, trained_path)
                main_loop.run(net, trainer, 0)
                val_Xj = None
                val_Yj = None
                val_weightj = None
                net.val_function = None
                # BUG FIX: iteration/epoch were swapped in the %-args.
                print("Finished validation iteration %d of big epoch %d" % (j, big_e))
        # Epoch is done
        main_loop.LRRestart(net)
        if (big_e%trainer.opts['save_freq'] == 0):
            # Snapshot a copy of the net together with the trainer state.
            net1 = net.NNCopy()
            SaveList([net1, trainer, big_e], '../../data/trained_model/%s_e-%05d.dat' % (net.name, big_e))
def InferCOCO(idx, beam_size, train=True):
    """Infer caption from given COCO data
    :type idx: int
    :param idx: index of COCO image in either train or val data
    :type beam_size: int
    :param beam_size: beam width used during decoding
    :type train: bool
    :param train: indicate whether the sample will be taken from train or validation data
    """
    net = ShowTellNet()
    net.name = 'ShowTellCOCO6'
    # Load train and test data
    train_X, train_Y, train_weight, train_urls, num_sample, \
        val_X, val_Y, val_weight, val_urls, num_val_sample, \
        vocab, n_word, max_len, num_cnn_features = CreateData()
    vocab = np.asarray(vocab)
    # Take a sample out of the data for inference
    if (train):
        X = train_X[idx,:,:,:]
        Y = train_Y[idx, :, :, :]
        url = train_urls[idx]
    else:
        X = val_X[idx,:,:,:]
        Y = val_Y[idx, :, :, :]
        url = val_urls[idx]
    # Re-add the batch dimension that single-index selection removed.
    X = np.reshape(X, (1, X.shape[0], X.shape[1], X.shape[2]))
    Y = np.reshape(Y, (1, Y.shape[0], Y.shape[1], Y.shape[2]))
    # get some memory back
    del train_X
    del train_Y
    del train_weight
    del val_weight
    del val_X
    # NOTE(review): presumably 1 = START and 2 = END in the vocabulary
    # produced by CreateData — confirm against idx_to_word.
    start_token = 1
    stop_token = 2
    trained_path = '../../data/trained_model/ShowTellCOCO/model_weight/'
    snapshot_list = glob.glob(trained_path + net.name + '*.dat')
    assert len(snapshot_list) != 0, ('Can\'t find net data at %s' % trained_path)
    snapshot_list = sorted(snapshot_list)
    print('Loading latest snapshot at %s' % snapshot_list[-1])
    net, trainer, e = LoadList(snapshot_list[-1])
    # Embed the image features once, then beam-search a caption.
    iwe_out = net.content['iwe'].output.eval({net.input[0]: X})
    beam_search = BeamSearch(net, iwe_out, net.content['lstm'], net.content['we'], net.content['affine'], max_len, beam_size, start_token, stop_token)
    print('Numerical output:\n %s' % beam_search.output)
    print('Caption output:\n %s' % " ".join(list(vocab[np.asarray(beam_search.output, dtype=np.uint16)])))
    print('Image URL (load for visualization):\n %s' % url)
def InferImage(img_path):
    """Caption one image or a list of images with the trained model.

    :param img_path: a path string or a list of path strings
    """
    net = ShowTellNet()
    net.name = 'ShowTellCOCO6'
    # Load train and test data (only vocab/max_len are kept; the large
    # arrays are deleted immediately to reclaim memory).
    train_X, train_Y, train_weight, train_urls, num_sample, \
        val_X, val_Y, val_weight, val_urls, num_val_sample, \
        vocab, n_word, max_len, num_cnn_features = CreateData()
    vocab = np.asarray(vocab)
    # get some memory back
    del train_X
    del train_Y
    del train_weight
    del val_weight
    del val_X
    start_token = 1
    stop_token = 2
    trained_path = '../../data/trained_model/ShowTellCOCO/model_weight/'
    snapshot_list = glob.glob(trained_path + net.name + '*.dat')
    assert len(snapshot_list) != 0, ('Can\'t find net data at %s' % trained_path)
    snapshot_list = sorted(snapshot_list)
    print('Loading latest snapshot at %s' % snapshot_list[-1])
    net, trainer, e = LoadList(snapshot_list[-1])
    VGGNet = LoadVGG()
    # Normalize the single-path case so both cases share one loop
    # (the original duplicated the read-and-infer code in each branch).
    paths = img_path if isinstance(img_path, list) else [img_path]
    for path in paths:
        X = mpimg.imread(path)
        Infer(X, VGGNet, net, vocab, beam_size=20, max_len=max_len,
              start_token=start_token, stop_token=stop_token)
def Infer(X, VGGNet, net, vocab, **kwargs):
    """Run VGG feature extraction + beam search on one raw image array
    and print the decoded caption.

    :param X: raw RGB image array as loaded by mpimg.imread
    :param VGGNet: pretrained VGG network (see LoadVGG)
    :param net: trained show-and-tell network
    :param vocab: array mapping word index -> word string
    :keyword beam_size: beam width (default 20)
    :keyword max_len: maximum caption length (default 40)
    :keyword start_token: START token index (default 1)
    :keyword stop_token: END token index (default 2)
    """
    beam_size = kwargs.pop('beam_size', 20)
    max_len = kwargs.pop('max_len', 40)
    start_token = kwargs.pop('start_token', 1)
    stop_token = kwargs.pop('stop_token', 2)
    # Resize to the 224x224 input VGG was built for.
    X = scipy.misc.imresize(X, [224, 224], 'bicubic')
    X = np.reshape(X, (1,224,224,3))
    # Change RGB to BGR
    X = X[:,:,:,[2,1,0]]
    # NOTE(review): (0, 3, 2, 1) moves channels first but also swaps the
    # two spatial axes; harmless for square 224x224 input, but confirm it
    # matches how the training features were produced.
    X = np.transpose(X, (0, 3,2,1))
    X = VGG_preprocess(X)
    X = X.astype(theano.config.floatX)
    # fc7 activations — presumably the same CNN features CreateData
    # loads for training; TODO confirm.
    X = VGGNet.content['fc7'].output.eval({VGGNet.input[0]: X})
    iwe_out = net.content['iwe'].output.eval({net.input[0]: X})
    beam_search = BeamSearch(net, iwe_out, net.content['lstm'], net.content['we'], net.content['affine'], max_len, beam_size, start_token, stop_token)
    print('Numerical output:\n %s' % beam_search.output)
    print('Caption output:\n %s' % " ".join(list(vocab[np.asarray(beam_search.output, dtype=np.uint16)])))
if __name__ == '__main__':
    # Entry point: caption a few local test images with the trained model.
    # The commented lines below are kept as a log of past experiments
    # (train() runs training; InferCOCO(idx, beam_size, train) captions a
    # COCO sample by dataset index).
    #train()
    beam_size = 20
    # InferImage('../../data/random_test_data/dog.jpg')
    # InferImage('../../data/random_test_data/IMG_1313.JPG')
    # InferImage('../../data/random_test_data/IMG_3073.JPG')
    # InferImage('../../data/random_test_data/IMG_8719.JPG')
    list_image_path = ['../../data/random_test_data/Family_2.jpg',
                       '../../data/random_test_data/Family_3.jpg',
                       '../../data/random_test_data/Family_4.jpg',
                       '../../data/random_test_data/Korea.jpg']
    InferImage(list_image_path)
    #InferImage('../../data/random_test_data/grad1.jpg')
    #InferImage('../../data/random_test_data/IMG_3151.JPG')
    #InferCOCO(5, beam_size, False)
    #InferCOCO(6, beam_size, False)
    # InferCOCO(50, beam_size, False)
    #InferCOCO(60, beam_size, False)
    # InferCOCO(987, beam_size, False) #987: a big church with a clock in front of the church
    # InferCOCO(2511, beam_size, False) #2511: an elephant is walking behind a wall at the zoo
    #InferCOCO(777, beam_size, False) #164: a large group of people in a desert on horses #777: a pepperoni pizza with UNK and it END
    #InferCOCO(300, beam_size, False) #6384: two white sheep in a grassy field #300: a train is parked on a train track
    #InferCOCO(6384, beam_size, False)
    # for i in np.random.randint(2000, 6000, size = 10):
    # InferCOCO(i, beam_size, False)
    #bp = 1
|
#RBF
import math
import random
import vectorEntrenamiento as vE
import numpy as np
MAX_INT = 100000  # sentinel "infinity" used when searching for the nearest cluster
class Cluster(object):
    """One RBF unit/cluster: a centre in `dimensiones`-dimensional space plus
    the set of training vectors currently assigned to it.

    Improvements over the previous version: removed large blocks of dead
    commented-out code, replaced index loops with idiomatic iteration, and
    replaced the `if(len(...)) == 0: pass` construct with an early return.
    Behavior is unchanged.
    """

    def __init__(self, dimensiones, coordenadas):
        super(Cluster, self).__init__()
        self.dimensiones = dimensiones  # dimensionality of the input space
        self.centro = coordenadas       # current centre coordinates
        self.radio = -1                 # radius; -1 until setRadio() runs
        self.setCluster = []            # training vectors assigned to this cluster
        self.sigma = 0
        self.beta = 0

    def getCentro(self):
        """Return the current centre coordinates."""
        return self.centro

    def calcularDistancia(self, entrada):
        """Euclidean distance from this cluster's centre to point `entrada`."""
        return math.sqrt(sum((c - e) ** 2 for c, e in zip(self.centro, entrada)))

    def reset(self):
        """Forget all vectors assigned to this cluster."""
        self.setCluster = []

    def getPromedio(self):
        """Component-wise mean of the assigned vectors.

        Returns a zero vector when no vectors are assigned (matching the
        original behavior of leaving the result at its [0]*dim initial value).
        """
        resultado = [0] * self.dimensiones
        if not self.setCluster:
            return resultado
        n = float(len(self.setCluster))
        for i in range(self.dimensiones):
            resultado[i] = sum(v.getCoordenadas()[i] for v in self.setCluster) / n
        return resultado

    def setRadio(self):
        """Set and return the radius: distance to the farthest assigned vector.

        A floor of 0.1 is kept so an empty cluster still gets a usable radius.
        """
        distancias = [0.1]
        for vector in self.setCluster:
            distancias.append(self.calcularDistancia(vector.getCoordenadas()))
        self.radio = max(distancias)
        return self.radio
class RBF(object):
    """RBF network front end: k-means-style clustering of the training set.

    Each element of `setRBF` is a Cluster; `grafica` is a plotting surface
    providing clear()/plotCircle()/plotMapeo()/canvas.draw().
    Training vectors must provide getCoordenadas().
    """
    def __init__(self, trainingSet, grafica, clusterSet = None):
        super(RBF, self).__init__()
        self.trainingSet = trainingSet
        # Input dimensionality taken from the first training vector.
        self.dimensiones = len( trainingSet[0].getCoordenadas() )
        print("Dimensiones: ", self.dimensiones)
        self.setRBF = clusterSet
        self.grafica = grafica
        # Dead code kept from earlier experiments (random cluster seeding):
        #self.numRbf = int ( 2 * len( trainingSet ) / 3 )
        # #for i in range( self.dimensiones ):
        # for i in range( self.numRbf ):
        #     self.setRBF.append( Cluster( self.dimensiones ) )
        # for index,vector in enumerate ( random.sample( trainingSet, self.numRbf ) ):
        #     self.setRBF[ index ].centro = vector.getCoordenadas()
        print( self.setRBF[0].centro )
        # Clustering starts immediately on construction.
        self.iniciarClustering()
    def iniciarClustering(self):
        """Lloyd-style clustering: assign each vector to its nearest cluster,
        recompute centres, repeat until no centre moves. Replots every epoch."""
        detectoCambios = True  # "changes detected" -- loop until stable
        epocas = 1
        while detectoCambios:
            detectoCambios = False
            # Assignment step: each vector joins its nearest cluster.
            for vector in self.trainingSet:
                rbfGanador = self.setRBF[0]   # current winner
                distanciaMin = MAX_INT
                for rbf in self.setRBF:
                    #print( 'Distancia: ' , rbf.centro , ' --> ' ,vector.getCoordenadas() )
                    distancia = rbf.calcularDistancia( vector.getCoordenadas() )
                    #print( distancia )
                    if distancia < distanciaMin:
                        rbfGanador = rbf
                        distanciaMin = distancia
                rbfGanador.setCluster.append( vector )
                #print('Algo')
            # Update step: move each centre to the mean of its members.
            # NOTE: setRadio() must run before reset() clears the membership.
            for rbf in self.setRBF:
                nuevoCentro = rbf.getPromedio()
                print( rbf.centro )
                print( nuevoCentro )
                print()
                if nuevoCentro != rbf.centro:
                    rbf.centro = nuevoCentro
                    detectoCambios = True
                rbf.setRadio()
                print('Sigue')
                rbf.reset()
            epocas += 1
            print('Epoca')
            # Redraw clusters (circles + centres) and training points each epoch.
            self.grafica.clear()
            for rbf in self.setRBF:
                print('Ploteando')
                self.grafica.plotCircle( rbf.getCentro(), rbf.radio )
                self.grafica.plotMapeo( rbf.getCentro()[0], rbf.getCentro()[1], 'xr' )
            for v in self.trainingSet:
                self.grafica.plotMapeo( v.getCoordenadas()[0], v.getCoordenadas()[1], 'og' )
            self.grafica.canvas.draw()
        print( 'Epocas: ', epocas )
def gaussiana( x, c , r ):
    """Scalar Gaussian RBF activation: exp(-((x - c)^2 / r^2)).

    Bug fix: the original computed the expression but never returned it,
    so every call yielded None.
    """
    return math.exp( - ( ( x - c ) ** 2 / ( r ** 2 ) ) )
#The Gaussian output is --> 0(rj)
#Wkj --> random
#Omega --> 1
#Weight increment --> lr( desired - zk ) gaussian( rj )
def gaussian(beta, x, centro):
    """Vector Gaussian RBF: exp(-beta * ||x - centro||**2)."""
    x_vec = np.array(list(x))
    centro_vec = np.array(list(centro))
    squared_distance = np.linalg.norm(x_vec - centro_vec) ** 2
    return np.exp(-beta * squared_distance)
|
from responses.models import Response
from rest_framework import serializers
from .news_org_type import NewsOrgTypeSerializer
from .tool import ToolSerializer
from .tool_task import ToolTaskSerializer
class ResponseSerializer(serializers.ModelSerializer):
    """Serialize a survey Response, expanding its related lookup objects
    (news-org type, tools, tasks) with their own serializers.

    Improvement: the manual append loops in get_tools_used / get_tasks_used
    are replaced with equivalent list comprehensions.
    """

    news_org_type = serializers.SerializerMethodField()
    tools_used = serializers.SerializerMethodField()
    most_important_tool = serializers.SerializerMethodField()
    tasks_used = serializers.SerializerMethodField()

    def get_news_org_type(self, obj):
        """Nested representation of the response's news-org type."""
        return NewsOrgTypeSerializer(obj.news_org_type).data

    def get_tools_used(self, obj):
        """List of serialized tools the respondent reported using."""
        return [ToolSerializer(tool).data for tool in obj.tools_used.all()]

    def get_most_important_tool(self, obj):
        """Nested representation of the single most important tool."""
        return ToolSerializer(obj.most_important_tool).data

    def get_tasks_used(self, obj):
        """List of serialized tasks the tools were used for."""
        return [ToolTaskSerializer(task).data for task in obj.tasks_used.all()]

    class Meta:
        model = Response
        fields = (
            "date_submitted",
            "job_title",
            "job_duties",
            "news_org_name",
            "news_org_type",
            "news_org_age",
            "tools_used",
            "most_important_tool",
            "tasks_used",
            "tool_satisfaction",
            "tool_recommendation",
            "tool_recommendation_why_not",
            "stopped_using",
            "why_stopped_using",
            "org_struggle",
            "org_struggle_other",
            "org_comparison",
            "org_communication",
            "org_sustainability",
            "talk_more",
            "email",
        )
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
::
export OPTICKS_ANA_DEFAULTS=cat=tboolean-box,det=tboolean-box,src=torch,tag=1,pfx=tboolean-box
main.py
unset OPTICKS_ANA_DEFAULTS
export LV=box
main.py
"""
from __future__ import print_function
import numpy as np
import os, sys, re, logging, argparse, platform
from opticks.ana.num import slice_, _slice
from opticks.ana.log import init_logging
from opticks.ana.env import opticks_environment
from opticks.ana.OpticksQuery import OpticksQuery
from opticks.ana.nload import tagdir_
log = logging.getLogger(__name__)
def isIPython():
    """Return True when running under IPython, which injects the
    __IPYTHON__ builtin; False otherwise (plain python, pytest, ...)."""
    try:
        __IPYTHON__  # noqa: F821 -- defined only inside IPython sessions
    except NameError:
        return False
    return True
class OK(argparse.Namespace):
    """Parsed-argument namespace for opticks analysis scripts.

    argparse fills in the option attributes (tag, src, det, pfx, ...);
    the properties below derive directories and context from them.
    """
    pass
    #ipython = property(lambda self:sys.argv[0].endswith("ipython"))
    ipython = isIPython()  # evaluated once at class-definition time
    brief = property(lambda self:"pfx %s tag %s src %s det %s c2max %s ipython %s " % (self.pfx, self.utag,self.src,self.det, self.c2max, self.ipython))
    def _get_ctx(self):
        # Identifying context dict for the current event selection.
        return dict(tag=self.tag, utag=self.utag, src=self.src, det=self.det)
    ctx = property(_get_ctx)
    def _get_tagdir(self):
        # Event directory for (det, src, tag) under the pfx prefix.
        return tagdir_(self.det, self.src, self.tag, pfx=self.pfx )
    tagdir = property(_get_tagdir)
    def _get_ntagdir(self):
        # Negated-tag directory: by convention the Geant4 counterpart event.
        itag = int(self.tag)
        return tagdir_(self.det, self.src, str(-itag), pfx=self.pfx )
    ntagdir = property(_get_ntagdir)
    def _get_catdir(self):
        # Category directory: like tagdir but without a specific tag.
        return tagdir_(self.det, self.src, None, pfx=self.pfx )
    catdir = property(_get_catdir)
    def _get_username(self):
        """
        Same approach as SSys::username
        """
        k = "USERNAME" if platform.system() == "Windows" else "USER"
        return os.environ[k]
    username = property(_get_username)
    def _get_tmpdefault(self):
        # Default scratch directory used when $TMP is not set.
        return os.path.join("/tmp", self.username, "opticks")
    tmpdefault = property(_get_tmpdefault)
    def resolve(self, arg):
        """
        :return: path with $TMP tokens replaced with a TMP envvar OR the default of /tmp/USERNAME/opticks
        """
        token = "$TMP"
        tmpd = os.environ.get(token[1:], self.tmpdefault )
        if arg.find(token) > -1:
            path = arg.replace(token,tmpd)
        else:
            # No $TMP token: expand any other environment variables instead.
            path = os.path.expandvars(arg)
        pass
        assert path.find("$") == -1, "failed to resolve tokens in arg %s path %s " % (arg, path )
        #print("resolve arg %s to path %s " % (arg, path))
        return path
def opticks_args(**kwa):
    """Build the argparse parser for opticks analysis scripts and return the
    parsed OK namespace.

    Defaults come from three layers, lowest to highest precedence:
    the OPTICKS_ANA_DEFAULTS envvar (optionally overridden by LV), then
    keyword arguments **kwa, then the actual command line.

    Post-processing converts comma-delimited strings to lists, hex strings
    to ints, slice strings to slices, and derives utag/utags from tag,
    tagoffset and multievent.

    Bug fix: under Python 3 `map()` returns an iterator, so the previous
    `args.utags = map(...)` followed by `args.utags[0]` raised TypeError in
    the multievent branch; the map is now materialized with list().
    """
    # Layer 1: environment-derived defaults.
    oad_key = "OPTICKS_ANA_DEFAULTS"
    oad = os.environ.get(oad_key,"det=g4live,cat=g4live,src=torch,tag=1,pfx=.")
    defaults = dict(map(lambda ekv:ekv.split("="), oad.split(",")))
    lv = os.environ.get("LV", None)
    if lv is not None:
        lv_is_int = re.compile(r"\d+").match(lv) is not None
        lvn = lv if not lv_is_int else "proxy-%d" % int(lv)
        defaults["pfx"] = "tboolean-%s" % lvn
        defaults["cat"] = defaults["pfx"]
        log.info("override pfx, cat defaults as LV=%s envvar defined, %s " % (lv, defaults["pfx"]))
    pass
    log.info("envvar %s -> defaults %s " % (oad_key, repr(defaults)))
    # Layer 2: keyword-argument overrides from the calling script.
    det = kwa.get("det", defaults["det"])
    cat = kwa.get("cat", defaults["cat"])
    src = kwa.get("src", defaults["src"])
    tag = kwa.get("tag", defaults["tag"])
    pfx = kwa.get("pfx", defaults["pfx"])
    #print("defaults det %s cat %s src %s tag %s pfx %s " % (det, cat, src, tag, pfx), file=sys.stderr)
    llv = kwa.get("loglevel", "info")
    llv2 = kwa.get("log-level", "info")
    mrc = kwa.get("mrc", 101)
    doc = kwa.get("doc", None)
    tagoffset = kwa.get("tagoffset", 0)
    multievent = kwa.get("multievent", 1)
    stag = kwa.get("stag", None)
    ptag = kwa.get("ptag", None)
    show = kwa.get("show", False)
    plot = kwa.get("plot", True)
    terse = kwa.get("terse", False)
    mat = kwa.get("mat", "GdDopedLS")
    msli = kwa.get("msli", "0:100k") # 0:1M mmap_mode slice for quick analysis
    sli = kwa.get("sli", "::")
    sel = kwa.get("sel", "0:5:1")
    qwn = kwa.get("qwn", "XYZT,ABCW")
    c2max = kwa.get("c2max", "1.5,2.0,2.5")
    rdvmax = kwa.get("rdvmax", "0.01,0.10,1.0")
    #pdvmax = kwa.get("pdvmax", "0.0010,0.0200,0.1000")
    pdvmax = kwa.get("pdvmax", "0.10,0.25,0.50")
    #dveps = kwa.get("dveps", 0.0002)
    pfxseqhis = kwa.get("pfxseqhis", "")
    pfxseqmat = kwa.get("pfxseqmat", "")
    dbgseqhis = kwa.get("dbgseqhis", "0")
    dbgseqmat = kwa.get("dbgseqmat", "0")
    dbgmskhis = kwa.get("dbgmskhis", "0")
    dbgmskmat = kwa.get("dbgmskmat", "0")
    mask = kwa.get("mask", None)
    smry = kwa.get("smry", False)
    dbgzero = kwa.get("dbgzero", False)
    lmx = kwa.get("lmx", 20)
    cmx = kwa.get("cmx", 0)
    prohis = kwa.get("prohis", False)
    promat = kwa.get("promat", False)
    rehist = kwa.get("rehist", False)
    chi2sel = kwa.get("chi2sel", False)
    chi2selcut = kwa.get("chi2selcut", 1.1)
    statcut = kwa.get("statcut", 1000)
    nointerpol = kwa.get("nointerpol", False)
    figsize = kwa.get("figsize", "18,10.2" )
    size = kwa.get("size", "1920,1080,1" )
    position = kwa.get("position", "100,100" )
    yes = kwa.get("yes", False )
    gltfsave = kwa.get("gltfsave", False )
    lvnlist = kwa.get("lvnlist", "" )
    addpath = kwa.get("addpath", "$LOCAL_BASE/env/dyb/NuWa-trunk/dybgaudi/Detector/XmlDetDesc/DDDB/dayabay.xml" )
    apmtddpath = kwa.get("apmtddpath", "$LOCAL_BASE/env/dyb/NuWa-trunk/dybgaudi/Detector/XmlDetDesc/DDDB/PMT/hemi-pmt.xml" )
    apmtpathtmpl = kwa.get("apmtpathtmpl", "$OPTICKS_INSTALL_PREFIX/opticksdata/export/DayaBay/GPmt/%(apmtidx)s/GPmt.npy" )
    apmtidx = kwa.get("apmtidx", 2 )
    csgname = kwa.get("csgname", "tboolean-dummy")
    csgpath = kwa.get("csgpath", None)
    #gltfpath = kwa.get("gltfpath", "$TMP/tgltf/tgltf-gdml--.gltf")
    container = kwa.get("container","Rock//perfectAbsorbSurface/Vacuum")
    testobject = kwa.get("testobject","Vacuum///GlassSchottF2" )
    autocontainer = kwa.get("autocontainer","Rock//perfectAbsorbSurface/Vacuum")
    autoobject = kwa.get("autoobject","Vacuum/perfectSpecularSurface//GlassSchottF2" )
    autoemitconfig = kwa.get("autoemitconfig","photons:600000,wavelength:380,time:0.2,posdelta:0.1,sheetmask:0x3f,umin:0.25,umax:0.75,vmin:0.25,vmax:0.75" )
    autoseqmap = kwa.get("autoseqmap","TO:0,SR:1,SA:0" )
    gsel = kwa.get("gsel", "/dd/Geometry/PMT/lvPmtHemi0x" )
    gidx = kwa.get("gidx", 0 )
    gmaxnode = kwa.get("gmaxnode", 0 )
    gmaxdepth = kwa.get("gmaxdepth", 0 )
    cfordering = kwa.get("cfordering", "sum_code" )
    #cfordering = kwa.get("cfordering", "code" )
    dumpenv = kwa.get("dumpenv", False)
    # Layer 3: command line.
    parser = argparse.ArgumentParser(doc)
    parser.add_argument(     "--tag",  default=tag, help="tag identifiying a simulation within a specific source and detector geometry, negated tag for Geant4 equivalent. Default %(default)s" )
    parser.add_argument(     "--det",  default=det, help="detector geometry: eg g4live, PmtInBox, dayabay. Default %(default)s. "  )
    parser.add_argument(     "--cat",  default=cat, help="category that overrides det. Will replace det, to match C++. Default %(default)s. "  )
    parser.add_argument(     "--src",  default=src, help="photon source: torch, natural, scintillation OR cerenkov. Default %(default)s "  )
    parser.add_argument(     "--pfx",  default=pfx, help="either \"source\" for 1st executable or the name of the executable for subsequent eg \"OKG4Test\". Default %(default)s "  )
    parser.add_argument(     "--noshow",  dest="show", default=show, action="store_false", help="switch off dumping commandline "  )
    parser.add_argument(     "--noplot",  dest="plot", default=plot, action="store_false", help="switch off plotting"  )
    parser.add_argument(     "--show",  default=show, action="store_true", help="dump invoking commandline "  )
    parser.add_argument(     "--loglevel", default=llv, help=" set logging level : DEBUG/INFO/WARNING/ERROR/CRITICAL. Default %(default)s." )
    parser.add_argument(     "--log-level", default=llv2, help=" mirror ipython level option to avoid complications with splitting options. Default %(default)s." )
    parser.add_argument(     "--profile", default=None, help="Unused option allowing argparser to cope with remnant ipython profile option" )
    parser.add_argument(     "-i", dest="interactive", action="store_true", default=False, help="Unused option allowing argparser to cope with remnant ipython -i option" )
    parser.add_argument(     "--tagoffset", default=tagoffset, type=int, help="tagoffset : unsigned offset from tag, identifies event in multivent running. Default %(default)s " )
    parser.add_argument(     "--multievent", default=multievent, type=int, help="multievent : unsigned number of events to handle. Default %(default)s " )
    parser.add_argument(     "--stag", default=stag, help="S-Polarization tag : identifying a simulation within a specific source and detector geometry, negated tag for Geant4 equivalent" )
    parser.add_argument(     "--ptag", default=ptag, help="P-Polarization tag : identifying a simulation within a specific source and detector geometry, negated tag for Geant4 equivalent" )
    parser.add_argument(     "--mrc", default=mrc, type=int, help="script return code resulting from missing event files. Default %(default)s "  )
    parser.add_argument(     "--mat", default=mat, help="material name, used for optical property dumping/plotting. Default %(default)s"  )
    parser.add_argument(     "--sli", default=sli, help="slice specification delimited by colon. Default %(default)s"  )
    parser.add_argument(     "--msli", default=msli, help="photon np.load mmap_mode slice specification delimited by colon. Default %(default)s"  )
    parser.add_argument(     "--sel", default=sel, help="selection slice specification delimited by colon. Default %(default)s"  )
    parser.add_argument(     "--qwn", default=qwn, help="Quantity by single char, pages delimited by comma eg XYZT,ABCR. Default %(default)s"  )
    parser.add_argument(     "--c2max", default=c2max, help="Admissable total chi2 deviation in comparisons. Comma delimited triplet of floats for warn/error/fatal levels. Default %(default)s"  )
    parser.add_argument(     "--rdvmax", default=rdvmax, help="For compressed record data : admissable total absolute deviation in DvTab comparisons. Comma delimited triplet of floats for warn/error/fatal levels. Default %(default)s"  )
    parser.add_argument(     "--pdvmax", default=pdvmax, help="For uncompressed final photon data : admissable total absolute deviation in DvTab comparisons. Comma delimited triplet of floats for warn/error/fatal levels. Default %(default)s"  )
    parser.add_argument(     "--pfxseqhis", default=pfxseqhis, help="Seqhis hexstring prefix for spawned selection. Default %(default)s"  )
    parser.add_argument(     "--pfxseqmat", default=pfxseqmat, help="Seqmat hexstring prefix for spawned selection. Default %(default)s"  )
    parser.add_argument(     "--dbgseqhis", default=dbgseqhis, help="Seqhis hexstring prefix for dumping. Default %(default)s"  )
    parser.add_argument(     "--dbgseqmat", default=dbgseqmat, help="Seqmat hexstring prefix for dumping. Default %(default)s"  )
    parser.add_argument(     "--dbgmskhis", default=dbgmskhis, help="History mask hexstring for selection/dumping. Default %(default)s"  )
    parser.add_argument(     "--dbgmskmat", default=dbgmskmat, help="Material mask hexstring for selection/dumping. Default %(default)s"  )
    parser.add_argument(     "--mask", default=mask, help="For analysis of masked events. Default %(default)s"  )
    parser.add_argument(     "--figsize", default=figsize, help="Comma delimited figure width,height in inches. Default %(default)s"  )
    parser.add_argument(     "--size", default=size, help="Comma delimited figure width,height in inches. Default %(default)s"  )
    parser.add_argument(     "--position", default=position, help="Comma delimited window position. Default %(default)s"  )
    parser.add_argument(     "--dbgzero", default=dbgzero, action="store_true", help="Dump sequence lines with zero counts. Default %(default)s"  )
    parser.add_argument(     "--terse",  action="store_true", help="less verbose, useful together with --multievent ")
    parser.add_argument(     "--smry", default=smry,  action="store_true", help="smry option gives less detailed seqmat and seqhis tables, including the hex strings, useful for dbgseqhis")
    parser.add_argument(     "--pybnd",  action="store_true", help="Avoid error from op binary selection flag. ")
    parser.add_argument(     "--gdml2gltf",  action="store_true", help="Avoid error from op binary selection flag. ")
    parser.add_argument(     "--prohis", default=prohis, action="store_true", help="Present progressively masked seqhis frequency tables for step by step checking. Default %(default)s ")
    parser.add_argument(     "--promat", default=promat, action="store_true", help="Present progressively masked seqmat frequency tables for step by step checking. Default %(default)s ")
    parser.add_argument(     "--rehist", default=rehist, action="store_true", help="Recreate hists rather than loading persisted ones. Default %(default)s ")
    parser.add_argument(     "--chi2sel", default=chi2sel, action="store_true", help="Select histograms by their chi2 sum exceeding a cut, see cfh.py. Default %(default)s ")
    parser.add_argument(     "--chi2selcut", default=chi2selcut, type=float, help="chi2 per degree of freedom cut used to select histograms when using --chi2sel option, see cfh-vi. Default %(default)s ")
    parser.add_argument(     "--statcut", default=statcut, type=int, help="Statistics cut used with --chi2sel option, see cfh-vi Default %(default)s ")
    parser.add_argument(     "--nointerpol", dest="interpol", default=not nointerpol, action="store_false", help="See cfg4/tests/CInterpolationTest.py. Default %(default)s ")
    parser.add_argument(     "--lmx", default=lmx, type=int, help="Maximum number of lines to present in sequence frequency tables. Default %(default)s " )
    parser.add_argument(     "--cmx", default=cmx, type=float, help="When greater than zero used as minimum line chi2 to present in sequence frequency tables. Default %(default)s " )
    parser.add_argument(     "--apmtpathtmpl", default=apmtpathtmpl, help="Template Path to analytic PMT serialization, see pmt- and ana/pmt/analytic.py. %(default)s ")
    parser.add_argument(     "--apmtidx", default=apmtidx, type=int, help="PmtPath index used to fill in the template, see pmt- and ana/pmt/analytic.py. %(default)s ")
    parser.add_argument(     "--apmtddpath", default=apmtddpath, help="Path to detdesc xml file with description of DayaBay PMT, which references other files. %(default)s ")
    parser.add_argument(     "--addpath", default=addpath, help="Path to detdesc xml file for topdown testing. %(default)s ")
    parser.add_argument(     "--yes",  action="store_true", help="Confirm any YES dialogs. %(default)s ")
    parser.add_argument(     "--csgpath", default=csgpath, help="Directory of the NCSG input serialization. %(default)s ")
    parser.add_argument(     "--csgname", default=csgname, help="Name of the Directory of the NCSG input serialization. %(default)s ")
    #parser.add_argument(     "--gltfpath", default=gltfpath, help="Path to glTF json file. %(default)s ")
    parser.add_argument(     "--container", default=container, help="Boundary specification for container. %(default)s ")
    parser.add_argument(     "--testobject", default=testobject, help="Boundary specification for testobject. %(default)s ")
    parser.add_argument(     "--autocontainer", default=autocontainer, help="Boundary specification for test container used with --testauto. %(default)s ")
    parser.add_argument(     "--autoobject", default=autoobject, help="Boundary specification for test object used with --testauto. %(default)s ")
    parser.add_argument(     "--autoemitconfig", default=autoemitconfig, help="Emit config from test container used with --testauto. %(default)s ")
    parser.add_argument(     "--autoseqmap", default=autoseqmap, help="Seqmap for NCSGIntersect testing with --testauto geometry. %(default)s ")
    parser.add_argument(     "--cfordering", default=cfordering, help="Sort ordering of cf tables, one of max/self/other. %(default)s ")
    parser.add_argument(     "--gsel", default=gsel, help="GDML node selection, either tree node index integer or LV name prefix, see tboolean-gdml . %(default)s ")
    parser.add_argument(     "--gmaxdepth", default=gmaxdepth, type=int, help="GDML node depth limit, 0 for no limit, see tboolean-gdml. %(default)s ")
    parser.add_argument(     "--gmaxnode", default=gmaxnode, type=int, help="GDML node limit including target node, 0 for no limit, see tboolean-gdml. %(default)s ")
    parser.add_argument(     "--gidx", default=gidx, type=int, help="GDML index to pick target node from within gsel lvn selection, see tboolean-gdml. %(default)s ")
    parser.add_argument(     "--gltfsave", default=gltfsave, action="store_true", help="Save GDML parsed scene as glTF, see analytic/sc.py. %(default)s ")
    parser.add_argument(     "--lvnlist", default=lvnlist, help="Path to file containing list of lv names. %(default)s ")
    parser.add_argument(     "--j1707", action="store_true", help="Bash level option passthru. %(default)s ")
    parser.add_argument(     "--ip", action="store_true", help="Bash level option passthru. %(default)s ")
    parser.add_argument(     "--pdb", action="store_true", help="ipython level option passthru. %(default)s ")
    parser.add_argument(     "--extras", action="store_true", help="Bash level option passthru. %(default)s ")
    parser.add_argument(     "--dumpenv", default=dumpenv, action="store_true", help="Dump enviroment. %(default)s ")
    parser.add_argument(     "--disco", action="store_true", help="Disable container, investigate suspected inefficient raytrace of objects inside spacious containers. %(default)s ")
    parser.add_argument('nargs', nargs='*', help='nargs : non-option args')
    ok = OK()
    args = parser.parse_args(namespace=ok)
    # dont write to stdout here it messes up tboolean picking ip TESTCONFIG
    cflog = True
    init_logging(level=args.loglevel,cflog=cflog)
    if args.multievent > 1 and args.tagoffset > 0:
        log.fatal("use either --multievent n or --tagoffset o to pick one from multi, USING BOTH --multievent and --tagoffset NOT SUPPORTED ")
        sys.exit(1)
    ok.allowed_cfordering = "max self other sum sum_code code".split()
    assert args.cfordering in ok.allowed_cfordering
    args.det = args.cat # todo update use of det for cat, to avoid this
    #if args.det != "g4live" and args.pfx != ".":
    #    args.det = args.pfx
    #pass
    # Comma-delimited threshold strings -> float triplets.
    args.c2max = list(map(float, args.c2max.split(",")))
    args.rdvmax = list(map(float, args.rdvmax.split(",")))
    args.pdvmax = list(map(float, args.pdvmax.split(",")))
    if args.multievent > 1:
        # py3 fix: materialize the map so args.utags is subscriptable below.
        args.utags = list(map(lambda offset:int(args.tag) + offset, range(args.multievent)))
        args.utag = args.utags[0]   # backward compat for scripts not supporting multievent yet
    else:
        try:
            tag = int(args.tag)
        except ValueError:
            # Comma-delimited tag list, eg "1,2,3".
            tag = list(map(int,args.tag.split(",")))
        pass
        if type(tag) is int:
            args.utag = tag + args.tagoffset
            args.utags = [args.utag]
        else:
            args.utag = None
            args.utags = tag
        pass
    pass
    args.qwns = args.qwn.replace(",","")
    os.environ.update(OPTICKS_MAIN_QWNS=args.qwns)  # dirty trick to avoid passing ok to objects that need this
    # hexstring -> hexint
    args.dbgseqhis = int(str(args.dbgseqhis),16)
    args.dbgseqmat = int(str(args.dbgseqmat),16)
    args.dbgmskhis = int(str(args.dbgmskhis),16)
    args.dbgmskmat = int(str(args.dbgmskmat),16)
    args.figsize = list(map(float, args.figsize.split(",")))
    args.msli = slice_(args.msli)  # from string to slice
    args.sli = slice_(args.sli)
    args.sel = slice_(args.sel)
    args.apmtpath = args.apmtpathtmpl % dict(apmtidx=str(args.apmtidx))
    log.debug("args.apmtpathtmpl %s args.apmtidx %d -> args.apmtpath %s " % ( args.apmtpathtmpl, args.apmtidx, args.apmtpath ) )
    log.debug("args.dbgseqhis [%x] " % args.dbgseqhis)
    log.debug("args.smry : %s " % args.smry )
    if args.show:
        sys.stderr.write("args: " + " ".join(sys.argv) + "\n")
    pass
    ok.query = OpticksQuery(os.environ.get("OPTICKS_QUERY",""))
    return ok
def opticks_main(**kwa):
    """Parse arguments, set up the opticks environment and configure numpy
    print options; returns the populated OK namespace."""
    ok = opticks_args(**kwa)
    opticks_environment(ok)
    np.set_printoptions(suppress=True, precision=4, linewidth=200)
    return ok
if __name__ == '__main__':
    # Smoke test: parse default arguments and dump the resulting namespace.
    ok = opticks_main(doc=__doc__)
    log.info(ok)
    log.info(ok.brief)
|
'''
Code developed by - Narender Kumar
This code requires Python 3.0, and for other versions, the code will not compile.
Please make sure that all the required constraints are met in the input text file;
the code will not check for the required constraints. That is, 1 ≤ N ≤ 200,000 days
and 1 ≤ K ≤ N days.
This code creates an output text file(output.txt), in the same directory as this program.
'''
# The code starts from here
input_file_name = "input.txt"
# Bug fix: was "ouput.txt", contradicting the documented output name "output.txt".
output_file_name = "output.txt"
def read_text_file(fn):
    '''
    Read the input text file and return (K_values, N_values).

    The first line holds the control values (N and K); every following line
    holds whitespace-separated sale prices, flattened in order into N_values.

    Improvement: replaces the manual first-line flag counter (and the loop
    variable `x` being shadowed by the inner comprehension) with simple
    slicing of the line list. Behavior is unchanged.
    '''
    with open(fn) as f:
        lines = f.readlines()
    # First line: the control values.
    K_values = [int(token) for token in lines[0].split()]
    # Remaining lines: all ints, flattened in file order.
    N_values = [int(token) for line in lines[1:] for token in line.split()]
    return K_values, N_values
def number_of_increasing_Subranges(window):
    '''
    Return the number of contiguous subranges (length >= 2) of `window`
    that are strictly increasing.

    Improvement: the previous version enumerated every subrange and checked
    each element pair, which is O(n^3). A strictly increasing subrange must
    lie inside a maximal increasing run, so counting, for each position, the
    subranges that end there gives the same total in O(n).
    '''
    increasing_subrange = 0
    run = 0  # length (in adjacent pairs) of the increasing run ending here
    for left, right in zip(window, window[1:]):
        if left < right:
            run += 1
            # `run` new increasing subranges end at `right`.
            increasing_subrange += run
        else:
            run = 0
    return increasing_subrange
def number_of_decreasing_Subranges(window):
    '''
    Return the number of contiguous subranges (length >= 2) of `window`
    that are strictly decreasing.

    Improvement: mirrors number_of_increasing_Subranges -- the original
    O(n^3) enumeration is replaced by an O(n) run-counting scan that yields
    identical counts.
    '''
    decreasing_subrange = 0
    run = 0  # length (in adjacent pairs) of the decreasing run ending here
    for left, right in zip(window, window[1:]):
        if left > right:
            run += 1
            # `run` new decreasing subranges end at `right`.
            decreasing_subrange += run
        else:
            run = 0
    return decreasing_subrange
def do_computation(k, n_list):
    '''
    For each window of length k[1] sliding over n_list, compute the number
    of increasing subranges minus the number of decreasing subranges, and
    return the list of those differences (k[0] is N, k[1] is K).

    Improvement: the original kept a separate counter `j` that always equaled
    the (otherwise unused) loop variable; the offset is now used directly.
    '''
    computed_list = []
    for offset in range(k[0] - k[1] + 1):
        window = n_list[offset:k[1] + offset]
        num_inc = number_of_increasing_Subranges(window)
        num_dec = number_of_decreasing_Subranges(window)
        computed_list.append(num_inc - num_dec)
    return computed_list
def print_to_textfile(fn, list_of_values):
    '''
    Write each value on its own line to the text file `fn`, echoing every
    value to the console as it is written.
    '''
    with open(fn, 'wt') as out:
        for value in list_of_values:
            print(value)
            out.write(f"{value}\n")
# Script entry point: read the input, run the sliding-window computation and
# write/echo the results. Note this runs at import time (no __main__ guard).
K_values,N_values = read_text_file(input_file_name)
print_to_textfile(output_file_name,do_computation(K_values,N_values))
#End of code
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class SinaPipeline(object):
    """Scrapy item pipeline that inserts scraped Sina news items into a
    MySQL `sina_news` table via pymysql."""
    def __init__(self):
        self.conn = None    # pymysql connection, opened in open_spider
        self.cursor = None  # cursor on that connection
    def open_spider(self,spider):
        # NOTE(security): database host/credentials are hardcoded here --
        # they should come from settings/environment, not source control.
        self.conn = pymysql.connect(host='111.230.169.107',user='root',password='20111673',database='sina', port=3306,charset='utf8')
        self.cursor = self.conn.cursor()
    def process_item(self, item, spider):
        # Earlier hand-formatted SQL kept for reference:
        # sql = 'insert into sina_news(newsTitle,newsUrl,newsTime,content) VALUES (%r,%r,%r,%r)'%(item['newsTitle'], item['newsUrl'], item['newsTime'], item['content'])
        # self.cursor.execute(sql)
        # self.conn.commit()
        # Values go through parameterized %s placeholders; only the column
        # names (which come from the scrapy Item definition, not from scraped
        # content) are interpolated into the statement text.
        cols, values = zip(*item.items())
        sql = "INSERT INTO `%s` (%s) VALUES (%s)" % \
            (
                'sina_news',
                ','.join(cols),
                ','.join(['%s'] * len(values))
            )
        self.cursor.execute(sql, values)
        self.conn.commit()
        # NOTE: _last_executed is a private pymysql attribute (debug echo of
        # the executed statement); it may disappear in newer pymysql versions.
        print(self.cursor._last_executed)
        return item
    def close_spider(self,spider):
        self.cursor.close()
        self.conn.close()
import torch
import torch.nn as nn
import torch.nn.functional as F
import gensim
from crf import CRFDevice, CRF
from conll_vectorizer import Const
class CNNEmbeddings(nn.Module):
    """Character-level CNN encoder: embeds char ids, applies three stacked
    1-D convolutions (kernel sizes 3, 4, 5) and max-pools over time to yield
    one `in_channels`-sized vector per input sequence.

    Improvements: convolutions are held in an nn.ModuleList instead of a
    plain Python list, and init_weights uses the in-place
    nn.init.xavier_uniform_ (the non-underscore variant is deprecated).
    """

    def __init__(self, vocab_size, embedding_dim, in_channels, padding_idx=Const.PAD_ID):
        super().__init__()
        self.vocab_size = vocab_size
        self.embeddings = nn.Embedding(self.vocab_size, embedding_dim, padding_idx=padding_idx)
        self.conv1 = nn.Conv1d(embedding_dim, in_channels, kernel_size=3)
        self.conv2 = nn.Conv1d(in_channels, in_channels, kernel_size=4)
        self.conv3 = nn.Conv1d(in_channels, in_channels, kernel_size=5)
        # ModuleList so the container itself participates in module traversal
        # (the convs are already registered via the attributes above).
        self.convs = nn.ModuleList([self.conv1, self.conv2, self.conv3])

    def init_weights(self):
        """Xavier-initialize every convolution weight in place."""
        for conv in self.convs:
            nn.init.xavier_uniform_(conv.weight)

    def forward(self, x):
        # x: (batch, seq_len) char ids -> (batch, seq_len, embedding_dim)
        x = self.embeddings(x)
        # Conv1d expects (batch, channels, length).
        x = x.permute(0, 2, 1)
        # Three stacked convolutions; each shrinks the length by kernel-1,
        # so inputs must be at least 9 steps longer than the output length.
        for conv in self.convs:
            x = F.relu(conv(x))
        x = x.permute(0, 2, 1)
        # Max over the (shrunken) time dimension -> (batch, in_channels).
        pooled, _ = torch.max(x, dim=1)
        return pooled
class LstmCrf(nn.Module):
    """BiLSTM-CRF sequence tagger combining pretrained word embeddings with
    character-level CNN embeddings."""
    def __init__(self, weighted_matrix,
                 vocab_size,
                 nb_labels,
                 embedding_dim,
                 hidden_dim,
                 char_vocab_size,
                 char_embedding_dim,
                 char_in_channels,
                 device):
        super().__init__()
        self.device = device
        self.hidden_dim = hidden_dim
        # Word embeddings initialized from the pretrained matrix
        # (from_pretrained freezes them by default).
        self.embeddings = nn.Embedding.from_pretrained(torch.Tensor(weighted_matrix))
        # Word and char features are concatenated before the BiLSTM.
        self.lstm = nn.LSTM(embedding_dim+char_in_channels, hidden_dim, bidirectional=True, batch_first=True)
        self.cnn_embeddings = CNNEmbeddings(char_vocab_size, char_embedding_dim, char_in_channels)
        # 2*hidden_dim because the LSTM is bidirectional.
        self.fc = nn.Linear(hidden_dim * 2, nb_labels)
        #self.crf = CRFDevice(nb_labels, Const.BOS_TAG_ID, Const.EOS_TAG_ID, device=device)
        self.crf = CRF(nb_labels, True)
    def _init_hidden(self, batch_size):
        # Fresh random (h0, c0) on every call -- i.e. every forward pass.
        return (torch.randn(2, batch_size, self.hidden_dim, device=self.device),
                torch.randn(2, batch_size, self.hidden_dim, device=self.device))
    def _get_mask(self, tokens):
        # 1.0 for real tokens, 0.0 for padding positions.
        return (tokens != Const.PAD_ID).float()
    def _lstm(self, x, x_char):
        """Compute per-token label emissions for word ids x and char ids x_char."""
        emb = self.embeddings(x)
        char_emb = self.cnn_embeddings(x_char)
        # Broadcast the single char vector across every token position.
        char_emb = char_emb.unsqueeze(1).repeat(1, x.shape[1], 1)
        emb_cat = torch.cat([emb, char_emb], dim=2)
        hidden = self._init_hidden(x.shape[0])
        x, _ = self.lstm(emb_cat, hidden)
        emissions = self.fc(x)
        # NOTE(review): softmax over dim=1 normalizes across the sequence
        # dimension rather than the label dimension (dim=2); CRFs also
        # typically consume raw scores. Confirm this is intended.
        emissions = F.softmax(emissions, dim=1)
        return emissions
    def forward(self, features):
        """Viterbi-decode the best tag path for (token_ids, char_ids)."""
        x, x_char = features
        mask = self._get_mask(x)
        emissions = self._lstm(x, x_char)
        #score, path = self.crf.decode(emissions, mask=mask)
        path = self.crf.decode(emissions, mask=mask)
        #return score, path
        return path
    def loss(self, features, y):
        """Negative CRF log-likelihood of gold tag sequence y."""
        x, x_char = features
        mask = self._get_mask(x)
        emissions = self._lstm(x, x_char)
        nll = -self.crf(emissions, y, mask=mask)
        return nll
import numpy as np
import graphlab as gl
from scipy.stats import multivariate_normal
def log_sum_exp(Z):
    """ Compute log(\sum_i exp(Z_i)) for some array Z.

    Uses the max-shift trick for numerical stability; the maximum is
    computed once and reused (the previous version called np.max twice).
    """
    zmax = np.max(Z)
    return zmax + np.log(np.sum(np.exp(Z - zmax)))
def loglikelihood(data, weights, means, covs):
    """ Compute the loglikelihood of the data for a Gaussian mixture model with the given parameters.

    Improvement: the inverse and determinant of each cluster covariance do
    not depend on the data point, so they are computed once per cluster
    instead of once per (point, cluster) pair.
    """
    num_clusters = len(means)
    num_dim = len(data[0])
    # Per-cluster constants, hoisted out of the data loop.
    inv_covs = [np.linalg.inv(covs[k]) for k in range(num_clusters)]
    log_consts = [np.log(weights[k])
                  - 1/2. * (num_dim * np.log(2*np.pi) + np.log(np.linalg.det(covs[k])))
                  for k in range(num_clusters)]
    ll = 0
    for d in data:
        Z = np.zeros(num_clusters)
        for k in range(num_clusters):
            # Compute (x-mu)^T * Sigma^{-1} * (x-mu)
            delta = np.array(d) - means[k]
            exponent_term = np.dot(delta.T, np.dot(inv_covs[k], delta))
            # Loglikelihood contribution of this point under cluster k.
            Z[k] = log_consts[k] - 1/2. * exponent_term
        # Increment loglikelihood contribution of this data point across all clusters
        ll += log_sum_exp(Z)
    return ll
def compute_responsibilities(data, weights, means, covariances):
    """E-step: normalized posterior resp[i, k] of cluster k for data point i."""
    num_data = len(data)
    num_clusters = len(means)
    resp = np.zeros((num_data, num_clusters))
    for k in range(num_clusters):
        component = multivariate_normal(mean=means[k], cov=covariances[k])
        for i in range(num_data):
            resp[i, k] = weights[k] * component.pdf(data[i])
    # Normalize each row so responsibilities sum to one per data point.
    resp /= resp.sum(axis=1, keepdims=True)
    return resp
def compute_soft_counts(resp):
    """Total responsibility mass per cluster (the soft cluster sizes)."""
    return resp.sum(axis=0)
def compute_weights(counts):
    """M-step: mixture weights proportional to the soft counts."""
    total = counts.sum()
    return [count / total for count in counts]
def compute_means(data, resp, counts):
    """M-step: responsibility-weighted mean of the data for each cluster."""
    num_clusters = len(counts)
    num_data = len(data)
    means = []
    for k in range(num_clusters):
        acc = 0.
        for i in range(num_data):
            acc += resp[i, k] * data[i]
        means.append(acc / counts[k])
    return means
def compute_covariances(data, resp, counts, means):
    """M-step: responsibility-weighted covariance matrix for each cluster."""
    num_clusters = len(counts)
    dim = len(data[0])
    covariances = []
    for k in range(num_clusters):
        acc = np.zeros((dim, dim))
        for i, point in enumerate(data):
            diff = point - means[k]
            acc += resp[i, k] * np.outer(diff, diff)
        covariances.append(acc / counts[k])
    return covariances
def EM(data, init_means, init_covariances, init_weights, maxiter=1000, thresh=1e-4):
    """Fit a Gaussian mixture by expectation-maximization.

    Returns a dict with the final 'weights', 'means', 'covs', the
    log-likelihood trace 'loglik', and the last responsibility matrix 'resp'.
    """
    # Make copies of initial parameters, which we will update during each iteration
    means = init_means[:]
    covariances = init_covariances[:]
    weights = init_weights[:]
    # Infer dimensions of dataset and the number of clusters
    num_data = len(data)
    num_dim = len(data[0])
    num_clusters = len(means)
    # Initialize some useful variables
    resp = np.zeros((num_data, num_clusters))
    ll = loglikelihood(data, weights, means, covariances)
    ll_trace = [ll]
    for it in range(maxiter):
        if it % 5 == 0:
            print("Iteration %s" % it)
        # E-step: compute responsibilities
        resp = compute_responsibilities(data, weights, means, covariances)
        # M-step
        # Compute the total responsibility assigned to each cluster
        counts = compute_soft_counts(resp)
        # Update the weight for cluster k using the M-step update rule for the cluster weight
        weights = compute_weights(counts)
        # Update means for cluster k using the M-step update rule for the mean variables.
        means = compute_means(data, resp, counts)
        # Update covariances for cluster k using the M-step update rule for covariance variables.
        covariances = compute_covariances(data, resp, counts, means)
        # Compute the loglikelihood at this iteration
        ll_latest = loglikelihood(data, weights, means, covariances)
        ll_trace.append(ll_latest)
        # Check for convergence in log-likelihood and store
        # NOTE(review): the signed improvement is used, so a *decrease* in
        # log-likelihood also triggers convergence — confirm abs() is not wanted.
        if (ll_latest - ll) < thresh and ll_latest > -np.inf:
            break
        ll = ll_latest
    # Echo the final iteration number if the loop body didn't just print it.
    if it % 5 != 0:
        print("Iteration %s" % it)
    out = {'weights': weights, 'means': means, 'covs': covariances, 'loglik': ll_trace, 'resp': resp}
    return out
def compute_image_assignments(images, em_result):
    """Hard-assign each image to the mixture component that scores it highest.

    images: graphlab SFrame with an 'rgb' feature column and an 'image' column;
    em_result: dict produced by EM() ('weights', 'means', 'covs', ...).
    Returns an SFrame of (assignments, probs, image).
    """
    weights = em_result['weights']
    means = em_result['means']
    covariances = em_result['covs']
    rgb = images['rgb']
    N = len(images) # number of images
    K = len(means) # number of clusters
    assignments = [0]*N
    probs = [0]*N
    for i in range(N):
        # Compute the score of data point i under each Gaussian component:
        p = np.zeros(K)
        for k in range(K):
            p[k] = weights[k]*multivariate_normal.pdf(rgb[i], mean=means[k], cov=covariances[k])
        # Compute assignments of each data point to a given cluster based on the above scores:
        assignments[i] = np.argmax(p)
        # For data point i, store the corresponding score under this cluster assignment:
        probs[i] = np.max(p)
    assignments = gl.SFrame({'assignments':assignments, 'probs':probs, 'image': images['image']})
    return assignments
def get_top_images(assignments, cluster, k=5):
    """Return the k images most confidently assigned to `cluster`.

    assignments: SFrame produced by compute_image_assignments.
    """
    images_in_cluster = assignments[assignments['assignments'] == cluster]
    # topk keeps the rows with the highest 'probs' scores.
    top_images = images_in_cluster.topk('probs', k)
    return top_images['image']
|
from sklearn.base import BaseEstimator,BiclusterMixin
class fill_na(BaseEstimator,BiclusterMixin):
    """Fill missing values in a DataFrame.

    `fillna['fillna_assign_str']` is a comma-separated "column:value" list of
    per-column fill values; `fillna['fillna_default_str']` is a single integer
    fallback for non-object columns.  With neither set, object columns are
    filled with the string 'None'.

    NOTE(review): the mutable dict default argument is shared across all
    instances constructed without an explicit `fillna` — confirm this is safe.
    NOTE(review): BiclusterMixin looks odd for a transformer; was
    TransformerMixin intended?
    """
    def __init__(self,fillna={'fillna_assign_str':None,'fillna_default_str':None}):
        self.fillna = fillna
    def fit(self,X,y=None):
        # Stateless: nothing to learn from the data.
        return self
    def transform(self,X,convertList=True):
        print('-----------------begin-fillna--------------')
        if self.fillna['fillna_assign_str'] or self.fillna['fillna_default_str']:
            if self.fillna['fillna_assign_str']:
                # Per-column fills, cast to the column's dtype family.
                for f in self.fillna['fillna_assign_str'].split(','):
                    column = f.split(':')[0]
                    value = f.split(':')[1]
                    dtype = X[column].dtypes
                    if dtype=='int':
                        X[column] = X[column].fillna(int(value))
                    elif dtype=='float':
                        X[column] = X[column].fillna(float(value))
                    else:
                        X[column] = X[column].fillna(value)
            if self.fillna['fillna_default_str']:
                for c in X.columns:
                    if c not in X.select_dtypes('object').columns:
                        # NOTE(review): this fills the *entire* frame once per
                        # non-object column (X.fillna, not X[c].fillna) — confirm.
                        X = X.fillna(int(self.fillna['fillna_default_str']))
                        print('no_objective features have been filled with',self.fillna['fillna_default_str'])
        else:
            for o in X.select_dtypes('object').columns:
                X[o] = X[o].fillna('None')
            print('object features have been filled with "None"')
        return X
class get_dummies(BaseEstimator,BiclusterMixin):
    """One-hot encode non-numeric DataFrame columns.

    Columns come from `convertList_str` (comma-separated, takes priority) or
    are auto-detected from `featuresList` as the non-int/non-float columns.
    A column with more than `n` distinct values keeps only its most frequent
    values; the rest are mapped to NaN before encoding.
    """
    def __init__(self,convertList_str=None,featuresList=None,n=10):
        self.n = n
        self.convertList_str = convertList_str
        self.featuresList = featuresList
        self.convertList = None
    def fit(self,X,y=None):
        # Stateless: nothing to learn from the data.
        return self
    def transform(self,X,re_convertList=False):
        # BUGFIX: numpy/pandas were previously imported in the class body.
        # Class-body imports become class attributes and are NOT visible
        # inside methods, so `np.nan` / `pd.get_dummies` below raised
        # NameError at runtime; import them in function scope instead.
        import numpy as np
        import pandas as pd
        if self.convertList_str:
            self.convertList = self.convertList_str.split(',')
        else:
            self.convertList = [c for c in self.featuresList if ('int' not in str(X[c].dtypes) and 'float' not in str(X[c].dtypes))]
        for o in self.convertList:
            print(o)
            if X[o].nunique()<=self.n:
                pass
            else:
                print('!!!column',o,'has more than',self.n,'values',',choose top',self.n,'values to get dummies')
                # NOTE(review): index[:self.n-1] keeps the top n-1 values,
                # not n — confirm whether this off-by-one is intended.
                convert_values = list(X.groupby(o).size().sort_values(ascending=False).index[:self.n-1])
                X[o] = X.apply(lambda l:l[o] if l[o] in convert_values else np.nan,axis=1)
            X_tmp = pd.get_dummies(X[o])
            # Prefix dummy columns with the source column name.
            X_tmp.columns = [o+'_'+str(c) for c in X_tmp.columns]
            print('add columns:',X_tmp.columns)
            X = X.merge(X_tmp,left_index=True,right_index=True)
        print('------------------------------------conclusion----------------------------------')
        print(self.convertList,'have been converted to dummies')
        if re_convertList==True:
            return X,self.convertList
        else:
            return X
class log(BaseEstimator,BiclusterMixin):
    """Add log(1+x)-transformed copies (suffix '_log') of skewed numeric columns.

    Columns come from `convertList_str` (comma-separated, takes priority) or
    are auto-detected from `featuresList` as the non-object columns whose
    skew exceeds `skew`.
    """
    def __init__(self,convertList_str=None,featuresList=None,skew=0.5):
        self.skew = skew
        self.convertList_str = convertList_str
        self.featuresList = featuresList
        self.convertList = None
    def fit(self,X,y=None):
        # Stateless: nothing to learn from the data.
        return self
    def transform(self,X,re_convertList=False):
        # BUGFIX: `import numpy as np` previously sat in the class body;
        # class-body names are not visible inside methods, so np.log below
        # raised NameError at runtime.
        import numpy as np
        if self.convertList_str:
            self.convertList = self.convertList_str.split(',')
        # Consistency fix (matches get_dummies): an explicit convertList_str
        # takes priority; featuresList is only the auto-detection fallback.
        # Previously a supplied featuresList silently overwrote the explicit list.
        elif self.featuresList is not None:
            self.convertList = [c for c in self.featuresList if c not in X.select_dtypes('object').columns and X[c].skew()>self.skew]
        print('add columns',end=': ')
        for c in self.convertList:
            X[c+'_log'] = np.log(X[c]+1)
            print(c+'_log',end=',')
        if re_convertList==True:
            return X,self.convertList
        else:
            return X
class del_samples(BaseEstimator,BiclusterMixin):
    """Drop known outlier rows from the training portion of the frame in place.

    NOTE(review): the hard-coded X[:1460] looks like the Kaggle House-Prices
    training-set size (test rows appended after row 1460) — confirm.
    """
    def __init__(self):
        pass
    def fit(self,X,y=None):
        return self
    def transform(self,X):
        # Remember the row count so we can report how many rows were dropped.
        tmp = X.shape[0]
        # Each drop targets a hand-picked outlier condition in the first
        # 1460 rows only; note these mutate X in place.
        X.drop(X[:1460][X.MiscVal>=6000].index,inplace = True)
        X.drop(X[:1460][X.LotFrontage>=250].index,inplace = True)
        X.drop(X[:1460][X.BsmtFinSF1>=5000].index,inplace = True)
        X.drop(X[:1460][X.TotalBsmtSF>=5000].index,inplace = True)
        X.drop(X[:1460][X['1stFlrSF']>=4000].index,inplace = True)
        X.drop(X[:1460][(X.GrLivArea>4000)&(X.SalePrice<300000)].index,inplace = True)
        if tmp>X.shape[0]:
            print('remove',tmp-X.shape[0],'samples')
        return X
class remove_high_relevance(BaseEstimator,BiclusterMixin):
    """Drop one column of every highly correlated numeric pair.

    For each pair with |corr| > `threshold`, the column with fewer non-null
    values is removed.  `method` is any pandas corr() method.
    """
    def __init__(self,featuresList,method='pearson',threshold=0.9):
        self.featuresList = featuresList
        self.method = method
        self.threshold = threshold
    def fit(self,X,y=None):
        return self
    def transform(self,X,re_convertList=False):
        from tqdm import tqdm
        numeric_features = [c for c in self.featuresList if ('int' in str(X[c].dtypes) or 'float' in str(X[c].dtypes))]
        print('the length of numeric features is:',len(numeric_features))
        removeSet=set()
        # iSet tracks columns already used as the left side of a comparison,
        # so each unordered pair is only examined once.
        iSet = set()
        for i in tqdm(numeric_features):
            if i not in removeSet:
                iSet.add(i)
                for j in numeric_features:
                    # Re-check `i not in removeSet`: i can be removed mid-loop
                    # when it loses a comparison below.
                    if j not in removeSet and i not in removeSet and j not in iSet:
                        # NOTE: named `pearsonr` but holds whatever `method` computes.
                        pearsonr = (X[[i,j]].corr(method=self.method).loc[i,j])
                        if abs(pearsonr)>self.threshold:
                            print('pearsonr of',i,'and',j,'is',pearsonr,end='! ')
                            # Keep the column with more non-null observations.
                            if X[i].count()>=X[j].count():
                                removeSet.add(j)
                                print('remove',j)
                            else:
                                removeSet.add(i)
                                print('remove',i)
        featuresList = [c for c in X.columns if c not in removeSet]
        X = X[featuresList]
        if re_convertList==True:
            return X,list(removeSet)
        else:
            return X
|
# Computer-purchase checkout: read item prices line by line until the order
# type is entered ("special" gets a 10% discount, "regular" none).
total_price_with_taxes = 0
total_price = 0
total_taxes = 0
while True:
    token = input()
    # A terminating order-type token ends price entry.
    if token == "special" or token == "regular":
        break
    price = float(token)
    if price < 0:
        print("Invalid price!")
        continue
    # Flat 20% tax on every item.
    taxes = price * 20/100
    total_price += price
    total_taxes += taxes
    total_price_with_taxes += (price + taxes)
if total_price_with_taxes == 0:
    print("Invalid order!")
else:
    # "special" orders get 10% off the taxed total only; the itemized
    # price/tax lines below remain undiscounted.
    if token == "special":
        total_price_with_taxes *= 90/100
    print(f"Congratulations you've just bought a new computer!\n"
          f"Price without taxes: {total_price:.2f}$\n"
          f"Taxes: {total_taxes:.2f}$\n"
          "-----------\n"
          f"Total price: {total_price_with_taxes:.2f}$")
|
from django.db import models
from django.utils import timezone
from mywing.angel.models import Angel
class Task(models.Model):
    """A help-request task owned by one Angel and optionally helped by another."""
    description = models.CharField(max_length=256)
    cost = models.FloatField()
    # SET_NULL keeps the task row alive when the related Angel is deleted.
    owner = models.ForeignKey(Angel, on_delete=models.SET_NULL, null=True, related_name='owned_tasks')
    helper = models.ForeignKey(Angel, on_delete=models.SET_NULL, null=True, related_name='helped_tasks')
    contribution = models.FloatField(default=0.0)
    # BUGFIX: pass the callable `timezone.now`, not `timezone.now()`.
    # Calling it froze the default to the moment the module was imported,
    # so every Task created in one process shared the same timestamp.
    created_at = models.DateTimeField(blank=True, null=True, default=timezone.now)
    accepted_at = models.DateTimeField(blank=True, null=True)
    finished_at = models.DateTimeField(blank=True, null=True)
    completed_at = models.DateTimeField(blank=True, null=True)
    canceled_at = models.DateTimeField(blank=True, null=True)
    # Lifecycle states; negative values are terminal failure states.
    CREATED = 0
    ACCEPTED = 1
    FINISHED = 2
    COMPLETED = 3
    INVALID = -1
    CANCELED = -2
    STATUS_CHOICES = [
        (CREATED, 'created'),
        (ACCEPTED, 'accepted'),
        (FINISHED, 'finished'),
        (COMPLETED, 'completed'),
        (INVALID, 'invalid'),
        (CANCELED, 'canceled'),
    ]
    status = models.IntegerField(choices=STATUS_CHOICES, default=CREATED)

    class Meta:
        # Newest tasks first.
        ordering = ['-id']

    def __str__(self):
        return f'Task#{self.id}: {self.description}'
|
from django.db import models
class MiningPool(models.Model):
    """A cryptocurrency mining pool and a link to its website."""
    name = models.CharField(
        max_length=50
    )
    url = models.URLField()
    def __str__(self):
        # Show the pool by name in admin/list views.
        return f'{self.name}'
class BlockExplorer(models.Model):
    """A blockchain explorer service and a link to its website."""
    name = models.CharField(
        max_length=50
    )
    url = models.URLField()
    def __str__(self):
        # Show the explorer by name in admin/list views.
        return f'{self.name}'
|
from pycolate.Percolation import Percolation, PercolationExperiment, CRIT_PROB
from pycolate.CoarseGraining import coarse_graining_estimate, percolates, majority
|
#!/usr/bin/env python3
# Upper bound of the Collatz search range (start values 1..10,000,000).
i = 10000000
def get_sequence_length_and_sum(i, length=0, sum=0):
    """Tail-recursively walk the Collatz sequence starting at `i`.

    Returns (sequence length, sum of all sequence values), counting `i`
    itself and the terminal 1.

    Bug fix: the even step used `i / 2`, which under this file's Python 3
    shebang is float true-division — the recursive argument and running
    sum silently became floats.  `i // 2` keeps everything integral.
    """
    length += 1
    sum += i  # NB: parameter shadows the builtin; name kept for interface compatibility.
    if i > 1:
        if i % 2:
            return get_sequence_length_and_sum(i * 3 + 1, length, sum)
        return get_sequence_length_and_sum(i // 2, length, sum)
    return length, sum
# Find, among all start values 1..i, the one with the longest Collatz sequence.
num_length = {}
num_sum = {}
for x in range(1, i + 1):
    num_length[x], num_sum[x] = get_sequence_length_and_sum(x)
# Sort (length, start_value) pairs ascending by length.
sorted_num_length = sorted(((v,k) for k,v in num_length.items()))
# NOTE(review): [-2] picks the SECOND-longest sequence — confirm whether
# [-1] (the longest) was intended.
num = sorted_num_length[-2][1]
print("Number: ", num)
print("Length: ", num_length[num])
print("Sum: ", num_sum[num])
|
import os
import sys
import errno
import socket
import logging
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(
os.path.join(SCRIPT_DIR, PACKAGE_PARENT, PACKAGE_PARENT)))
from src.utils import get_logger
from src.ztransfer.profiler import Profiler
from src.ztransfer.packets import (ZTConnReqPacket, ZTDataPacket,
ZTAcknowledgementPacket, ZTFinishPacket,
deserialize_packet, ZT_RAW_DATA_BYTES_SIZE)
from src.ztransfer.errors import (ZTVerificationError, ERR_VERSION_MISMATCH,
ERR_ZTDATA_CHECKSUM, ERR_MAGIC_MISMATCH,
ERR_PTYPE_DNE)
class ZTransferTCPServer(object):
    """Single-connection TCP server that receives one file transfer.

    State machine: INIT (bind to a free port from the pool, accept one
    client) -> WAIT_CCREQ (expect a connection-request packet carrying the
    transfer metadata) -> TRANSFER (accumulate data packets until a finish
    packet) -> FIN (close sockets).

    Fixes over the previous revision:
    * the bind loop only set `port_occupied` inside the `except` branch, so
      a *successful* bind was never recorded (the code relied on a second,
      failing bind to break out); success now records the port, and
      non-EADDRINUSE socket errors propagate instead of being mistaken for
      a bound port;
    * packet receive/verification was duplicated in two states, and the
      WAIT_CCREQ copy did not handle ERR_ZTDATA_CHECKSUM — that error fell
      through to a NameError on the unbound `packet`; the shared
      `_recv_packet` helper handles all four verification errors;
    * `recv()` returning b"" (peer disconnect) used to spin forever; it now
      raises ConnectionError.
    """
    STATE_INIT = 0
    STATE_WAIT_CCREQ = 1
    STATE_TRANSFER = 2
    STATE_FIN = 3

    # Fixed on-the-wire packet size in bytes.
    _PACKET_SIZE = 1000

    def __init__(self, bind_host: str, port_pool: list, logger_verbose: bool = False):
        self.bind_host = bind_host
        self.port_pool = port_pool
        self.port_occupied = None
        self.recv_bytes_data = b""
        self.file_overall_checksum = None
        self.file_name = None
        self.last_data_packet_seq = None
        self.last_data_packet_data_size = None
        self.client_socket = None
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.logger = get_logger("ZTransferTCPServer", logger_verbose)
        self.logger.debug(f"Constructed ZTransferTCPServer({bind_host}, {port_pool})")

    def _recv_packet(self):
        """Receive exactly one fixed-size packet and deserialize it.

        Returns the packet object, or None after logging and socket cleanup
        when verification fails (the caller must then abort the transfer).
        """
        recv_data = b""
        while len(recv_data) < self._PACKET_SIZE:
            chunk = self.client_socket.recv(self._PACKET_SIZE - len(recv_data))
            if not chunk:
                # Peer closed the connection mid-packet; the old code would
                # loop forever concatenating empty byte strings here.
                raise ConnectionError("Client closed connection mid-packet")
            recv_data += chunk
        self.logger.debug(f"Received {len(recv_data)} bytes from the client")
        try:
            self.logger.debug(f"Deserializing received packet data...")
            packet = deserialize_packet(recv_data)
        except ZTVerificationError as e:
            if e.err_code == ERR_MAGIC_MISMATCH:
                self.logger.warning(f"Wrong magic number '{e.extras['magic']}' (seq: {e.extras['seq']}, ptype: {e.extras['ptype']}, ts: {e.extras['ts']})")
            elif e.err_code == ERR_VERSION_MISMATCH:
                self.logger.warning(f"Mismatched version number '{e.extras['version']}' (seq: {e.extras['seq']}, ptype: {e.extras['ptype']}, ts: {e.extras['ts']})")
            elif e.err_code == ERR_PTYPE_DNE:
                self.logger.warning(f"Not known packet type '{e.extras['ptype']}' (seq: {e.extras['seq']}, ts: {e.extras['ts']})")
            elif e.err_code == ERR_ZTDATA_CHECKSUM:
                self.logger.warning(f"Data packet checksum failed (seq: {e.extras['seq']}, ts: {e.extras['ts']})")
            self.clear()
            return None
        self.logger.debug(f"Packet OK: {packet.__class__.__name__} ({packet.sequence_number})")
        self.profiler.pkt_tick(packet)
        return packet

    def listen_for_transfer(self):
        """Drive the receive state machine until the transfer finishes or fails."""
        state = self.STATE_INIT
        while state != self.STATE_FIN:
            if state == self.STATE_INIT:
                for port in self.port_pool:
                    try:
                        self.socket.bind((self.bind_host, port))
                    except socket.error as e:
                        # Port taken: try the next candidate; any other
                        # socket error is unexpected and propagates.
                        if e.errno == errno.EADDRINUSE:
                            continue
                        raise
                    else:
                        # Successful bind: record the port we actually hold.
                        self.port_occupied = port
                        break
                if self.port_occupied is None:
                    self.logger.error(f"Could not bind to any ports from: {self.port_pool}")
                    self.clear()
                    return
                self.socket.listen()
                self.client_socket, client_addr = self.socket.accept()
                # Formed connection, start profiling
                self.profiler = Profiler()
                state = self.STATE_WAIT_CCREQ
            elif state == self.STATE_WAIT_CCREQ:
                packet = self._recv_packet()
                if packet is None:
                    return
                if not isinstance(packet, ZTConnReqPacket):
                    self.logger.warning(f"Was waiting for CREQ, got '{packet.ptype}'")
                    self.clear()
                    return
                # Cache the transfer metadata from the connection request.
                self.file_name = packet.filename
                self.file_overall_checksum = packet.checksum
                self.last_data_packet_seq = packet.last_seq
                # The final DATA packet is only partially filled with file
                # bytes; remember how many of them are real.
                self.last_data_packet_data_size = packet.data_size - (ZT_RAW_DATA_BYTES_SIZE * (packet.last_seq - 1))
                ack_packet = ZTAcknowledgementPacket(1, packet.sequence_number)
                self.client_socket.sendall(ack_packet.serialize())
                state = self.STATE_TRANSFER
            elif state == self.STATE_TRANSFER:
                packet = self._recv_packet()
                if packet is None:
                    return
                if isinstance(packet, ZTDataPacket):
                    if packet.sequence_number == self.last_data_packet_seq:
                        # Trim the padding from the final data packet.
                        self.recv_bytes_data += packet.file_data[:self.last_data_packet_data_size]
                    else:
                        self.recv_bytes_data += packet.file_data
                elif isinstance(packet, ZTFinishPacket):
                    state = self.STATE_FIN
                else:
                    self.logger.warning(f"Was waiting for DATA, got '{packet.ptype}'")
                    self.clear()
                    return
        self.clear()

    def clear(self):
        """Close the listening socket and, if connected, the client socket."""
        self.socket.close()
        if self.client_socket is not None:
            self.client_socket.close()
|
from camelcase import CamelCase
# Demo of the third-party `camelcase` package: hump() the sample sentence.
c = CamelCase()
txt = "a aa aaa aaaaa aa a aaa a aaa a_b a.aa am is be go"
print(c.hump(txt))
|
from gui.MainApp import MainApp
# Application entry point: construct the GUI app and start its event loop.
MainApp().run()
|
def total_bill(s):
    """Total cost of a sushi order string: $2 per red plate ('r').

    Every 5th red plate is free; condiment plates (spaces) cost nothing.
    """
    plates = s.count('r')
    free_plates = plates // 5
    return (plates - free_plates) * 2
'''
Sam has opened a new sushi train restaurant - a restaurant where sushi is served
on plates that travel around the bar on a conveyor belt and customers take the plate that they like.
Sam is using Glamazon's new visual recognition technology that allows a computer
to record the number of plates at a customer's table and the colour of those plates.
The number of plates is returned as a string. For example, if a customer has eaten 3
plates of sushi on a red plate the computer will return the string 'rrr'.
Currently, Sam is only serving sushi on red plates as he's trying to attract customers
to his restaurant. There are also small plates on the conveyor belt for condiments such
as ginger and wasabi - the computer notes these in the string that is returned as a space
('rrr r' //denotes 4 plates of red sushi and a plate of condiment).
Sam would like your help to write a program for the cashier's machine to read the string
and return the total amount a customer has to pay when they ask for the bill. The current
price for the dishes are as follows:
Red plates of sushi ('r') - $2 each, but if a customer eats 5 plates the 5th one is free.
Condiments (' ') - free.
Input: String
Output: Number
Examples:
Input: 'rr' Output: 4
Input: 'rr rrr' Output: 8
Input: 'rrrrr rrrrr' Output: 16
'''
|
# (c) 2016-2017 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
import re
from os.path import join
from distutils.core import setup
# read version from anaconda_verify/__init__.py
# Extract __version__ from anaconda_verify/__init__.py without importing it.
pat = re.compile(r'__version__\s*=\s*(\S+)', re.M)
data = open(join('anaconda_verify', '__init__.py')).read()
# eval() turns the matched literal (e.g. "'1.0'") into the version string.
version = eval(pat.search(data).group(1))
setup(
    name = "anaconda-verify",
    version = version,
    author = "Ilan Schnell",
    author_email = "ilan@continuum.io",
    url = "https://github.com/ContinuumIO/anaconda-verify",
    license = "BSD",
    description = "tool for validating conda recipes and conda packages",
    long_description = open('README.md').read(),
    packages = ['anaconda_verify'],
)
|
import unittest
from katas.beta.first_character_that_repeats import first_dup
class FirstDuplicateTestCase(unittest.TestCase):
    """Unit tests for first_dup: the first character that occurs twice."""
    def test_none(self):
        # No character repeats -> None.
        self.assertIsNone(first_dup('like'))
    def test_none_2(self):
        self.assertIsNone(first_dup('bar'))
    def test_equals(self):
        self.assertEqual(first_dup('tweet'), 't')
    def test_equals_2(self):
        # A space counts as a character too.
        self.assertEqual(first_dup('Ode to Joy'), ' ')
    def test_equals_3(self):
        # Lower-casing 'O' makes 'o' the first repeat -> case sensitivity check.
        self.assertEqual(first_dup('ode to joy'), 'o')
    def test_equals_4(self):
        # Digits and punctuation are handled like any other character.
        self.assertEqual(first_dup('123123'), '1')
    def test_equals_5(self):
        self.assertEqual(first_dup('!@#$!@#$'), '!')
    def test_equals_6(self):
        self.assertEqual(first_dup('1a2b3a3c'), 'a')
|
'''
class Shape(object):
pass
class Triangle(Shape):
def draw(self):
print("三角形")
class Square(Shape):
def draw(self):
print("正方形")
s1 = Triangle()
s2 = Square()
s1.draw()
s2.draw()
'''
class Shape(object):
    """Abstract base shape: concrete subclasses must implement draw()."""
    def draw(self):
        # Subclasses are required to override this.
        raise NotImplementedError
class Triangle(Shape):
    """Concrete shape that prints its (Chinese) name when drawn."""
    def draw(self):
        print("三角形")
class Square(Shape):
    """Concrete shape that prints its (Chinese) name when drawn."""
    def draw(self):
        print("正方形")
class ShapeFactory(object):
    """Simple factory mapping a short code ('Tri'/'Squ') to a Shape instance."""
    def create(self, shape):
        # Dispatch table instead of an if/elif chain; unknown codes -> None.
        constructors = {'Tri': Triangle, 'Squ': Square}
        cls = constructors.get(shape)
        return cls() if cls is not None else None
# Demo: build a triangle through the factory and draw it.
fac = ShapeFactory()
obj = fac.create('Tri')
obj.draw()
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import math
import scipy.stats
from math import *
from scipy import interpolate
import scipy.signal
from scipy.integrate import simps
# Prediction grid: integer rainfall thresholds 0..69.
thresholds = np.arange(70)
def heaviside(actual):
    """Observed step CDF of `actual` on the threshold grid (bool array)."""
    return actual <= thresholds
def erfcc(x):
    """Complementary error function (Numerical Recipes rational approximation)."""
    z = abs(x)
    t = 1.0 / (1.0 + 0.5 * z)
    # Degree-9 polynomial in t added to the -z^2 exponent.
    poly = (-1.26551223 + t * (1.00002368 + t * (.37409196 +
            t * (.09678418 + t * (-.18628806 + t * (.27886807 +
            t * (-1.13520398 + t * (1.48851587 + t * (-.82215223 +
            t * .17087277)))))))))
    r = t * exp(-z * z + poly)
    # Use the symmetry erfc(-x) = 2 - erfc(x) for negative arguments.
    return r if x >= 0. else 2. - r
def normcdf(x, mu, sigma):
    """Normal CDF at x for N(mu, sigma), via erfcc, clamped to at most 1."""
    y = 0.5 * erfcc(-(x - mu) / (sigma * sqrt(2.0)))
    return min(y, 1.0)
def gauss(mean, l, v=1):
    """Discretized normal CDF (variance parameter v) sampled at 0..l-1."""
    return [normcdf(point, mean, v) for point in np.arange(l)]
def _calc_crps(predictions, actuals):
    """Vectorized CRPS: mean squared gap between predicted and observed step CDFs."""
    observed = np.array([heaviside(actual) for actual in actuals])
    return np.mean(np.mean((predictions - observed) ** 2))
def calc_crps(predictions, actuals):
    """CRPS of predicted 70-bin CDFs against the actual rainfall amounts."""
    total = 0.0
    for pred, actual in zip(predictions, actuals):
        # Squared distance to the observed step CDF (1 at thresholds >= actual).
        contrib = 0.0
        for threshold, p in enumerate(pred):
            target = 1 if threshold >= actual else 0
            contrib += (p - target) ** 2
        total += contrib / 70.
    return total / float(len(actuals))
def step(center, length=70):
    """Step CDF: 0.0 through index int(center), 1.0 for every index above."""
    cdf = np.ones(length)
    cdf[:int(center) + 1] = 0.
    return cdf
def sigmoid(center, length):
    """Logistic CDF centred at `center`, sampled at integer points 0..length-1."""
    grid = np.arange(length)
    return 1. / (1 + np.exp(center - grid))
def cdfs(means):
    """One 70-bin CDF per estimated mean rain rate.

    Non-positive estimates collapse to the all-ones CDF (certain zero rain);
    estimates above the grid collapse to a step at the last bin; otherwise a
    discretized Gaussian CDF centred on the estimate is used.
    """
    result = []
    for estimate in means:
        if estimate <= 0:
            result.append([1] * 70)
        elif estimate > 70:
            result.append([0] * 69 + [1])
        else:
            result.append(gauss(estimate, 70))
    return result
def parse_floats(row, col_ind):
    """Parse the space-separated CSV cell row[col_ind] into a float64 array."""
    tokens = row[col_ind].split(' ')
    return np.array([float(tok) for tok in tokens])
def parse_rr(row, rr_ind, default=None):
    """Parse a rain-rate cell; with a truthy `default`, replace out-of-range
    values (<0 or >1000) by it.

    NB: a falsy default (0, None) disables the replacement entirely.
    """
    values = parse_floats(row, rr_ind)
    if default:
        for idx, val in enumerate(values):
            if val < 0 or val > 1000:
                values[idx] = default
    return values
def split_radars(distances, times):
    """Partition scan indices into contiguous per-radar index ranges.

    A new radar segment starts whenever the radar distance changes or the
    time-to-end stops strictly decreasing.
    """
    segments = []
    start = 0
    j = 1
    while j < len(distances):
        if distances[j] != distances[j - 1] or times[j] >= times[j - 1]:
            segments.append(range(start, j))
            start = j
        j += 1
    segments.append(range(start, j))
    return segments
def mean_without_zeros(a):
    """Mean of the non-zero entries of `a`, or 0 when every entry is zero."""
    nonzero = a[a != 0]
    return nonzero.mean() if len(nonzero) else 0
def clean_radar_q(w, filler=0):
    """Sanitize radar-quality values: entries outside [0, 1] become `filler`.

    Bug fix: the original built the cleaned list but then `return w`,
    discarding the cleanup and handing the raw input straight back.
    """
    clean = []
    for x in w:
        if x >= 0 and x <= 1:
            clean.append(x)
        else:
            clean.append(filler)
    return clean
def hmdir_(times, rr, w, x, d):
    """Hourly rain estimate by interpolating valid scans over time.

    Keeps scans with 0 <= rr < 100, weights each by radar quality (full
    weight when quality == 1, half otherwise), linearly interpolates the
    rate over minutes, smooths long series, and integrates (sum/60).
    NOTE(review): parameters `x` and `d` are unused — confirm.
    """
    valid_t = times[(rr>=0)&(rr<100)]
    valid_r = rr[(rr>=0)&(rr<100)]
    # Quality weights: 1.0 for perfect quality, otherwise 0.5.
    q = [0.5]*len(valid_t)
    for ai, a in enumerate(w[(rr>=0)&(rr<100)]):
        if a==1:
            q[ai]=1
    valid_r = valid_r*q
    # Degenerate cases: no scans -> 0; one scan -> treat as a single minute.
    if len(valid_t)==0: return 0
    if len(valid_t)<2: return valid_r[0]/60.
    f = interpolate.interp1d(valid_t, valid_r)
    ra = range(int(valid_t.min()), int(valid_t.max()+1))
    tl = f(ra)
    #plt.plot(tl)
    # Savitzky-Golay smoothing only when there are enough points for the window.
    if len(tl)>=11:
        tl = scipy.signal.savgol_filter(tl, min(len(tl), 11), 4)
    #plt.plot(tl)
    #plt.show()
    est = sum(tl)/60.
    return est
def hmdir(times, rr, w, hts, distances, ey, defaults):
    """Integrate one radar's quality-weighted rain rate over the last hour.

    Walks the scan intervals from oldest to newest, filling a per-minute
    `hour` buffer with the scan's rate.  Hydrometeor types 6 and 8 are
    excluded (their meaning is not shown here); types 1-3 fall back to the
    caller-supplied per-type `defaults` when the measured rate is unusable.
    Returns the buffer total divided by 60.
    NOTE(review): `distances` and `ey` are unused — confirm.
    """
    hour = [0.]*61
    for i in range(1, len(times)):
        # Minutes covered by scan (len-i-1): from its time back to the next scan's.
        for j in range(int(times[len(times)-i]), int(times[len(times)-i-1])):
            v = rr[len(times)-i-1]
            q = w[len(times)-i-1]
            ht = hts[len(times)-i-1]
            # Imperfect radar quality gets half weight.
            if q!=1: q = 0.5
            if v>=0 and v<100 and not ht in [6, 8]:
                hour[j]=v*q
            elif ht == 1:
                hour[j]=defaults[0]*q
            elif ht == 2:
                hour[j]=defaults[1]*q
            elif ht == 3:
                hour[j]=defaults[2]*q
    est = sum(hour)/60.
    return est
def all_good_estimates(rr, distances, radar_indices, w, times, hts, ey, defaults, compos):
    """Hourly rain estimates from every radar whose data is mostly valid.

    A radar is skipped when 50% or more of its rain-rate readings are
    negative (error codes).  Returns the list of per-radar estimates.
    NOTE(review): `compos` is accepted but unused — confirm.
    """
    age = []
    for radar in radar_indices:
        rain = rr[radar]
        q = w[radar]
        rr_error_rate = len(rain[(rain<0)])/float(len(rain))
        if rr_error_rate<0.5:
            est = hmdir(times[radar], rr[radar], w[radar], hts[radar], distances[radar], ey, defaults)
            age.append(est)
    return age
def mean(x, default=0):
    """np.mean of `x`, or `default` when `x` is empty."""
    return default if len(x) == 0 else np.mean(x)
def is_cdf_valid(case):
    """True iff `case` is a non-decreasing sequence of values within [0, 1].

    Fix: the original iterated with the Python-2-only `xrange`; pairwise
    iteration with zip is behaviorally identical and version-agnostic.
    """
    if case[0] < 0 or case[0] > 1:
        return False
    for prev, cur in zip(case, case[1:]):
        if cur > 1 or cur < prev:
            return False
    return True
def avg_cdf(h):
    """Element-wise average of a collection of 70-bin CDFs."""
    stacked = np.reshape(h, (len(h), 70))
    return stacked.mean(axis=0)
def estimate_cdf(good):
    """Blend per-radar estimates into one 70-bin CDF.

    No estimates, or an all-zero mean, yield the all-ones CDF (certain zero
    rainfall); otherwise sigmoid CDFs centred on each rounded estimate are
    averaged.
    """
    if len(good) == 0 or np.mean(good) == 0:
        return [1] * 70
    curves = [sigmoid(round(estimate), 70) for estimate in good]
    return avg_cdf(curves)
def radar_features(rr, hts, w, d, waters, composites):
    """Summary feature vector for one radar's scans.

    rr: rain rates; hts: hydrometeor type codes; w: radar quality index;
    d: distances; waters: log water volume; composites: composite values.
    Returns a fixed-length list whose trailing -1 is a placeholder the
    caller overwrites with the mean rain estimate.
    NOTE(review): `d` and `waters` are currently unused (their candidate
    features are commented out below) — confirm.
    """
    m = float(len(rr))
    # Fraction of composite readings that are negative but not the -99900 sentinel.
    composite_neg_rate = len(composites[(composites!=-99900)&(composites<0)])/m
    #error_rate = len(rr[rr<0])/m
    #zero_rate = len(rr[rr==0])/m
    #oor_rate = len(rr[rr>2000])/m
    #rain_rate = len(rr[(rr>10)&(rr<=100)])/m
    #bad_q = len(w[w==0])/m
    # Quality-index fractions: out-of-range (>1) and intermediate (0..1).
    oor_q = len(w[w>1])/m
    #good_q = len(w[w==1])/m
    ok_q = len(w[(w>0)&(w<1)])/m
    #distance = d[0]
    #ht0 = len(hts[hts==0])
    #ht1 = len(hts[hts==1])
    # Counts of selected hydrometeor type codes.
    ht2 = len(hts[hts==2])
    #ht3 = len(hts[hts==3])
    #ht4 = len(hts[hts==4])
    #ht5 = len(hts[hts==5])
    ht6 = len(hts[hts==6])
    #ht7 = len(hts[hts==7])
    #ht8 = len(hts[hts==8])
    #ht9 = len(hts[hts==9])
    ht13 = len(hts[hts==13])
    #ht14 = len(hts[hts==14])
    return [composite_neg_rate, ht13/m, np.sqrt(ok_q), oor_q, ht6/m, ht2/m, m, -1]
#0.00895660879826
#0.00895629729627
#0.0089555104762
#0.00894224941284
#0.00893217139966
#0.00893150685717
#0.00892673459363
#0.00892397448774
#0.00892321039254
#0.00888351595589
#0.00886016566214
#0.00872495266795
#0.00846279777448
#0.00864597950536
#0.00749225371431
def data_set(file_name):
    """Parse a rainfall CSV into (ids, feature matrix X, labels y, baseline CDFs).

    Builds per-radar summary features and an averaged CDF estimate from the
    three rain-rate products (RR1-RR3).  `y` stays empty when the file has
    no 'Expected' column (test data).
    NOTE: Python 2 code (`reader.next()`, print statement).
    NOTE(review): errors, error_distances, g, rain_types1-3 and all_waters
    are accumulated nowhere / never used — confirm they can be removed.
    """
    reader = csv.reader(open(file_name))
    header = reader.next()
    # Resolve column positions by name so the column order may vary.
    id_ind = header.index('Id')
    rr1_ind = header.index('RR1')
    rr2_ind = header.index('RR2')
    rr3_ind = header.index('RR3')
    time_ind = header.index('TimeToEnd')
    rad_q_ind = header.index('RadarQualityIndex')
    try:
        expected_ind = header.index('Expected')
    except ValueError:
        # no label
        expected_ind = -1
    composite_ind = header.index('Composite')
    distance_ind = header.index('DistanceToRadar')
    hydro_type_ind = header.index('HydrometeorType')
    water_ind = header.index('LogWaterVolume')
    mwm_ind = header.index('MassWeightedMean')
    y = []
    ids = []
    avgs = []
    errors = []
    error_distances = []
    g = 0
    rain_types1 = []
    rain_types2 = []
    rain_types3 = []
    X = []
    all_waters = []
    for i, row in enumerate(reader):
        ids.append(row[id_ind])
        times = parse_floats(row, time_ind)
        distances = parse_floats(row, distance_ind)
        rr1 = parse_rr(row, rr1_ind)
        rr2 = parse_rr(row, rr2_ind)
        # RR3 magnitudes carry the estimate even when the sign is negative.
        rr3 = np.fabs(parse_rr(row, rr3_ind))
        w = parse_floats(row, rad_q_ind)
        hidro_types = parse_floats(row, hydro_type_ind)
        waters = parse_floats(row, water_ind)
        mwms = parse_floats(row, mwm_ind)
        composites = parse_floats(row, composite_ind)
        if expected_ind >= 0:
            ey = float(row[expected_ind])
            y.append(ey)
        else:
            ey = -1
        # Each row interleaves scans from several radars; split them apart.
        radar_indices = split_radars(distances, times)
        # Per-radar hourly estimates for each product, with product-specific
        # default rates for hydrometeor types 1-3.
        rr1_estimates = all_good_estimates(rr1, distances, radar_indices, w, times, hidro_types, ey, [0.33, 33.31, 33.31], composites)
        rr2_estimates = all_good_estimates(rr2, distances, radar_indices, w, times, hidro_types, ey, [1.51, 36.37, 81.17], composites)
        rr3_estimates = all_good_estimates(rr3, distances, radar_indices, w, times, hidro_types, ey, [4.52, 38.60, 42.34], composites)
        cdfs = []
        cdfs.append(estimate_cdf(rr1_estimates))
        cdfs.append(estimate_cdf(rr2_estimates))
        cdfs.append(estimate_cdf(rr3_estimates))
        # Baseline prediction: average of the three product CDFs.
        cdf = avg_cdf(cdfs)
        avgs.append(cdf)
        radar_f = []
        for radar in radar_indices:
            rf = radar_features(rr1[radar], hidro_types[radar], w[radar], distances[radar], waters[radar], composites[radar])
            radar_f.append(rf)
        total = np.mean(radar_f, axis=0)
        # The last feature slot is the placeholder radar_features reserved;
        # fill it with the mean RR1 estimate (-1 when there were none).
        total[-1] = mean(rr1_estimates, -1)
        X.append(total)
        if i % 10000 == 0:
            print "Completed row %d" % i
    return ids, np.array(X), np.array(y), np.array(avgs)
def as_labels(y):
    """Binary labels: 0 where the target is exactly zero, 1 otherwise."""
    return np.array([0 if value == 0 else 1 for value in y])
def split(X, y):
    """Stratified 70/30 train/validation split on zero-vs-nonzero labels.

    Returns (train_X, train_y, val_X, val_y, train_labels, val_labels,
    train_indices, val_indices).
    NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
    modern code uses sklearn.model_selection.StratifiedShuffleSplit, whose
    API differs (split() takes X, y) — confirm before upgrading sklearn.
    """
    from sklearn.cross_validation import StratifiedShuffleSplit
    labels = as_labels(y)
    # A single shuffle-split with a fixed seed for reproducibility.
    sss = StratifiedShuffleSplit(labels, 1, test_size=0.3, random_state=0)
    for a, b in sss:
        train_X = X[a]
        val_X = X[b]
        train_y = y[a]
        val_y = y[b]
        train_labels = labels[a]
        val_labels = labels[b]
    return train_X, train_y, val_X, val_y, train_labels, val_labels, a, b
def apply_classifier(labels, cdfs, threshold):
    """Shrink CDFs toward 1 for rows the classifier deems likely zero-rain.

    `labels` holds predict_proba rows; column 0 is P(zero rain).  When that
    probability exceeds `threshold`, the row's CDF c is mapped to (c+1)/2.
    """
    adjusted = []
    for probs, cdf in zip(labels, cdfs):
        adjusted.append((cdf + 1.) / 2. if probs[0] > threshold else cdf)
    return adjusted
#0.00904234862754
#0.00904150831178
#0.00904983861228
#0.00901613263585
#0.00900412724833
#0.00900165157547
#0.00900155415651
#0.00900142821889
#0.00899566077666
#0.00899140245563
#0.00899136162509
#0.00899121498571
#0.00898516450647
#0.00898631930177
#0.00898616252983 --
#0.00894938332555
#0.00894852729502
#0.00894846604764
#0.00894788853756
#0.00894671310461
#0.00894636668274
#0.00894535250385
#0.0089344109825
#0.00893531092568
#0.00898349408228
#0.00899329563108
#0.00896273995689
#0.00895327069295
#0.00895322370697
#0.00895317650512
#0.00893217139966
#0.00892321039254
#0.00892313857954
#0.00892310519344
#0.00992382187229 -> 0.00971819
#0.00983595164706 -> 0.00962434
#0.00957061504447 -> 0.00924509
#0.00952959922595 -> 0.00918081
#0.0095278045182
#0.00945252983071
#0.0094347918118 -> 0.00900467
#0.00941938258085 -> 0.00893021
#0.00938841086168
#0.00923516814223
#0.00923510027563
#0.00922980704588 -> 0.00871121
#0.00922233467044
#0.0092045862579
#0.00920457357894 -> 0.00867324
#0.0092016208212
#0.00920147312222 -> 0.00867015
#0.00920130043796
#0.00919861298415
#0.00919647579626
#0.00919475970769
#0.00919475679584
#0.00916480687816 -> 0.00861711
#0.00915106704337
#0.00913163690433 -> 0.00855668
#0.00912739404781
#0.00912342542954
#0.00912337214931
#0.00912001249288 -> 0.00853307
#0.00910320309268 -> 0.00849574
#0.00907174536772 ***
#0.00907146849158 -> 0.00849297
#0.00907144426707 -> 0.00849229
#0.00910805136467 -> 0.00846733
#0.00908048569227 -> 0.00844376
#0.00907063607574
#0.00907063537653
#0.00904911201717 => 0.00842818
#0.00904491469844
#0.00904368557564 -> 0.00841398
#0.00871254534169 -> 0.00843470
#0.00897335994716 -> 0.00842433
#0.00897277319576 -> 0.00846709
#0.00899464110064 -> 0.00843748
#Baseline CRPS: 0.00965034244803
#1126695 training examples
#987398 0s
#133717 valid no 0
#5580 invalid
# ---- Training phase (Python 2 script) ----
# Features, labels, and baseline CDF predictions from the training CSV.
_, X, y, avgs = data_set('train.csv')
print 'Basic CRPS: ', calc_crps(avgs, y)
from sklearn import preprocessing
# Standardize features to zero mean / unit variance for the linear model.
scaler = preprocessing.StandardScaler().fit(X)
X = scaler.transform(X)
y_labels = as_labels(y)
train_X, train_y, val_X, val_y, train_labels, val_labels, ti, vi = split(X, y)
val_avgs = avgs[vi]
val_crps = calc_crps(val_avgs, val_y)
print 'Val CRPS: ', val_crps
from sklearn import linear_model
# Zero-vs-nonzero rain classifier used to sharpen the baseline CDFs.
clf = linear_model.LogisticRegression(tol=1e-8, C=128)
#from sklearn.ensemble import RandomForestClassifier
#clf = RandomForestClassifier(n_estimators=100)
clf.fit(train_X, train_labels)
#clf.fit(X, y_labels)
print 'Training Accuracy', clf.score(train_X, train_labels)
print 'Validation Accuracy', clf.score(val_X, val_labels)
from sklearn.metrics import classification_report
#print classification_report(y_labels, clf.predict(X))
print classification_report(val_labels, clf.predict(val_X))
val_X_p = clf.predict_proba(val_X)
# Grid-search the classifier threshold minimizing validation CRPS.
best = 0
best_crps = 1000000000
for threshold in np.arange(0.5, 0.9, 0.05):
    cl_val_crps = calc_crps(apply_classifier(val_X_p, val_avgs, threshold), val_y)
    if cl_val_crps<best_crps:
        best = threshold
        best_crps=cl_val_crps
print 'Best classification Threshold: ', best
print 'Best classification CRPS: ', best_crps
'''
for i, x in enumerate(avgs):
    print calc_crps([avgs[i]], [y[i]])
    plt.plot(x)
    if y[i]<70:
        plt.plot(step(y[i]))
    else:
        a = [0]*70
        a[-1]=1
        plt.plot(a)
    plt.show()
s = []
for i, x in enumerate(avgs):
    e =calc_crps([avgs[i]], [y[i]])
    s.append(e)
#    if e>0.8:
#        plt.plot(avgs[i])
#        print y[i], avgs[i]
#        plt.show()
plt.hist(s, bins=50, log=True)
plt.show()
'''
# ---- Prediction / submission phase ----
print 'Predicting for sumbission...'
print 'Loading test file...'
ids, X, y, avgs = data_set('test_2014.csv')
# NOTE(review): apply_classifier's return value is discarded in this loop,
# and the tuned `best` threshold is ignored, so the submission below writes
# the *unadjusted* baseline CDFs.  Likely intended:
#   cdfs = apply_classifier(clf.predict_proba(X), avgs, best)
# Also: the test features X are never passed through `scaler.transform`,
# unlike the training data — confirm.
for threshold in np.arange(0.5, 0.9, 0.05):
    apply_classifier(clf.predict_proba(X), avgs, threshold)
cdfs = avgs
print 'Writing submision file...'
writer = csv.writer(open('classifier-cdfavg-sub.csv', 'w'))
solution_header = ['Id']
solution_header.extend(['Predicted{0}'.format(t) for t in xrange(0, 70)])
writer.writerow(solution_header)
for i, id in enumerate(ids):
    prediction = cdfs[i]
    solution_row = [id]
    solution_row.extend(prediction)
    writer.writerow(solution_row)
    if i % 10000 == 0:
        print "Completed row %d" % i
|
class Solution(object):
    """Checks whether a string of brackets is properly balanced."""

    def isValid(self, s):
        """Return True iff s contains only (), {}, [] pairs, correctly nested."""
        matching = {')': '(', '}': '{', ']': '['}
        pending = []
        for ch in s:
            if ch in '({[':
                pending.append(ch)
            elif ch in matching:
                # A closer must match the most recent unmatched opener.
                if not pending or pending.pop() != matching[ch]:
                    return False
            else:
                # Any character that is not a bracket invalidates the string.
                return False
        return not pending
# Quick manual check: a lone opening bracket is unbalanced, so this prints False.
print(Solution().isValid('['))
# Reverse a user-supplied word and echo it back one character at a time.
word = input("Input a word to reverse: ")
# Walk the indices from the last character down to the first.
for index in reversed(range(len(word))):
    print(word[index], end="")
# Blank line after the reversed word (the "\n" plus print's own newline).
print("\n")
import re
import sys
import requests
import socket
from struct import *
# Allow very slow responses before timing out (value is in seconds).
socket.setdefaulttimeout(10000)
# Python 2 only: re-expose sys.setdefaultencoding and force UTF-8 globally.
reload(sys)
sys.setdefaultencoding("utf-8")
def visitPhones(phone_url, phone_id):
    """Scrape one Amazon.in product page and collect phone details.

    Appends the page's ASIN to phone_id (mutated in place) so the same
    product is not collected twice.

    Returns a list [name, picture, desc, price, rating, warranty] on
    success, or False when the page cannot be fetched, is a duplicate,
    or lacks the mandatory name/price/picture fields.
    """
    result = []
    headers = {
        'User-Agent': 'Mozilla/5.0'
    }
    response = requests.get(phone_url, headers=headers)
    if response.status_code != 200:
        return False
    # NOTE(review): regex scraping of Amazon HTML is fragile; any of these
    # searches may return None when the page layout changes.
    name = re.search('<span id="productTitle" class="a-size-large">(.*)</span>', response.text)
    picture = re.search('" data-old-hires="(.*)" class=', response.text)
    desc = re.findall('<li><span class="a-list-item"> (.*)<\/span>', response.text)
    rating = re.search('class="reviewCountTextLinkedHistogram noUnderline" title="(\d*\.?\d*) out of 5 stars">', response.text)
    price = re.search('<span class="currencyINR"> <\/span> (.*)<\/span><\/span>', response.text)
    warranty = re.search('Warranty Details:<\/strong> (.*)\W*<\/span>', response.text)
    asin = re.search('asin=([A-Z0-9]*)"', response.text)
    # Bug fix: asin can be None, which previously raised AttributeError on
    # .group(1); treat a missing ASIN as an unusable page.
    if asin is None:
        return False
    if asin.group(1) in phone_id:
        # Already scraped this product.
        return False
    phone_id.append(asin.group(1))
    if not (name and price and picture):
        return False
    result.append(name.group(1))
    result.append(picture.group(1))
    # The description list may legitimately be empty.
    result.append(desc if desc else 'Description not available.')
    result.append(str(price.group(1)).strip())
    result.append(rating.group(1) if rating else 'Rating not available!')
    result.append(warranty.group(1) if warranty else 'Warranty not available.')
    return result
def action(brand, keyword):
    """Crawl Amazon.in paginated search results for brand/keyword (Python 2).

    Extracts product links from each result page and scrapes every one via
    visitPhones, printing the collected data as it goes.
    """
    phone_list = []
    phone_id = []
    for x in xrange(1,1000):
        url = 'http://www.amazon.in/s/ref=sr_pg_'+str(x)+'?fst=as%3Aoff&rh=n%3A976419031%2Cn%3A1389401031%2Cn%3A1389432031%2Cn%3A1805560031%2Ck%3A'+keyword+'%2Cp_89%3A'+brand+'&page=2&keywords='+keyword+'&ie=UTF8&qid=1430076551'
        headers = {
            'User-Agent': 'Mozilla/5.0'
        }
        response = requests.get(url, headers = headers)
        if response.status_code == 200 :
            p = re.compile('a-text-normal" title=".*" href="(.*)"><h2')
            match = re.findall(p, response.text)
            if match :
                for phone in match:
                    data = visitPhones(phone, phone_id)
                    if data:
                        print data
                        phone_list.append(phone)
        else :
            # NOTE(review): there is no break here, so the remaining pages of
            # the 1..999 range are still attempted after a failure.
            print 'Done!/Bad connectivity!'
def amazon():
    """Interactive entry point: prompt for brand and keyword, then crawl (Python 2)."""
    brand = raw_input('Enter brand name\n')
    keyword =raw_input('Enter Keyword\n')
    action(brand, keyword)
|
import hashlib
import hmac
import random
import string

import pymongo
from pymongo.errors import DuplicateKeyError, PyMongoError

from blog.repo import errors
__author__ = 'tyerq'
class User:
    """
    Users DAO Class

    Documents live in the ``users`` collection with the username as ``_id``
    and ``passw`` stored as ``"<sha256hex>;<salt>"``.
    """
    def __init__(self, db):
        self.db = db
        self.coll = self.db.users

    def validate_user(self, username, passw):
        """Return the user document when the password matches.

        Raises EntryNotFound for an unknown user, WrongCredentials on a
        password mismatch, SomethingWentWrong on database errors.
        """
        try:
            user = self.coll.find_one({'_id': username})
        except PyMongoError as err:
            raise errors.SomethingWentWrong(err)
        if not user:
            raise errors.EntryNotFound()
        # Re-hash the supplied password with the stored salt and compare.
        _, salt = user['passw'].split(';')
        # Security fix: hmac.compare_digest gives a constant-time comparison,
        # avoiding a timing side channel on password checks.
        if not hmac.compare_digest(user['passw'], _make_hashed_passw(passw, salt)):
            raise errors.WrongCredentials()
        return user

    def get_user(self, username):
        """Return the user document for *username* or raise EntryNotFound."""
        try:
            user = self.coll.find_one({'_id': username})
        except PyMongoError as err:
            raise errors.SomethingWentWrong(err)
        if not user:
            raise errors.EntryNotFound()
        return user

    def create_user(self, username, passw, name=None, email=None):
        """Insert a new user; raises EntryExists when the username is taken."""
        user = {
            '_id': username,
            'passw': _make_hashed_passw(passw)
        }
        if name:
            user['name'] = name
        if email:
            user['email'] = email
        try:
            self.coll.insert_one(user)
        except DuplicateKeyError:
            raise errors.EntryExists()
        except PyMongoError as err:
            raise errors.SomethingWentWrong(err)
class Session:
    """
    Sessions DAO Class

    Each session document is ``{'_id': <32-letter token>, 'username': ...}``.
    """
    def __init__(self, db):
        self.db = db
        self.sessions = self.db.sessions

    def start_session(self, username):
        """Create a session for *username* and return its id token."""
        _id = _make_salt(32)
        session = {'_id': _id, 'username': username}
        try:
            self.sessions.insert_one(session)
        # Bug fix: DuplicateKeyError is a subclass of PyMongoError, so it must
        # be caught first — the original order made the retry branch
        # unreachable and raised SomethingWentWrong on an id collision.
        except DuplicateKeyError:
            print('session: got duplicate id. repeating...')
            return self.start_session(username)
        except PyMongoError as err:
            raise errors.SomethingWentWrong(err)
        return session['_id']

    def end_session(self, session_id):
        """Delete the session document; a None id is silently ignored."""
        if session_id is None:
            return
        try:
            # Bug fix: pymongo collections have delete_one(), not remove_one();
            # the original raised AttributeError on every session end.
            self.sessions.delete_one({'_id': session_id})
        except PyMongoError as err:
            raise errors.SomethingWentWrong(err)
        return

    # get the username of the current session, or None if the session is not valid
    def get_username(self, session_id):
        try:
            session = self.sessions.find_one(session_id)
        except PyMongoError as err:
            raise errors.SomethingWentWrong(err)
        if not session:
            return None
        else:
            return session['username']
def _make_salt(length=8):
salt = []
for i in range(length):
salt.append(random.choice(string.ascii_letters))
return ''.join(salt)
def _make_hashed_passw(passw, salt=None):
if not salt:
salt = _make_salt()
hashed = hashlib.sha256('{passw}{salt}'.format(passw=passw, salt=salt).encode('utf-8')).hexdigest()
return '{hashed};{salt}'.format(hashed=hashed, salt=salt)
class db:
    """Aggregates the DAO singletons over one shared MongoClient.

    NOTE(review): the client and DAO instances are created at class-definition
    (import) time as a module-level side effect.
    """
    _connection_string = "mongodb://localhost"
    _connection = pymongo.MongoClient(_connection_string)
    _db = _connection.pyramid_blog
    posts = None # TODO define posts
    users = User(_db)
    sessions = Session(_db)
"""
Heber Cooke 10/8/2019
Chapter 2 Exercise 10
The Credit Plan calculates the payments for the life as a loan
with a 10% down payment and payments 5% of price after down payment
annual interest rate of 12%
input: price of item
output: table
month number (start at 1)
current total balance owed
intrest owed for the month
amount of principal owed for the month
the payment for the month
the balance remaining after payment
"""
# Credit plan amortization table: 10% down, payments of 5% of the financed
# amount, 12% annual interest compounded monthly.
price = float(input("Enter the price: "))
down_payment = price * .1
balance = price - down_payment
payment = balance * .05
monthly_rate = 0.12 / 12
month = 0
header_fmt = "%-8s%-12s%-12s%-12s%-12s%-12s"
row_fmt = "%-8d%-12.2f%-12.2f%-12.2f%-12.2f%-12.2f"
print(header_fmt % ("Month", "Balance", "Interest", "Principal", "Payment", "Balance after Payment"))
while balance > payment:
    month += 1
    interest = balance * monthly_rate
    principal = payment - interest
    remaining = balance - principal
    print(row_fmt % (month, balance, interest, principal, payment, remaining))
    balance = remaining
# Final month: the whole remaining balance is paid off with no interest charged.
month += 1
print(row_fmt % (month, balance, 0, balance, balance, 0))
# Generated using https://godot-build-options-generator.github.io
# SCons options for a size-optimized Godot build with most optional engine
# modules stripped out.
optimize = "size"
disable_advanced_gui = "yes"
deprecated = "no"
minizip = "no"
# Disable every optional module not needed by this project.
module_arkit_enabled = "no"
module_bmp_enabled = "no"
module_bullet_enabled = "no"
module_camera_enabled = "no"
module_csg_enabled = "no"
module_dds_enabled = "no"
module_enet_enabled = "no"
module_etc_enabled = "no"
module_gdnative_enabled = "no"
module_gdnavigation_enabled = "no"
module_gridmap_enabled = "no"
module_hdr_enabled = "no"
module_mbedtls_enabled = "no"
module_mobile_vr_enabled = "no"
module_ogg_enabled = "no"
module_opensimplex_enabled = "no"
module_opus_enabled = "no"
module_regex_enabled = "no"
module_stb_vorbis_enabled = "no"
module_tga_enabled = "no"
module_theora_enabled = "no"
module_tinyexr_enabled = "no"
module_upnp_enabled = "no"
module_visual_script_enabled = "no"
module_vorbis_enabled = "no"
module_webm_enabled = "no"
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add nullable article_type/distribution FKs to DayStatistic, alter the
    date field, and enforce a (date, distribution, article_type) composite
    uniqueness constraint."""
    dependencies = [
        ('query', '0016_auto_20160203_1324'),
    ]
    operations = [
        migrations.AddField(
            model_name='daystatistic',
            name='article_type',
            field=models.ForeignKey(blank=True, to='query.ArticleType', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='daystatistic',
            name='distribution',
            field=models.ForeignKey(blank=True, to='query.Distribution', null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='daystatistic',
            name='date',
            field=models.DateField(),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='daystatistic',
            unique_together=set([('date', 'distribution', 'article_type')]),
        ),
    ]
|
"""Device HA Pair Services Classes."""
import logging
from .ftddevicehapairs import FTDDeviceHAPairs
from .ftddevicehapairs import DeviceHAPairs
from .failoverinterfacemacaddressconfigs import FailoverInterfaceMACAddressConfigs
from .failoverinterfacemacaddressconfigs import DeviceHAFailoverMAC
from .monitoredinterfaces import DeviceHAMonitoredInterfaces
from .monitoredinterfaces import MonitoredInterfaces
# Emitted at import time so package loading shows up in debug logs.
logging.debug("In the device_ha_pair_services __init__.py file.")
# Public API: names re-exported from the submodules imported above.
__all__ = [
    "FTDDeviceHAPairs",
    "DeviceHAPairs",
    "FailoverInterfaceMACAddressConfigs",
    "DeviceHAFailoverMAC",
    "DeviceHAMonitoredInterfaces",
    "MonitoredInterfaces",
]
|
from rest_framework import serializers
from .models import SongGroup, Song, SongList
from django.shortcuts import get_object_or_404
from users.models import User
class SongGroupSerializer(serializers.ModelSerializer):
    """Serializer for song groups owned by the requesting user."""
    class Meta:
        model = SongGroup
        fields = ["id", "name", "user_id" ]
        extra_kwargs ={
            "id":{
                "read_only":True
            }
        }
    def validate(self, data):
        # Enforce a 2-20 character group name.
        # NOTE(review): the error text means "name too long" even though names
        # shorter than 2 characters are rejected here too.
        name = data.get("name")
        if len(name) < 2 or len(name) > 20:
            raise serializers.ValidationError("名称过长!")
        return data
    def create(self, validate_data):
        """Create a group with the validated name, owned by the request's user."""
        name=validate_data.get("name")
        user = self.context['request'].user
        group = SongGroup.objects.create(
            name=name,
            user = user
        )
        return group
class SongSerializer(serializers.ModelSerializer):
    """Serializer for creating and listing songs with an optional group."""
    group_id = serializers.IntegerField(required=False, label="分组id", write_only=True,allow_null=True)

    class Meta:
        model = Song
        fields = ["id", "name", "singer", "group_id", "group"]
        depth = 1
        extra_kwargs = {
            "singer": {
                "allow_null": True
            },
            "group": {
                "read_only": True
            }
        }

    def validate(self, data):
        """Reject names outside the 1-30 character range."""
        # Bug fix: the original read data.get("data") — a key that does not
        # exist — into an unused `singer` variable; the dead lookup is removed.
        name = data.get("name")
        if len(name) < 1 or len(name) > 30:
            raise serializers.ValidationError("名称过长或过短")
        return data

    def create(self, validate_data):
        """Create a song owned by the requesting user.

        An unknown or missing group_id simply leaves the song ungrouped.
        """
        name = validate_data.get("name")
        singer = validate_data.get("singer")
        user = self.context['request'].user
        group_id = validate_data.get("group_id")
        try:
            group = SongGroup.objects.get(pk=group_id)
        except SongGroup.DoesNotExist:
            group = None
        song = Song.objects.create(
            name=name,
            singer=singer,
            user=user,
            group=group
        )
        return song
class SongDetailSerializer(serializers.ModelSerializer):
    """Serializer exposing the editable fields of a single song."""
    class Meta:
        model = Song
        fields = ["name", "group_id", "singer"]
class OrderSongSerializer(serializers.ModelSerializer):
    """Read serializer for songs including the nested group (depth=1)."""
    class Meta:
        model=Song
        fields = ["id", "name", "singer", "group"]
        depth=1
class SongListSerializer(serializers.ModelSerializer):
    """Serializer for entries in a singer's song-request list."""
    class Meta:
        model=SongList
        fields=[ "song", "create_time", "sang_time", "sponsor", "money", "is_sang"]
    def create(self, validated_data):
        # The target singer's user id comes from the URL kwarg, not the payload.
        try:
            user=User.objects.get(pk=self.context['view'].kwargs.get('pk'))
        except User.DoesNotExist:
            raise serializers.ValidationError("请输入歌手id")
        # NOTE(review): sang_time and is_sang from the payload are ignored here —
        # confirm they are meant to take their model defaults on creation.
        song = SongList.objects.create(
            user=user,
            song=validated_data.get("song"),
            sponsor=validated_data.get("sponsor"),
            money=validated_data.get("money")
        )
        return song
import os
import django
# Configure Django before any ORM model imports below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LotteryKiller.settings')
django.setup()
# NOTE(review): the WSGI application object is never used by this analysis
# module — possibly a copy-paste from wsgi.py.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
import numpy as np
import pandas as pd
from django.forms.models import model_to_dict
from killer.models import Result
from collections import Counter
def judge_sum_in(arrLike):
    """True when the engineered sum (column 7) matches one of the six red balls (columns 0-5)."""
    reds = list(arrLike[:6])
    candidate = arrLike[7]
    return candidate in reds
def way1(killnum_data):
    """Method 1: previous draw's red1+red2 sum, tested against the current reds.

    Returns a DataFrame with the shifted sum and a boolean hit column.
    """
    frame = killnum_data.copy()
    frame['r1r2sum'] = (killnum_data['red1'] + killnum_data['red2']).shift(periods=1, axis=0)
    frame['way1result'] = frame.apply(judge_sum_in, axis=1)
    return frame[['r1r2sum', 'way1result']]
def _pairwise_sum_feature(killnum_data, col_idx, sum_col, result_col):
    """Shared engine for way2/way3/way4 (they were copy-paste triplicates).

    Sums each value in column *col_idx* with its successor, shifts the series
    down by two draws (NaN-padded), then flags draws where that sum appears
    among the six red balls (judge_sum_in reads the sum from column index 7,
    which is where *sum_col* lands after assignment).
    """
    frame = killnum_data.copy()
    values = list(killnum_data.iloc[:, col_idx])
    sums = [np.NaN, np.NaN]
    for i in range(len(values) - 2):
        sums.append(values[i] + values[i + 1])
    frame[sum_col] = sums
    frame[result_col] = frame.apply(judge_sum_in, axis=1)
    return frame[[sum_col, result_col]]


def way2(killnum_data):
    """Method 2: consecutive-draw sums of red1 (column 0)."""
    return _pairwise_sum_feature(killnum_data, 0, 'p1p2sum', 'way2result')


def way3(killnum_data):
    """Method 3: consecutive-draw sums of red2 (column 1)."""
    return _pairwise_sum_feature(killnum_data, 1, 'p1p2sum_red2', 'way3result')


def way4(killnum_data):
    """Method 4: consecutive-draw sums of the blue ball (column 6)."""
    return _pairwise_sum_feature(killnum_data, 6, 'p1p2sum_blue', 'way4result')
def get_digits(x):
    """Return the last decimal digit of x."""
    return x - x // 10 * 10
def map2times(arrLike):
    """Return a 10-element list: occurrence counts of the digits 0-9 in arrLike."""
    counts = Counter(arrLike)
    return [counts[digit] for digit in range(10)]
def process_tail(killnum_data):
    """Per-draw counts of red-ball last digits (0-9), skipping the first two rows.

    Returns a DataFrame whose first column is the restored period index and
    whose remaining ten columns count how often each tail digit occurred.
    """
    digit_frame = killnum_data.copy().iloc[:, :6].apply(get_digits)
    counts = pd.DataFrame(digit_frame.apply(map2times, axis=1).to_dict()).T
    counts = counts.reindex(index=killnum_data.index).reset_index()
    # Skip the first two draws to align with the lag features elsewhere.
    return counts.iloc[2:, :]
def process_results():
    """Load the 32 most recent draws and compute all kill-number features.

    Returns (feature_rows, tail_count_rows, first_period, last_period).
    """
    model_list = [model_to_dict(model) for model in Result.objects.all()[0:32]]
    # Reverse so rows run oldest -> newest.
    model_list = model_list[::-1]
    begin = model_list[0]['period']
    end = model_list[-1]['period']
    killnum_data = pd.DataFrame(model_list, index=list(range(0, 32)))
    killnum_data = killnum_data.set_index('period').drop('id', axis='columns')
    # Fixed column order matters: judge_sum_in reads rows positionally.
    killnum_data = killnum_data.reindex(
        columns=['red1', 'red2', 'red3', 'red4', 'red5', 'red6', 'blue']
    )
    processed = pd.concat([killnum_data, way1(killnum_data), way2(killnum_data), way3(killnum_data), way4(killnum_data)], axis=1)
    # Drop the first two rows, which hold NaN lag features.
    processed = processed.reset_index().iloc[2:, :]
    # print(process_tail(killnum_data).values)
    return processed.values, process_tail(killnum_data).values, begin, end
if __name__ == '__main__':
    process_results()
|
import logging
from restless.dj import DjangoResource
from restless.preparers import FieldsPreparer
from restless.exceptions import BadRequest
from django.db import IntegrityError
from postmon.responser import PostmonResponse
from zipcodes.models import ZipCode
# Get an instance of a logger
logger = logging.getLogger(__name__)
class ZipCodeResource(DjangoResource):
    """REST resource exposing zip codes backed by the Postmon API."""

    preparer = FieldsPreparer(fields={
        'address': 'address',
        'neighborhood': 'neighborhood',
        'city': 'city',
        'state': 'state',
        'zip_code': 'zip_code',
    })

    def is_authenticated(self):
        # Public, unauthenticated API.
        return True

    def list(self):
        """Return all zip codes, optionally capped by the ?limit= parameter."""
        if not self.request.GET.get('limit'):
            logger.info('[GET] List zip codes')
            return ZipCode.objects.all()
        # NOTE(review): a non-numeric limit raises ValueError here — confirm
        # whether a 400 response would be preferable.
        limit = int(self.request.GET.get('limit'))
        logger.info('[GET] List zip codes limited: %s' % limit)
        return ZipCode.objects.all()[:limit]

    def create(self):
        """Create a ZipCode from Postmon data for the posted zip_code."""
        zip_code = self.data.get('zip_code')
        try:
            postmon = PostmonResponse(zip_code)
        # Bug fix: the original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            logger.error(
                '[Error] Incorrect zip code format: %s' % zip_code)
            raise BadRequest('Incorrect zip code format')
        response = postmon.response()
        try:
            created = ZipCode.objects.create(
                address=response['logradouro'],
                neighborhood=response['bairro'],
                city=response['cidade'],
                state=response['estado'],
                zip_code=response['cep']
            )
            logger.info('[API] New zip code created: %s' % zip_code)
            return created
        except KeyError:
            # Postmon omits street/neighborhood for some codes; fall back to
            # the city-level data that is always present.
            created = ZipCode.objects.create(
                city=response['cidade'],
                state=response['estado'],
                zip_code=response['cep']
            )
            logger.info('[API] New zip code created: %s' % zip_code)
            return created
        except IntegrityError:
            logger.error('[Error] Zip code already created: %s' % zip_code)
            raise BadRequest('Zip code already created')

    def delete(self, pk):
        """Remove the zip code identified by *pk*."""
        logger.info('[API] Delete zip code: %s' % pk)
        ZipCode.objects.get(zip_code=pk).delete()

    def detail(self, pk):
        """Return a single zip code record."""
        logger.info('[API] Get zip code: %s' % pk)
        return ZipCode.objects.get(zip_code=pk)
|
import re
# Class-interval -> frequency mapping filled by get_and_process_input().
freq = {}
def get_and_process_input():
    """Prompt for "low-high=freq" pairs and load them into the global freq dict."""
    unprocessed_data = input("Enter the data in given format CL1-CL2=f,CL2-CL3=f1 eg:(100-200=40,200-300=10): ")
    split_unprocessed_data = unprocessed_data.split(',')
    for a in split_unprocessed_data:
        pattern = r"^([0-9]*)-([0-9]*)=(\d*\.?\d*|[0-9]+)$"
        match = re.search(pattern, a)
        # A malformed entry leaves match as None; the AttributeError raised by
        # .group() below is caught by the caller's except block.
        freq.update({f"{match.group(1)}-{match.group(2)}": float(match.group(3))})
class Mode:
    """Grouped-frequency mode calculator.

    Takes a dict mapping "low-high" class-interval strings to frequencies and
    computes the mode via L + (f1 - f0) / (2*f1 - f0 - f2) * h.
    """
    def __init__(self, dictionary):
        self.cl1 = None        # lower bound of the modal class
        self.f1 = None         # modal class frequency
        self.f0 = None         # frequency of the preceding class (0 at the edge)
        self.f2 = None         # frequency of the following class (0 at the edge)
        self.h = None          # class width
        self.max_index = None  # position of the modal class
        self.dictionary = dictionary

    def f(self):
        """Locate the modal class and its neighbouring frequencies."""
        value_list = list(self.dictionary.values())
        self.f1 = max(value_list)
        self.max_index = value_list.index(self.f1)
        # Bug fix: the boundary tests compared *values* (value_list[0] == f1 /
        # value_list[-1] == f1), which wrongly zeroed a real neighbour whenever
        # a duplicate of the maximum sat at either end; test the modal *index*.
        self.f0 = 0 if self.max_index == 0 else value_list[self.max_index - 1]
        self.f2 = 0 if self.max_index == len(value_list) - 1 else value_list[self.max_index + 1]

    def l_and_h(self):
        """Parse the modal class key into its lower bound (cl1) and width (h)."""
        key_list = list(self.dictionary.keys())
        modal_class = key_list[self.max_index]
        pattern = '^([0-9]+)-([0-9]+)$'
        match = re.match(pattern, modal_class)
        self.cl1 = float(match.group(1))
        l1 = float(match.group(2))
        self.h = l1 - self.cl1

    def value(self):
        """Return the mode formatted to two decimal places."""
        self.f()
        self.l_and_h()
        top = self.f1 - self.f0
        bottom = (2 * self.f1) - self.f0 - self.f2
        return '{:.2f}'.format(self.cl1 + (top / bottom * self.h))
# Entry point: malformed input surfaces as AttributeError (regex match is
# None) and is reported as a user input error.
try:
    get_and_process_input()
    print(f"Your Mode for \n{freq} is:\n" + "%.2f" % float(Mode(freq).value()))
except AttributeError:
    print('\n')
    print("Oops your type of input is wrong, try again.")
|
"""structure models"""
class Products:
    """Product inventory record (name, category, price, quantity, unit of measure)."""
    def __init__(self, product_name, category, unit_price, quantity, measure):
        self.product_name = product_name
        self.category = category
        self.unit_price = unit_price
        self.quantity = quantity
        self.measure = measure
class Sales:
    """Sale record; tracks only the user who made the sale."""
    def __init__(self, user_id):
        self.user_id = user_id
class SalesHasProducts:
    """Join record linking a sale to a product with quantity and line total."""
    def __init__(self, sale_id, product_id, quantity, total):
        self.sale_id = sale_id
        self.product_id = product_id
        self.quantity =quantity
        self.total = total
class Users:
    """Application user account (display name, username, password, role)."""
    def __init__(self, name, user_name, password, role):
        self.name = name
        self.user_name = user_name
        self.password = password
        self.role = role
class Login:
    """Login credentials model (username, password, role)."""
    def __init__(self, user_name, password, role):
        self.user_name = user_name
        self.password = password
        self.role = role
|
import itertools
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from fit_d2p import vddm_params, tdm_params, Tdm, Vddm, model_params, mangle_tau, actgrid
import hikersim
from hikersim import braking_spec
import scipy.optimize
import scipy.interpolate
#START_TIME = -3
#END_TIME = 20
leader_start = 100  # NOTE(review): not referenced in this chunk — possibly used elsewhere
DT = 1/30  # simulation timestep (seconds): 30 Hz sampling
def analyze_pedestrian_time_loss(model):
    """Plot how an eHMI changes mean pedestrian crossing time across TTAs.

    For each initial speed and TTA the vehicle decelerates at the minimum
    stopping deceleration; mean crossing times come from the crossing-decision
    distribution produced by *model*, with and without the eHMI active.
    """
    ttas = np.linspace(2, 5, 10)
    speeds = np.array([25, 30, 35])/2.237  # mph -> m/s
    ts = np.arange(0, 20, DT)
    fig, (tax, pax) = plt.subplots(nrows=2)
    for speed in speeds:
        vanillas = []
        ehmis = []
        for tta in ttas:
            decel = get_minimum_decel(tta, speed)
            traj = get_trajectory(speed, tta, decel, False)
            dist = model(traj).ps
            mean_time = np.dot(traj.time, dist/np.sum(dist))
            vanillas.append(mean_time)
            # Bug fix: the eHMI trajectory was built as
            # get_trajectory(tta, speed, True, True, ts) — speed/tta swapped
            # and True passed as the deceleration. It must mirror the vanilla
            # call with only the eHMI flag enabled.
            traj = get_trajectory(speed, tta, decel, True, ts)
            dist = model(traj).ps
            mean_time = np.dot(traj.time, dist/np.sum(dist))
            ehmis.append(mean_time)
        vanillas = np.array(vanillas)
        ehmis = np.array(ehmis)
        tax.plot(ttas, vanillas - ehmis, label=f"Initial vehicle speed {speed:.1f} m/s")
        pax.plot(ttas, (1 - ehmis/vanillas)*100, label=f"Initial vehicle speed {speed:.1f} m/s")
    fig.suptitle("eHMI effect on pedestrian crossing duration")
    tax.set_ylabel("eHMI efficiency gain (seconds)")
    pax.set_ylabel("eHMI efficiency gain (percent)")
    pax.set_xlabel("Initial TTA (seconds)")
    tax.legend()
    plt.show()
cross_dur = 3.0  # seconds a pedestrian needs to finish crossing after starting
acceleration = 1.3  # vehicle re-acceleration magnitude after a yield (m/s^2)
@np.vectorize
def yield_time_loss_wtf(target_speed, time_gap, t_cross, **kwargs):
    """Closed-form time lost by a yielding vehicle (sympy-generated expression).

    NOTE(review): yield_time_loss below covers the same quantity with explicit
    branches; confirm which of the two is authoritative.
    """
    b, t_brake, t_stop = braking_spec(time_gap, target_speed, **kwargs)
    a = acceleration
    # The vehicle must stay yielded until the pedestrian has crossed.
    yield_dur = max(0, t_cross + cross_dur - t_brake)
    # TODO: Ugly sympy generated code
    return ((1/2)*b*t_brake**2/target_speed - b*t_brake*min(t_brake + yield_dur, t_brake - target_speed/b)/target_speed + (1/2)*b*min(t_brake + yield_dur, t_brake - target_speed/b)**2/target_speed - t_brake - yield_dur + ((-1/2*target_speed/a) if (b*yield_dur + target_speed <= 0) else (-1/2*b**2*yield_dur**2/(a*target_speed))) + min(t_brake + yield_dur, t_brake - target_speed/b))
@np.vectorize
def yield_time_loss(target_speed, time_gap, t_cross, **kwargs):
    """Time lost (s) by a vehicle that brakes to yield to a pedestrian crossing
    at t_cross, relative to driving through at target_speed.

    Returns 0 when braking begins after the pedestrian has already cleared.
    """
    v0 = target_speed
    b, t_brake, t_stop = braking_spec(time_gap, target_speed, **kwargs)
    assert t_brake < t_stop
    assert b <= 0
    assert target_speed > 0
    a = acceleration
    # The vehicle may resume only once the pedestrian has fully crossed.
    t_passed = t_cross + cross_dur
    dur_yield = t_passed - t_brake
    if dur_yield < 0:
        return 0.0
    if t_passed > t_stop:
        # Full stop: yield duration minus the braking and re-acceleration ramps.
        wtf = dur_yield + v0/(2*b) + v0/(2*a)
        return wtf
    #wtf = -(a*b*dur_yield**2 + 3*b**2*dur_yield**2 + v0*(4*b*dur_yield + v0))/(2*a*v0)
    # Partial brake: the vehicle is still moving when the pedestrian clears.
    wtf = b*dur_yield**2*(-a + b)/(2*a*v0)
    return wtf
@np.vectorize
def vehicle_time_loss(v0, t_brake, t_stop, t_cross):
    """Like yield_time_loss, but parameterized directly by the braking
    interval [t_brake, t_stop] instead of a time gap."""
    a = acceleration
    t_passed = t_cross + cross_dur
    dur_yield = t_passed - t_brake
    dur_brake = t_stop - t_brake
    # Constant deceleration bringing v0 to zero over dur_brake.
    b = -v0/dur_brake
    if dur_yield < 0:
        return 0.0
    if t_passed > t_stop:
        # Full stop branch.
        wtf = dur_yield + v0/(2*b) + v0/(2*a)
        return wtf
    # Partial brake branch.
    wtf = b*dur_yield**2*(-a + b)/(2*a*v0)
    return wtf
def get_trajectory(speed, tta, decel, ehmi, ts=None):
    """Simulate a vehicle approach under constant deceleration.

    The vehicle starts speed*tta metres from the crossing point and brakes at
    `decel` m/s^2 (clamped so it never reverses). Returns a recarray with
    fields time, distance, speed, tau (time-to-arrival), tau_dot and ehmi.
    """
    if ts is None:
        ts = np.arange(0, 20, DT)
    step = np.mean(np.diff(ts))
    velocity = np.maximum(0, speed - decel*ts)
    position = -speed*tta + np.cumsum(velocity*step)
    tau = -position/velocity
    tau_rate = np.gradient(tau, step)
    ehmi_flags = np.repeat(ehmi, len(ts))
    return np.rec.fromarrays(
        (ts, -position, velocity, tau, tau_rate, ehmi_flags),
        names="time,distance,speed,tau,tau_dot,ehmi")
def tta_time_loss(predict, ts, dt, tta, speed, decel, ehmi):
    """Expected vehicle time loss and mean crossing time for one scenario,
    weighting per-timestep values by the crossing-decision distribution."""
    t_stop = speed/decel
    traj = get_trajectory(speed, tta, decel, ehmi, ts)
    cd = predict(traj)
    l = vehicle_time_loss(speed, 0.0, t_stop, traj.time)
    return np.dot(l, np.array(cd.ps)*dt), np.dot(ts - ts[0], np.array(cd.ps)*dt)
# Vectorize over scenario parameters; predict and ts are passed through as-is.
tta_time_loss = np.vectorize(tta_time_loss, excluded=(0, 1))
#linear_param = [-9.05167401e-01, -1.10384155e-05, 1.10375652e-05, 4.72265220e-01]
#linear_param = [-3.21072425, -0.2429603, 0.24295947, 3.77101283]
#linear_param = [-1.83690295, 0.4502298, -0.28016829, 1.36037307]
#linear_param = [-1.90056245, 0.79925133, -0.27979611, 1.46707382]
linear_param = [0.0, 0.0, 0.0, 0.0]  # (tta_c, speed_c, ehmi_c, ic) for get_linear_decel
#linear_param = [ -6.68590288, -13.12009103, 0.05437497, 9.00117041]
stop_margin = 2.5  # metres before the crossing point at which a stopping vehicle halts
def get_linear_decel(tta, speed, ehmi, tta_c, speed_c, ehmi_c, ic):
    """Log-linear deceleration policy scaled above the minimum stopping decel.

    The regression predicts log of the over-deceleration multiplier from
    log(tta), log(speed) and the eHMI flag.
    """
    d0 = tta*speed
    # Bug fix: the speed and eHMI regression terms were multiplied together
    # (speed_c*np.log(speed) * ehmi_c*ehmi), which zeroes the speed term
    # whenever ehmi == 0; the linear model is additive in its terms.
    logdecel = tta_c*np.log(tta) + speed_c*np.log(speed) + ehmi_c*ehmi + ic
    stop_decel = speed**2/(2*(d0 - stop_margin))
    return (np.exp(logdecel) + 1)*stop_decel
def get_minimum_decel(tta, speed):
    """Lowest constant deceleration that still stops stop_margin metres short
    of the crossing point, given the initial time-to-arrival."""
    initial_distance = tta * speed
    stopping_distance = initial_distance - stop_margin
    return speed ** 2 / (2 * stopping_distance)
def fit_linear_decel(params, dt):
    """Fit the log-linear deceleration policy coefficients by minimizing mean
    expected time loss over a grid of TTA/speed/eHMI scenarios.

    Prints the optimizer trace and result; does not return a value.
    """
    model = Vddm(dt=dt, **model_params(params))
    def predict(traj):
        tau = mangle_tau(traj, **params)
        return model.decisions(actgrid, tau)
    ts = np.arange(0, 20, dt)
    ttas = np.linspace(2, 8, 5)
    speeds = np.linspace(10/3.6, 80/3.6, 5)
    ehmis = np.array([0.0, 1.0])
    def loss(args):
        # Accumulate (time loss, mean crossing time) pairs over all scenarios.
        losses = []
        for tta, speed, ehmi in itertools.product(ttas, speeds, ehmis):
            decel = get_linear_decel(tta, speed, ehmi, *args)
            loss = tta_time_loss(predict, ts, dt, tta, speed, decel, ehmi)
            losses.append(loss)
        print(args)
        print(np.mean(losses))
        return losses
    #fit = scipy.optimize.least_squares(loss, [0.0, 0.0, 0.0, 0.0])
    fit = scipy.optimize.minimize(lambda args: np.sum(loss(args)), linear_param, method='powell')
    print(fit)
    print(fit.x)
def fit_optimal_decel(predict, dt):
    """For a grid of speeds and TTAs, numerically find the constant
    deceleration (over the stopping minimum) that minimizes expected vehicle
    time loss, with and without an eHMI, then plot the optima.

    Emits one CSV-formatted line per optimum on stdout.
    """
    #params['pass_threshold'] = -np.inf
    #model = Vddm(dt=dt, **model_params(params))
    #def predict(traj):
    #    tau = mangle_tau(traj, **params)
    #    return model.decisions(actgrid, tau)
    ts = np.arange(0, 20, dt)
    ttas = np.linspace(2, 8, 50)
    #speeds = np.linspace(5, 30, 5)
    #speeds = np.linspace(30, 50, 80)
    speeds = np.array([5, 10, 15, 20])
    def tta_time_loss(tta, speed, decel, ehmi=False):
        # Local shadow of the module-level tta_time_loss: expected loss only.
        t_stop = speed/decel
        traj = get_trajectory(speed, tta, decel, ehmi, ts)
        cd = predict(traj)
        l = vehicle_time_loss(speed, 0.0, t_stop, traj.time)
        return np.dot(l, np.array(cd.ps)*dt)
    print("tta,speed,ehmi,overdecel,stop_decel,loss,stop_decel_loss")
    for si, speed in enumerate(speeds):
        overdecels = []
        eoverdecels = []
        for tta in ttas:
            ehmi = False
            def loss(overdecel):
                #d0 = tta*speed
                #stop_decel = speed**2/(2*(d0 - stop_margin))
                stop_decel = get_minimum_decel(tta, speed)
                decel = stop_decel + overdecel
                return tta_time_loss(tta, speed, decel, ehmi=ehmi)
            # Optimize in log space to keep the over-deceleration positive.
            fit = scipy.optimize.minimize(lambda x: loss(*np.exp(x)), np.log(1.0))
            fit.x = np.exp(fit.x)
            print(",".join(map(str, (tta, speed, ehmi, fit.x[0], get_minimum_decel(tta, speed), fit.fun, loss(0)))))
            overdecels.append(fit.x[0])
            ehmi = True
            fit = scipy.optimize.minimize(lambda x: loss(*np.exp(x)), np.log(1.0))
            fit.x = np.exp(fit.x)
            print(",".join(map(str, (tta, speed, ehmi, fit.x[0], get_minimum_decel(tta, speed), fit.fun, loss(0)))))
            eoverdecels.append(fit.x[0])
        overdecels = np.array(overdecels)
        d0 = ttas*speed
        stop_decels = speed**2/(2*(d0 - stop_margin))
        decels = stop_decels + overdecels
        edecels = np.array(eoverdecels) + stop_decels
        ldecels = get_linear_decel(ttas, speed, 0.0, *linear_param)
        plt.plot(ttas, decels, '--', label=f"Speed {speed:.1f} m/s", color=f'C{si}')
        #plt.plot(ttas, overdecels, '--', label=f"Speed {speed:.1f} m/s", color=f'C{si}')
        #plt.plot(ttas, ldecels, '--', label=f"Speed {speed:.1f} m/s", alpha=0.5, color=f'C{si}')
        ldecels = get_linear_decel(ttas, speed, 1.0, *linear_param)
        plt.plot(ttas, edecels, '-', label=f"Speed {speed:.1f} m/s, eHMI", color=f'C{si}')
        plt.plot(ttas, stop_decels, ':', color=f'C{si}')
        #plt.plot(ttas, eoverdecels, '-', label=f"Speed {speed:.1f} m/s, eHMI", color=f'C{si}')
        #plt.plot(ttas, ldecels, '-', label=f"Speed {speed:.1f} m/s, eHMI", alpha=0.5, color=f'C{si}')
    plt.loglog()
    plt.xlabel("Initial TTA (seconds)")
    plt.ylabel("Optimal deceleration (m/s²)")
    plt.legend()
    plt.show()
def fig6(params, dt):
    """Heatmap of expected time loss over (TTA, over-deceleration) for one
    fixed speed, with the optimal constant deceleration overlaid."""
    ttas = np.linspace(2.0, 10, 11)
    #ttas = 1/np.linspace(1, 1/6, 20)
    overdecels = np.linspace(1.0, 5.0, 50)
    decels = np.linspace(0.1, 10, 50)
    stop_margins = np.linspace(0.0, 30, 50)
    speed = 10/2.237
    ts = np.arange(0, 20, 1/30)
    res = []
    #params['pass_threshold'] = -np.inf
    model = Vddm(dt=dt, **model_params(params))
    def predict(traj):
        tau = mangle_tau(traj, **params)
        return model.decisions(actgrid, tau)
    def stopping_margin_to_decel(tta, margin):
        # Deceleration needed to stop `margin` metres before the crossing.
        d0 = tta*speed
        decel_stop = speed**2/(2*(d0 - margin))
        return decel_stop
    def decel_to_stopping_margin(tta, decel):
        d0 = tta*speed
        return d0 - speed**2/(2*decel)
    losses = []
    margin = 2.5
    for overdecel in overdecels:
        #for margin in margins:
        #for decel in decels:
        for tta in ttas:
            d0 = tta*speed
            #decel_stop = speed**2/(2*(d0 - margin))
            decel_stop = get_minimum_decel(tta, speed)
            #decel = decel_stop
            decel = overdecel + decel_stop
            if decel < decel_stop:
                losses.append(np.nan)
                continue
            t_stop = speed/decel
            #x_stop = -tta*speed + speed*t_stop - t_stop**2*decel/2
            ehmi = False
            traj = get_trajectory(speed, tta, decel, ehmi, ts)
            cd = predict(traj)
            l = vehicle_time_loss(speed, 0.0, t_stop, traj.time)
            #plt.plot(ts, cd.ps)
            #plt.twinx()
            #plt.plot(ts, traj.tau, color='black')
            #plt.plot(ts, l, color='red')
            #plt.ylim(0, 10)
            #plt.show()
            # Expected loss under the crossing-decision distribution.
            losses.append(np.dot(l, np.array(cd.ps)*dt))
            #losses.append(tta)
    #y = margins
    y = overdecels
    #y = decels
    X, Y = np.meshgrid(ttas, y)
    losses = np.array(losses).reshape(X.shape)
    #D = speed**2/(2*(tta*speed))*Y
    #losses /= losses[0]
    #decel_stop = speed**2/(2*(speed*ttas - margin))
    # Column-wise argmin gives the optimal over-deceleration per TTA.
    best = np.nanargmin(losses, axis=0)
    plt.pcolor(X, Y, losses, cmap='jet')
    plt.xlabel("TTA (seconds)")
    plt.ylabel("Acceleration over minimum (m/s²)")
    plt.plot(ttas, y[best], color='black', label='Optimal constant deceleration')
    #plt.loglog()
    #plt.plot(ttas, decel_stop, color='white')
    #plt.ylabel("Deceleration (m/s²)")
    #for decel in [2.0, 3.0, 4.0, 5.0, 6.0]:
    #    plt.plot(ttas, od, color='white', alpha=0.7)
    #plt.xlim(ttas[0], ttas[-1])
    #plt.ylim(overdecels[0], overdecels[-1])
    #plt.colorbar(label="Mean time loss (seconds)")
    plt.colorbar(label="Mean time loss (seconds)")
    plt.show()
def analyze_vehicle_time_loss(model):
    """Plot how an eHMI changes the yielding vehicle's mean time loss.

    NOTE(review): this passes True where the current get_trajectory signature
    expects `decel`, and indexes traj[0].time — it appears written against an
    older get_trajectory signature. Verify before use.
    """
    ttas = np.linspace(2, 5, 30)
    speeds = np.array([25, 30, 35])/2.237
    fig, (tax, pax) = plt.subplots(nrows=2)
    for speed in speeds:
        vanillas = []
        ehmis = []
        for tta in ttas:
            traj = get_trajectory(speed, tta, True, False)
            dist = model(traj).ps
            time_losses = yield_time_loss(speed, tta, traj[0].time)
            assert np.all(time_losses >= 0)
            mean_loss = np.dot(time_losses, dist/np.sum(dist))
            vanillas.append(mean_loss)
            traj = get_trajectory(speed, tta, True, True)
            dist = model(traj).ps
            time_losses = yield_time_loss(speed, tta, traj[0].time)
            #assert np.all(time_losses >= 0)
            mean_loss = np.dot(time_losses, dist/np.sum(dist))
            ehmis.append(mean_loss)
        vanillas = np.array(vanillas)
        ehmis = np.array(ehmis)
        tax.plot(ttas, vanillas - ehmis, label=f"Initial vehicle speed {speed:.1f} m/s")
        pax.plot(ttas, (1 - ehmis/vanillas)*100, label=f"Initial vehicle speed {speed:.1f} m/s")
    fig.suptitle("eHMI effect on vehicle crossing duration")
    tax.set_ylabel("eHMI efficiency gain (seconds)")
    pax.set_ylabel("eHMI efficiency gain (percent)")
    pax.set_xlabel("Initial TTA (seconds)")
    tax.legend()
    plt.show()
def analyze_decels(model):
    """Plot vehicle time loss as a function of braking initiation distance.

    NOTE(review): written against an older get_trajectory signature — it
    passes a boolean where `decel` is expected and an x_brake= keyword the
    current get_trajectory does not accept, so this raises TypeError as-is.
    """
    ttas = np.linspace(2, 5, 5)
    speeds = np.array([25, 30, 35])/2.237
    tta = 5.0
    init_distances = np.linspace(30, 100, 5)
    x_stop = hikersim.x_stop
    speed = speeds[1]
    #for speed in speeds:
    for init_distance in init_distances:
        losses = []
        bds = np.linspace(-x_stop + 0.1, 60, 30)
        for bd in bds:
            tta = init_distance/speed
            braking = bd > x_stop
            traj = get_trajectory(speed, tta, braking, False, x_brake=-bd)
            dist = model(traj).ps
            time_losses = yield_time_loss(speed, tta, traj[0].time, x_brake=-bd)
            mean_loss = np.dot(time_losses, dist/np.sum(dist))
            losses.append(mean_loss)
        #plt.plot(bds, losses, label=f"Initial vehicle speed {speed:.1f} m/s")
        losses = np.array(losses)
        # Normalize each curve to its first braking distance.
        losses -= losses[0]
        plt.plot(bds, losses, label=f"Distance where seen {init_distance:.1f} m")
    plt.suptitle(f"Braking start distance effect on vehicle time loss (init speed {speed:.1f} m/s)")
    plt.gca().set_ylabel("Vehicle time loss (seconds)")
    plt.gca().set_xlabel("Braking initiation (meters)")
    plt.legend()
    plt.show()
def vdd_predictor(params, dt):
    """Build a VDDM crossing-decision predictor closed over fitted params.

    The returned callable accepts a trajectory (and optionally a blocker
    trajectory) and returns the model's decision distribution.
    """
    model = Vddm(dt=dt, **model_params(params))
    def predict(traj, btraj=None):
        ta = mangle_tau(traj, btraj, **params)
        if btraj is None:
            return model.decisions(actgrid, ta)
        tb = mangle_tau(btraj, **params)
        return model.blocker_decisions(actgrid, ta, tb)
    return predict
def tdm_predictor(params, dt):
    """Build a prediction closure backed by a `Tdm` model with the given parameters."""
    tdm = Tdm(**model_params(params))

    def predict(traj, btraj=None):
        # Crossing-decision distribution for a trajectory, optionally with a blocker.
        tau_a = mangle_tau(traj, btraj, **params)
        if btraj is not None:
            tau_b = mangle_tau(btraj, **params)
            return tdm.blocker_decisions(tau_a, tau_b, dt)
        return tdm.decisions(tau_a, dt)

    return predict
def plot_optimized_decels():
    """Plot optimized decelerations (with/without eHMI) against the minimum
    deceleration, as a function of initial TTA, per vehicle speed.

    Reads the previously fitted results from 'vddm_opt_decel.csv'.
    """
    opt_decels = pd.read_csv('vddm_opt_decel.csv')
    speeds = opt_decels.speed.unique()
    speeds = speeds[speeds <= 15]
    ttas = opt_decels.tta.unique()
    opt_decels['decel'] = opt_decels.overdecel + opt_decels.stop_decel
    # Nearest-neighbour lookup of the fitted deceleration over (tta, speed, ehmi).
    decel_interp = scipy.interpolate.NearestNDInterpolator(
        opt_decels[['tta', 'speed', 'ehmi']].values, opt_decels['decel'].values
    )
    def get_opt_decel(tta, speed, has_ehmi):
        # Broadcast the query point(s) into an (n, 3) array for the interpolator.
        b = np.array(np.broadcast_arrays(tta, speed, has_ehmi)).T
        return decel_interp(b)
    # NOTE(review): 3.5 m/s² reference line — presumably a comfort limit; confirm.
    plt.axhline(3.5, linestyle="dashed", color='black', alpha=0.5)
    for i, speed in enumerate(speeds):
        #decels_o = get_linear_decel(ttas, speed, 0, *linear_param)
        #decels_oe = get_linear_decel(ttas, speed, 1, *linear_param)
        decels_o = get_opt_decel(ttas, speed, 0)
        decels_oe = get_opt_decel(ttas, speed, 1)
        decels_min = get_minimum_decel(ttas, speed)
        color = f"C{i}"
        plt.plot(ttas, decels_o, color=color, label=f"Optimized decel, speed {speed} m/s")
        plt.plot(ttas, decels_oe, '--', color=color, label=f"Optimized decel w/ eHMI, speed {speed} m/s")
        plt.plot(ttas, decels_min, ':', color=color, label=f"Minimum decel, speed {speed} m/s")
    plt.xlim(ttas[0], ttas[-1])
    #plt.loglog()
    plt.xlabel("Initial TTA (seconds)")
    plt.ylabel("Deceleration (m/s²)")
    plt.legend()
    plt.show()
def vehicle_time_savings(predict, dt):
    """Plot vehicle and pedestrian time losses/savings for several strategies.

    For each speed in the fitted results, compares the minimum-deceleration
    baseline against eHMI, optimized deceleration, and both combined, saving
    each named figure to lossfigs/<label>.svg.
    """
    #speeds = [5, 10, 15]
    #ttas = np.linspace(2, 8, 100)
    opt_decels = pd.read_csv('vddm_opt_decel.csv')
    speeds = opt_decels.speed.unique()
    speeds = speeds[speeds <= 15]
    ttas = opt_decels.tta.unique()
    opt_decels['decel'] = opt_decels.overdecel + opt_decels.stop_decel
    # Nearest-neighbour lookup of the fitted deceleration over (tta, speed, ehmi).
    decel_interp = scipy.interpolate.NearestNDInterpolator(
        opt_decels[['tta', 'speed', 'ehmi']].values, opt_decels['decel'].values
    )
    def get_opt_decel(tta, speed, has_ehmi):
        # Broadcast the query point(s) into an (n, 3) array for the interpolator.
        b = np.array(np.broadcast_arrays(tta, speed, has_ehmi)).T
        return decel_interp(b)
    ts = np.arange(0, 20, dt)
    for i, speed in enumerate(speeds):
        label=f'Speed {speed} m/s'
        color = f"C{i}"
        # Baseline: minimum constant deceleration, no eHMI.
        decels = get_minimum_decel(ttas, speed)
        losses, plosses = tta_time_loss(predict, ts, dt, ttas, speed, decels, 0)
        plt.figure("baseline")
        plt.title("Vehicle time loss with minimum constant deceleration")
        plt.plot(ttas, losses, label=label, color=color)
        plt.ylabel("Mean time loss (seconds)")
        plt.figure("baseline_ped")
        plt.title("Pedestrian time loss with minimum constant deceleration")
        plt.plot(ttas, plosses, label=label, color=color)
        plt.ylabel("Mean time loss (seconds)")
        # Same deceleration but with eHMI active.
        losses_e, plosses_e = tta_time_loss(predict, ts, dt, ttas, speed, decels, 1)
        plt.figure("esave")
        plt.title("Vehicle time saving with eHMI")
        plt.plot(ttas, losses - losses_e, label=label, color=color)
        plt.ylabel("Mean time loss reduction (seconds)")
        plt.figure("esave_ped")
        plt.title("Pedestrian time saving with eHMI")
        plt.plot(ttas, plosses - plosses_e, label=label, color=color)
        plt.ylabel("Mean time loss reduction (seconds)")
        #decels_o = get_linear_decel(ttas, speed, 0, *linear_param)
        # Optimized deceleration, no eHMI.
        decels_o = get_opt_decel(ttas, speed, 0)
        losses_o, plosses_o = tta_time_loss(predict, ts, dt, ttas, speed, decels_o, 0)
        plt.figure("osave")
        plt.title("Vehicle time saving with optimized deceleration")
        plt.plot(ttas, losses - losses_o, label=label, color=color)
        plt.ylabel("Mean time loss reduction (seconds)")
        plt.figure("osave_ped")
        plt.title("Pedestrian time saving with optimized deceleration")
        plt.plot(ttas, plosses - plosses_o, label=label, color=color)
        plt.ylabel("Mean time loss reduction (seconds)")
        #decels_eo = get_linear_decel(ttas, speed, 1, *linear_param)
        # Optimized deceleration combined with eHMI.
        decels_eo = get_opt_decel(ttas, speed, 1)
        losses_eo, plosses_eo = tta_time_loss(predict, ts, dt, ttas, speed, decels_eo, 1)
        plt.figure("eosave")
        plt.title("Vehicle time saving with optimized deceleration and eHMI")
        plt.plot(ttas, losses - losses_eo, label=label, color=color)
        plt.ylabel("Mean time loss reduction (seconds)")
        plt.figure("eosave_ped")
        plt.title("Pedestrian time saving with optimized deceleration and eHMI")
        plt.plot(ttas, plosses - plosses_eo, label=label, color=color)
        plt.ylabel("Mean time loss reduction (seconds)")
    # Finalize and save every named figure created above.
    for lbl in plt.get_figlabels():
        plt.figure(lbl)
        plt.legend()
        plt.xlabel("Initial TTA (seconds)")
        plt.xlim(ttas[0], ttas[-1])
        plt.savefig(f"lossfigs/{lbl}.svg")
    plt.show()
    # Dead code kept below as a string literal (no effect at runtime).
    """
    plt.xlim(ttas[0], ttas[-1])
    plt.xlabel("Initial TTA (seconds)")
    #plt.ylabel("Vehicle time loss (seconds)")
    plt.ylabel("Extra deceleration + eHMI time saving (seconds)")
    #plt.ylabel("eHMI extra time saving over opt decel (seconds)")
    plt.legend()
    plt.show()
    """
def fig1():
    """Three-panel model-vs-data figure.

    Panel 0: early-crossing share vs initial distance (Keio constant-speed trials).
    Panel 1: crossing time vs stopping distance (Keio deceleration trials).
    Panel 2: crossing time vs initial TTA with/without eHMI (HIKER trials).
    """
    #predict = vdd_predictor(vddm_params['unified'], DT)
    predict = tdm_predictor(tdm_params['unified'], DT)
    from fit_d2p import get_keio_trials, get_hiker_trials, ecdf
    trials = get_keio_trials(include_decels=False, include_ehmi=False)
    # TODO: Include HIKER
    #trials += get_hiker_trials(include_decels=False, include_ehmi=False)
    fig, axs = plt.subplots(nrows=1, ncols=3, constrained_layout=True)
    plt.sca(axs[0])
    distances = np.linspace(10, 100, 10)
    cmap = plt.cm.cool
    def get_tta_color(tta):
        # Map a TTA in [0, 10] s onto the colormap.
        mintta = 0
        maxtta = 10
        return cmap((tta - mintta)/(maxtta - mintta))
    def pred_early_share(speed, tta):
        # Model-predicted share of pedestrians crossing before the vehicle arrives.
        traj = get_trajectory(speed, tta, False, False)
        pred = predict(traj)
        crossed = np.cumsum(np.array(pred.ps)*DT)
        vehicle_cross_time = scipy.interpolate.interp1d(traj.distance, traj.time)(0)
        early_share = scipy.interpolate.interp1d(traj.time, crossed)(vehicle_cross_time)
        return early_share
    key = lambda x: round(x[0].tau[0], 3)  # group trials by initial TTA
    for tta, trials in itertools.groupby(sorted(trials, key=key), key=key):
        early_shares = []
        color = get_tta_color(tta)
        for distance in distances:
            # Model prediction of early-crossing share at this initial distance.
            speed = distance/tta
            traj = get_trajectory(speed, tta, False, False)
            pred = predict(traj)
            crossed = np.cumsum(np.array(pred.ps)*DT)
            vehicle_cross_time = scipy.interpolate.interp1d(traj.distance, traj.time)(0)
            early_share = scipy.interpolate.interp1d(traj.time, crossed)(vehicle_cross_time)
            early_shares.append(early_share)
        for trial in trials:
            # Observed early-crossing share from the empirical CDF of crossing times.
            traj = trial[0]
            distance = traj.distance[0]
            vehicle_cross_time = scipy.interpolate.interp1d(traj.distance, traj.time)(0)
            early_share = ecdf(trial[-1])(vehicle_cross_time)
            plt.plot(distance, early_share, 'o', color=color)
        plt.plot(distances, early_shares, color=color)
    plt.xlabel("Initial distance (meters)")
    plt.ylabel("Early crossing share")
    plt.sca(axs[1])
    trials = get_keio_trials(include_decels=True, include_constants=False, include_ehmi=False)
    stopping_distances = np.linspace(3.0, 9.0, 10)
    cmap = plt.cm.cool
    def get_stopd_color(stopd):
        # NOTE(review): defined but apparently unused; get_tta_color is used below.
        low = 0
        high = 10
        return cmap((stopd - low)/(high - low))
    def pred_early_share(speed, tta):
        # NOTE(review): redefinition of the helper above; also apparently unused.
        traj = get_trajectory(speed, tta, False, False)
        pred = predict(traj)
        crossed = np.cumsum(np.array(pred.ps)*DT)
        vehicle_cross_time = scipy.interpolate.interp1d(traj.distance, traj.time)(0)
        early_share = scipy.interpolate.interp1d(traj.time, crossed)(vehicle_cross_time)
        return early_share
    def get_stop_distance(trial):
        # Distance at which the vehicle first comes to a standstill.
        traj = trial[0]
        return traj.distance[np.flatnonzero(traj.speed == 0)[0]]
    trials = [trial for trial in trials if np.any(trial[0].speed == 0)]
    trials = [trial for trial in trials if trial[0].speed[0] > 7 and trial[0].distance[0] < 95]
    for tta, trials in itertools.groupby(sorted(trials, key=key), key=key):
        color = get_tta_color(tta)
        for trial in trials:
            # Observed crossing-time quartiles at each trial's stopping distance.
            traj = trial[0]
            distance = traj.distance[0]
            mean_ct = np.median(trial[-1])
            l, m, h = np.percentile(trial[-1], (25, 50, 75))
            stopd = get_stop_distance(trial)
            plt.plot(stopd, m, 'o', color=color)
            plt.plot([stopd, stopd], [l, h], '.-', color=color, alpha=0.5)
            #plt.hist(trial[-1], bins=np.arange(0, 20, 0.5), density=True)
            #v0 = traj.speed[0]
            #x0 = -traj.distance[0]
            #print(tta, v0, x0)
            #a = v0**2/(x0 - (-stopd))/2
            #straj = get_trajectory(v0, tta, -a, False)
            #pred = predict(straj)
            #plt.plot(straj.time, np.array(pred.ps))
            #plt.show()
        # NOTE(review): uses traj/v0/x0 from the *last* trial of the TTA group —
        # presumably all trials in a group share the initial state; confirm.
        v0 = traj.speed[0]
        x0 = -traj.distance[0]
        mean_cts = []
        percs = []
        for stopd in stopping_distances:
            a = v0**2/(2*(x0 - (-stopd)))  # constant decel stopping exactly at stopd
            traj = get_trajectory(v0, tta, -a, False)
            pred = predict(traj)
            crossed = np.cumsum(np.array(pred.ps)*DT)
            perc = scipy.interpolate.interp1d(crossed, traj.time)([0.25, 0.5, 0.75])
            percs.append(perc)
            mean_ct = np.dot(np.array(pred.ps)*DT, traj.time)
            mean_cts.append(mean_ct)
            #early_shares.append(early_share)
        percs = np.array(percs)
        l, m, h = percs.T
        plt.plot(stopping_distances, m, color=color, label=tta)
        plt.fill_between(stopping_distances, l, h, color=color, label=tta, alpha=0.15)
    plt.xlabel("Stopping distance (meters)")
    plt.ylabel("Crossing time (seconds)")
    plt.sca(axs[2])
    trials = get_hiker_trials(include_decels=True, include_constants=False, include_ehmi=True, include_ehmi_controls=False)
    medspeed = np.median([t[0].speed[0] for t in trials])
    trials = [t for t in trials if np.abs(t[0].speed[0] - medspeed) < 0.1]
    from fit_d2p import get_trajectory as get_hiker_trajectory
    colors = {
        False: 'blue',
        True: 'green'
    }
    key = lambda trial: (round(trial[0].tau[0] - trial[1].tau[0], 1), np.any(trial[0].ehmi))
    for (tta, has_ehmi), trials in itertools.groupby(sorted(trials, key=key), key=key):
        # Observed crossing-time quartiles per (TTA, eHMI) condition.
        allcts = np.concatenate([trial[-1] for trial in trials])
        allcts = allcts[np.isfinite(allcts)]
        l, m ,h = np.percentile(allcts, [25, 50, 75])
        plt.plot(tta, m, 'o', color=colors[has_ehmi])
        plt.plot([tta, tta], [l, h], '.-', color=colors[has_ehmi], alpha=0.5)
    #predict = vdd_predictor(vddm_params['hiker'], DT)
    predict = tdm_predictor(tdm_params['hiker'], DT)
    ttas = np.linspace(1.5, 5.5, 50)
    for has_ehmi in [False, True]:
        means = []
        percs = []
        for tta in ttas:
            traj, trajb = get_hiker_trajectory(tta, medspeed, True, has_ehmi)
            pred = predict(traj, trajb)
            crossed = np.cumsum(np.array(pred.ps)*DT)
            percs.append(scipy.interpolate.interp1d(crossed, traj.time)([0.25, 0.5, 0.75]))
            mean_ct = np.dot(np.array(pred.ps)*DT, traj.time)
            means.append(mean_ct)
        percs = np.array(percs)
        plt.fill_between(ttas, percs[:,0], percs[:,-1], color=colors[has_ehmi], alpha=0.15)
        #plt.plot(ttas, percs[:,0], '--', color=colors[has_ehmi])
        #plt.plot(ttas, percs[:,-1], '--', color=colors[has_ehmi])
        plt.plot(ttas, percs[:,1], color=colors[has_ehmi])
    plt.ylabel("Crossing time (seconds)")
    plt.xlabel("Initial TTA (seconds)")
    plt.show()
if __name__ == '__main__':
    # Entry point: the commented calls below are alternative analyses.
    # NOTE(review): fig1() does not take `pred` and builds its own predictor
    # internally, so this `pred` is currently unused — confirm intended.
    pred = vdd_predictor(vddm_params['unified'], 1/30)
    #pred = tdm_predictor(tdm_params['unified'], 1/30)
    #analyze_pedestrian_time_loss(pred)
    #analyze_vehicle_time_loss(pred)
    #analyze_decels(pred)
    #fig6(vddm_params['unified'], 1/30)
    #fit_optimal_decel(vddm_params['unified'], 1/30)
    #fit_optimal_decel(pred, 1/30)
    #fit_linear_decel(vddm_params['unified'], 1/30)
    #vehicle_time_savings(pred, 1/30)
    #plot_optimized_decels()
    fig1()
|
import unittest
from hylite.project import Camera
from hylite.reference.spectra import R90
from hylite.correct.panel import Panel
import numpy as np
from tests import genHeader, genCloud, genImage
class TestHyData(unittest.TestCase):
    """Smoke tests for hylite header handling and image/cloud data operations."""

    def test_header(self):
        """Header round-trips: wavelengths, band names, camera and panel metadata."""
        # load header from file
        header = genHeader()
        # check basics
        self.assertEqual(header.has_band_names(), False)
        self.assertEqual(header.has_wavelengths(), True)
        self.assertEqual(header.has_fwhm(), False)
        self.assertEqual(header.band_count(), 450)
        self.assertEqual(len(header.get_wavelengths()), 450)
        # check copy and set functions
        header2 = header.copy()
        header2.set_wavelengths( np.zeros_like(header.get_wavelengths()))
        # FIX: the original comprehension produced 450 copies of the literal
        # string "Band %d"; format the index in so each band gets a distinct name.
        header2.set_band_names(["Band %d" % i for i in range(header.band_count())])
        self.assertEqual( (header.get_wavelengths() == header2.get_wavelengths()[0]).any(), False )
        # check drop bands
        header3 = header.copy()
        header3.set_band_names(["Band %d" % i for i in range(header.band_count())])
        mask = np.full( header2.band_count(), True )
        mask[0:4] = False
        header3.drop_bands(mask)
        self.assertEqual(header3.band_count(), 4)
        self.assertEqual(len(header3.get_wavelengths()), 4)
        self.assertEqual(len(header3.get_band_names()), 4)
        # check set Camera
        # define camera properties and initial location/orientation estimate
        cam = Camera(pos=np.asarray([665875.0, 4162695, 272]),  # np.array([666290.454, 4162697.93, 268.521235])
                     ori=np.array([43, 80, 130]),  # np.array([50.0,-83.0,-137.0])
                     proj='pano', fov=32.3, step=0.084,
                     dims=(1464, 401))
        header.set_camera(cam)
        cam2 = header.get_camera()
        self.assertEqual((cam2.pos == cam.pos).all(), True)
        self.assertEqual((cam2.ori == cam.ori).all(), True)
        self.assertEqual(cam2.proj, cam.proj)
        self.assertEqual(cam2.dims, cam.dims)
        self.assertEqual(cam2.fov, cam.fov)
        self.assertEqual(cam2.step, cam.step)
        # check set panel
        panel = Panel( R90, np.zeros( header.band_count() ), wavelengths=header.get_wavelengths() )
        header.add_panel(panel)
        self.assertEqual( len(header.get_panel_names()), 1)
        panel2 = header.get_panel('R90')
        self.assertEqual( np.sum( panel2.get_mean_radiance() ), 0 )
        self.assertEqual(panel2.get_mean_radiance().shape[0], header.band_count())
        self.assertEqual( panel2.material.get_name().lower(), R90.get_name().lower())

    def test_data(self):
        """Data operations on both image and cloud datasets: export, nan masking,
        compression round-trip, smoothing and normalisation."""
        # check functions for images and cloud data
        lines = [401, 1]
        samples = [1464, 1000]
        for i,data in enumerate( [genImage(dimx = 1464, dimy=401, nbands=10), genCloud(npoints = 1000, nbands=10)] ):
            # check basics
            self.assertEqual(data.has_wavelengths(), True)
            self.assertEqual(data.has_band_names(), True)
            self.assertEqual(data.has_fwhm(), True)
            self.assertEqual(data.band_count(), 10)
            self.assertEqual(data.samples(), samples[i])
            self.assertEqual(data.lines(), lines[i])
            self.assertEqual(data.is_int(), False)
            self.assertEqual(data.is_float(), True)
            # check export (which also checks copy etc.)
            data2 = data.export_bands( (0,5) )
            self.assertEqual(len(data2.get_wavelengths()), 5)
            self.assertEqual(len(data2.get_fwhm()), 5)
            self.assertEqual(data2.data.shape[-1], 5)
            # nans
            data2.mask_bands(3,-1) # mask bands from 3rd to last
            self.assertEqual(data2.data.shape[-1], 5) # bands should still exist
            self.assertEqual( np.isfinite(data2.data[...,3:]).any(), False ) # all of last bands should be nan
            data2.delete_nan_bands()
            self.assertEqual(data2.data.shape[-1], 3) # bands should have been deleted
            self.assertEqual(len(data2.get_wavelengths()), 3) # as should associated header data
            self.assertEqual(len(data2.get_fwhm()), 3) # as should associated header data
            data2.data[..., 2] = 0
            data2.set_as_nan(0)
            self.assertEqual( np.isfinite( data2.data[...,2] ).any(), False )
            # get band
            self.assertEqual( np.isfinite( data2.get_band(2)).any(), False)
            self.assertEqual( data2.get_band_grey(0).dtype, np.uint8 )
            self.assertEqual( data2.get_band_index(500.0), 0 )
            # check compression
            tv = data.data.ravel()[0]
            data.compress()
            self.assertEqual(data.data.dtype, np.uint16)
            data.decompress()
            self.assertEqual(data.data.dtype, np.float32)
            self.assertAlmostEqual(data.data.ravel()[0], tv, 3)
            # check smoothing works with nan bands
            data.mask_bands(1, 3)
            data.mask_bands(8, -1)
            data.smooth_median()
            data.smooth_savgol()
            # normalise
            data.normalise()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
# _*_ coding: utf-8 _*_
import django_filters
from .models import Goods
from django.db.models import Q
class GoodsFilter(django_filters.rest_framework.FilterSet):
    """Product filter for goods queries: price bounds, exact name and
    category lookup across up to three category levels."""

    pricemin = django_filters.NumberFilter(field_name='shop_price', help_text="lower bound of price", lookup_expr='gte')
    pricemax = django_filters.NumberFilter(field_name='shop_price', help_text="higher bound of price",
                                           lookup_expr='lte')
    name = django_filters.CharFilter(field_name='name', lookup_expr='iexact')
    top_category = django_filters.NumberFilter(method='top_category_filter')

    def top_category_filter(self, queryset, name, value):
        # Match goods whose own category, parent category or grandparent
        # category equals the requested id.
        return queryset.filter(
            Q(category_id=value)
            | Q(category__parent_category_id=value)
            | Q(category__parent_category__parent_category_id=value)
        )

    class Meta:
        model = Goods
        fields = ['pricemin', 'pricemax', 'name', 'is_hot', 'is_new']
|
"""Test universal resolver with http bindings."""
from typing import Dict, Union
import pytest
from asynctest import mock as async_mock
from aries_cloudagent.resolver.base import DIDNotFound, ResolverError
from universal_resolver import resolver as test_module
from universal_resolver.resolver import UniversalResolver
# pylint: disable=redefined-outer-name
@pytest.fixture
def resolver():
    """Resolver fixture.

    Yields a UniversalResolver configured against the public dev endpoint
    with the full list of supported DID methods.
    """
    uni_resolver = UniversalResolver()
    uni_resolver.configure(
        {
            "endpoint": "https://dev.uniresolver.io/1.0/identifiers",
            "methods": [
                "sov",
                "abt",
                "btcr",
                "erc725",
                "dom",
                "stack",
                "ethr",
                "web",
                "v1",
                "key",
                "ipid",
                "jolo",
                "hacera",
                "elem",
                "seraphid",
                "github",
                "ccp",
                "work",
                "ont",
                "kilt",
                "evan",
                "echo",
                "factom",
                "dock",
                "trust",
                "io",
                "bba",
                "bid",
                "schema",
                "ion",
                "ace",
                "gatc",
                "unisot",
                "icon",
            ],
        }
    )
    yield uni_resolver
@pytest.fixture
def profile():
    """Profile fixture: a plain MagicMock standing in for the agent profile."""
    yield async_mock.MagicMock()
class MockResponse:
    """Mock http response.

    Stands in for an aiohttp response object: carries a canned status code
    and body, and supports use as an async context manager.
    """

    def __init__(self, status: int, body: Union[str, Dict]):
        self.status = status
        self.body = body

    async def json(self):
        """Return the canned body, mimicking ``response.json()``."""
        return self.body

    async def text(self):
        """Return the canned body, mimicking ``response.text()``."""
        return self.body

    async def __aenter__(self):
        """For use as async context."""
        return self

    async def __aexit__(self, err_type, err_value, err_exc):
        """For use as async context."""
class MockClientSession:
    """Mock client session.

    Doubles as both the patched ``aiohttp.ClientSession`` class and its
    instance: "instantiating" it via ``__call__`` returns the same object,
    and ``get()`` hands back the canned response for any endpoint.
    """

    def __init__(self, response: MockResponse = None):
        self.response = response

    def __call__(self):
        # The fixture installs this *instance* in place of the class, so
        # calling it (constructor syntax) must yield the instance itself.
        return self

    async def __aenter__(self):
        """For use as async context."""
        return self

    async def __aexit__(self, err_type, err_value, err_exc):
        """For use as async context."""

    def get(self, endpoint):
        """Return the canned response, ignoring the requested endpoint."""
        return self.response
@pytest.fixture
def mock_client_session():
    """Patch aiohttp.ClientSession with a MockClientSession; restore on teardown."""
    temp = test_module.aiohttp.ClientSession
    session = MockClientSession()
    test_module.aiohttp.ClientSession = session
    yield session
    # Restore the real class so other tests are unaffected.
    test_module.aiohttp.ClientSession = temp
@pytest.mark.asyncio
async def test_resolve(profile, resolver, mock_client_session):
    """A 200 response yields the DID document from the endpoint payload."""
    mock_client_session.response = MockResponse(
        200,
        {
            "didDocument": {
                "id": "did:example:123",
                "@context": "https://www.w3.org/ns/did/v1",
            }
        },
    )
    doc = await resolver.resolve(profile, "did:sov:WRfXPg8dantKVubE3HX8pw")
    assert doc.get("id") == "did:example:123"
@pytest.mark.asyncio
async def test_resolve_not_found(profile, resolver, mock_client_session):
    """A 404 response raises DIDNotFound."""
    mock_client_session.response = MockResponse(404, "Not found")
    with pytest.raises(DIDNotFound):
        await resolver.resolve(profile, "did:sov:1234567")
@pytest.mark.asyncio
async def test_resolve_unexpeceted_status(profile, resolver, mock_client_session):
    """Any other status (e.g. 500) raises the generic ResolverError."""
    # NOTE(review): "unexpeceted" is a typo in the test name; kept because
    # renaming changes the collected test id.
    mock_client_session.response = MockResponse(
        500, "Server failed to complete request"
    )
    with pytest.raises(ResolverError):
        await resolver.resolve(profile, "did:sov:123")
|
import unittest
def remove(s):
    """Collapse adjacent letter pairs that differ only in case.

    Repeatedly annihilates neighbouring characters that are the same letter
    in opposite cases ('aA', 'Bb', ...), as in the AoC 2018 day 5 polymer
    reaction, and returns the fully reduced string.
    """
    stack = ['.']  # sentinel so the stack is never empty
    for ch in s:
        top = stack[-1]
        if top != ch and top.upper() == ch.upper():
            stack.pop()  # opposite-case pair reacts and vanishes
        else:
            stack.append(ch)
    return ''.join(stack[1:])  # drop the sentinel
class TestStringMethods(unittest.TestCase):
    """Unit tests for remove(), using the Advent of Code day 5 examples."""

    def test(self):
        # Fully-reacting inputs collapse to the empty string; inert ones survive.
        self.assertEqual( remove('aA'), '')
        self.assertEqual( remove('abBA'), '')
        self.assertEqual( remove('abAB'), 'abAB')
        self.assertEqual( remove('aabAAB'), 'aabAAB')
        self.assertEqual( remove('dabAcCaCBAcCcaDA'), 'dabCBAcaDA')
#if __name__ == '__main__':
#    unittest.main()
# NOTE(review): module-level side effect — reads the puzzle input and prints
# the answer whenever this file is run or imported; confirm that is intended.
with open('aoc5.txt') as f:
    print("Result", len(remove(f.read())))
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 12-04-2021 #
# Author: Lorenzo Pellegrini, Vincenzo Lomonaco #
# E-mail: contact@continualai.org #
# Website: continualai.org #
################################################################################
"""
CUB200 Pytorch Dataset: Caltech-UCSD Birds-200-2011 (CUB-200-2011) is an
extended version of the CUB-200 dataset, with roughly double the number of
images per class and new part location annotations. For detailed information
about the dataset, please check the official website:
http://www.vision.caltech.edu/visipedia/CUB-200-2011.html.
"""
import csv
import gdown
import os
from collections import OrderedDict
from os.path import expanduser
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import extract_archive
from avalanche.benchmarks.utils import PathsDataset
class CUB200(PathsDataset):
    """ Basic CUB200 PathsDataset to be used as a standard PyTorch Dataset.
    A classic continual learning benchmark built on top of this dataset
    can be found in 'benchmarks.classic', while for more custom benchmark
    design please use the 'benchmarks.generators'."""

    images_folder = 'CUB_200_2011/images'
    official_url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/'\
                   'CUB_200_2011.tgz'
    gdrive_url = "https://drive.google.com/u/0/uc?id=" \
                 "1hbzc_P1FuxMkcabkgn9ZKinBwW683j45"
    filename = 'CUB_200_2011.tgz'
    tgz_md5 = '97eceeb196236b17998738112f37df78'

    def __init__(
            self, root=expanduser("~") + "/.avalanche/data/CUB_200_2011/",
            train=True, transform=None, target_transform=None,
            loader=default_loader, download=True):
        """
        :param root: root dir where the dataset can be found or downloaded.
            Default to '~/.avalanche/data/CUB_200_2011'.
        :param train: train or test subset of the original dataset. Default
            to True.
        :param transform: eventual input data transformations to apply.
            Default to None.
        :param target_transform: eventual target data transformations to apply.
            Default to None.
        :param loader: method to load the data from disk. Default to
            torchvision default_loader.
        :param download: default set to True. If the data is already
            downloaded it will skip the download.
        """
        self.root = os.path.expanduser(root)
        self.train = train

        # we create the dir if it does not exists
        if not os.path.exists(self.root):
            os.makedirs(self.root)

        # _check_integrity() also populates self._images via _load_metadata().
        if not self._check_integrity():
            if download:
                self._download()
            else:
                raise RuntimeError('Dataset not found or corrupted. ')

        super().__init__(
            os.path.join(self.root, CUB200.images_folder), self._images,
            transform=transform, target_transform=target_transform,
            loader=loader)

    def _load_metadata(self):
        """ Main method to load the CUB200 metadata.

        Builds self._images as a list of (relative_path, class_label, bbox)
        tuples for the selected (train/test) split, by joining the four
        metadata files shipped with the dataset.
        """
        cub_dir = os.path.join(self.root, 'CUB_200_2011')
        self._images = OrderedDict()

        # Select the image ids belonging to the requested split.
        with open(os.path.join(cub_dir, 'train_test_split.txt')) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=' ')
            for row in csv_reader:
                img_id = int(row[0])
                is_train_instance = int(row[1]) == 1
                if is_train_instance == self.train:
                    self._images[img_id] = []

        # Relative image path for each selected id.
        with open(os.path.join(cub_dir, 'images.txt')) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=' ')
            for row in csv_reader:
                img_id = int(row[0])
                if img_id in self._images:
                    self._images[img_id].append(row[1])

        with open(os.path.join(cub_dir, 'image_class_labels.txt')) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=' ')
            for row in csv_reader:
                img_id = int(row[0])
                if img_id in self._images:
                    # CUB starts counting classes from 1 ...
                    self._images[img_id].append(int(row[1]) - 1)

        with open(os.path.join(cub_dir, 'bounding_boxes.txt')) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=' ')
            for row in csv_reader:
                img_id = int(row[0])
                if img_id in self._images:
                    box_cub = [int(float(x)) for x in row[1:]]
                    box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]
                    # PathsDataset accepts (top, left, height, width)
                    self._images[img_id].append(box_avl)

        images_tuples = []
        for _, img_tuple in self._images.items():
            images_tuples.append(tuple(img_tuple))
        self._images = images_tuples

    def _check_integrity(self):
        """ Checks if the data is already available and intact """
        try:
            self._load_metadata()
        except Exception as _:
            # Any parsing/IO failure means the metadata is missing or broken.
            return False

        for row in self._images:
            filepath = os.path.join(self.root, CUB200.images_folder, row[0])
            if not os.path.isfile(filepath):
                print('[CUB200] Error checking integrity of:', filepath)
                return False
        return True

    def _download(self):
        """ Downloads (if needed) and extracts the dataset archive. """
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        try:
            filepath = os.path.join(self.root, self.filename)
            # FIX: the archive was previously fetched twice in a row
            # (gdown.download followed by gdown.cached_download of the same
            # URL and file). A single md5-verified cached download suffices
            # and skips the transfer when a valid archive already exists.
            gdown.cached_download(
                self.gdrive_url, filepath, md5=self.tgz_md5
            )
        except Exception as e:
            print('[CUB200] Direct download may no longer be supported!')
            raise e
        extract_archive(filepath, to_path=self.root)
if __name__ == "__main__":
    """ Simple test that will start if you run this script directly """
    # Visual smoke test: download if needed and show one sample from each split.
    import matplotlib.pyplot as plt
    dataset = CUB200(train=False, download=True)
    print("test data len:", len(dataset))
    img, _ = dataset[14]
    plt.imshow(img)
    plt.show()

    dataset = CUB200(train=True)
    print("train data len:", len(dataset))
    img, _ = dataset[700]
    plt.imshow(img)
    plt.show()

# Public API of this module.
__all__ = [
    'CUB200'
]
|
#The prime factors of 13195 are 5, 7, 13 and 29.
#What is the largest prime factor of the number 600851475143 ?  (Project Euler 3)

def largest_prime_factor(number):
    """Return the largest prime factor of ``number`` (number >= 2).

    Trial division: each factor is stripped out completely before moving on,
    so only primes ever divide ``number``; whatever remains above 1 at the
    end is itself prime and is the largest factor.
    """
    largest = None
    factor = 2
    # FIX: the original used ``factor*factor < number`` which misses the case
    # where the remainder is a perfect square of the current factor (e.g. for
    # input 4 it printed 4 instead of 2).
    while factor * factor <= number:
        while number % factor == 0:
            largest = factor
            # FIX: integer floor division — ``/`` produced floats (printed
            # e.g. 6857.0) and risks precision loss for large inputs.
            number //= factor
        factor += 1
    if number > 1:
        # The leftover cofactor is prime and larger than any stripped factor.
        largest = number
    return largest


if __name__ == '__main__':
    number = int(input('Enter the number: '))
    print(largest_prime_factor(number))
|
# config/__init__.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .config import Config
__all__ = ['Config']
|
import warnings
from typing import Any, Tuple, Union
from phiml.math import wrap, expand, non_batch, extrapolation, spatial
from phi import math
from phi.geom import Geometry, GridCell, Box, Point
from ._field import SampledField, resample
from ..geom._stack import GeometryStack
from phiml.math import Tensor, instance, Shape
from phiml.math._tensors import may_vary_along
from phiml.math.extrapolation import Extrapolation, ConstantExtrapolation, PERIODIC
from phiml.math.magic import slicing_dict
class PointCloud(SampledField):
"""
A `PointCloud` comprises:
* `elements`: a `Geometry` representing all points or volumes
* `values`: a `Tensor` representing the values corresponding to `elements`
* `extrapolation`: an `Extrapolation` defining the field value outside of `values`
The points / elements of the `PointCloud` are listed along *instance* or *spatial* dimensions of `elements`.
These dimensions are automatically added to `values` if not already present.
When sampling or resampling a `PointCloud`, the following keyword arguments can be specified.
* `soft`: default=False.
If `True`, interpolates smoothly from 1 to 0 between the inside and outside of elements.
If `False`, only the center position of the new representation elements is checked against the point cloud elements.
* `scatter`: default=False.
If `True`, scattering will be used to sample the point cloud onto grids. Then, each element of the point cloud can only affect a single cell. This is only recommended when the points are much smaller than the cells.
* `outside_handling`: default='discard'. One of `'discard'`, `'clamp'`, `'undefined'`.
* `balance`: default=0.5. Only used when `soft=True`.
See the description in `phi.geom.Geometry.approximate_fraction_inside()`.
See the `phi.field` module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
"""
def __init__(self,
elements: Union[Tensor, Geometry],
values: Any = 1.,
extrapolation: Union[Extrapolation, float] = 0.,
add_overlapping=False,
bounds: Box = None):
"""
Args:
elements: `Tensor` or `Geometry` object specifying the sample points and sizes
values: values corresponding to elements
extrapolation: values outside elements
add_overlapping: True: values of overlapping geometries are summed. False: values between overlapping geometries are interpolated
bounds: (optional) size of the fixed domain in which the points should get visualized. None results in max and min coordinates of points.
"""
SampledField.__init__(self, elements, expand(wrap(values), non_batch(elements).non_channel), extrapolation, bounds)
assert self._extrapolation is PERIODIC or isinstance(self._extrapolation, ConstantExtrapolation), f"Unsupported extrapolation for PointCloud: {self._extrapolation}"
self._add_overlapping = add_overlapping
@property
def shape(self):
return self._elements.shape.without('vector') & self._values.shape
def __getitem__(self, item):
item = slicing_dict(self, item)
if not item:
return self
item_without_vec = {dim: selection for dim, selection in item.items() if dim != 'vector'}
elements = self.elements[item_without_vec]
values = self._values[item]
extrapolation = self._extrapolation[item]
bounds = self._bounds[item_without_vec] if self._bounds is not None else None
return PointCloud(elements, values, extrapolation, self._add_overlapping, bounds)
def with_elements(self, elements: Geometry):
return PointCloud(elements=elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds)
def shifted(self, delta):
return self.with_elements(self.elements.shifted(delta))
def with_values(self, values):
return PointCloud(elements=self.elements, values=values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds)
def with_extrapolation(self, extrapolation: Extrapolation):
return PointCloud(elements=self.elements, values=self.values, extrapolation=extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds)
def with_bounds(self, bounds: Box):
return PointCloud(elements=self.elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=bounds)
def __value_attrs__(self):
return '_values', '_extrapolation'
def __variable_attrs__(self):
return '_values', '_elements'
def __expand__(self, dims: Shape, **kwargs) -> 'PointCloud':
return self.with_values(expand(self.values, dims, **kwargs))
def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'PointCloud':
elements = math.rename_dims(self.elements, dims, new_dims)
values = math.rename_dims(self.values, dims, new_dims)
extrapolation = math.rename_dims(self.extrapolation, dims, new_dims, **kwargs)
return PointCloud(elements, values, extrapolation, self._add_overlapping, self._bounds)
def __eq__(self, other):
if not type(self) == type(other):
return False
# Check everything but __variable_attrs__ (values): elements type, extrapolation, add_overlapping
if type(self.elements) is not type(other.elements):
return False
if self.extrapolation != other.extrapolation:
return False
if self._add_overlapping != other._add_overlapping:
return False
if self.values is None:
return other.values is None
if other.values is None:
return False
if not math.all_available(self.values) or not math.all_available(other.values): # tracers involved
if math.all_available(self.values) != math.all_available(other.values):
return False
else: # both tracers
return self.values.shape == other.values.shape
return bool((self.values == other.values).all)
@property
def bounds(self) -> Box:
if self._bounds is not None:
return self._bounds
else:
from phi.field._field_math import data_bounds
bounds = data_bounds(self.elements.center)
radius = math.max(self.elements.bounding_radius())
return Box(bounds.lower - radius, bounds.upper + radius)
def _sample(self, geometry: Geometry, soft=False, scatter=False, outside_handling='discard', balance=0.5) -> Tensor:
if geometry == self.elements:
return self.values
if isinstance(geometry, GeometryStack):
sampled = [self._sample(g, soft, scatter, outside_handling, balance) for g in geometry.geometries]
return math.stack(sampled, geometry.geometries.shape)
if self.extrapolation is extrapolation.PERIODIC:
raise NotImplementedError("Periodic PointClouds not yet supported")
if isinstance(geometry, GridCell) and scatter:
assert not soft, "Cannot soft-sample when scatter=True"
return self.grid_scatter(geometry.bounds, geometry.resolution, outside_handling)
else:
assert not isinstance(self._elements, Point), "Cannot sample Point-like elements with scatter=False"
if may_vary_along(self._values, instance(self._values) & spatial(self._values)):
raise NotImplementedError("Non-scatter resampling not yet supported for varying values")
idx0 = (instance(self._values) & spatial(self._values)).first_index()
outside = self._extrapolation.value if isinstance(self._extrapolation, ConstantExtrapolation) else 0
if soft:
frac_inside = self.elements.approximate_fraction_inside(geometry, balance)
return frac_inside * self._values[idx0] + (1 - frac_inside) * outside
else:
return math.where(self.elements.lies_inside(geometry.center), self._values[idx0], outside)
    def grid_scatter(self, bounds: Box, resolution: math.Shape, outside_handling: str):
        """
        Approximately samples this field on a regular grid using math.scatter().

        Args:
            bounds: physical dimensions of the grid
            resolution: grid resolution
            outside_handling: `str` passed to `phiml.math.scatter()`.

        Returns:
            `CenteredGrid`
        """
        # Fractional cell index of every point (cell centers sit at integers).
        closest_index = bounds.global_to_local(self.points) * resolution - 0.5
        mode = 'add' if self._add_overlapping else 'mean'
        base = math.zeros(resolution)
        if isinstance(self._extrapolation, ConstantExtrapolation):
            # Start from the constant background so empty cells take that value.
            base += self._extrapolation.value
        scattered = math.scatter(base, closest_index, self.values, mode=mode, outside_handling=outside_handling)
        return scattered
def __repr__(self):
try:
return "PointCloud[%s]" % (self.shape,)
except:
return "PointCloud[invalid]"
    def __and__(self, other):
        """Concatenate two point clouds along their (single) instance dimension."""
        assert isinstance(other, PointCloud)
        assert instance(self).rank == instance(other).rank == 1, f"Can only use & on PointClouds that have a single instance dimension but got shapes {self.shape} & {other.shape}"
        from ._field_math import concat
        return concat([self, other], instance(self))
def nonzero(field: SampledField):
    """Return a `PointCloud` with value 1 at every non-zero sample location of `field`."""
    indices = math.nonzero(field.values, list_dim=instance('points'))
    elements = field.elements[indices]
    return PointCloud(elements, values=math.tensor(1.), extrapolation=math.extrapolation.ZERO, add_overlapping=False, bounds=field.bounds)
def distribute_points(geometries: Union[tuple, list, Geometry, float],
                      dim: Shape = instance('points'),
                      points_per_cell: int = 8,
                      center: bool = False,
                      radius: float = None,
                      extrapolation: Union[float, Extrapolation] = math.NAN,
                      **domain) -> PointCloud:
    """
    Transforms `Geometry` objects into a PointCloud.

    Deprecated: construct a `PointCloud` directly instead.

    Args:
        geometries: Geometry objects marking the cells which should contain points
        dim: Dimension along which the points are listed.
        points_per_cell: Number of points for each cell of `geometries`
        center: Set all points to the center of the grid cells.
        radius: Sphere radius; defaults to 0.5% of the mean extent of the data bounds.
        extrapolation: Extrapolation for the `PointCloud`, default `NaN` used for FLIP.
        **domain: Passed to `CenteredGrid` to rasterize `geometries`.

    Returns:
        PointCloud representation of `geometries`.
    """
    warnings.warn("distribute_points() is deprecated. Construct a PointCloud directly.", DeprecationWarning)
    from phi.field import CenteredGrid
    if isinstance(geometries, (tuple, list, Geometry)):
        from phi.geom import union
        geometries = union(geometries)
    # Rasterize the geometry onto a grid, then seed points in the marked cells.
    geometries = resample(geometries, CenteredGrid(0, extrapolation, **domain), scatter=False)
    initial_points = _distribute_points(geometries.values, dim, points_per_cell, center=center)
    if radius is None:
        from phi.field._field_math import data_bounds
        radius = math.mean(data_bounds(initial_points).size) * 0.005
    from phi.geom import Sphere
    return PointCloud(Sphere(initial_points, radius=radius), extrapolation=geometries.extrapolation, bounds=geometries.bounds)
def _distribute_points(mask: math.Tensor, dim: Shape, points_per_cell: int = 1, center: bool = False) -> math.Tensor:
    """
    Generate point positions for every non-zero entry of `mask`.

    Args:
        mask: Tensor with nonzero values at the indices where particles should get generated.
        dim: Dimension along which the generated points are listed.
        points_per_cell: Number of particles to generate at each marked index.
        center: Place points at cell centers; if False, sample positions
            uniformly within each cell.

    Returns:
        A tensor containing the positions of the generated points.
    """
    cell_indices = math.to_float(math.nonzero(mask, list_dim=dim))
    if center:
        copies = [cell_indices + 0.5 for _ in range(points_per_cell)]
    else:
        # Fresh uniform offsets per copy so duplicates do not coincide.
        copies = [cell_indices + math.random_uniform(cell_indices.shape) for _ in range(points_per_cell)]
    return math.concat(copies, dim=dim)
|
# Minimal Lichess bot: accept every incoming challenge and play it via Game.
import berserk
import json
from Game import Game
import os
# Read the API token from a local JSON file.
# NOTE(review): TOKEN is None if "lichess_token" is missing - confirm envs.json.
with open("./envs.json", "r") as f:
    TOKEN = json.load(f).get("lichess_token")
session = berserk.TokenSession(TOKEN)
bot = berserk.clients.Bots(session)      # bot-account endpoints
users = berserk.clients.Users(session)   # user-info endpoints
def game_listener():
    """Block forever on the event stream, accepting challenges and running games."""
    for event in bot.stream_incoming_events():
        if event.get("type") == "challenge":
            bot.accept_challenge(event.get("challenge").get("id"))
        elif event.get("type") == "gameStart":
            # Start each game with a clean move log.
            if os.path.exists("./moves.txt"):
                os.remove("./moves.txt")
            game = Game(bot, users, event.get("game").get("id"))
            game.run()
game_listener()
|
# Weighted average of four grades, weights 10/10/30/50 on a 100-point scale.
lista = [1, 2, 3, 4]
pesos = [10, 10, 30, 50]
soma = sum(nota * peso for nota, peso in zip(lista, pesos))
media = soma / 100
print(media)
|
#!/usr/bin/env python
#Bao Dang
#Assignment 2
class node:
    """One cell of the tree: a label plus leftmost-child/right-sibling links."""
    def __init__(self):
        self.label = None           # value stored at this node
        self.leftmost_child = None  # cellspace index of the first child
        self.right_sibling = None   # cellspace index of the next sibling
        self.parent = None          # cellspace index of the parent
class tree:
    """Array-backed tree: cellspace[i] holds the node whose label is i."""
    def __init__(self):
        # NOTE(review): `maxnodes` is a module-level global that must be set
        # before the first tree() is created (done in __main__) - confirm.
        self.cellspace = [None]*maxnodes
        self.root = None
#PARENT: Find the parent of a node. Return None if the node is the root
def PARENT(n, T):
    """Return the parent label of node *n* in tree *T*, or None if n is the
    root, unset, or out of range.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    these three exceptions cover out-of-range n (IndexError), unset None
    cells (AttributeError) and non-integer indices (TypeError).
    """
    try:
        return T.cellspace[n].parent
    except (IndexError, AttributeError, TypeError):
        return None
#LEFTMOST_CHILD: Find the first child of a node.
def LEFTMOST_CHILD(n, T):
    """Return the first child's label of node *n* in *T*, or None.

    Fix: catching only TypeError let out-of-range n (IndexError) and unset
    None cells (AttributeError) crash, unlike PARENT which returns None;
    the accessors now behave consistently.
    """
    try:
        return T.cellspace[n].leftmost_child
    except (IndexError, AttributeError, TypeError):
        return None
#RIGHT SIBLING: Find the right sibling of a node.
def RIGHT_SIBLING(n , T):
    """Return the right sibling's label of node *n* in *T*, or None.

    Fix: catching only TypeError let out-of-range n (IndexError) and unset
    None cells (AttributeError) crash; now consistent with PARENT.
    """
    try:
        return T.cellspace[n].right_sibling
    except (IndexError, AttributeError, TypeError):
        return None
#LABEL: Return the label stored at cell n (raises if the cell is unset).
def LABEL(n, T):
    return T.cellspace[n].label
#CREATE0: Return a tree that has 1 node
def CREATE0(v):
    # The label doubles as the cellspace index, so v must be < maxnodes.
    temp = tree()
    temp.root = v
    temp.cellspace[temp.root] = node()
    temp.cellspace[temp.root].label = v
    return temp
#CREATE1: Return a tree that has another tree as its child
def CREATE1(v, T):
    temp = tree()
    temp.root = v
    temp.cellspace[temp.root] = node()
    temp.cellspace[temp.root].label = v
    temp.cellspace[temp.root].leftmost_child = T.root
    # NOTE(review): the fresh node() below is immediately overwritten by the
    # shared cell from T on the next line (dead store); the child cell is
    # shared with T, so mutating it also mutates T.
    temp.cellspace[T.root] = node()
    temp.cellspace[T.root] = T.cellspace[T.root]
    temp.cellspace[T.root].parent = temp.root
    return temp
#CREATE2: Return a tree that has two other trees as its children
def CREATE2(v, T1, T2):
    temp = tree()
    temp.root = v
    temp.cellspace[temp.root] = node()
    temp.cellspace[temp.root].label = v
    temp.cellspace[temp.root].leftmost_child = T1.root
    # Cells are shared with T1/T2 (each node() below is a dead store,
    # overwritten on the next line) - see NOTE in CREATE1.
    temp.cellspace[T1.root] = node()
    temp.cellspace[T1.root] = T1.cellspace[T1.root]
    temp.cellspace[T1.root].right_sibling = T2.root
    temp.cellspace[T1.root].parent = temp.root
    temp.cellspace[T2.root] = node()
    temp.cellspace[T2.root] = T2.cellspace[T2.root]
    temp.cellspace[T2.root].parent = temp.root
    return temp
#CREATE3: Return a tree that has three other trees as its children
def CREATE3(v, T1, T2, T3):
    temp = tree()
    temp.root = v
    temp.cellspace[temp.root] = node()
    temp.cellspace[temp.root].label = v
    temp.cellspace[temp.root].leftmost_child = T1.root
    # Cells are shared with T1/T2/T3; children are chained T1 -> T2 -> T3
    # via right_sibling (each node() below is a dead store - see CREATE1).
    temp.cellspace[T1.root] = node()
    temp.cellspace[T1.root] = T1.cellspace[T1.root]
    temp.cellspace[T1.root].right_sibling = T2.root
    temp.cellspace[T1.root].parent = temp.root
    temp.cellspace[T2.root] = node()
    temp.cellspace[T2.root] = T2.cellspace[T2.root]
    temp.cellspace[T2.root].right_sibling = T3.root
    temp.cellspace[T2.root].parent = temp.root
    temp.cellspace[T3.root] = node()
    temp.cellspace[T3.root] = T3.cellspace[T3.root]
    temp.cellspace[T3.root].parent = temp.root
    return temp
def ROOT(T):
    # Label/index of the root cell (None for an empty tree).
    return T.root
def MAKENULL(T):
    # Returns a fresh empty tree.  NOTE(review): the parameter is rebound,
    # not mutated - the caller's tree is untouched; use the return value.
    T = tree()
    return T
if __name__ == "__main__":
    # Demo driver for the tree ADT (Python 2 print statements).
    print "Create tree A with v = 8 "
    maxnodes = 100  # global cell capacity, read by tree.__init__
    a = CREATE0(8)
    print "Check label of the root: "
    print LABEL(8, a)
    print "Create tree B with v = 2 and tree A"
    b = CREATE1(2, a)
    print "Check parent of node node"
    print PARENT(8, b)
    print "Check most left child of B"
    print LEFTMOST_CHILD(2 ,b)
    print "Create tree d with v = 7 and tree A, tree B"
    d = CREATE2(7, a, b)
    print "Check parent of node 2"
    print PARENT(2, d)
    print "Check left most child of node 7"
    print LEFTMOST_CHILD(7, d)
    print "Check right sibling of node 8"
    print RIGHT_SIBLING(8, d)
# -*-coding:utf-8-*-
# Inter-thread communication demo: threads share the process's memory, so the
# Queue created in the main thread is visible inside run().
import threading,queue
def run():
    # Producer: push one item onto the shared queue.
    q.put('测试')
if __name__ == '__main__':
    q = queue.Queue()
    p = threading.Thread(target=run,)
    p.start()
    print(q.get())  # blocks until the worker thread has put an item
# List-manipulation exercise: grow a word list to 10 items, then sample it.
tenThings = "Apples Oranges Crows Telephone Light Sugar"
print(tenThings)
print("Need more items")
stuff = tenThings.split(" ")  # six words to start with
more = ["Day","night","Song","Frisbee","Corn","Banana"]
print(stuff)
# for stuff in more:  # NOTE(review): disabled because it rebinds `stuff` to a
#     print(stuff)    # string, which breaks the while-loop below
while len(stuff) != 10:
    nextOne = more.pop()  # takes from the END of `more`
    print("Adding", nextOne)
    stuff.append(nextOne)
    print("Items no %d " % len(stuff))
print("Done")
print(stuff[1])   # SECOND element (indexing is zero-based)
print(stuff[-1])  # last element; unlike pop() this does not remove it
print(stuff.pop())
print(' '.join(stuff))
print('#'.join(stuff[2:5]))
|
# Recipe-search web app: Flask frontend over a MySQL (AWS RDS) database.
from flask import Flask, request, render_template
import codecs, pymysql
from datetime import date
app = Flask(__name__)
@app.route("/")
def show():
    """Top page: render the search form."""
    return render_template("main.html")
@app.route('/result', methods=["POST", "GET"])
def result():
    """Search-results page.

    GET query modes:
      * ?category=ID             - recipes in a category (or its parent)
      * ?name=a,b                - recipes containing ALL listed materials
      * ?name=a,b&ignore=c,d     - as above, excluding recipes containing
                                   any ignored material
    NOTE(review): the POST branch only stores the form value and never
    executes a query, so fetchall() returns no rows for POST - confirm intent.
    """
    # NOTE(review): DB credentials are hard-coded; move to config/env vars.
    connection = pymysql.connect(
        host='database-1.cop2pvzm3623.ap-northeast-1.rds.amazonaws.com',
        db='groupwork_db',
        user='test',
        password='111test',
        charset='utf8',
        cursorclass=pymysql.cursors.DictCursor
    )
    try:
        with connection.cursor() as cursor:
            name = ""
            data = ""
            data2 = ""
            if request.method == "POST":
                data = request.form["name"]
            else:
                if request.args.get("category") != None:
                    # Category search: the id may match either the category
                    # itself or its parent category.
                    data = request.args.get("category")
                    sql = """SELECT recipe.title, recipe.image, recipe.recipe_id FROM recipe JOIN category_recipe ON
                    recipe.recipe_id = category_recipe.recipe_id JOIN category_list ON category_recipe.category_id
                    = category_list.category_id WHERE %s IN(category_list.category_id, category_list.parent_category_id)
                    GROUP BY recipe.title"""
                    cursor.execute(sql, data)
                elif request.args.get("category") == None and request.args.get("ignore") == None:
                    # Material search: recipes that contain EVERY listed material.
                    data = request.args.get("name")
                    d_list = data.split(",")
                    name = data
                    sql = """SELECT recipe.title, recipe.image, recipe.recipe_id FROM recipe
                    JOIN material_recipe
                    ON recipe.recipe_id = material_recipe.recipe_id
                    JOIN material
                    ON material_recipe.material_id = material.material_id
                    WHERE recipe.recipe_id IN
                    ( SELECT recipe_id
                    FROM material_recipe
                    WHERE material_id IN
                    ( SELECT material.material_id
                    FROM material
                    WHERE material_name LIKE %s
                    ))"""
                    # One extra AND-subquery per additional material.
                    for i in range(len(d_list)-1):
                        sql += """AND recipe.recipe_id IN
                        ( SELECT recipe_id
                        FROM material_recipe
                        WHERE material_id IN
                        ( SELECT material.material_id
                        FROM material
                        WHERE material_name LIKE %s
                        ))"""
                    sql += """GROUP BY recipe.title"""
                    # Wrap each material in % for a substring LIKE match.
                    for i in range(len(d_list)):
                        d_list[i] = "%" + d_list[i] + "%"
                    cursor.execute(sql, d_list)
                else:
                    # Material search with an ignore-list: drop every recipe
                    # containing at least one ignored material.
                    data = request.args.get("name")
                    d_list = data.split(",")
                    data2 = request.args.get("ignore")
                    d2_list = data2.split(",")
                    name = data
                    # (Two earlier commented-out drafts of this query removed
                    # for readability; see version control for their text.)
                    sql = """SELECT recipe.title, recipe.image, recipe.recipe_id FROM recipe
                    JOIN material_recipe
                    ON recipe.recipe_id = material_recipe.recipe_id
                    JOIN material
                    ON material_recipe.material_id = material.material_id
                    WHERE recipe.recipe_id NOT IN
                    ( SELECT recipe_id
                    FROM material_recipe
                    WHERE material_id IN
                    ( SELECT material.material_id
                    FROM material
                    WHERE material_name LIKE %s
                    ))"""
                    # One NOT-IN subquery per additional ignored material...
                    for i in range(len(d2_list)-1):
                        sql += """AND recipe.recipe_id NOT IN
                        ( SELECT recipe_id
                        FROM material_recipe
                        WHERE material_id IN
                        ( SELECT material.material_id
                        FROM material
                        WHERE material_name LIKE %s
                        ))"""
                    # ...and one IN subquery per wanted material.
                    for i in range(len(d_list)):
                        sql += """AND recipe.recipe_id IN
                        ( SELECT recipe_id
                        FROM material_recipe
                        WHERE material_id IN
                        ( SELECT material.material_id
                        FROM material
                        WHERE material_name LIKE %s
                        ))"""
                    for i in range(len(d_list)):
                        d_list[i] = "%" + d_list[i] + "%"
                    for i in range(len(d2_list)):
                        d2_list[i] = "%" + d2_list[i] + "%"
                    sql += """GROUP BY recipe.title"""
                    print(sql)
                    # Placeholder order must match the SQL: ignores first, then wants.
                    cursor.execute(sql, d2_list + d_list)
            # cursor.execute(sql, data)
            # Fetch the SELECT results.
            results = cursor.fetchall()
            if name == "":
                # Category mode: resolve the category id to its display name.
                sql = """SELECT category_name FROM category_list WHERE category_list.category_id = %s"""
                cursor.execute(sql, data)
                data = cursor.fetchone()
                data = data["category_name"]
                print(data)
            cursor.close()
            if data2 != "":
                return render_template("result.html", name=data, results=results, ng=data2)
            else :
                return render_template("result.html", name=data, results=results)
    finally:
        connection.close()
@app.route('/recipe')
def recipe():
    """Detail page for one recipe: its materials and metadata, by ?id=...

    Fix: the original called ``cursor.close()`` BEFORE ``fetchall()``, reading
    results from an already-closed cursor; the fetch now happens first,
    matching the order used in result().
    """
    recipeID = request.args.get('id')  # recipe id from the query string
    # NOTE(review): DB credentials are hard-coded; move to config/env vars.
    connection = pymysql.connect(
        host='database-1.cop2pvzm3623.ap-northeast-1.rds.amazonaws.com',
        db='groupwork_db',
        user='test',
        password='111test',
        charset='utf8',
        cursorclass=pymysql.cursors.DictCursor
    )
    try:
        with connection.cursor() as cursor:
            sql = """SELECT distinct material_name, image, indication, cost, url, title FROM material_recipe
                LEFT JOIN material
                ON material_recipe.material_id = material.material_id
                LEFT JOIN recipe
                ON material_recipe.recipe_id = recipe.recipe_id
                WHERE material_recipe.recipe_id = %s"""
            cursor.execute(sql, recipeID)
            # Fetch the SELECT results, then close the cursor.
            results = cursor.fetchall()
            cursor.close()
            return render_template("recipe.html", results=results)
    finally:
        connection.close()
@app.route('/list')
def m_list():
    """Static material-list page."""
    return render_template("list.html")
if __name__ == '__main__':
    # Development server; debug mode must be off in production.
    app.debug = True
    app.run(host='0.0.0.0')
'''Task 7: Given the lengths of three segments entered by the user, determine
whether a triangle can be formed from them.  If such a triangle exists,
classify it as scalene, isosceles or equilateral.'''
a = int(input('Введите сторону a: '))
b = int(input('Введите сторону b: '))
c = int(input('Введите сторону c: '))
# Triangle inequality: each side must be shorter than the sum of the other two.
if a < b + c and b < a + c and c < a + b:
    if a == b and b == c:
        print(f'Треугольник со сторонами {a}, {b}, {c} - равносторонний')
    elif a == b or b == c or c == a:
        print(f'Треугольник со сторонами {a}, {b}, {c} - равнобедренный')
    else:
        print(f'Треугольник со сторонами {a}, {b}, {c} - разносторонний')
else:
    print(f'Треугольника со сторонами {a}, {b}, {c} не существует')
|
# Scrape the ex-dividend-dates table from dividend.com via Selenium + BS4.
from selenium import webdriver
from bs4 import BeautifulSoup
# NOTE(review): driver path and date range are hard-coded - parameterize for reuse.
driver = webdriver.Chrome("/mnt/c/Users/Peter/Documents/setup/chromedriver")
driver.get("http://www.dividend.com/ex-dividend-dates.php?from_filter=yes&ex_div_date_min=2018-01-11&ex_div_date_max=2018-01-11&common_shares=on&preferred_shares=on&adrs=on&etns=on&funds=on&notes=on&etfs=on&reits=on")
soup = BeautifulSoup(driver.page_source,"lxml")
driver.quit()  # release the browser before parsing
table = soup.select("table#ex-dividend-dates")[0]
# One inner list per <tr>: text of every <th>/<td>, newlines stripped.
list_row =[[tab_d.text.strip().replace("\n","") for tab_d in item.select('th,td')] for item in table.select('tr')]
for data in list_row[:2]:  # header row plus first data row only
    print(' '.join(data))
|
"""
Ablation study to test the effect of the % of training data on precision and recall.
Run several times and save the output to a pandas frame
"""
from comet_ml import Experiment
import keras
import tensorflow as tf
import sys
import os
from datetime import datetime
import glob
import pandas as pd
import copy
import numpy as np
import gc
from keras_retinanet import models
from keras_retinanet .models.retinanet import retinanet_bbox
from keras_retinanet .callbacks import RedirectModel
from keras.backend.tensorflow_backend import clear_session, get_session
#insert path
sys.path.insert(0, os.path.abspath('..'))
from DeepForest.config import load_config
from DeepForest.utils.generators import load_retraining_data, create_h5_generators
from train import create_models
from prcurve import main as eval_main
#load config - clean
DeepForest_config = load_config("..")
#Keras session cleaner for training in a loop
def reset_keras():
    """Tear down the current TF/Keras session and install a fresh one.

    Called between ablation runs so GPU memory held by the previous model is
    released before the next model is built.
    """
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()
    try:
        del prediction_model
    except NameError:  # nothing to delete on the first run (was a bare except)
        pass
    print(gc.collect())  # if it's done something you should see a number being outputted
    # use the same config as you used to create the session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Fix: the old code called set_session(get_session()), so `config` was
    # built but never applied; the session must be constructed with it for
    # allow_growth to take effect.
    keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
def train(pretrain_model_path, proportion_data, DeepForest_config):
    """Fine-tune a pretrained retinanet on a fraction of the site's hand
    annotations and return (prediction_model, number_of_training_trees).

    When proportion_data == 0 the pretrained model is returned untrained.
    """
    ###Log experiments
    experiment = Experiment(api_key="ypQZhYfs3nSyKzOfz13iuJpj2", project_name='deeplidar', log_code=False)
    #make snapshot dir
    dirname = datetime.now().strftime("%Y%m%d_%H%M%S")
    experiment.log_parameter("Start Time", dirname)
    save_snapshot_path=DeepForest_config["save_snapshot_path"] + dirname
    os.mkdir(save_snapshot_path)
    ##Replace config file and experiment
    DeepForest_config["batch_size"] = 40
    DeepForest_config["epochs"] = 40
    experiment.log_parameter("mode","ablation")
    DeepForest_config["evaluation_images"] = 0
    #set training images, as a function of the number of training windows
    DeepForest_config["training_proportion"] = proportion_data
    experiment.log_parameters(DeepForest_config)
    #Create model
    # NOTE(review): `backbone` is a module-level global set in __main__ - confirm.
    model, training_model, prediction_model = create_models(
        backbone_retinanet=backbone.retinanet,
        num_classes=1,
        weights=pretrain_model_path,
        multi_gpu=2,
        freeze_backbone=False,
        nms_threshold=DeepForest_config["nms_threshold"],
        input_channels=DeepForest_config["input_channels"]
    )
    if not proportion_data == 0:
        #Run training, and pass comet experiment class
        #start training
        data = load_retraining_data(DeepForest_config)
        train_generator, validation_generator = create_h5_generators(data, DeepForest_config=DeepForest_config)
        #ensure directory created first; otherwise h5py will error after epoch.
        history = training_model.fit_generator(
            generator=train_generator,
            steps_per_epoch=train_generator.size()/DeepForest_config["batch_size"],
            epochs=DeepForest_config["epochs"],
            verbose=2,
            shuffle=False,
            workers=DeepForest_config["workers"],
            use_multiprocessing=DeepForest_config["use_multiprocessing"],
            max_queue_size=DeepForest_config["max_queue_size"])
        num_trees = train_generator.total_trees
    else:
        # proportion 0: evaluate the pretrained model without fine-tuning.
        num_trees = 0
    #Log trees
    experiment.log_parameter("Number of Training Trees", num_trees)
    return prediction_model, num_trees
def evaluation(prediction_model, results, DeepForest_config, num_trees):
    """Run the precision/recall evaluation and append one result row."""
    print("Evaluation")
    args = [
        "--batch-size", str(DeepForest_config['batch_size']),
        '--score-threshold', str(DeepForest_config['score_threshold']),
        '--suppression-threshold', '0.1',
        '--save-path', None
    ]
    recall, precision = eval_main(DeepForest_config = DeepForest_config, args = args, model=prediction_model)
    # NOTE(review): `proportion_data` and `pretraining_site` are read from the
    # __main__ loop's globals rather than passed in - fragile; confirm before reuse.
    results.append({"Number of Trees": num_trees, "Proportion":proportion_data,"Evaluation Site" : pretraining_site, "Recall": recall,"Precision": precision})
    return results
if __name__ == "__main__":
    #The following models have been pretrained on all other sites except for the name in the site key
    pretraining_models = {
        "NIWO":"/orange/ewhite/b.weinstein/retinanet/20190719_121121/resnet50_05.h5",
        "MLBS": "/orange/ewhite/b.weinstein/retinanet/20190719_120823/resnet50_05.h5",
        "SJER": "/orange/ewhite/b.weinstein/retinanet/20190719_120547/resnet50_05.h5",
        "TEAK": "/orange/ewhite/b.weinstein/retinanet/20190713_102002/resnet50_04.h5"
    }
    #For each site, match the hand annotations with the pretraining model
    results = []
    for pretraining_site in pretraining_models:
        pretrain_model_path = pretraining_models[pretraining_site]
        print("Running pretraining for {}".format(pretraining_site))
        #Load retraining data: restrict annotations and evaluation to this site
        DeepForest_config["hand_annotation_site"] = [pretraining_site]
        DeepForest_config["evaluation_site"] = [pretraining_site]
        DeepForest_config["shuffle_training"] = True
        #Load pretraining model
        print('Loading model, this may take a secondkeras-retinanet.\n')
        backbone = models.backbone(DeepForest_config["backbone"])
        #retrain model paths: point the site's h5 dir at the hand annotations
        for x in [pretraining_site]:
            DeepForest_config[x]["h5"] = os.path.join(DeepForest_config[x]["h5"],"hand_annotations")
            print(DeepForest_config[x]["h5"])
        #For each site run a portion of the training data (2 replicates)
        for x in np.arange(2):
            for proportion_data in [0, 0.05,0.25,0.5,0.75,1]:
                reset_keras()
                prediction_model, num_trees = train(pretrain_model_path, proportion_data, DeepForest_config)
                results = evaluation(prediction_model, results, DeepForest_config, num_trees)
    #Wrap together the results
    results = pd.DataFrame(results)
    #give time stamp in case multiple running
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    results.to_csv("ablation_{}".format(timestamp) + ".csv")
    print(results)
|
# Generated by Django 3.1.7 on 2021-03-14 00:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Client model (timestamped/active base fields + profile)."""
    dependencies = [
        ('core', '0012_auto_20210313_0040'),
    ]
    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateField(auto_now_add=True, verbose_name='Criação')),
                ('modified', models.DateField(auto_now=True, verbose_name='Atualização')),
                ('active', models.BooleanField(default=True, verbose_name='Ativo?')),
                ('name', models.CharField(max_length=100, verbose_name='Nome')),
                ('description', models.TextField(max_length=300, verbose_name='Descrição')),
                # NOTE(review): max_length is not a valid option for integer
                # fields - likely meant validators/choices for a 1..5 rating;
                # fix in the model and a follow-up migration, not here.
                ('rating', models.PositiveSmallIntegerField(max_length=5, verbose_name='Avaliação')),
                ('occupation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.position', verbose_name='Profissão')),
            ],
            options={
                'verbose_name': 'Cliente',
                'verbose_name_plural': 'Clientes',
            },
        ),
    ]
|
# Calculator REPL: supports trig functions, brackets, powers/roots and "pi".
from tools import Calaculation
import math
calc = Calaculation()
print('=========================================')
print('支持常用三角函数以及反函数(tan,sin,cos)\t支持括号优先级运算\n支持乘方及开方\tpi=π')
# Read-eval-print loop: substitute "pi" with its numeric value, then delegate
# parsing/evaluation to the project's Calaculation helper.
while True:
    result = calc.main(input('>>').replace('pi',str(math.pi)))
    # if len(result)>10:
    #     print('结果过长,自动四舍五入..')
    #     print(round(float(result),5))
    #
    # else:
    #     print(result)
    print(result)
#ASSIGNMENT15
#QUESTION:1 Extract the user id, domain name and suffix from the following email addresses.
# emails = "zuck26@facebook.com" "page33@google.com"
# "jeff42@amazon.com"
# desired_output = [('zuck26', 'facebook', 'com'), ('page33', 'google', 'com'), ('jeff42', 'amazon', 'com')]
#SOLUTION:
import re
emails = "zuck26@facebook.com page33@google.com jeff42@amazon.com"
# One group triple per address: user id, domain, suffix.  The original spelled
# out nine groups for exactly three addresses and left the '.' unescaped
# (matching ANY character); findall with an escaped dot handles any count.
p = re.compile(r"(\w+)@(\w+)\.(\w+)")
l = p.findall(emails)
print(l)
#QUESTION:2 Retrieve all the words starting with ‘b’ or ‘B’ from the following text.
# text = "Betty bought a bit of butter, But the butter was so bitter, So she bought some better butter,
# To make the bitter butter better."
#SOLUTION:
import re
text = "Betty bought a bit of butter, But the butter was so bitter, So she bought some better butter,To make the bitter butter better."
# \b anchors at a word boundary so we capture WHOLE words starting with b/B;
# the original matched only the single letter and printed match objects.
p = re.compile(r"\bb\w*", re.I)
result = p.findall(text)
print(result)
#QUESTION:3 Split the following irregular sentence into words
# sentence = "A, very very; irregular_sentence"
# desired_output = "A very very irregular sentence"
#SOLUTION:
import re
sentence = "A, very very; irregular_sentence"
# Collapse every RUN of non-word characters into one space; the original
# replaced char-by-char and left double spaces in the output.
m = re.sub(r"[^\w]+", " ", sentence)
p = re.sub(r"_+", " ", m)
print(p)
#OPTIONAL QUESTION#
#QUESTION: Clean up the following tweet so that it contains only the user’s message. That is, remove all URLs, hashtags, mentions, punctuations, RTs and CCs.
# tweet = '''Good advice! RT @TheNextWeb: What I would do differently if I was learning to code today http://t.co/lbwej0pxOd cc: @garybernhardt #rstats'''
# desired_output = 'Good advice What I would do differently if I was learning to code today'
#SOLUTION:
import re
tweet ='''Good advice! RT @TheNextWeb:What I would do differently if I was learning to code today http://t.co/lbwej0pxOd cc: @garybernhardt'''
def clean_tweet(tweet):
    """Strip URLs, RT/cc markers, hashtags, mentions and punctuation,
    then collapse whitespace (the collapse step was commented out before,
    leaving stray spaces in the output)."""
    tweet = re.sub(r'http\S+\s*', ' ', tweet)   # URLs
    tweet = re.sub(r'RT|cc', ' ', tweet)        # RT / cc markers
    tweet = re.sub(r'#\S+', ' ', tweet)         # hashtags
    tweet = re.sub(r'@\S+', ' ', tweet)         # mentions
    tweet = re.sub('[%s]' % re.escape("""!"#$%&'( )*+,-./:;<=>?@[\]^_ `{|}~"""), ' ', tweet)  # punctuation
    tweet = re.sub(r'\s+', ' ', tweet).strip()  # collapse runs of whitespace
    return tweet
print(clean_tweet(tweet))
|
# Python transcription of a JavaScript Vue entry point (appears to be
# machine-generated; `#__:skip` looks like a transpiler directive - TODO confirm).
exports = [] #__:skip
require = None #__:skip
App = require("./App")['default']
vue = require("vue")
app = vue.createApp(App)
exports['default'] = app.mount("#app")
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
import sys
import cuisine
from fabric.api import run as _run
from fabric.api import sudo as _sudo
from fabric.api import put as _put
from fabric.api import local, get, env
from revolver import contextmanager as _ctx
from revolver.decorator import inject_use_sudo
VERSION = '0.0.5'
# Default state for the run/sudo monkeypatch below: no forced sudo, no user.
env.sudo_forced = False
env.sudo_user = None
@inject_use_sudo
def put(*args, **kwargs):
    """Upload files like fabric's put, running outside the monkeypatched state."""
    with _ctx.unpatched_state():
        return _put(*args, **kwargs)
def run(*args, **kwargs):
    """Execute a remote command; transparently upgrades to sudo() when
    env.sudo_forced is set for the session."""
    if env.sudo_forced:
        return sudo(*args, **kwargs)
    return _run(*args, **kwargs)
def sudo(*args, **kwargs):
    """fabric sudo that honors the session-wide env.sudo_user override."""
    if env.sudo_user:
        kwargs['user'] = env.sudo_user
    return _sudo(*args, **kwargs)
# Monkeypatch sudo/run into fabric/cuisine so every module that imported them
# by name sees our wrappers instead of the originals.
# TODO Added fabric.contrib.files because it was still wrong. Do we need to
# patch even more places? Wrong import-order used? Investigate here!
from fabric.contrib import files as _files
for module in ("fabric.api", "fabric.contrib.files",
               "fabric.operations", "cuisine"):
    setattr(sys.modules[module], "run", run)
    setattr(sys.modules[module], "sudo", sudo)
|
import torch
import torch.nn as nn
import numpy as np
import argparse
class BasicBlock(nn.Module):
    """Residual building block for ResNet-18/34: two 3x3 convs plus a shortcut."""

    # Channel multiplier of the block's output (1 for the basic block).
    expansion = 1

    def __init__(self, in_channel, out_channel, stride=1):
        super(BasicBlock, self).__init__()
        # Main (residual) branch: conv-BN-ReLU-conv-BN.
        # Attribute names `basic`/`short_cut` are kept so state_dict keys match.
        branch = [
            nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channel),
        ]
        self.basic = nn.Sequential(*branch)
        # Shortcut: identity unless the spatial size or channel count changes,
        # in which case a 1x1 conv + BN projects the input to match.
        if stride == 1 and in_channel == self.expansion * out_channel:
            self.short_cut = nn.Sequential()
        else:
            self.short_cut = nn.Sequential(
                nn.Conv2d(in_channel, self.expansion * out_channel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * out_channel),
            )

    def forward(self, x):
        # Sum of both branches, then the final ReLU.
        merged = self.basic(x) + self.short_cut(x)
        return torch.nn.functional.relu(merged)
class Resnet18(nn.Module):
    """ResNet-18/34-style network: 7x7 stem, four residual stages, FC head."""
    def __init__(self, Block, num_blocks, num_classes=10):
        super(Resnet18, self).__init__()
        self.in_channel = 64  # running channel count consumed by _make_layer
        self.Conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))
        # return_indices=True so visual_conv5() can expose pooling indices.
        self.Maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)
        self.Conv2_x = self._make_layer(Block, 64, num_blocks[0], 1)
        self.Conv3_x = self._make_layer(Block, 128, num_blocks[1], 2)
        self.Conv4_x = self._make_layer(Block, 256, num_blocks[2], 2)
        self.Conv5_x = self._make_layer(Block, 512, num_blocks[3], 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * Block.expansion, num_classes)
    def _make_layer(self, Block, out_channel, num_blocks, stride):
        """Stack `num_blocks` Blocks; only the first may downsample (stride)."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(Block(self.in_channel, out_channel, stride))
            self.in_channel = Block.expansion * out_channel
        return nn.Sequential(*layers)
    def forward(self, x):
        """Classify a batch of 3-channel images; returns [batch, num_classes] logits."""
        output = self.Conv1(x)
        output, indices = self.Maxpool(output)  # pooling indices discarded here
        output = self.Conv2_x(output)
        output = self.Conv3_x(output)
        output = self.Conv4_x(output)
        output = self.Conv5_x(output)
        output = self.avg_pool(output)
        #Change the shape of the tensor to [batchsize, 512] for the FC head.
        output = output.view(output.size(0), -1)
        output = self.fc(output)
        return output
    def visual_conv1(self, x):
        """Feature map after the stem only (for visualization)."""
        visual_output = self.Conv1(x)
        return visual_output
    def visual_conv5(self, x):
        """Feature map after the last residual stage, plus maxpool indices."""
        visual_output = self.Conv1(x)
        visual_output, indices = self.Maxpool(visual_output)
        visual_output = self.Conv2_x(visual_output)
        visual_output = self.Conv3_x(visual_output)
        visual_output = self.Conv4_x(visual_output)
        visual_output = self.Conv5_x(visual_output)
        return visual_output, indices
def Resnet_18():
    """Factory: the ResNet-18 configuration (two BasicBlocks per stage)."""
    return Resnet18(BasicBlock, [2, 2, 2, 2])
|
from __future__ import absolute_import
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import loremipsum
class LoremIpsumNode(Directive):
    """`lorem-ipsum` reST directive emitting placeholder paragraphs.

    The single required argument is a comma-separated list of sentence
    counts, one output paragraph per count, e.g. ``.. lorem-ipsum:: 3, 5``.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}
    # Generated once at class-definition (import) time; shared by all uses.
    sentences = loremipsum.get_sentences(100)
    def run(self):
        paragraphs = [int(x.strip()) for x in self.arguments[0].split(',')]
        return [nodes.paragraph(text=self.get_text(x)) for x in paragraphs]
    def get_text(self, n_sentences):
        # Take a prefix of the pre-generated pool, newline-joined.
        text = self.sentences[:n_sentences]
        return '\n'.join(text)
def register():
    """Register the directive with docutils (plugin entry point)."""
    directives.register_directive('lorem-ipsum', LoremIpsumNode)
|
# Print N followed by a line terminator selected by N itself; numbers
# outside 1..6 print nothing.
N = int(input())
endings = {1: '\r\n', 2: '\n', 3: '\n\r', 4: '\n\n', 5: '\r', 6: ' '}
if N in endings:
    print(N, end=endings[N])
|
from os import environ  # NOTE(review): imported but unused here - confirm before removing
from fastapi_plugins import RedisSettings
class AppSettings(RedisSettings):
    """Application settings: the plugin's Redis settings plus an app name."""
    api_name: str = 'fun_box'
config = AppSettings()
|
def readNumber(line, index):
    """Parse an integer or decimal literal starting at *index*.

    Returns a NUMBER token and the index of the first unconsumed character.
    """
    value = 0
    while index < len(line) and line[index].isdigit():
        value = value * 10 + int(line[index])
        index += 1
    if index < len(line) and line[index] == '.':
        index += 1
        scale = 0.1  # place value of the next fractional digit
        while index < len(line) and line[index].isdigit():
            value += int(line[index]) * scale
            scale /= 10
            index += 1
    return {'type': 'NUMBER', 'number': value}, index
# One reader per operator/bracket: each returns its token and the index just
# past the single character it consumed.
def readPlus(line, index):
    token = {'type': 'PLUS'}
    return token, index + 1
def readMinus(line, index):
    token = {'type': 'MINUS'}
    return token, index + 1
def readMul(line, index):
    token = {'type': 'MUL'}
    return token, index + 1
def readDiv(line, index):
    token = {'type': 'DIV'}
    return token, index + 1
def readL(line, index):
    # Left bracket '('
    token = {'type': 'LEFT'}
    return token, index + 1
def readR(line, index):
    # Right bracket ')'
    token = {'type': 'RIGHT'}
    return token, index + 1
def tokenize(line):
    """Convert an expression string into a list of token dicts.

    Exits the process on the first unrecognized character.
    """
    readers = {'+': readPlus, '-': readMinus, '*': readMul,
               '/': readDiv, '(': readL, ')': readR}
    tokens = []
    index = 0
    while index < len(line):
        ch = line[index]
        if ch.isdigit():
            token, index = readNumber(line, index)
        elif ch in readers:
            token, index = readers[ch](line, index)
        else:
            print('Invalid character found: ' + ch)
            exit(1)
        tokens.append(token)
    return tokens
class Evaluation:
    """Recursive-descent evaluator over the token list from tokenize().

    Grammar (lowest to highest precedence):
        plusminus -> muldiv (('+'|'-') muldiv)*
        muldiv    -> bracket (('*'|'/') bracket)*
        bracket   -> '(' plusminus ')' | num
        num       -> ['-'] NUMBER | '-' bracket
    """
    def __init__(self,tokens):
        self.itr=0             # cursor into the token list
        self.tokens=tokens
        self.size=len(tokens)
    def num(self):
        """Consume a number, optionally preceded by a unary minus."""
        if self.tokens[self.itr]['type']=='MINUS':  # highest precedence: minus as a sign
            if self.tokens[self.itr+1]['type']=='NUMBER':
                self.itr+=2
                return -self.tokens[self.itr-1]['number']
            if self.tokens[self.itr+1]['type']=='LEFT':
                # negated parenthesized expression, e.g. -(2-1)
                self.itr+=1
                return -self.bracket()
        if self.tokens[self.itr]['type']=='NUMBER':  # plain number
            self.itr+=1
            return self.tokens[self.itr-1]['number']
        else:
            print('Invalid syntax:',self.tokens[self.itr])
            exit(1)
    def bracket(self):
        """Consume a parenthesized sub-expression, or fall back to num()."""
        ret=None
        if self.tokens[self.itr]['type']=='LEFT':
            self.itr+=1
            ret=self.plusminus()
            if self.itr>=self.size or self.tokens[self.itr]['type']!='RIGHT':
                print('Invalid syntax: There is lack of right bracket')
                if self.itr<self.size:
                    print(self.tokens[self.itr]['type'])
                exit(1)
            self.itr+=1  # step past the closing bracket
        else:
            ret=self.num()
        return ret
    def muldiv(self):
        """Left-associative * and / over bracket()."""
        ans=self.bracket()
        while self.itr<self.size and self.tokens[self.itr]['type'] in ['MUL','DIV']:
            self.itr+=1
            if self.tokens[self.itr-1]['type']=='MUL':
                ans*=self.bracket()
            if self.tokens[self.itr-1]['type']=='DIV':
                denomi=self.bracket()
                if denomi==0:  # explicit check: fail with a message, not ZeroDivisionError
                    print('Divide by zero')
                    exit(1)
                ans/=denomi
        return ans
    def plusminus(self):
        """Left-associative + and - over muldiv(); the entry rule."""
        ans=self.muldiv()
        while self.itr<self.size and self.tokens[self.itr]['type'] in ['PLUS','MINUS']:
            self.itr+=1
            if self.tokens[self.itr-1]['type']=='PLUS':
                ans+=self.muldiv()
            if self.tokens[self.itr-1]['type']=='MINUS':
                ans-=self.muldiv()
        return ans
    def calc(self):
        """Evaluate the whole token list from the start."""
        self.itr=0
        return self.plusminus()
def evaluate(tokens):
    """Evaluate a token list produced by tokenize() and return the numeric result."""
    evaluator = Evaluation(tokens)
    return evaluator.calc()
def test(line):
    """Run one expression through the tokenizer/evaluator and compare with eval()."""
    actualAnswer = evaluate(tokenize(line))
    # Reference result; eval() is acceptable here because inputs are
    # hard-coded test expressions, never user data.
    expectedAnswer = eval(line)
    if abs(actualAnswer - expectedAnswer) < 1e-8:
        print("PASS! (%s = %f)" % (line, expectedAnswer))
    else:
        print("FAIL! (%s should be %f but was %f)" % (line, expectedAnswer, actualAnswer))
# Add more tests to this function :)
def runTest():
    """Run the built-in expression test suite (prints PASS/FAIL per case)."""
    print("==== Test started! ====")
    test("0.3")
    test("1+2")
    test("1.0+2.1-3")
    test("3.0+4*2-1/5*2")
    test("3.0+4*2-1/5*2/3")
    test("3.0+4*2-1/5*4*5")
    test("-3.0+4*-2-1/5*2")# advanced: multiplying by a negative number
    test("-3.0+4*2-1/-5*2")# advanced: dividing by a negative number
    # test cases containing parentheses
    test("(2+3)")
    test("(2+3)*4")
    test("(3.0+4*(2-1))/5")
    test("3.0+4/2/3/5*2*(2-1)/5")
    test("(3.0+4*(3.0+4*(3.0+4*(3.0+4*(2-1)))))/5")
    print("==== Test finished! ====\n")
    print("==== 発展:括弧の前にマイナスが入ってる物を掛け算割り算するパターン(とてもきつい) ====")
    test("(3.0+4*-(2-1))/5")
    print("==== 発展CLEAR!!!====\n")
runTest()

# Interactive calculator loop: read an expression, tokenize, evaluate, print.
while True:
    print('> ', end="")
    try:
        line = input()
    except EOFError:
        # Exit cleanly when stdin is exhausted (Ctrl-D / piped input)
        # instead of dying with a traceback.
        break
    tokens = tokenize(line)
    answer = evaluate(tokens)
    print("answer = %f\n" % answer)
import atexit
import os
# Running IPython instance. NOTE(review): this module is presumably loaded
# from an IPython startup/config file, where get_ipython() is available.
ip = get_ipython()
LIMIT = 100000 # limit the size of the history (max lines kept in the file)
def save_history():
    """save the IPython history to a plaintext file"""
    histfile = os.path.join(ip.profile_dir.location, "history.txt")
    print("Saving plaintext history to %s" % histfile)
    # Load what earlier sessions wrote. We rewrite the whole file (rather
    # than opening with mode='a') so it can be truncated to LIMIT lines.
    all_lines = []
    if os.path.exists(histfile):
        with open(histfile, 'r') as histf:
            all_lines = histf.readlines()
    # Append this session's entries; each history record's third field is
    # the input source text.
    all_lines.extend(entry[2] + '\n' for entry in ip.history_manager.get_range())
    with open(histfile, 'w') as histf:
        # keep only the most recent LIMIT entries
        histf.writelines(all_lines[-LIMIT:])

# Persist the history automatically when the interpreter exits.
atexit.register(save_history)
|
# Alien_Run: a 400x400 playfield (plus a 32px status bar at the bottom).
# The player dodges four bouncing enemies and collects coins for score;
# the high score is persisted in a file named "score".
import pygame,time,keyboard, random
pygame.init()
screen= pygame.display.set_mode((400,432))
pygame.display.set_caption("Alien_Run")
# Sprite assets (loaded from the working directory).
ball=pygame.image.load(r'alien.png')
enemy=pygame.image.load(r'military.png')
coin=pygame.image.load(r'coin.png')
background=pygame.image.load(r'bg.jpg')
px,py=2,2        # player position (top-left corner)
d=1              # current movement direction: 1=left, 2=right, 3=up, 4=down
ex=0             # set to 1 when the player touches an enemy (game over)
score=0
Highscore=0
font=pygame.font.Font('freesansbold.ttf',24)
text=font.render('Press "ENTER" to start!',True,(250,250,0),(255,0,0))
textrect=text.get_rect()
textrect.center=(200,416)
coinx,coiny=random.randint(10,360),random.randint(10,360)
st=0             # 0 = waiting on start screen, 1 = playing
# Load the persisted high score; create the file on first run.
# NOTE(review): the file handle from open("score") is never closed, and the
# bare except also hides e.g. permission errors -- worth tightening.
try:
    Highscore = int(open("score").read())
except:
    file = open("score","w")
    file.write(str(0))
    file.close()
    Highscore = 0
# Four enemies with random positions and fixed initial diagonal directions.
posx,posy,dirx,diry=[],[],[1,1,-1,-1],[1,-1,-1,1]
for i in range(4):
    posx.append(random.randint(50,369))
    posy.append(random.randint(50,369))
running=True     # NOTE(review): unused; the loop below runs on `while 1`
while 1:
    # --- draw frame: background, arena border, HUD, sprites ---
    screen.fill((205,0,255))
    screen.blit(background,(0,0))
    pygame.draw.line(screen,(50,180,0),(0,400),(400,400),4)
    pygame.draw.line(screen,(50,180,0),(0,0),(400,0),4)
    pygame.draw.line(screen,(50,180,0),(0,400),(0,0),4)
    pygame.draw.line(screen,(50,180,0),(398,0),(398,400),4)
    screen.blit(text,textrect)
    screen.blit(ball,(px,py))
    screen.blit(coin,(coinx,coiny))
    for i in range(4):
        screen.blit(enemy,(posx[i],posy[i]))
    # Window close handling.
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            pygame.quit()
            quit()
    pygame.display.update()
    # On the start screen: keep rendering until ENTER is pressed.
    if st==0:
        if keyboard.is_pressed('enter'):
            st=1
        else:
            continue
    text=font.render(f'Score: {score} Highscore: {Highscore}',True,(250,250,0),(255,0,0))
    textrect=text.get_rect()
    textrect.center=(200,416)
    time.sleep(0.007)  # crude frame-rate cap
    # Latest arrow key wins; the player keeps moving in direction `d`.
    if keyboard.is_pressed('down'):
        d=4
    elif keyboard.is_pressed('left'):
        d=1
    elif keyboard.is_pressed('up'):
        d=3
    elif keyboard.is_pressed('right'):
        d=2
    # Move the player 2px per frame, clamped to the arena.
    if d==1 and px>=3:
        px-=2
    elif d==2 and px<=372:
        px+=2
    elif d==3 and py>=3:
        py-=2
    elif d==4 and py<=372:
        py+=2
    # Move enemies 1px per frame, bouncing off the arena walls.
    for i in range(4):
        if posx[i]<=3 and dirx[i]==-1:
            dirx[i]=1
        elif posx[i]>=372 and dirx[i]==1:
            dirx[i]=-1
        if posy[i]<=3 and diry[i]==-1:
            diry[i]=1
        elif posy[i]>=373 and diry[i]==1:
            diry[i]=-1
        posx[i]+=dirx[i]
        posy[i]+=diry[i]
        # Enemy collision (bounding-box distance): pause, then flag game over.
        if abs(px-posx[i])<=18 and abs(py-posy[i])<=18:
            time.sleep(1)
            ex=1
    # Coin pickup: add score, persist a new high score, respawn the coin.
    if abs(px-coinx)<=20 and abs(py-coiny)<=20:
        score+=10
        if score>Highscore:
            Highscore=score
            file = open("score","w")
            file.write(str(Highscore))
            file.close()
        coinx,coiny=random.randint(10,360),random.randint(10,360)
    # Game over: reset everything and return to the start screen.
    if ex==1:
        score=0
        st=0
        posx,posy,dirx,diry=[],[],[1,1,-1,-1],[1,-1,-1,1]
        for i in range(4):
            posx.append(random.randint(50,369))
            posy.append(random.randint(50,369))
        px,py=2,2
        coinx,coiny=random.randint(10,360),random.randint(10,360)
        ex=0
        text=font.render('Press "ENTER" to start!',True,(250,250,0),(255,0,0))
        textrect=text.get_rect()
        textrect.center=(200,416)
|
def part_one():
    """Advent of Code day 1, part 1: fuel per module is floor(mass / 3) - 2.

    Reads one mass per line from the file 'input', prints the total fuel
    requirement, and returns it as an int (return value added for testability;
    existing callers that ignore it are unaffected).
    """
    with open('input') as mass:
        # Streaming sum instead of building an intermediate list.
        total = sum(int(float(line.strip()) / 3) - 2 for line in mass)
    print(str(total))
    return total
def part_two():
    """Advent of Code day 1, part 2: fuel itself requires fuel.

    For each module mass, repeatedly apply fuel = floor(fuel / 3) - 2 and sum
    every positive amount (once the requirement drops to zero or below, no
    further fuel is needed). Reads the file 'input', prints the total, and
    returns it as an int.
    """
    total = 0
    with open('input') as mass:
        for line in mass:
            # First step parses the text; subsequent steps stay numeric
            # (the original round-tripped through str()/float() each pass).
            fuel = int(float(line.strip()) / 3) - 2
            while fuel > 0:
                total += fuel
                fuel = int(fuel / 3) - 2
    print(str(total))
    return total
|
#
#
#
#Atzamis Iosif, 3094
#Dedousis Andreas , 3018
#Kardoulakis Nikos, 3086
#
#
import socket
import sys
import random
import pickle
import os
import re
import subprocess
import urllib
import time
from Crypto import Random
from Crypto.Cipher import AES
import smtplib
def email_authentication():
    """Email a random numeric verification code to the hard-coded recipients.

    Returns the code (int) on success, or None if sending failed.
    NOTE(review): the Gmail credentials here are placeholders committed to
    source -- they should come from configuration, not code.
    """
    gmail_user = 'aaa@gmail.com'
    gmail_password = 'oo000'
    from_ = gmail_user
    to = ['yyy@gmail.com', 'xxx@gmail.com']
    code = random.randint(1000, 50000)
    email_text = "Once you receive the code.\n Enter the verification code: " + str(code)
    subject = 'AUTHENTICATION'
    message = 'Subject: %s\n\n %s' % (subject, email_text)
    try:
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.ehlo()
        server.starttls()
        server.login(gmail_user, gmail_password)
        server.sendmail(from_, to, message)
        server.close()
        print("AUTHENTICATION SEND ")
        return code
    except (smtplib.SMTPException, OSError):
        # Fixed: the original used a bare `except:` (which also swallowed
        # programming errors) and a Python-2-only print statement.
        print('Something went wrong...')
def pad(s):
    """Zero-pad byte string *s* up to a multiple of AES.block_size.

    If len(s) is already a multiple of the block size, a full extra block
    of NUL bytes is appended (same as the original behavior).
    """
    fill = AES.block_size - len(s) % AES.block_size
    return s + b"\0" * fill
def encrypt(message, key, key_size=256):
    """AES-CBC-encrypt *message* with *key*; returns iv + ciphertext.

    A fresh random IV is generated per call and prepended to the result.
    `key_size` is unused; it is kept only for call-site compatibility.
    """
    padded = pad(message)
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    ciphertext = cipher.encrypt(padded)
    return iv + ciphertext
def encrypt_file(file_name, key):
    """Encrypt the contents of *file_name* with *key* into '<file_name>.enc'."""
    with open(file_name, 'rb') as source:
        ciphertext = encrypt(source.read(), key)
    with open(file_name + ".enc", 'wb') as target:
        target.write(ciphertext)
############
# ping :
#   Measures the network distance from this relay to a target host.
#
# Argument num : number of ICMP echo requests to send
# Argument ip2 : IP address or hostname of the target
#
# return argument info : dict with info[0] = direct RTT in ms (float)
#                        and info[1] = traceroute hop count (int)
############
def ping(num, ip2):
    """Return {0: direct RTT in ms, 1: hop count} for host *ip2* using `num` pings."""
    # Send `num` echo requests with a 100s overall deadline and capture output.
    ping_response = subprocess.Popen(["/bin/ping",str(ip2), "-c%d"%num, "-w100"],stdout=subprocess.PIPE).stdout.read()
    # Count traceroute output lines to estimate the hop count; the header
    # line is subtracted just below.
    # NOTE(review): the "" sentinel matches Python 2 text pipes; on Python 3
    # stdout yields bytes, so this iter() would never terminate -- verify
    # which interpreter this module targets.
    traceroute=subprocess.Popen(["traceroute", "-w100",str(ip2)],stdout=subprocess.PIPE)
    hops=sum(1 for _ in iter(traceroute.stdout.readline,""))
    hops= hops-1
    # Pull the first "time=... ms" value out of the ping output.
    RTT = re.search("time=(.*) ms",str(ping_response))
    direct_RTT = float(RTT.group(1))
    info = {}
    info[0] = direct_RTT
    info[1] = int(hops)
    return info
############
#
# download :
#   Fetches the file at the given URL, AES-encrypts it on disk as
#   'file_relay_to_end<ext>.enc', and returns the encryption key used.
#
############
def download(url):
    """Download *url* to 'file_relay_to_end<ext>', encrypt it, and return the AES key."""
    url=url.replace(" ",'')
    end_file=url[-4:]  # assumes a dot-plus-three-letter extension like '.png' -- TODO confirm
    # Hard-coded 256-bit AES key shared with the receiving side.
    # NOTE(review): shipping the key in source defeats the encryption; verify
    # this is intentional for the assignment.
    key = b'\xbf\xc0\x85)\x10nc\x94\x02)j\xdf\xcb\xc4\x94\x9d(\x9e[EX\xc8\xd5\xbfI{\xa2$\x05(\xd5\x18'
    # NOTE(review): time.clock() (removed in Python 3.8) and
    # urllib.urlretrieve (Python 2 API) date this module to Python 2.
    start_time = time.clock()
    urllib.urlretrieve(url,'file_relay_to_end'+end_file)
    #urllib.urlretrieve('https://www.youtube.com/yt/brand/media/image/YouTube-logo-full_color.png', 'file_relay.png')
    print('File downloaded in %s sec' % str(time.clock() - start_time))
    encrypt_file('file_relay_to_end'+end_file,key)
    return key
# Relay server main loop (Python 2 syntax: `print >>` statements).
RECV_BUFFER_SIZE = 1024
port= random.randint(1024,49151)  # random registered-range listening port
b=""                              # accumulates raw bytes of the current request
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (socket.gethostbyname(socket.gethostname()), int(port))
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
while True:
    # Wait for a connection
    print >>sys.stderr, 'waiting for a connection'
    connection, client_address = sock.accept()
    try:
        print >>sys.stderr, 'connection from', client_address
        # Receive the data in small chunks and retransmit it
        while True:
            data =connection.recv(RECV_BUFFER_SIZE)
            #print >>sys.stderr, 'received "%s"' % data[0]
            b+=data
            if data:
                # SECURITY NOTE(review): unpickling bytes straight off the
                # network executes attacker-controlled constructors; a safe
                # format (e.g. JSON) should be used for untrusted peers.
                # NOTE(review): this also assumes the whole pickle arrived in
                # one recv-accumulated buffer; a split message would make
                # loads() fail -- verify against the client's send logic.
                received=pickle.loads(b)
                # Request layout: [end-server IP, ping count, mode flag, url]
                end_server_ip = received[0]
                user_ping=received[1]
                down=received[2]
                url=received[3]
                if down==0:
                    # Mode 0: measure RTT/hops to the end server, reply pickled.
                    data=ping(user_ping,end_server_ip)
                    HOPS=int(data[1])
                    RTT=float(data[0])
                    #print("PING to endserver\n")
                    #print("HOPS: %d"%HOPS)
                    #print("RTT: %f\n"%RTT)
                    connection.sendall(pickle.dumps(data))
                else:
                    # Mode 1: download + encrypt the URL's file, email a
                    # one-time code, then stream key, code, size, ciphertext,
                    # and finally remove the temporary files.
                    key=download(url)
                    password=email_authentication()
                    connection.send(key)
                    connection.send(str(password))
                    end_file=url.replace(" ",'')
                    end_file=end_file[-4:]
                    with open('file_relay_to_end'+end_file+'.enc','rb')as myfile:
                        l=myfile.read()
                    size=len(l)
                    connection.send(str(size))
                    connection.sendall((l))
                    os.remove('file_relay_to_end'+end_file+'.enc')
                    os.remove('file_relay_to_end'+end_file)
            else:
                # Empty recv() means the peer closed; reset the buffer for
                # the next client.
                print >>sys.stderr, 'no more data from', client_address
                b=""
                break
    finally:
        # Clean up the connection
        connection.close()
|
# Implementation of the Viterbi Algorithm
"""
viterbi_algo.py: Viterbi Algorithm
Decoding: Given as input an HMM with two hidden states (A, B) and
an observation sequence O, find the most probable sequence of states
Q = q1q2q3q4...qT
Author: Dung Le (dungle@bennington.edu)
Date: 10/17/2017
"""
import numpy as np
def viterbi(observations, states, a, b):
    """Viterbi decoding for an HMM with N hidden states.

    Args:
        observations: sequence of 1-based observation symbols (1..M).
        states: list of state labels; only its length N is used.
        a: (N+1, N) transition matrix -- row 0 holds the start
           probabilities P(state|start); row s+1 holds transitions
           out of state s.
        b: (N, M) emission matrix -- b[s, o-1] = P(observation o | state s).

    Returns:
        (viterbi_prob, best_path): the (N, T) trellis of best path
        probabilities, and a (1, T) array with the argmax state index
        at every time step.
    """
    T = len(observations)
    N = len(states)
    # a probability matrix + a best_path matrix to keep track of the most probable path
    viterbi_prob = np.zeros((N, T))
    best_path = np.zeros((1, T))
    # Initialisation: P(state|start) * P(first observation | state).
    for s in range(N):
        col_index = observations[0] - 1
        viterbi_prob[s, 0] = a[0, s] * b[s, col_index]
    # Recursion: best predecessor for each state at each time step.
    for t in range(1, T):
        col_index = observations[t] - 1
        for s in range(N):
            # since a[0, s] represent the probabilities P(H|start) or P(C|start)
            # here we calculate P(H or C | H or C), therefore, must increment index s1 by 1
            viterbi_prob[s, t] = max(
                viterbi_prob[s1, t - 1] * a[s1 + 1, s] * b[s, col_index]
                for s1 in range(N)
            )
    # Read off the most probable state for every time step.
    # Fixed off-by-one: range(T-1, 0, -1) stopped at t == 1, so
    # best_path[0, 0] was never assigned and always stayed 0.
    for t in range(T - 1, -1, -1):
        best_path[0, t] = np.argmax(viterbi_prob[:, t])
    return (viterbi_prob, best_path)
if __name__ == "__main__":
    # Demo HMM: row 0 of the transition matrix holds the start
    # probabilities; rows 1 and 2 hold transitions out of H and C.
    trans_p = np.array([[0.8, 0.2], [0.7, 0.3], [0.4, 0.6]])
    emit_p = np.array([[0.2, 0.4, 0.4], [0.5, 0.4, 0.1]])
    observations = [3, 1, 3]
    states = ['H', 'C']
    print(viterbi(observations, states, trans_p, emit_p))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.